run_lf.py
#!/usr/bin/env python3
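"""Run a user-defined labeling function over a pre-tokenized corpus.

Records arrive as serialized spaCy DocBins (read from docbin_full.json); the
collected results are sent to a payload URL via HTTP PUT.
"""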
import sys
import requests
import json
import spacy
from spacy.tokens import DocBin
from itertools import islice
import inspect
from collections import defaultdict

def run_classification(record_dict_list):
    # Apply the labeling function once per record; records for which the
    # function returns None are skipped entirely.
    lf_results_by_record_id = {}
    for record_dict in record_dict_list:
        label_name = lf(record_dict["data"])
        if label_name is not None:
            lf_results_by_record_id[record_dict["id"]] = [1.0, label_name]
    return lf_results_by_record_id
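
# Illustrative classification result (record id and label are made-up values):
#   {"some-record-id": [1.0, "positive"]}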

def run_extraction(record_dict_list):
    # A generator-based labeling function yields (label_name, start_idx, end_idx)
    # triples, potentially several per record.
    lf_results_by_record_id = defaultdict(list)
    for record_dict in record_dict_list:
        for label_name, start_idx, end_idx in lf(record_dict["data"]):
            lf_results_by_record_id[record_dict["id"]].append(
                [1.0, label_name, start_idx, end_idx]
            )
    return dict(lf_results_by_record_id)
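
# Illustrative extraction result (values are made up): each record id maps to
# a list of [confidence, label, start_idx, end_idx] entries, e.g.
#   {"some-record-id": [[1.0, "person", 0, 2], [1.0, "person", 7, 9]]}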

def run_checks(progress):
    # `progress` arrives as a command line string; it is non-empty only while
    # tokenization is still running.
    if progress:
        print(
            f"Tokenization is still in progress. Currently {progress}% done.",
            flush=True,
        )
        print("Function will run with reduced set.", flush=True)

# https://www.delftstack.com/howto/python/python-split-list-into-chunks/#split-list-in-python-to-chunks-using-the-lambda-islice-method
def chunk_data(lst, chunk_size):
    # Pull chunk_size items at a time; iter() stops once the sentinel
    # (an empty tuple) is produced, i.e. when the data is exhausted.
    lst = iter(lst)
    return iter(lambda: tuple(islice(lst, chunk_size)), ())
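# For example: list(chunk_data([1, 2, 3, 4, 5], 2)) == [(1, 2), (3, 4), (5,)]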

def load_data_dict(record):
    # The serialized DocBin is stored as a hex string prefixed with "\x"
    # (this looks like PostgreSQL's bytea hex output); strip the prefix
    # before decoding.
    if record["bytes"][:2] == "\\x":
        record["bytes"] = record["bytes"][2:]
    else:
        raise ValueError("Unknown byte format in DocBin. Please contact support.")
    byte = bytes.fromhex(record["bytes"])
    doc_bin_loaded = DocBin().from_bytes(byte)
    docs = list(doc_bin_loaded.get_docs(vocab))
    data_dict = {}
    for col, doc in zip(record["columns"], docs):
        data_dict[col] = doc
    # Carry over any remaining (non-tokenized) attributes unchanged.
    for key in record:
        if key in ["record_id", "bytes", "columns"]:
            continue
        data_dict[key] = record[key]
    return data_dict

def parse_data_to_record_dict(record_chunk):
    result = []
    for r in record_chunk:
        result.append({"id": r["record_id"], "data": load_data_dict(r)})
    return result

if __name__ == "__main__":
    _, progress, iso2_code, payload_url = sys.argv
    run_checks(progress)
    print("Preparing data for labeling function.", flush=True)
    # This import will always be highlighted as a potential error: the module
    # `labeling_functions` does not exist at development time; it is injected
    # at runtime.
    from labeling_functions import lf

    vocab = spacy.blank(iso2_code).vocab
    with open("docbin_full.json", "r") as infile:
        docbin_data = json.load(infile)
    # A generator-based labeling function yields spans (extraction); a plain
    # function returns a single label (classification).
    is_extraction = inspect.isgeneratorfunction(lf)
    print("Running labeling function.", flush=True)
    workload = len(docbin_data)
    lf_results_by_record_id = {}
    chunk_size = 100
    for idx, chunk in enumerate(chunk_data(docbin_data, chunk_size)):
        record_dict_list = parse_data_to_record_dict(chunk)
        if is_extraction:
            lf_results_by_record_id.update(run_extraction(record_dict_list))
        else:
            lf_results_by_record_id.update(run_classification(record_dict_list))
        # Report the share of records processed so far; (idx + 1) counts the
        # chunk that was just finished, and the final partial chunk is capped
        # at 1.0.
        progress = min((idx + 1) * chunk_size / workload, 1.0)
        print("progress: ", progress, flush=True)
print("Finished execution.", flush=True)
requests.put(payload_url, json=lf_results_by_record_id)
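
# Example invocation (placeholder values; an empty first argument means
# tokenization has already finished):
#   python3 run_lf.py "" en https://example.com/payload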