-
Notifications
You must be signed in to change notification settings - Fork 68
Expand file tree
/
Copy pathtoken_classification.py
More file actions
52 lines (41 loc) · 1.84 KB
/
token_classification.py
File metadata and controls
52 lines (41 loc) · 1.84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
from typing import Any, Dict, List
from app.pipelines import Pipeline
from thirdai import bolt
from huggingface_hub import hf_hub_download
class TokenClassificationPipeline(Pipeline):
    """Token-classification (NER) pipeline backed by a thirdai bolt
    UniversalDeepTransformer model downloaded from the Hugging Face Hub."""

    def __init__(
        self,
        model_id: str,
    ):
        """Download and load the NER model.

        Args:
            model_id (:obj:`str`):
                Hugging Face Hub repository id; its ``model.bin`` artifact
                is fetched and loaded with the thirdai NER loader.
        """
        # Removed leftover debug print of model_id that polluted stdout.
        model_path = hf_hub_download(model_id, "model.bin", library_name="thirdai")
        self.model = bolt.UniversalDeepTransformer.NER.load(model_path)

    def __call__(self, inputs: str) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`:. The object returned should be like [{"entity_group": "XXX", "word": "some word", "start": 3, "end": 6, "score": 0.82}] containing :
                - "entity_group": A string representing what the entity is.
                - "word": A substring of the original string that was detected as an entity.
                - "start": the offset within `input` leading to `answer`. context[start:stop] == word
                - "end": the ending offset within `input` leading to `answer`. context[start:stop] == word
                - "score": A score between 0 and 1 describing how confident the model is for this entity.
        """
        # Split on single spaces (not .split()) so character offsets can be
        # reconstructed exactly: each word contributes len(word) + 1 separator.
        split_inputs = inputs.split(" ")
        outputs = self.model.predict(split_inputs)
        entities: List[Dict[str, Any]] = []
        offset = 0
        for entity_results, word in zip(outputs, split_inputs):
            # Predictions for each token are ranked; take the top-scoring one.
            best_prediction = entity_results[0]
            entities.append(
                {
                    "entity_group": best_prediction[0],
                    "word": word,
                    "start": offset,
                    "end": offset + len(word),
                    "score": best_prediction[1],
                }
            )
            # Advance past the word and the single space that followed it.
            offset += len(word) + 1
        return entities