fasttext_emb.py
"""Build a PyTorch embedding layer from pretrained FastText word vectors,
caching the looked-up vectors on disk in the textual .vec format
(optionally gzip-compressed)."""

import gzip
from pathlib import Path

import torch
import torch.nn as nn
import fasttext


def _save_to(path: Path, lines: list):
    """Write `lines` to a text file, one per line; gzip-compress if the
    suffix is .gz."""
    if path.suffix == ".gz":
        with gzip.open(str(path), "wt") as f:
            for line in lines:
                f.write(line)
                f.write('\n')
    else:
        with open(str(path), 'w') as f:
            for line in lines:
                f.write(line)
                f.write('\n')


def _load_from(path: Path) -> list:
    """Read all lines from a text file, transparently handling .gz."""
    if path.suffix == ".gz":
        with gzip.open(str(path), "rt") as f:
            lines = f.readlines()
    else:
        with open(str(path), 'r') as f:
            lines = f.readlines()
    return lines

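
# Both helpers dispatch on the file suffix, so a single call site handles
# plain and compressed files alike, e.g. (hypothetical paths, not part of
# the module):
#
#   _save_to(Path("vocab.txt"), ["a", "b"])     # plain text
#   _save_to(Path("vocab.txt.gz"), ["a", "b"])  # gzip-compressed
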

def _save_vec_file(path: Path, words: list, vectors: list):
    """Save words and their vectors in .vec format: `word v1 v2 ... vn`."""
    lines = []
    for word, vector in zip(words, vectors):
        vector_str = ' '.join([str(num) for num in vector])
        lines.append(f"{word} {vector_str}")
    _save_to(path, lines)


def _load_vec_file(path: Path) -> tuple:
    """Load a .vec file; return ({word: row index}, [row vectors])."""
    word_indices = {}
    vectors = []
    lines = _load_from(path)
    for i, line in enumerate(lines):
        parts = line.split()
        word = parts[0]
        vector = [float(v) for v in parts[1:]]
        vectors.append(vector)
        word_indices[word] = i
    return word_indices, vectors

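
# A minimal round-trip sketch of the two .vec helpers above (illustrative
# only; the words, vectors, and path are hypothetical):
#
#   _save_vec_file(Path("/tmp/demo.vec.gz"), ["<PAD>", "cat"],
#                  [[0.0, 0.0], [0.1, -0.2]])
#   indices, vectors = _load_vec_file(Path("/tmp/demo.vec.gz"))
#   assert indices == {"<PAD>": 0, "cat": 1}
#   assert vectors == [[0.0, 0.0], [0.1, -0.2]]
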

# Lazily loaded FastText model, shared across calls to get_word_vectors().
ft_model = None


def load_embedding_weight_matrix(model_file_path, vec_file_path: Path, words: list) -> nn.Embedding:
    """Build an nn.Embedding for `words`, reusing cached vectors when the
    .vec file exists and falling back to the FastText model otherwise."""
    if vec_file_path.exists():
        print(f"Loading FastText vectors from {vec_file_path}")
        indices, vectors = _load_vec_file(vec_file_path)
        new_words = [word for word in words if word not in indices]
        new_vectors = {}
        if len(new_words) > 0:
            new_vectors = dict(zip(new_words, get_word_vectors(model_file_path, new_words)))
        # Assemble vectors in the order of `words` so that embedding rows
        # stay aligned with the vocabulary indices even when new words are
        # interleaved with cached ones.
        word_vectors = [vectors[indices[word]] if word in indices else new_vectors[word]
                        for word in words]
    else:
        word_vectors = get_word_vectors(model_file_path, words)
        if words[0] == '<PAD>':
            # Zero the padding vector so padded positions contribute nothing.
            word_vectors[0] = [0] * len(word_vectors[0])
        _save_vec_file(vec_file_path, words, word_vectors)
    word_matrix = torch.tensor(word_vectors, dtype=torch.float)
    return nn.Embedding.from_pretrained(word_matrix, padding_idx=0)


def get_word_vectors(model_path: Path, words: list) -> list:
    """Look up (or, for OOV words, compute from subwords) the FastText
    vector of each word, loading the .bin model on first use."""
    global ft_model
    if not ft_model:
        print(f"Loading FastText model from {model_path}")
        ft_model = fasttext.load_model(str(model_path))
    return [ft_model.get_word_vector(word) for word in words]
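

# A minimal usage sketch, assuming a pretrained FastText binary such as
# cc.en.300.bin; the paths and vocabulary below are hypothetical, not part
# of the module:
if __name__ == "__main__":
    vocab = ["<PAD>", "the", "cat", "sat"]  # '<PAD>' must sit at index 0
    embedding = load_embedding_weight_matrix(
        Path("cc.en.300.bin"),           # FastText .bin model (assumed path)
        Path("cached_vectors.vec.gz"),   # cache; created on the first run
        vocab,
    )
    print(embedding.weight.shape)  # e.g. torch.Size([4, 300]) for a 300-dim model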