# preprocess.py (forked from lancopku/SGM)

import os
import argparse
import codecs
import utils
import pickle

parser = argparse.ArgumentParser(description='preprocess.py')
parser.add_argument('-load_data', type=str, required=True,
                    help="input file dir for the data")
parser.add_argument('-save_data', type=str, required=True,
                    help="output file dir for the processed data")
parser.add_argument('-src_vocab_size', type=int, default=50000,
                    help="size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=50000,
                    help="size of the target vocabulary")
parser.add_argument('-src_filter', type=int, default=0,
                    help="drop source sequences longer than this many words (0 disables filtering)")
parser.add_argument('-tgt_filter', type=int, default=0,
                    help="drop target sequences longer than this many words (0 disables filtering)")
parser.add_argument('-src_trun', type=int, default=0,
                    help="truncate source sequences to this length (0 disables truncation)")
parser.add_argument('-tgt_trun', type=int, default=0,
                    help="truncate target sequences to this length (0 disables truncation)")
parser.add_argument('-src_char', action='store_true',
                    help='character-based encoding')
parser.add_argument('-tgt_char', action='store_true',
                    help='character-based decoding')
parser.add_argument('-src_suf', default='src',
                    help="the suffix of the source filename")
parser.add_argument('-tgt_suf', default='tgt',
                    help="the suffix of the target filename")
parser.add_argument('-share', action='store_true',
                    help='share the vocabulary between source and target')
parser.add_argument('-report_every', type=int, default=100000,
                    help="report status every this many sentences")
opt = parser.parse_args()
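
# A minimal example invocation (paths below are illustrative, not taken from
# the original repo):
#
#   python preprocess.py -load_data data/ -save_data data/processed/ \
#       -src_vocab_size 50000 -tgt_vocab_size 50000
#
# Note that -load_data and -save_data are concatenated directly with the
# split filenames (e.g. 'train.src'), so both should end in a trailing '/'.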

def makeVocabulary(filename, trun_length, filter_length, char, vocab, size):
    print("%s: length limit = %d, truncate length = %d" % (filename, filter_length, trun_length))
    max_length = 0
    # codecs.open() decodes the file to unicode automatically (a Python 2.x
    # idiom; on Python 3, open(filename, encoding='utf-8') behaves the same)
    with codecs.open(filename, 'r', 'utf-8') as f:
        for sent in f.readlines():
            if char:
                tokens = list(sent.strip())
            else:
                tokens = sent.strip().split()
            # filter_length defaults to 0, which disables this check; otherwise
            # sentences with more words than filter_length are skipped and
            # contribute nothing to the vocabulary (note the word count is used
            # even in character mode)
            if 0 < filter_length < len(sent.strip().split()):
                continue
            max_length = max(max_length, len(tokens))
            # if the sentence exceeds trun_length, keep only the first
            # trun_length tokens
            if trun_length > 0:
                tokens = tokens[:trun_length]
            for word in tokens:
                vocab.add(word)
    print('Max length of %s = %d' % (filename, max_length))
    if size > 0:
        originalSize = vocab.size()
        vocab = vocab.prune(size)
        print('Created dictionary of size %d (pruned from %d)' %
              (vocab.size(), originalSize))
    return vocab
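
# The utils.Dict interface assumed throughout this file (inferred from how it
# is called here, not verified against utils.py): add(token) inserts a token
# and tracks its frequency; size() returns the current vocabulary size;
# prune(k) returns a new Dict keeping the k most frequent tokens;
# convertToIdx(words, unkWord, bosWord=None, eosWord=None) maps tokens to ids,
# optionally wrapping the sequence in BOS/EOS; writeFile(path) writes the
# vocabulary to disk. utils is also assumed to provide the PAD_WORD, UNK_WORD,
# BOS_WORD, and EOS_WORD constants used below.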

def saveVocabulary(name, vocab, file):
    # note: main() currently calls writeFile() directly, so this helper is unused
    print('Saving ' + name + ' vocabulary to \'' + file + '\'...')
    vocab.writeFile(file)

def makeData(srcFile, tgtFile, srcDicts, tgtDicts, save_srcFile, save_tgtFile, lim=0):
    sizes = 0
    count, empty_ignored, limit_ignored = 0, 0, 0
    print('Processing %s & %s ...' % (srcFile, tgtFile))
    srcF = open(srcFile, encoding='utf8')
    tgtF = open(tgtFile, encoding='utf8')
    srcIdF = open(save_srcFile + '.id', 'w')
    tgtIdF = open(save_tgtFile + '.id', 'w')
    srcStrF = open(save_srcFile + '.str', 'w', encoding='utf8')
    tgtStrF = open(save_tgtFile + '.str', 'w', encoding='utf8')

    while True:
        sline = srcF.readline()
        tline = tgtF.readline()

        # normal end of file
        if sline == "" and tline == "":
            break

        # source or target does not have the same number of lines
        if sline == "" or tline == "":
            print('WARNING: source and target do not have the same number of sentences')
            break

        sline = sline.strip()
        tline = tline.strip()

        # source and/or target are empty
        if sline == "" or tline == "":
            print('WARNING: ignoring an empty line (' + str(count + 1) + ')')
            empty_ignored += 1
            continue

        sline = sline.lower()
        tline = tline.lower()

        # with src_char/tgt_char set, split into characters instead of words
        srcWords = sline.split() if not opt.src_char else list(sline)
        tgtWords = tline.split() if not opt.tgt_char else list(tline)

        # keep the pair only if both sides are within the filter lengths
        if (opt.src_filter == 0 or len(sline.split()) <= opt.src_filter) and \
           (opt.tgt_filter == 0 or len(tline.split()) <= opt.tgt_filter):
            # if a truncation length is set, cut the sequence down to it
            if opt.src_trun > 0:
                srcWords = srcWords[:opt.src_trun]
            if opt.tgt_trun > 0:
                tgtWords = tgtWords[:opt.tgt_trun]

            srcIds = srcDicts.convertToIdx(srcWords, utils.UNK_WORD)
            tgtIds = tgtDicts.convertToIdx(tgtWords, utils.UNK_WORD, utils.BOS_WORD, utils.EOS_WORD)

            srcIdF.write(" ".join(list(map(str, srcIds))) + '\n')
            tgtIdF.write(" ".join(list(map(str, tgtIds))) + '\n')

            # in word mode, separate tokens with spaces; in character mode,
            # join them back without spaces
            if not opt.src_char:
                srcStrF.write(" ".join(srcWords) + '\n')
            else:
                srcStrF.write("".join(srcWords) + '\n')
            if not opt.tgt_char:
                tgtStrF.write(" ".join(tgtWords) + '\n')
            else:
                tgtStrF.write("".join(tgtWords) + '\n')

            sizes += 1
        else:
            limit_ignored += 1

        count += 1
        if count % opt.report_every == 0:
            print('... %d sentences prepared' % count)

    srcF.close()
    tgtF.close()
    srcStrF.close()
    tgtStrF.close()
    srcIdF.close()
    tgtIdF.close()

    print('Prepared %d sentences (%d ignored as empty, %d ignored as over the length limit)' %
          (sizes, empty_ignored, limit_ignored))

    return {'srcF': save_srcFile + '.id', 'tgtF': save_tgtFile + '.id',
            'original_srcF': save_srcFile + '.str', 'original_tgtF': save_tgtFile + '.str',
            'length': sizes}
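
# Each call to makeData produces four files per split: '<name>.id' holds
# space-separated token ids (the target side is wrapped in BOS/EOS by
# convertToIdx), and '<name>.str' holds the lowercased, tokenized text for
# later inspection or evaluation. The returned dict records these paths plus
# the number of surviving sentence pairs.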

def main():
    if not os.path.exists(opt.save_data):
        os.makedirs(opt.save_data)

    dicts = {}
    train_src, train_tgt = opt.load_data + 'train.' + opt.src_suf, opt.load_data + 'train.' + opt.tgt_suf
    valid_src, valid_tgt = opt.load_data + 'valid.' + opt.src_suf, opt.load_data + 'valid.' + opt.tgt_suf
    test_src, test_tgt = opt.load_data + 'test.' + opt.src_suf, opt.load_data + 'test.' + opt.tgt_suf

    save_train_src, save_train_tgt = opt.save_data + 'train.' + opt.src_suf, opt.save_data + 'train.' + opt.tgt_suf
    save_valid_src, save_valid_tgt = opt.save_data + 'valid.' + opt.src_suf, opt.save_data + 'valid.' + opt.tgt_suf
    save_test_src, save_test_tgt = opt.save_data + 'test.' + opt.src_suf, opt.save_data + 'test.' + opt.tgt_suf
    src_dict, tgt_dict = opt.save_data + 'src.dict', opt.save_data + 'tgt.dict'

    if opt.share:
        assert opt.src_vocab_size == opt.tgt_vocab_size
        print('Building source and target vocabulary...')
        # 'src' and 'tgt' start out pointing at one shared Dict, so tokens from
        # both sides accumulate in the same vocabulary before the final pruning
        dicts['src'] = dicts['tgt'] = utils.Dict([utils.PAD_WORD, utils.UNK_WORD, utils.BOS_WORD, utils.EOS_WORD])
        dicts['src'] = makeVocabulary(train_src, opt.src_trun, opt.src_filter, opt.src_char, dicts['src'], opt.src_vocab_size)
        dicts['src'] = dicts['tgt'] = makeVocabulary(train_tgt, opt.tgt_trun, opt.tgt_filter, opt.tgt_char, dicts['tgt'], opt.tgt_vocab_size)
    else:
        print('Building source vocabulary...')
        # seed the dictionary with PAD, UNK, BOS, EOS (ids 0-3, in that order)
        dicts['src'] = utils.Dict([utils.PAD_WORD, utils.UNK_WORD, utils.BOS_WORD, utils.EOS_WORD])
        dicts['src'] = makeVocabulary(train_src, opt.src_trun, opt.src_filter, opt.src_char, dicts['src'], opt.src_vocab_size)
        print('Building target vocabulary...')
        dicts['tgt'] = utils.Dict([utils.PAD_WORD, utils.UNK_WORD, utils.BOS_WORD, utils.EOS_WORD])
        dicts['tgt'] = makeVocabulary(train_tgt, opt.tgt_trun, opt.tgt_filter, opt.tgt_char, dicts['tgt'], opt.tgt_vocab_size)

    print('Preparing training ...')
    train = makeData(train_src, train_tgt, dicts['src'], dicts['tgt'], save_train_src, save_train_tgt)
    print('Preparing validation ...')
    valid = makeData(valid_src, valid_tgt, dicts['src'], dicts['tgt'], save_valid_src, save_valid_tgt)
    print('Preparing test ...')
    test = makeData(test_src, test_tgt, dicts['src'], dicts['tgt'], save_test_src, save_test_tgt)

    print('Saving source vocabulary to \'' + src_dict + '\'...')
    dicts['src'].writeFile(src_dict)
    print('Saving target vocabulary to \'' + tgt_dict + '\'...')
    dicts['tgt'].writeFile(tgt_dict)

    data = {'train': train, 'valid': valid,
            'test': test, 'dict': dicts}
    pickle.dump(data, open(opt.save_data + 'data.pkl', 'wb'))

if __name__ == "__main__":
    main()
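
# A minimal sketch of how a downstream script might reload the result; the key
# layout mirrors the `data` dict assembled in main(), and the path below is
# illustrative. Unpickling requires the utils module to be importable, since
# Dict instances are stored inside data['dict'].
#
#   import pickle
#   with open('data/processed/data.pkl', 'rb') as f:
#       data = pickle.load(f)
#   src_vocab = data['dict']['src']        # utils.Dict instance
#   train_src_ids = data['train']['srcF']  # path to the 'train.<src_suf>.id' file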