-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathextract_sentences.py
More file actions
165 lines (130 loc) · 4.14 KB
/
extract_sentences.py
File metadata and controls
165 lines (130 loc) · 4.14 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
import unicodecsv as csv
from pickle import Pickler, Unpickler
import sys
import os
import errno
import shutil
import string
import re
import pdb
# Data directory to write out to (per-category subdirs are created under it)
DATA_DIR = "data/blog"
# Raw data source: "|"-delimited CSV of (document, label) rows
SOURCE = "data/training.csv"
# Data Vocab Source: one vocabulary token per line
VOCAB_SOURCE = "data/vocab/r.4997.vocab.txt"
# Number of words per extracted phrase.
# NOTE(review): original comment said "sentences", but extract_phrases
# chunks the cleaned *word* list in runs of K.
K = 40
# Max digits for training examples' filenames (zero left-filled)
MAX_FILENAME_LEN = 4
# Load the vocabulary file into a set of stripped tokens for O(1) lookup.
def get_vocab_set():
    with open(VOCAB_SOURCE) as vocab_file:
        return set(line.strip() for line in vocab_file)
# Build the shared vocabulary once at import time; clean_word reads it.
vocab_set = get_vocab_set()
# Resolve a directory name into its full path under the DATA_DIR root.
def get_data_path(dirname):
    full_path = os.path.join(DATA_DIR, dirname)
    return full_path
# Equivalent to mkdir -p
def mkdir_p(directory):
    """Create directory, including missing parents; no-op if it exists.

    The original used os.mkdir, which is NOT "mkdir -p": it cannot create
    intermediate directories, so creating e.g. data/blog/pos failed when
    data/blog was absent. os.makedirs matches the stated intent.
    Any OSError other than "already exists" is re-raised.
    """
    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:
            # Bare raise preserves the original traceback.
            raise
# Equivalent to rm -rf dir
def rm_rf(directory):
    """Recursively delete directory; silently succeed if it is missing.

    Best-effort semantics mirror `rm -rf`: a nonexistent tree is fine.
    (Original used Python-2-only `except OSError, e` and never used `e`.)
    """
    try:
        shutil.rmtree(directory)
    except OSError:
        pass
# Sanitize each sentence to map to a
# vocab our neural net implementation
# can recognize
def clean_word(word):
    """Return word if it is in vocab_set, else the unknown token "UUUNKKK".

    vocab_set is the module-level set built at import time. The original
    declared `global vocab_set`, but `global` is only needed to rebind a
    name; a read-only lookup does not require it.
    """
    return word if word in vocab_set else "UUUNKKK"
def only_ascii(char):
    """Filter predicate used with filter(): keep char or drop it.

    Keeps the space character (32) and code points 48-127 (digits,
    letters, and upper-ASCII punctuation); everything else — control
    chars, low punctuation 33-47, and non-ASCII — maps to ''.
    """
    code = ord(char)
    if code == 32 or 48 <= code <= 127:
        return char
    return ''
# Split up single blog post into sentences, then
# get a K run of sentences
# NOTE(review): despite the comment above, this chunks the cleaned *word*
# list into runs of K words, not sentences (see the range(..., K) loop).
def extract_phrases(doc):
    global K  # read-only; `global` is not strictly required here
    # Python 2 semantics: filter() over a str returns a str, so the str()
    # wrapper below is a no-op. Under Python 3 filter() returns an iterator
    # and this line would break — do not port naively.
    in_ascii = filter(only_ascii, doc)
    # Two-argument str.translate with string.maketrans("","") is the
    # Python 2 idiom for "delete these chars": strips all punctuation.
    doc = str(in_ascii).translate(string.maketrans("",""), string.punctuation)
    # Replace every digit with the " DG " marker token.
    doc = re.sub("\d", " DG ", doc)
    # Finish doc processing; now do words
    # Lowercase, split on single spaces, map out-of-vocab words to UUUNKKK.
    # (Py2 map() returns a list, so len()/slicing below are valid.)
    words = map(clean_word, doc.lower().split(" "))
    paginated_lines = []
    # Emit consecutive K-word runs, re-joined into one phrase string each.
    for s in range(0, len(words), K):
        next_k_words = words[s:s+K]
        phrase = " ".join(next_k_words)
        paginated_lines.append(phrase.strip())
    return paginated_lines
# Generates filenames given an integer
# numeric counter. Left-fills based on the
# global constant MAX_FILENAME_LEN
def generate_filename(counter):
    """Return counter as a ".txt" filename, zero-padded to MAX_FILENAME_LEN.

    e.g. 7 -> "0007.txt"; counters wider than MAX_FILENAME_LEN digits are
    used unpadded, exactly as the original manual-padding branch did.
    str.zfill implements the left-fill directly; the `global` declaration
    was unnecessary for a read-only constant.
    """
    return str(counter).zfill(MAX_FILENAME_LEN) + ".txt"
# Writes a set of sentences to text
def write_to_file(fname, phrase):
    """Write phrase to fname, creating/truncating the file.

    Empty phrases are skipped entirely (no file is created/touched).
    Fix: the original opened the file without ever closing it; the
    context manager guarantees the handle is flushed and closed.
    Returns None in all cases.
    """
    if len(phrase) == 0:
        return
    with open(fname, "w") as f:
        f.write(phrase)
if __name__ == "__main__":
rm_rf(get_data_path("pos"))
rm_rf(get_data_path("neg"))
rm_rf(get_data_path("unlabeled"))
mkdir_p(get_data_path("pos"))
mkdir_p(get_data_path("neg"))
mkdir_p(get_data_path("unlabeled"))
with open(SOURCE, "r") as fin:
reader = csv.reader(fin, encoding="utf-8", delimiter="|")
# Start at "0001.txt"
p_counter = 1
n_counter = 1
u_counter = 1
line_counter = 0
for x in reader:
line_counter += 1
# Extract sentences
doc = x[0]
label = int(x[1])
if label == 1:
# Output doc to pos file dir
phrases = extract_phrases(doc)
for phrase in phrases:
incr_filename = generate_filename(p_counter)
success = write_to_file(get_data_path("pos") + "/" + incr_filename, phrase)
p_counter += 1
elif label == 0:
# Output doc to neg file dir
phrases = extract_phrases(doc)
for phrase in phrases:
incr_filename = generate_filename(n_counter)
success = write_to_file(get_data_path("neg") + "/" + incr_filename, phrase)
n_counter += 1
else:
# Other
# Output doc to unlabeled file
phrases = extract_phrases(doc)
for phrase in phrases:
incr_filename = generate_filename(u_counter)
success = write_to_file(get_data_path("unlabeled") + "/" + incr_filename, phrase)
u_counter += 1
print "lines: ", str(line_counter)