@@ -32,12 +32,12 @@ def read_dataset(filename):
     labels_file.write(i2w[i] + '\n')
 
 # Start DyNet and define trainer
-model = dy.Model()
+model = dy.ParameterCollection()
 trainer = dy.SimpleSGDTrainer(model, learning_rate=0.1)
 
 # Define the model
 W_c_p = model.add_lookup_parameters((nwords, EMB_SIZE)) # Word weights at each position
-W_w_p = model.add_parameters((nwords, EMB_SIZE)) # Weights of the softmax
+W_w = model.add_parameters((nwords, EMB_SIZE)) # Weights of the softmax
 
 # Calculate the loss value for the entire sentence
 def calc_sent_loss(sent):
@@ -48,8 +48,6 @@ def calc_sent_loss(sent):
   #as we need to predict the eos as well, the future window at that point is N past it
   emb = [W_c_p[x] for x in sent]
 
-  W_w = dy.parameter(W_w_p)
-
   # Step through the sentence
   all_losses = []
   for i, my_emb in enumerate(emb):
@@ -74,7 +72,7 @@ def calc_sent_loss(sent):
     train_words += len(sent)
     my_loss.backward()
     trainer.update()
-    if (sent_id + 1) % 5000 == 0:
+    if (sent_id + 1) % 500 == 0:
       print("--finished %r sentences" % (sent_id + 1))
   print("iter %r: train loss/word=%.4f, ppl=%.4f, time=%.2fs" % (ITER, train_loss / train_words, math.exp(train_loss / train_words), time.time() - start))
   # Evaluate on dev set
@@ -89,7 +87,7 @@ def calc_sent_loss(sent):
 
 print("saving embedding files")
 with open(embeddings_location, 'w') as embeddings_file:
-  W_w_np = W_w_p.as_array()
+  W_w_a = W_w.as_array()
   for i in range(nwords):
-    ith_embedding = '\t'.join(map(str, W_w_np[i]))
+    ith_embedding = '\t'.join(map(str, W_w_a[i]))
     embeddings_file.write(ith_embedding + '\n')
0 commit comments