@@ -30,15 +30,15 @@ def read_dataset(filename):
 nwords = len(w2i)
 
 # Start DyNet and define trainer
-model = dy.Model()
+model = dy.ParameterCollection()
 trainer = dy.AdamTrainer(model, alpha=0.001)
 
 # Define the model
 W_emb = model.add_lookup_parameters((nwords, EMB_SIZE)) # Word embeddings
-W_h_p = model.add_parameters((HID_SIZE, EMB_SIZE * N)) # Weights of the hidden layer
-b_h_p = model.add_parameters((HID_SIZE)) # Bias of the hidden layer
-W_sm_p = model.add_parameters((nwords, HID_SIZE)) # Weights of the softmax
-b_sm_p = model.add_parameters((nwords)) # Softmax bias
+W_h = model.add_parameters((HID_SIZE, EMB_SIZE * N)) # Weights of the hidden layer
+b_h = model.add_parameters((HID_SIZE)) # Bias of the hidden layer
+W_sm = model.add_parameters((nwords, HID_SIZE)) # Weights of the softmax
+b_sm = model.add_parameters((nwords)) # Softmax bias
 
 # A function to calculate scores for one value
 def calc_score_of_histories(words, dropout=0.0):
@@ -47,15 +47,11 @@ def calc_score_of_histories(words, dropout=0.0):
   # Lookup the embeddings and concatenate them
   emb = dy.concatenate([dy.lookup_batch(W_emb, x) for x in words])
   # Create the hidden layer
-  W_h = dy.parameter(W_h_p)
-  b_h = dy.parameter(b_h_p)
   h = dy.tanh(dy.affine_transform([b_h, W_h, emb]))
   # Perform dropout
   if dropout != 0.0:
     h = dy.dropout(h, dropout)
   # Calculate the score and return
-  W_sm = dy.parameter(W_sm_p)
-  b_sm = dy.parameter(b_sm_p)
   return dy.affine_transform([b_sm, W_sm, h])
 
 # Calculate the loss value for the entire sentence
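Background on the edits above: DyNet 2.x renamed `dy.Model` to `dy.ParameterCollection`, and the `Parameters` objects returned by `add_parameters` are now converted to expressions implicitly, so the explicit `dy.parameter()` calls that loaded them into each computation graph (and the `_p` suffix that distinguished raw parameters from their graph expressions) are no longer needed. A minimal sketch of the two styles, assuming DyNet 2.1 or later; the names `pc`, `W`, `b`, and `x` are illustrative, not from this patch:

```python
import dynet as dy

pc = dy.ParameterCollection()  # formerly dy.Model()
W = pc.add_parameters((4, 8))  # a 4x8 weight matrix
b = pc.add_parameters((4,))    # a bias vector

dy.renew_cg()
x = dy.inputVector([0.0] * 8)

# Old style: load each Parameters object into the graph by hand.
#   W_e = dy.parameter(W)
#   b_e = dy.parameter(b)
#   y = dy.affine_transform([b_e, W_e, x])

# New style: Parameters are used directly inside expressions.
y = dy.affine_transform([b, W, x])
print(y.npvalue().shape)  # (4,)
```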
@@ -113,7 +109,6 @@ def generate_sent():
     my_loss = calc_sent_loss(sent)
     dev_loss += my_loss.value()
     dev_words += len(sent)
-    trainer.update()
   # Keep track of the development accuracy and reduce the learning rate if it got worse
   if last_dev < dev_loss:
     trainer.learning_rate /= 2
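The `trainer.update()` dropped from the development loop keeps evaluation a pure forward pass: no optimizer step should run while dev loss is measured (with Adam, a stray `update()` can even nudge the weights through its momentum state, despite no fresh `backward()`). A minimal sketch of the intended split, assuming `calc_sent_loss` builds a fresh computation graph as elsewhere in this script:

```python
# Training: compute the loss, backpropagate, then take an optimizer step.
for sent in train:
  loss = calc_sent_loss(sent)
  loss.backward()
  trainer.update()

# Evaluation: forward passes only -- no backward(), no update().
dev_loss = sum(calc_sent_loss(sent).value() for sent in dev)
```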