main.py
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import argparse
import os
import shutil

import numpy as np
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader

from src.dataset import MyDataset
from src.hierarchical_att_model import HierAttNet
from src.utils import get_max_lengths, get_evaluation


def get_args():
    parser = argparse.ArgumentParser(
        description="Implementation of the model described in the paper: Hierarchical Attention Networks for Document Classification")
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--num_epoches", type=int, default=100)
parser.add_argument("--lr", type=float, default=0.1)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--word_hidden_size", type=int, default=50)
parser.add_argument("--sent_hidden_size", type=int, default=50)
parser.add_argument("--es_min_delta", type=float, default=0.0,
help="Early stopping's parameter: minimum change loss to qualify as an improvement")
parser.add_argument("--es_patience", type=int, default=5,
help="Early stopping's parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.")
parser.add_argument("--train_set", type=str, default="data/train.csv")
parser.add_argument("--test_set", type=str, default="data/test.csv")
parser.add_argument("--test_interval", type=int, default=1, help="Number of epoches between testing phases")
parser.add_argument("--word2vec_path", type=str, default="data/glove.6B.50d.txt")
parser.add_argument("--log_path", type=str, default="tensorboard/han_voc")
parser.add_argument("--saved_path", type=str, default="trained_models")
args = parser.parse_args()
return args
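

# Example invocation (a sketch; the flag values below are illustrative, and any
# flag left out falls back to the defaults defined above):
#   python main.py --batch_size 64 --lr 0.05 --num_epoches 30 \
#       --train_set data/train.csv --test_set data/test.csv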
def train(opt):
    # Fix the random seed for reproducibility: seed the CPU RNG always, and the
    # CUDA RNG as well when a GPU is available
    torch.manual_seed(123)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(123)
    # os.sep is the OS-specific path separator ('\\' on Windows, '/' elsewhere)
    os.makedirs(opt.saved_path, exist_ok=True)  # ensure the checkpoint/log directory exists
    output_file = open(opt.saved_path + os.sep + "logs.txt", "w")
output_file.write("Model's parameters: {}".format(vars(opt)))
training_params = {"batch_size": opt.batch_size,
"shuffle": True,
"drop_last": True}
test_params = {"batch_size": opt.batch_size,
"shuffle": False,
"drop_last": False}
max_word_length, max_sent_length = get_max_lengths(opt.train_set)
training_set = MyDataset(opt.train_set, opt.word2vec_path, max_sent_length, max_word_length)
training_generator = DataLoader(training_set, **training_params)
test_set = MyDataset(opt.test_set, opt.word2vec_path, max_sent_length, max_word_length)
test_generator = DataLoader(test_set, **test_params)
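    # HierAttNet follows the HAN paper: a word-level GRU with attention builds a
    # vector per sentence, and a sentence-level GRU with attention combines those
    # into a document vector that feeds the final classifier.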
model = HierAttNet(opt.word_hidden_size, opt.sent_hidden_size, opt.batch_size, training_set.num_classes,
opt.word2vec_path, max_sent_length, max_word_length)
    # Remove any previous run's logs (deletes the whole directory tree)
if os.path.isdir(opt.log_path):
shutil.rmtree(opt.log_path)
os.makedirs(opt.log_path)
    # SummaryWriter emits the TensorBoard event files
writer = SummaryWriter(opt.log_path)
# writer.add_graph(model, torch.zeros(opt.batch_size, max_sent_length, max_word_length))
if torch.cuda.is_available():
model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=opt.lr, momentum=opt.momentum)
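    # The filter skips parameters with requires_grad=False, e.g. the embedding
    # weights if the model freezes the pretrained word vectors (an assumption
    # about HierAttNet's internals, not something configured here)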
best_loss = 1e5
best_epoch = 0
model.train()
    # Number of batches per epoch
num_iter_per_epoch = len(training_generator)
for epoch in range(opt.num_epoches):
for iter, (feature, label) in enumerate(training_generator):
if torch.cuda.is_available():
feature = feature.cuda()
label = label.cuda()
optimizer.zero_grad()
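            # Reset the word- and sentence-level GRU hidden states for this batch
            # (with no argument, the model presumably uses its configured batch_size)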
model._init_hidden_state()
predictions = model(feature)
loss = criterion(predictions, label)
loss.backward()
optimizer.step()
training_metrics = get_evaluation(label.cpu().numpy(), predictions.cpu().detach().numpy(), list_metrics=["accuracy"])
print("Epoch: {}/{}, Iteration: {}/{}, Lr: {}, Loss: {}, Accuracy: {}".format(
epoch + 1,
opt.num_epoches,
iter + 1,
num_iter_per_epoch,
optimizer.param_groups[0]['lr'],
loss, training_metrics["accuracy"]))
writer.add_scalar('Train/Loss', loss, epoch * num_iter_per_epoch + iter)
writer.add_scalar('Train/Accuracy', training_metrics["accuracy"], epoch * num_iter_per_epoch + iter)
        # Evaluate on the test set every opt.test_interval epochs; since it drives
        # early stopping and model selection, this effectively serves as validation
if epoch % opt.test_interval == 0:
model.eval()
loss_ls = []
te_label_ls = []
te_pred_ls = []
for te_feature, te_label in test_generator:
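                # drop_last=False means the final batch may be smaller than
                # batch_size, so re-initialise the hidden state with the actual count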
num_sample = len(te_label)
if torch.cuda.is_available():
te_feature = te_feature.cuda()
te_label = te_label.cuda()
with torch.no_grad():
model._init_hidden_state(num_sample)
te_predictions = model(te_feature)
te_loss = criterion(te_predictions, te_label)
loss_ls.append(te_loss * num_sample)
te_label_ls.extend(te_label.clone().cpu())
te_pred_ls.append(te_predictions.clone().cpu())
            te_loss = sum(loss_ls) / len(test_set)
te_pred = torch.cat(te_pred_ls, 0)
te_label = np.array(te_label_ls)
test_metrics = get_evaluation(te_label, te_pred.numpy(), list_metrics=["accuracy", "confusion_matrix"])
output_file.write(
"Epoch: {}/{} \nTest loss: {} Test accuracy: {} \nTest confusion matrix: \n{}\n\n".format(
epoch + 1, opt.num_epoches,
te_loss,
test_metrics["accuracy"],
test_metrics["confusion_matrix"]))
print("Epoch: {}/{}, Lr: {}, Loss: {}, Accuracy: {}".format(
epoch + 1,
opt.num_epoches,
optimizer.param_groups[0]['lr'],
te_loss, test_metrics["accuracy"]))
writer.add_scalar('Test/Loss', te_loss, epoch)
writer.add_scalar('Test/Accuracy', test_metrics["accuracy"], epoch)
model.train()
            # Keep the checkpoint with the lowest test loss seen so far
if te_loss + opt.es_min_delta < best_loss:
best_loss = te_loss
best_epoch = epoch
torch.save(model, opt.saved_path + os.sep + "whole_model_han")
# Early stopping
if epoch - best_epoch > opt.es_patience > 0:
print("Stop training at epoch {}. The lowest loss achieved is {}".format(epoch, te_loss))
break
if __name__ == "__main__":
opt = get_args()
train(opt)
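
# To reload the checkpoint later (a sketch; torch.save above pickles the whole
# module, so the src.* classes must be importable when loading):
#   model = torch.load("trained_models" + os.sep + "whole_model_han")
#   model.eval()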