# Imports implied by the code below (the diff starts mid-file, so the
# original import block is not shown).
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        # ... (remaining layers elided in the diff)
    def forward(self, x):
        # ... (body elided in the diff)
        return output
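

# The forward body is elided in the diff. As a sketch only: with 28x28 MNIST
# inputs, the two 3x3/stride-1 convs above give 26x26 and then 24x24 feature
# maps, and a 2x2 max-pool leaves 64 * 12 * 12 = 9216 features, which matches
# nn.Linear(9216, 128). The helper below is hypothetical (not part of the
# diff) and only illustrates that shape arithmetic.
def _forward_shape_sketch(net, x):
    x = F.relu(net.conv1(x))  # [N, 32, 26, 26]
    x = F.relu(net.conv2(x))  # [N, 64, 24, 24]
    x = F.max_pool2d(x, 2)    # [N, 64, 12, 12]
    x = net.dropout1(x)
    x = torch.flatten(x, 1)   # [N, 9216]
    x = F.relu(net.fc1(x))    # [N, 128]
    return x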


def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print(
                "...".format(  # format-string literal elided in the diff hunk
                    epoch,
                    batch_idx * len(data),
                    len(train_loader.dataset),
                    100.0 * batch_idx / len(train_loader),
                    loss.item(),
                )
            )


def test(model, device, test_loader, epoch):
    # ... (setup and the enclosing loop over test_loader elided in the diff)
        data, target = data.to(device), target.to(device)
        output = model(data)
        test_loss += F.nll_loss(output, target, reduction="sum").item()  # sum up batch loss
        pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
        correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print(
        "\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
            # ... (format arguments elided in the diff)


def main():
    # ... (argument parsing, device selection, and data loaders elided in the diff)
    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(args.epochs):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader, epoch)
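

# The diff omits how `args`, `device`, `train_loader`, and `test_loader` are
# constructed in main(). A minimal sketch of what the snippet above assumes;
# the attribute names match their uses above, but the defaults, batch sizes,
# and transform are illustrative, not taken from the diff.
import argparse

from torch.utils.data import DataLoader
from torchvision import datasets, transforms


def _build_inputs_sketch():
    parser = argparse.ArgumentParser()
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--gamma", type=float, default=0.7)
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--log-interval", type=int, default=10)
    args = parser.parse_args([])

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    transform = transforms.ToTensor()
    train_loader = DataLoader(
        datasets.MNIST("data", train=True, download=True, transform=transform),
        batch_size=64,
        shuffle=True,
    )
    test_loader = DataLoader(
        datasets.MNIST("data", train=False, transform=transform),
        batch_size=1000,
    )
    return args, device, train_loader, test_loader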