
Commit af1277c

added formatting
1 parent 7f7037a commit af1277c

File tree

.github/workflows/formatting.yaml
train.py

2 files changed: +35 -11 lines changed


.github/workflows/formatting.yaml (+23)
@@ -0,0 +1,23 @@
+name: Formatting
+on: [push, pull_request]
+jobs:
+  check-formatting:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v1
+      - name: Black Code Formatter
+        uses: lgeiger/black-action@master
+        with:
+          args: "--line-length 120 . --check"
+    name: black-action
+
+  auto-formatting:
+    name: runner / black
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Check files using the black formatter
+        uses: rickstaa/action-black@v1
+        id: action_black
+        with:
+          black_args: "--line-length 120 ."
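The first job runs black with --check, which only reports and exits non-zero when any file would be reformatted, failing the push or pull request; the second job (rickstaa/action-black) actually runs black over the tree. A minimal local sketch of the same check, assuming black is installed (pip install black); the snippet and its sample string are illustrative, not part of this commit:

import black

# Mirror the workflow's "--line-length 120" setting.
mode = black.Mode(line_length=120)

# A deliberately mis-formatted statement, like the ones this commit adds to train.py.
src = "loss = F.nll_loss( output, target)\n"

try:
    # format_file_contents raises NothingChanged when the source is already clean;
    # `black --check` turns that same signal into its exit code.
    fixed = black.format_file_contents(src, fast=False, mode=mode)
    print("would reformat to:", fixed, end="")
except black.NothingChanged:
    print("already formatted")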

train.py (+12 -11)
@@ -9,11 +9,12 @@
 
 
 class Net(nn.Module):
-
     def __init__(self):
+
+
         super(Net, self).__init__()
-        self.conv1 = nn.Conv2d(1, 32, 3, 1)
-        self.conv2 = nn.Conv2d(32, 64, 3, 1)
+        self.conv1 = nn.Conv2d(1, 32, 3, 1)
+        self.conv2 = nn.Conv2d( 32, 64, 3, 1)
         self.dropout1 = nn.Dropout(0.25)
         self.dropout2 = nn.Dropout(0.5)
         self.fc1 = nn.Linear(9216, 128)
@@ -35,14 +36,14 @@ def forward(self, x):
         return output
 
 
-def train(args, model, device, train_loader, optimizer, epoch):
+def train(args, model, device, train_loader, optimizer, epoch):
     model.train()
     for batch_idx, (data, target) in enumerate(train_loader):
 
-        data, target = data.to(device), target.to(device)
+        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
         output = model(data)
-        loss = F.nll_loss(output, target)
+        loss = F.nll_loss( output, target)
         loss.backward()
         optimizer.step()
         if batch_idx % args.log_interval == 0:
@@ -51,7 +52,7 @@ def train(args, model, device, train_loader, optimizer, epoch):
                     epoch,
                     batch_idx * len(data),
                     len(train_loader.dataset),
-                    100.0 * batch_idx / len(train_loader),
+                    100.0 * batch_idx / len(train_loader),
                     loss.item(),
                 )
             )
@@ -69,11 +70,11 @@ def test(model, device, test_loader, epoch):
 
             data, target = data.to(device), target.to(device)
             output = model(data)
-            test_loss += F.nll_loss(output, target, reduction="sum").item()  # sum up batch loss
+            test_loss += F.nll_loss( output, target, reduction="sum").item()  # sum up batch loss
             pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
-            correct += pred.eq(target.view_as(pred)).sum().item()
+            correct += pred.eq(target.view_as(pred) ).sum().item()
 
-    test_loss /= len(test_loader.dataset)
+    test_loss /= len(test_loader.dataset )
 
     print(
         "\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
@@ -131,7 +132,7 @@ def main():
     model = Net().to(device)
     optimizer = optim.Adam(model.parameters(), lr=args.lr)
 
-    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
+    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
     for epoch in range(args.epochs):
         train(args, model, device, train_loader, optimizer, epoch)
         test(model, device, test_loader, epoch)
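Every train.py hunk above is a whitespace-only edit (stray spaces inside call parentheses, blank lines shuffled around __init__), so the script's behavior is unchanged; the changes exist to trip the formatting workflow added in the same commit. A minimal sketch, assuming black is installed, of what the auto-formatting job would rewrite those statements back to; the snippet is illustrative, not part of the commit:

import black

mode = black.Mode(line_length=120)

# The mis-formatted statements introduced above, as standalone snippets.
for src in (
    "self.conv2 = nn.Conv2d( 32, 64, 3, 1)\n",
    "correct += pred.eq(target.view_as(pred) ).sum().item()\n",
    "test_loss /= len(test_loader.dataset )\n",
):
    # format_str returns the black-formatted version of a source string,
    # stripping the spurious spaces inside the parentheses.
    print(black.format_str(src, mode=mode), end="")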
