model.py
#!/usr/bin/env python3
import torch
from torch import nn
# Definition of the model: a small CNN with three convolutional blocks
# followed by two fully connected layers
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        # bx3x224x224 input images
        self.layer1 = nn.Sequential(
            # 3 input channels, 16 output channels, 3x3 kernel, no padding, stride 2
            nn.Conv2d(3, 16, kernel_size=3, padding=0, stride=2),
            # normalizes each channel over the batch towards zero mean and unit variance
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2)  # halves the spatial size, similar to an image pyrDown
        )
        # bx16x55x55 after layer1
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, padding=0, stride=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        # bx32x13x13 after layer2
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, padding=0, stride=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        # bx64x3x3 after layer3, flattened to 3 * 3 * 64 = 576 features
        self.fc1 = nn.Linear(3 * 3 * 64, 10)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(10, 51)
        self.relu = nn.ReLU()
    def forward(self, x):
        # x: bx3x224x224
        out = self.layer1(x)
        # out: bx16x55x55
        out = self.layer2(out)
        # out: bx32x13x13
        out = self.layer3(out)
        # out: bx64x3x3
        # flatten: keep the batch dimension and compact all others into the second dimension
        out = out.view(out.size(0), -1)
        out = self.relu(self.fc1(out))
        out = self.dropout(out)
        out = self.fc2(out)
        return out
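

# Minimal smoke-test sketch: assuming bx3x224x224 inputs (as the shape comments
# above suggest), run one forward pass and check that the output is bx51.
if __name__ == "__main__":
    model = Model()
    model.eval()  # disable dropout for the shape check
    with torch.no_grad():
        dummy = torch.randn(2, 3, 224, 224)  # batch of 2 random "images"
        logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 51])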