train.py
# -*- coding: utf-8 -*-
import argparse
import os

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from tqdm import tqdm

from dataset import get_loaders
class Siamese(nn.Module):
    """Embedding network used for both branches of the Siamese pair."""

    def __init__(self):
        super(Siamese, self).__init__()
        self.c1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.c2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.c3 = nn.Conv2d(64, 128, kernel_size=3, padding=0)
        self.bn3 = nn.BatchNorm2d(128)
        self.fc4 = nn.Linear(128, 64)
        # BatchNorm1d, not BatchNorm2d: fc4 produces a 2-D (N, 64) tensor
        self.bn4 = nn.BatchNorm1d(64)
        self.fc5 = nn.Linear(64, 2)

    def forward(self, x):
        # Assumes 28x28 single-channel input: 28 -> 14 -> 7 -> 5 -> 1x1 after the avg pool
        h = F.max_pool2d(self.bn1(self.c1(x)), 2)
        h = F.max_pool2d(self.bn2(self.c2(h)), 2)
        h = F.avg_pool2d(self.bn3(self.c3(h)), 5)
        h = self.bn4(self.fc4(h.view(h.size(0), -1)))
        return self.fc5(h)
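# Both inputs of a pair are fed through the same Siamese instance in main()
# below, so the two branches share all weights; the 2-D outputs are the
# embeddings compared by the contrastive loss that follows.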
def contrastive_loss(o1, o2, y):
    """Contrastive loss (Hadsell et al., 2006); renamed from the original
    misspelling "contractive_loss". y=0 marks a similar pair, y=1 a
    dissimilar pair that is pushed apart up to the margin."""
    g, margin = F.pairwise_distance(o1, o2), 5.0
    loss = (1 - y) * (g ** 2) + y * (torch.clamp(margin - g, min=0) ** 2)
    return torch.mean(loss)
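# A toy check of the loss (hypothetical values, not part of the training flow):
# with both embedding pairs at distance 5,
#   o1 = torch.Tensor([[0., 0.], [0., 0.]]); o2 = torch.Tensor([[3., 4.], [3., 4.]])
#   y = torch.Tensor([[0.], [1.]])   # first pair similar, second dissimilar
#   contrastive_loss(o1, o2, y)      # mean of 5**2 and max(5 - 5, 0)**2 -> 12.5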
def main(args):
    # Set up dataset
    train_loader, test_loader = get_loaders(args.batch_size)
    model = Siamese().cuda()
    opt = optim.SGD(model.parameters(),
                    lr=args.lr,
                    momentum=0.9)
    # Drop the learning rate by 10x after epochs 5 and 10
    scheduler = optim.lr_scheduler.MultiStepLR(opt, [5, 10], 0.1)
    cudnn.benchmark = True  # fixed: was misspelled "benckmark", a silent no-op
    if not os.path.isdir("./checkpoint"):
        os.makedirs("./checkpoint")
    print("\t".join(["Epoch", "TrainLoss", "TestLoss"]))
    for e in range(args.epochs):
        scheduler.step()
        model.train()
        train_loss, train_n = 0, 0
        for x1, x2, y in tqdm(train_loader, total=len(train_loader), leave=False):
            # Variable and loss.data[0] follow the pre-0.4 PyTorch API this script targets
            x1, x2 = Variable(x1.cuda()), Variable(x2.cuda())
            y = Variable(y.float().cuda()).view(y.size(0), 1)
            o1, o2 = model(x1), model(x2)
            loss = contrastive_loss(o1, o2, y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            # Accumulate the running sums (the original assignment overwrote them each batch)
            train_loss += loss.data[0] * y.size(0)
            train_n += y.size(0)
        model.eval()
        test_loss, test_n = 0, 0
        for x1, x2, y in tqdm(test_loader, total=len(test_loader), leave=False):
            x1, x2 = Variable(x1.cuda()), Variable(x2.cuda())
            y = Variable(y.float().cuda()).view(y.size(0), 1)
            o1, o2 = model(x1), model(x2)
            loss = contrastive_loss(o1, o2, y)
            test_loss += loss.data[0] * y.size(0)
            test_n += y.size(0)
        # Save a full-model checkpoint every 5 epochs
        if (e + 1) % 5 == 0:
            torch.save(model, "./checkpoint/{}.tar".format(e + 1))
        print("{}\t{:.6f}\t{:.6f}".format(e, train_loss / train_n, test_loss / test_n))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--epochs", type=int, default=20)
    parser.add_argument("--lr", type=float, default=0.01)
    args = parser.parse_args()
    main(args)
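# Usage (assumes a CUDA-capable GPU and a dataset.py providing get_loaders):
#   python train.py --batch_size 128 --epochs 20 --lr 0.01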