utils.py
import datetime
import random
import sys
import time

import numpy as np
import torch
from visdom import Visdom


class Logger():
    # Console and Visdom training logger: prints a per-batch progress line
    # with running loss averages and an ETA, and plots epoch-averaged losses.
    def __init__(self, n_epochs, batches_epoch):
        self.viz = Visdom()
        self.n_epochs = n_epochs
        self.batches_epoch = batches_epoch
        self.epoch = 1
        self.batch = 1
        self.prev_time = time.time()
        self.mean_period = 0
        self.losses = {}
        self.loss_windows = {}
        self.image_windows = {}
    def log(self, losses=None):
        # Accumulate elapsed time; the ETA below uses the mean seconds per
        # batch observed so far.
        self.mean_period += (time.time() - self.prev_time)
        self.prev_time = time.time()

        sys.stdout.write('\rEpoch %03d/%03d [%04d/%04d] -- ' %
                         (self.epoch, self.n_epochs, self.batch, self.batches_epoch))

        for i, loss_name in enumerate(losses.keys()):
            if loss_name not in self.losses:
                self.losses[loss_name] = losses[loss_name].item()
            else:
                self.losses[loss_name] += losses[loss_name].item()

            # Print the epoch's running average of each loss.
            if (i + 1) == len(losses.keys()):
                sys.stdout.write('%s: %.4f -- ' % (loss_name, self.losses[loss_name] / self.batch))
            else:
                sys.stdout.write('%s: %.4f | ' % (loss_name, self.losses[loss_name] / self.batch))

        batches_done = self.batches_epoch * (self.epoch - 1) + self.batch
        batches_left = self.batches_epoch * (self.n_epochs - self.epoch) + \
            self.batches_epoch - self.batch
        sys.stdout.write('ETA: %s' % (datetime.timedelta(
            seconds=batches_left * self.mean_period / batches_done)))

        # End of epoch: plot the epoch-averaged losses and reset accumulators.
        if (self.batch % self.batches_epoch) == 0:
            for loss_name, loss in self.losses.items():
                if loss_name not in self.loss_windows:
                    self.loss_windows[loss_name] = self.viz.line(
                        X=np.array([self.epoch]),
                        Y=np.array([loss / self.batch]),
                        opts={'xlabel': 'epochs', 'ylabel': loss_name,
                              'title': loss_name})
                else:
                    self.viz.line(X=np.array([self.epoch]),
                                  Y=np.array([loss / self.batch]),
                                  win=self.loss_windows[loss_name],
                                  update='append')
                self.losses[loss_name] = 0.0

            self.epoch += 1
            self.batch = 1
            sys.stdout.write('\n')
        else:
            self.batch += 1
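
# Usage sketch (illustrative, not part of this module): assumes a Visdom
# server is running (e.g. started with `python -m visdom.server`) and that
# `loss_G`/`loss_D` are scalar tensors from a hypothetical training loop.
#
#   logger = Logger(n_epochs=200, batches_epoch=len(dataloader))
#   for epoch in range(200):
#       for batch in dataloader:
#           ...
#           logger.log({'loss_G': loss_G, 'loss_D': loss_D})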


class ReplayBuffer():
    # Buffer of previously generated samples (the "image pool" trick): each
    # query returns a mix of fresh and historical samples, so the
    # discriminator does not overfit to the generator's latest outputs.
    def __init__(self, max_size=50):
        assert max_size > 0, 'Trying to create an empty buffer'
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data):
        to_return = []
        for element in data.data:
            element = torch.unsqueeze(element, 0)
            if len(self.data) < self.max_size:
                # Buffer not full yet: store the new sample and return it.
                self.data.append(element)
                to_return.append(element)
            else:
                if random.uniform(0, 1) > 0.5:
                    # With probability 0.5, swap the new sample for a random
                    # stored one and return the old sample instead.
                    i = random.randint(0, self.max_size - 1)
                    to_return.append(self.data[i].clone())
                    self.data[i] = element
                else:
                    to_return.append(element)
        return torch.cat(to_return)
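
# Usage sketch (illustrative names: `netG_A2B`/`netD_B` stand for a generator
# and a discriminator, `real_A` for a batch of inputs; none are defined here):
#
#   fake_B_buffer = ReplayBuffer()
#   fake_B = netG_A2B(real_A)
#   pred_fake = netD_B(fake_B_buffer.push_and_pop(fake_B).detach())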


class LambdaLR():
    # Learning-rate multiplier: 1.0 until decay_start_epoch, then a linear
    # decay to 0.0 at epoch n_epochs. `offset` shifts the epoch count, e.g.
    # when resuming training.
    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert (n_epochs - decay_start_epoch) > 0, \
            'Decay must start before the training session ends'
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        num = max(0, epoch + self.offset - self.decay_start_epoch)
        den = self.n_epochs - self.decay_start_epoch
        return 1.0 - num / den
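
# Usage sketch (hyperparameters are illustrative): `step` plugs straight into
# PyTorch's built-in torch.optim.lr_scheduler.LambdaLR as its lr_lambda.
#
#   lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(
#       optimizer_G,
#       lr_lambda=LambdaLR(n_epochs=200, offset=0, decay_start_epoch=100).step)
#   ...
#   lr_scheduler_G.step()  # call once per epoch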


def weights_init_normal(m):
    # Initialise Linear layers: weights ~ N(0, 0.07), biases zeroed. The bias
    # check guards against layers built with bias=False, whose bias is None.
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.07)
        if m.bias is not None:
            torch.nn.init.constant_(m.bias.data, 0.0)
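
# Usage sketch: `nn.Module.apply` visits every submodule, so a model is
# initialised in place (`Generator` is an illustrative model class, not
# defined in this file):
#
#   netG = Generator()
#   netG.apply(weights_init_normal)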