new critic and generator
EnginEren committed Nov 14, 2019
1 parent 80a2524 commit bfd1b79
Showing 2 changed files with 103 additions and 125 deletions.
95 changes: 55 additions & 40 deletions main.py
@@ -5,7 +5,7 @@
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from torch.utils import data
#from torch.utils import data
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
@@ -57,17 +57,25 @@


## Load and make them iterable
loader_params = {'batch_size': opt.batchSize, 'shuffle': True, 'num_workers': 6}
#loader_params = {'batch_size': opt.batchSize, 'shuffle': True, 'num_workers': 6}
path = '/beegfs/desy/user/eren/WassersteinGAN/data/gamma-fullG.hdf5'
#path = '/beegfs/desy/user/eren/WassersteinGAN/data/gamma-fullG-50GeV.hdf5'
d = H.HDF5Dataset(path, '30x30/layers')
e = H.HDF5Dataset(path, '30x30/energy')
dataloader_layer = data.DataLoader(d, **loader_params)
dataloader_energy = data.DataLoader(e, **loader_params)
#d = H.HDF5Dataset(path, '30x30/layers')
#e = H.HDF5Dataset(path, '30x30/energy')
#dataloader_layer = data.DataLoader(d, **loader_params)
#dataloader_energy = data.DataLoader(e, **loader_params)

data_layer = iter(dataloader_layer)
data_energy = iter(dataloader_energy)
#data_layer = iter(dataloader_layer)
#data_energy = iter(dataloader_energy)

data = H.HDF5Dataset(path, '30x30')
energies = data['energy'][:].reshape(len(data['energy']))
layers = data['layers'][:].sum(axis=1)

training_dataset = tuple(zip(layers, energies))


dataloader = torch.utils.data.DataLoader(training_dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=6)
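
The block above replaces the two parallel HDF5 loaders with a single in-memory dataset of (layer-sum, energy) pairs. A minimal sketch of the equivalent loading with plain h5py — the group layout ('30x30/layers' of shape [N, 30, 30, 30], '30x30/energy' of shape [N, 1]) is inferred from this code, and H.HDF5Dataset is the repository's own helper:

import h5py
import torch

def load_training_dataset(path, batch_size=64):
    # read everything once; the 30 depth layers are summed into one 30x30 image
    with h5py.File(path, 'r') as f:
        energies = f['30x30/energy'][:].reshape(-1)   # [N]
        layers = f['30x30/layers'][:].sum(axis=1)     # [N, 30, 30]
    # one (image, energy) tuple per shower, as in tuple(zip(layers, energies))
    dataset = list(zip(torch.from_numpy(layers), torch.from_numpy(energies)))
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=True, num_workers=6)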

ngpu = int(opt.ngpu)
nz = int(opt.nz)
@@ -145,38 +153,43 @@ def weights_init(m):

gen_iterations = 0
for epoch in range(opt.niter):
data_layer = iter(dataloader_layer)
data_energy = iter(dataloader_energy)
#data_layer = iter(dataloader_layer)
#data_energy = iter(dataloader_energy)
i = 0
while i < len(dataloader_layer):
while i < len(dataloader):
############################
# (1) Update D network
###########################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update

# train the discriminator Diters times
if gen_iterations < 25 or gen_iterations % 100 == 0:
Diters = 15
if gen_iterations < 25 or gen_iterations % 500 == 0:
Diters = 50
else:
Diters = opt.Diters
j = 0
while j < Diters and i < len(dataloader_layer):
while j < Diters and i < len(dataloader):
j += 1

# clamp parameters to a cube
for p in netD.parameters():
p.data.clamp_(-0.01, 0.01)

### input size matters. Reshape if we want 30x30
if opt.full :
layer = data_layer.next()
else :
tmp = data_layer.next() ## [Bs, 30, 30 , 30 ]
layer = torch.sum(tmp, dim=1)
layer = layer.unsqueeze(1) ## [Bs, 1, 30 , 30 ]
#if opt.full :
# layer = data_layer.next()
#else :
# tmp = data_layer.next() ## [Bs, 30, 30 , 30 ]
# layer = torch.sum(tmp, dim=1)
# layer = layer.unsqueeze(1) ## [Bs, 1, 30 , 30 ]

energy = data_energy.next()

layer, energy = iter(dataloader).next()
layer = layer.unsqueeze(1) ## [Bs, 1, 30 , 30 ]
energy = energy.unsqueeze(-1) ## [Bs, 1 ]

#energy = data_energy.next()
i += 1

#print ("Updating D network, step: {}".format(j))
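
For reference, the fetch above builds a fresh iterator on every batch (iter(dataloader).next()), so with shuffle=True batches are effectively drawn with replacement. A sketch of the more common one-iterator-per-epoch pattern, assuming the same (layer, energy) dataset built above:

data_iter = iter(dataloader)            # create once per epoch
layer, energy = next(data_iter)         # [Bs, 30, 30], [Bs]
layer = layer.unsqueeze(1).float()      # [Bs, 1, 30, 30]
energy = energy.unsqueeze(-1).float()   # [Bs, 1]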
@@ -201,33 +214,35 @@ def weights_init(m):
## input energy
input_energy.resize_as_(real_cpu_e.float()).copy_(real_cpu_e.float())




if torch.cuda.is_available():
inputv_layer = Variable(input_layer.cuda())
inputv_e = Variable(input_energy.cuda())
else :
inputv_layer = Variable(input_layer)
inputv_e = Variable(input_energy)



errD_real = netD(inputv_layer, inputv_e)
errD_real.backward(one)

# train with fake
noise.resize_(batch_size, nz).normal_(0, 1)
#input_energy.resize_(batch_size, 1).uniform_(10, 100)
input_energy.resize_(batch_size, 1).uniform_(10, 100)

if torch.cuda.is_available():
inputv_e = Variable(input_energy.cuda())
noisev = Variable(noise.cuda(), volatile = True) # totally freeze netG
else :
inputv_e = Variable(input_energy)
noisev = Variable(noise, volatile = True) # totally freeze netG
with torch.no_grad():
if torch.cuda.is_available():
inputv_e = Variable(input_energy.cuda())
noisev = Variable(noise.cuda()) # totally freeze netG
else :
inputv_e = Variable(input_energy)
noisev = Variable(noise) # totally freeze netG



errD_fake = netD(netG(noisev, inputv_e), inputv_e)
h = noisev * inputv_e

errD_fake = netD(netG(h), inputv_e)
errD_fake.backward(mone)
errD = errD_real - errD_fake
optimizerD.step()
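
Taken together, the critic step above follows the WGAN recipe of Arjovsky et al.: clip the critic weights to a small cube to keep it roughly Lipschitz, score a real batch and a generated batch, and step on the difference with the generator held fixed. A condensed sketch, reusing the script's one/mone gradient tensors and this commit's noise-times-energy conditioning:

for p in netD.parameters():
    p.data.clamp_(-0.01, 0.01)            # Lipschitz constraint via weight clipping

errD_real = netD(inputv_layer, inputv_e)  # critic score on real showers
errD_real.backward(one)

with torch.no_grad():                     # keep netG frozen during the critic update
    fake = netG(noisev * inputv_e)        # energy-conditioned fake batch
errD_fake = netD(fake, inputv_e)
errD_fake.backward(mone)
errD = errD_real - errD_fake              # Wasserstein loss estimate
optimizerD.step()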
@@ -242,21 +257,21 @@ def weights_init(m):
# in case our last batch was the tail batch of the dataloader,
# make sure we feed a full batch of noise
noise.resize_(batch_size, nz).normal_(0, 1)
#input_energy.resize_(batch_size, 1).uniform_(10, 100)
input_energy.resize_(batch_size, 1).uniform_(10, 100)

noisev = noise
if torch.cuda.is_available():
noisev = Variable(noise.cuda())
else :
noisev = Variable(noise)
noisev = noisev.cuda()




errG = netD(netG(noisev, inputv_e), inputv_e)
errG = netD(netG(noisev * inputv_e), inputv_e)
errG.backward(one)
optimizerG.step()
gen_iterations += 1
print('[%d/%d][%d/%d][%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake %f'
% (epoch, opt.niter, i, len(dataloader_layer), gen_iterations,
% (epoch, opt.niter, i, len(dataloader), gen_iterations,
errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0]))
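
The conditioning scheme this commit settles on is an element-wise product: the [Bs, nz] noise is scaled by the [Bs, 1] target energy (broadcast across nz), so h = z * E enters the generator's linear projection with the requested energy encoded in its scale. Target energies for fakes are drawn uniformly in [10, 100] (presumably GeV, matching the photon samples in the data path). A quick shape check with illustrative sizes:

import torch
z = torch.randn(4, 100)                   # noise, [Bs, nz]
E = torch.empty(4, 1).uniform_(10, 100)   # target energies, [Bs, 1]
h = z * E                                 # broadcasts to [Bs, nz]
assert h.shape == (4, 100)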


133 changes: 48 additions & 85 deletions models/dcgan.py
@@ -18,60 +18,48 @@ def __init__(self, isize, nc, ndf):
self.isize = isize
self.nc = nc

## linear layers
self.cond1 = torch.nn.Linear(1, 10)
self.cond2 = torch.nn.Linear(10, isize*isize)


### convolution
self.conv1 = torch.nn.Conv2d(nc+1, ndf*8, kernel_size=3, stride=1, padding=1, bias=False)
self.conv1 = torch.nn.Conv2d(nc, ndf*8, kernel_size=4, stride=2, padding=1, bias=False)
## batch-normalization
self.bn1 = torch.nn.BatchNorm2d(ndf*8)
## convolution
self.conv2 = torch.nn.Conv2d(ndf*8, ndf*4, kernel_size=3, stride=1, padding=1, bias=False)
self.conv2 = torch.nn.Conv2d(ndf*8, ndf*4, kernel_size=4, stride=1, padding=1, bias=False)
## batch-normalization
self.bn2 = torch.nn.BatchNorm2d(ndf*4)
#convolution
self.conv3 = torch.nn.Conv2d(ndf*4, ndf*2, kernel_size=3, stride=1, padding=1, bias=False)
self.conv3 = torch.nn.Conv2d(ndf*4, ndf*2, kernel_size=4, stride=1, padding=1, bias=False)
## batch-normalization
self.bn3 = torch.nn.BatchNorm2d(ndf*2)
#convolution
self.conv4 = torch.nn.Conv2d(ndf*2, ndf*2, kernel_size=3, stride=1, padding=1, bias=False)

## batch-normalization
self.bn4 = torch.nn.BatchNorm2d(ndf*2)
#convolution
self.conv5 = torch.nn.Conv2d(ndf*2, ndf, kernel_size=3, stride=1, padding=1, bias=False)
self.conv4 = torch.nn.Conv2d(ndf*2, ndf, kernel_size=4, stride=2, padding=1, bias=False)

# Read-out layer : ndf * isize * isize input features, 1 output feature
self.fc1 = torch.nn.Linear(ndf * isize * isize, 1)
# Read-out layers : (ndf * 6 * 6) + 1 input features (conv features + energy), 1 output feature
self.fc1 = torch.nn.Linear((ndf * 6 * 6)+1, 50)
self.fc2 = torch.nn.Linear(50, 25)
self.fc3 = torch.nn.Linear(25, 1)

def forward(self, x, energy):

## conditioning on energy
t = F.leaky_relu(self.cond1(energy), 0.2, inplace=True)
t = F.leaky_relu(self.cond2(t))

## reshape into a 2D plane
t = t.view(-1, 1, self.isize, self.isize)

## concatenation with input : N+1 (N layers + 1 condition plane) x 30 x 30
x = torch.cat((x, t), 1)

x = F.leaky_relu(self.bn1(self.conv1(x)), 0.2, inplace=True)
x = F.leaky_relu(self.bn2(self.conv2(x)), 0.2, inplace=True)
x = F.leaky_relu(self.bn3(self.conv3(x)), 0.2, inplace=True)
x = F.leaky_relu(self.bn4(self.conv4(x)), 0.2, inplace=True)
x = F.leaky_relu(self.conv5(x), 0.2, inplace=True)
#Size changes from (nc+1, 30, 30) to (ndf, 30, 30)
x = F.leaky_relu(self.bn1(self.conv1(x)), 0.2, inplace=True) # 15 x 15
x = F.leaky_relu(self.bn2(self.conv2(x)), 0.2, inplace=True) # 14 x 14
x = F.leaky_relu(self.bn3(self.conv3(x)), 0.2, inplace=True) # 13 x 13
x = F.leaky_relu(self.conv4(x), 0.2, inplace=True) # 6x6

#After the series of convolutions, the size changes from (nc, 30, 30) to (ndf, 6, 6)


x = x.view(-1, self.ndf * self.isize * self.isize)
# Size changes from (ndf, 30, 30) to (1, ndf * 30 * 30)
x = x.view(-1, self.ndf * 6 * 6)
x = torch.cat((x, energy), 1)

# Size changes from (ndf, 6, 6) to (Bs, (ndf * 6 * 6) + 1) once the energy is appended
#Recall that the -1 infers this dimension from the other given dimension


# Read-out layer
x = self.fc1(x)
x = F.leaky_relu(self.fc1(x), 0.2, inplace=True)
x = F.leaky_relu(self.fc2(x), 0.2, inplace=True)
x = self.fc3(x)

x = x.mean(0)
return x.view(1)
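
The spatial sizes annotated in the new critic follow from the standard Conv2d output formula, out = floor((in + 2*pad - kernel) / stride) + 1. A quick check of the conv1..conv4 chain:

def conv_out(size, kernel, stride, pad):
    return (size + 2 * pad - kernel) // stride + 1

s = 30
for k, st, p in [(4, 2, 1), (4, 1, 1), (4, 1, 1), (4, 2, 1)]:  # conv1..conv4
    s = conv_out(s, k, st, p)
print(s)  # 30 -> 15 -> 14 -> 13 -> 6, matching the (ndf * 6 * 6) + 1 read-out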
@@ -89,79 +77,54 @@ def __init__(self, nc, ngf, z):
self.nc = nc
self.z = z

self.cond1 = torch.nn.Linear(self.z+1, 100)
self.cond2 = torch.nn.Linear(100, 10*10*ngf)
## linear projection
self.cond = torch.nn.Linear(self.z, 5*5*ngf*8)

## deconvolution
self.deconv1 = torch.nn.ConvTranspose2d(ngf, ngf*2, kernel_size=3, stride=3, padding=1, bias=False)
self.deconv1 = torch.nn.ConvTranspose2d(ngf*8, ngf*4, kernel_size=2, stride=3, padding=1, bias=False)
## batch-normalization
self.bn1 = torch.nn.BatchNorm2d(ngf*2)
self.bn1 = torch.nn.BatchNorm2d(ngf*4)
## deconvolution
self.deconv2 = torch.nn.ConvTranspose2d(ngf*2, ngf*4, kernel_size=3, stride=2, padding=1, bias=False)
self.deconv2 = torch.nn.ConvTranspose2d(ngf*4, ngf*2, kernel_size=2, stride=2, padding=1, bias=False)
## batch-normalization
self.bn2 = torch.nn.BatchNorm2d(ngf*4)
self.bn2 = torch.nn.BatchNorm2d(ngf*2)
# deconvolution
self.deconv3 = torch.nn.ConvTranspose2d(ngf*4, ngf*8, kernel_size=3, stride=2, padding=1, bias=False)
self.deconv3 = torch.nn.ConvTranspose2d(ngf*2, ngf, kernel_size=6, stride=1, padding=1, bias=False)
## batch-normalization
self.bn3 = torch.nn.BatchNorm2d(ngf*8)

## convolution
self.conv0 = torch.nn.Conv2d(ngf*8, 1, kernel_size=3, stride=4, padding=1, bias=False)
## batch-normalisation
self.bn0 = torch.nn.BatchNorm2d(1)

## convolution
self.conv1 = torch.nn.Conv2d(nc, ngf*4, kernel_size=3, stride=1, padding=1, bias=False)
## batch-normalisation
self.bn01 = torch.nn.BatchNorm2d(ngf*4)

## convolution
self.conv2 = torch.nn.Conv2d(ngf*4, ngf*8, kernel_size=3, stride=1, padding=1, bias=False)
## batch-normalisation
self.bn02 = torch.nn.BatchNorm2d(ngf*8)

## convolution
self.conv3 = torch.nn.Conv2d(ngf*8, ngf*4, kernel_size=3, stride=1, padding=1, bias=False)
## batch-normalisation
self.bn03 = torch.nn.BatchNorm2d(ngf*4)
self.bn3 = torch.nn.BatchNorm2d(ngf)
# deconvolution
self.deconv4 = torch.nn.ConvTranspose2d(ngf, 1, kernel_size=8, stride=1, padding=1, bias=False)

## convolution
self.conv4 = torch.nn.Conv2d(ngf*4, nc, kernel_size=3, stride=1, padding=1, bias=False)




def forward(self, noise, energy):
def forward(self, noise):

layer = []
### need to generate 30 layers, hence the loop!
## need to generate N layers, hence the loop!
for i in range(self.nc):
## conditioning on energy
x = F.leaky_relu(self.cond1(torch.cat((energy, noise), 1)), 0.2, inplace=True)
x = F.leaky_relu(self.cond2(x), 0.2, inplace=True)
## change size for deconv2d network. Image is 10x10
x = x.view(-1,self.ngf,10,10)

#noise
x = F.leaky_relu(self.cond(noise), 0.2, inplace=True)

## change size for deconv2d network. Image is 5x5
x = x.view(-1,self.ngf*8,5,5)

## apply series of deconv2d and batch-norm
x = F.leaky_relu(self.bn1(self.deconv1(x, output_size=[x.size(0), x.size(1) , 30, 30])), 0.2, inplace=True)
x = F.leaky_relu(self.bn2(self.deconv2(x, output_size=[x.size(0), x.size(1) , 60, 60])), 0.2, inplace=True)
x = F.leaky_relu(self.bn3(self.deconv3(x, output_size=[x.size(0), x.size(1) , 120, 120])), 0.2, inplace=True)

##Image is 120x120
x = F.leaky_relu(self.bn1(self.deconv1(x, output_size=[x.size(0), x.size(1) , 12, 12])), 0.2, inplace=True)
x = F.leaky_relu(self.bn2(self.deconv2(x, output_size=[x.size(0), x.size(1) , 22, 22])), 0.2, inplace=True)
x = F.leaky_relu(self.bn3(self.deconv3(x, output_size=[x.size(0), x.size(1) , 25, 25])), 0.2, inplace=True)
x = F.relu(self.deconv4(x, output_size=[x.size(0), x.size(1) , 30, 30]))

##Image is 30x30 now

## one standard conv and batch-norm layer (I don't know why :) )
x = F.leaky_relu(self.bn0(self.conv0(x)), 0.2, inplace=True)


layer.append(x)


## concatenation of the layers
x = torch.cat([layer[l] for l in range(self.nc)], 1)

## Further apply series of conv and batch norm layers
x = F.leaky_relu(self.bn01(self.conv1(x)), 0.2, inplace=True)
x = F.leaky_relu(self.bn02(self.conv2(x)), 0.2, inplace=True)
x = F.leaky_relu(self.bn03(self.conv3(x)), 0.2, inplace=True)
x = F.relu(self.conv4(x), inplace=True)

return x
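
Likewise, the generator's deconvolution chain can be checked against the ConvTranspose2d formula out = (in - 1)*stride - 2*pad + kernel; the output_size= hints above pin down the ambiguity that strides > 1 would otherwise leave. One 30x30 layer is produced per loop iteration, and the nc results are concatenated along the channel axis:

def deconv_out(size, kernel, stride, pad):
    return (size - 1) * stride - 2 * pad + kernel

s = 5
for k, st, p in [(2, 3, 1), (2, 2, 1), (6, 1, 1), (8, 1, 1)]:  # deconv1..deconv4
    s = deconv_out(s, k, st, p)
print(s)  # 5 -> 12 -> 22 -> 25 -> 30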
