Commit

as of 11Nov.
EnginEren committed Nov 11, 2019
1 parent 09d2691 commit 80a2524
Showing 2 changed files with 42 additions and 46 deletions.
main.py: 36 changes (13 additions & 23 deletions)
@@ -59,6 +59,7 @@
 ## Load and make them iterable
 loader_params = {'batch_size': opt.batchSize, 'shuffle': True, 'num_workers': 6}
 path = '/beegfs/desy/user/eren/WassersteinGAN/data/gamma-fullG.hdf5'
+#path = '/beegfs/desy/user/eren/WassersteinGAN/data/gamma-fullG-50GeV.hdf5'
 d = H.HDF5Dataset(path, '30x30/layers')
 e = H.HDF5Dataset(path, '30x30/energy')
 dataloader_layer = data.DataLoader(d, **loader_params)
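H.HDF5Dataset is the repository's own helper and is not shown in this diff. A minimal sketch of what such an HDF5-backed dataset looks like, assuming each key ('30x30/layers', '30x30/energy') indexes one array per event:

# Hypothetical sketch only -- the real H.HDF5Dataset lives elsewhere in
# this repo and may differ in details such as caching and transforms.
import h5py
import torch
from torch.utils import data

class HDF5Dataset(data.Dataset):
    def __init__(self, path, key):
        self.path, self.key = path, key
        with h5py.File(path, 'r') as f:
            self.length = f[key].shape[0]

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # re-open per item so the dataset is safe with num_workers > 0
        with h5py.File(self.path, 'r') as f:
            return torch.as_tensor(f[self.key][idx])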
@@ -118,9 +119,7 @@ def weights_init(m):

 ## incoming energy
 input_energy = torch.FloatTensor(opt.batchSize,1)
 
-
-
 
 noise = torch.FloatTensor(opt.batchSize, nz)
 fixed_noise = torch.FloatTensor(opt.batchSize, nz).normal_(0, 1)
 one = torch.FloatTensor([1])
@@ -157,8 +156,8 @@ def weights_init(m):
 p.requires_grad = True # they are set to False below in netG update
 
 # train the discriminator Diters times
-if gen_iterations < 25 or gen_iterations % 500 == 0:
-    Diters = 200
+if gen_iterations < 25 or gen_iterations % 100 == 0:
+    Diters = 15
 else:
     Diters = opt.Diters
 j = 0
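This schedule is the standard WGAN recipe: run many critic updates early in training and at fixed intervals, and opt.Diters (5 in the original WGAN reference code) otherwise. The commit relaxes the boost from 200 critic iterations every 500 generator steps to 15 every 100. A hypothetical helper capturing the new logic:

# Hypothetical helper mirroring the schedule above; not part of the commit.
def critic_iters(gen_iterations, default_iters=5):
    # boost critic training early on and periodically thereafter
    if gen_iterations < 25 or gen_iterations % 100 == 0:
        return 15
    return default_iters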
@@ -202,27 +201,23 @@ def weights_init(m):
 ## input energy
 input_energy.resize_as_(real_cpu_e.float()).copy_(real_cpu_e.float())
 
-## quick and dirty fix for imp parameter
-impoint = torch.FloatTensor(batch_size,2).uniform_(-1.5, 1.5)
 
 
 if torch.cuda.is_available():
     inputv_layer = Variable(input_layer.cuda())
     inputv_e = Variable(input_energy.cuda())
-    inputv_imp = Variable(impoint.cuda()) ## input impact point
 else :
     inputv_layer = Variable(input_layer)
     inputv_e = Variable(input_energy)
-    inputv_imp = Variable(impoint) ## input impact point
 
 #print (epoch, inputv_e.shape)
 #print (epoch, inputv_imp.shape)
-errD_real = netD(inputv_layer, inputv_e, inputv_imp)
+errD_real = netD(inputv_layer, inputv_e)
 errD_real.backward(one)
 
 # train with fake
 noise.resize_(batch_size, nz).normal_(0, 1)
-input_energy.resize_(batch_size, 1).uniform_(10,100)
+#input_energy.resize_(batch_size, 1).uniform_(10, 100)
 
 if torch.cuda.is_available():
     inputv_e = Variable(input_energy.cuda())
@@ -231,9 +226,8 @@ def weights_init(m):
     inputv_e = Variable(input_energy)
     noisev = Variable(noise, volatile = True) # totally freeze netG
 
-fake = Variable(netG(noisev, inputv_e, inputv_imp).data)
-inputv_layer = fake
-errD_fake = netD(inputv_layer, inputv_e, inputv_imp)
+
+errD_fake = netD(netG(noisev, inputv_e), inputv_e)
 errD_fake.backward(mone)
 errD = errD_real - errD_fake
 optimizerD.step()
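The one/mone seeding above is the usual WGAN sign trick: backward(one) accumulates +1 times the gradient of errD_real, backward(mone) accumulates -1 times the gradient of errD_fake, so the optimizer step descends the gradient of errD = errD_real - errD_fake. A self-contained illustration of the mechanics:

import torch

# Toy demo of seeded backward passes, assuming scalar critic outputs:
# grads from backward(one) and backward(mone) sum to d(f_real - f_fake)/dw.
w = torch.tensor([2.0], requires_grad=True)
f_real, f_fake = 3.0 * w, 5.0 * w
one = torch.tensor([1.0])
mone = one * -1
f_real.backward(one)
f_fake.backward(mone)
print(w.grad)  # tensor([-2.]) == d(3w - 5w)/dw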
@@ -248,20 +242,16 @@ def weights_init(m):
 # in case our last batch was the tail batch of the dataloader,
 # make sure we feed a full batch of noise
 noise.resize_(batch_size, nz).normal_(0, 1)
-input_energy.resize_(batch_size, 1).uniform_(10, 100)
+#input_energy.resize_(batch_size, 1).uniform_(10, 100)
 
 if torch.cuda.is_available():
     noisev = Variable(noise.cuda())
     inputv_e = Variable(input_energy.cuda())
-    inputv_imp = Variable(impoint.cuda()) ## input impact point
 else :
     noisev = Variable(noise)
     inputv_e = Variable(input_energy)
-    inputv_imp = Variable(impoint) ## input impact point
 
-
-fake = netG(noisev, inputv_e, inputv_imp)
-errG = netD(fake, inputv_e, inputv_imp)
+errG = netD(netG(noisev, inputv_e), inputv_e)
 errG.backward(one)
 optimizerG.step()
 gen_iterations += 1
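One caveat on the Variable idioms in this loop: volatile = True is the pre-0.4 PyTorch way to skip graph construction when generating fakes for the critic update. On current PyTorch the same freeze would be written roughly as below (a sketch, with netG, netD, noise and input_energy as in the surrounding code):

# Post-0.4 equivalent of Variable(noise, volatile=True); a sketch, not
# part of the commit. No autograd graph is built through netG.
with torch.no_grad():
    fake = netG(noise, input_energy)
errD_fake = netD(fake, input_energy)   # gradients flow into netD only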
models/dcgan.py: 52 changes (29 additions & 23 deletions)
@@ -19,43 +19,49 @@ def __init__(self, isize, nc, ndf):
         self.nc = nc
 
         ## linear layers
-        self.cond1 = torch.nn.Linear(3, 10)
+        self.cond1 = torch.nn.Linear(1, 10)
         self.cond2 = torch.nn.Linear(10, isize*isize)
 
         ### convolution
-        self.conv1 = torch.nn.Conv2d(nc+1, ndf*8, kernel_size=3, stride=1, padding=1)
+        self.conv1 = torch.nn.Conv2d(nc+1, ndf*8, kernel_size=3, stride=1, padding=1, bias=False)
         ## batch-normalization
         self.bn1 = torch.nn.BatchNorm2d(ndf*8)
         ## convolution
-        self.conv2 = torch.nn.Conv2d(ndf*8, ndf*4, kernel_size=3, stride=1, padding=1)
+        self.conv2 = torch.nn.Conv2d(ndf*8, ndf*4, kernel_size=3, stride=1, padding=1, bias=False)
         ## batch-normalization
         self.bn2 = torch.nn.BatchNorm2d(ndf*4)
         #convolution
-        self.conv3 = torch.nn.Conv2d(ndf*4, ndf*2, kernel_size=3, stride=1, padding=1)
+        self.conv3 = torch.nn.Conv2d(ndf*4, ndf*2, kernel_size=3, stride=1, padding=1, bias=False)
         ## batch-normalization
         self.bn3 = torch.nn.BatchNorm2d(ndf*2)
         #convolution
-        self.conv4 = torch.nn.Conv2d(ndf*2, ndf, kernel_size=3, stride=1, padding=1)
+        self.conv4 = torch.nn.Conv2d(ndf*2, ndf*2, kernel_size=3, stride=1, padding=1, bias=False)
+
+        ## batch-normalization
+        self.bn4 = torch.nn.BatchNorm2d(ndf*2)
+        #convolution
+        self.conv5 = torch.nn.Conv2d(ndf*2, ndf, kernel_size=3, stride=1, padding=1, bias=False)
 
 
 # Read-out layer : ndf * isize * isize input features, 1 output feature
         self.fc1 = torch.nn.Linear(ndf * isize * isize, 1)
 
-    def forward(self, x, energy, impactPoint):
+    def forward(self, x, energy):
 
 
-        ## conditioning on energy and impact parameter
-        t = F.leaky_relu(self.cond1(torch.cat((energy, impactPoint), 1)), 0.2, inplace=True)
+        ## conditioning on energy
+        t = F.leaky_relu(self.cond1(energy), 0.2, inplace=True)
         t = F.leaky_relu(self.cond2(t))
 
         ## reshape into a 2D plane
         t = t.view(-1, 1, self.isize, self.isize)
 
-        ## concatenation with input : 31 (30 layers + 1 cond) x 30 x 30
+        ## concatenation with input : N+1 (N layers + 1 cond) x 30 x 30
         x = torch.cat((x, t), 1)
 
         x = F.leaky_relu(self.bn1(self.conv1(x)), 0.2, inplace=True)
         x = F.leaky_relu(self.bn2(self.conv2(x)), 0.2, inplace=True)
         x = F.leaky_relu(self.bn3(self.conv3(x)), 0.2, inplace=True)
-        x = F.leaky_relu(self.conv4(x), 0.2, inplace=True)
+        x = F.leaky_relu(self.bn4(self.conv4(x)), 0.2, inplace=True)
+        x = F.leaky_relu(self.conv5(x), 0.2, inplace=True)
         #Size changes from (nc+1, 30, 30) to (ndf, 30, 30)
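The conditioning path above lifts the scalar energy to a full isize x isize plane and appends it to the shower image as one extra input channel. A self-contained shape check, assuming isize = nc = 30 as in main.py:

import torch
import torch.nn.functional as F

# Stand-alone walk-through of the critic's conditioning (assumed sizes).
batch, nc, isize = 4, 30, 30
x = torch.randn(batch, nc, isize, isize)        # 30 calorimeter layers
energy = torch.rand(batch, 1) * 90 + 10         # 10-100 GeV, as in main.py
cond1 = torch.nn.Linear(1, 10)
cond2 = torch.nn.Linear(10, isize * isize)
t = F.leaky_relu(cond1(energy), 0.2)
t = F.leaky_relu(cond2(t)).view(-1, 1, isize, isize)
x = torch.cat((x, t), 1)                        # energy plane as extra channel
print(x.shape)                                  # torch.Size([4, 31, 30, 30])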


@@ -83,54 +89,54 @@ def __init__(self, nc, ngf, z):
         self.nc = nc
         self.z = z
 
-        self.cond1 = torch.nn.Linear(self.z+3, 50)
-        self.cond2 = torch.nn.Linear(50, 10*10*ngf)
+        self.cond1 = torch.nn.Linear(self.z+1, 100)
+        self.cond2 = torch.nn.Linear(100, 10*10*ngf)
 
         ## deconvolution
-        self.deconv1 = torch.nn.ConvTranspose2d(ngf, ngf*2, kernel_size=3, stride=3, padding=1)
+        self.deconv1 = torch.nn.ConvTranspose2d(ngf, ngf*2, kernel_size=3, stride=3, padding=1, bias=False)
         ## batch-normalization
         self.bn1 = torch.nn.BatchNorm2d(ngf*2)
         ## deconvolution
-        self.deconv2 = torch.nn.ConvTranspose2d(ngf*2, ngf*4, kernel_size=3, stride=2, padding=1)
+        self.deconv2 = torch.nn.ConvTranspose2d(ngf*2, ngf*4, kernel_size=3, stride=2, padding=1, bias=False)
         ## batch-normalization
         self.bn2 = torch.nn.BatchNorm2d(ngf*4)
         # deconvolution
-        self.deconv3 = torch.nn.ConvTranspose2d(ngf*4, ngf*8, kernel_size=3, stride=2, padding=1)
+        self.deconv3 = torch.nn.ConvTranspose2d(ngf*4, ngf*8, kernel_size=3, stride=2, padding=1, bias=False)
         ## batch-normalization
         self.bn3 = torch.nn.BatchNorm2d(ngf*8)
 
         ## convolution
-        self.conv0 = torch.nn.Conv2d(ngf*8, 1, kernel_size=3, stride=4, padding=1)
+        self.conv0 = torch.nn.Conv2d(ngf*8, 1, kernel_size=3, stride=4, padding=1, bias=False)
         ## batch-normalisation
         self.bn0 = torch.nn.BatchNorm2d(1)
 
         ## convolution
-        self.conv1 = torch.nn.Conv2d(nc, ngf*4, kernel_size=3, stride=1, padding=1)
+        self.conv1 = torch.nn.Conv2d(nc, ngf*4, kernel_size=3, stride=1, padding=1, bias=False)
         ## batch-normalisation
         self.bn01 = torch.nn.BatchNorm2d(ngf*4)
 
         ## convolution
-        self.conv2 = torch.nn.Conv2d(ngf*4, ngf*8, kernel_size=3, stride=1, padding=1)
+        self.conv2 = torch.nn.Conv2d(ngf*4, ngf*8, kernel_size=3, stride=1, padding=1, bias=False)
         ## batch-normalisation
         self.bn02 = torch.nn.BatchNorm2d(ngf*8)
 
         ## convolution
-        self.conv3 = torch.nn.Conv2d(ngf*8, ngf*4, kernel_size=3, stride=1, padding=1)
+        self.conv3 = torch.nn.Conv2d(ngf*8, ngf*4, kernel_size=3, stride=1, padding=1, bias=False)
         ## batch-normalisation
         self.bn03 = torch.nn.BatchNorm2d(ngf*4)
 
         ## convolution
-        self.conv4 = torch.nn.Conv2d(ngf*4, nc, kernel_size=3, stride=1, padding=1)
+        self.conv4 = torch.nn.Conv2d(ngf*4, nc, kernel_size=3, stride=1, padding=1, bias=False)
 
 
 
-    def forward(self, noise, energy, impactPoint):
+    def forward(self, noise, energy):
 
         layer = []
         ### need to generate all nc layers, hence the loop
         for i in range(self.nc):
-            ## conditioning on energy, impact parameter and noise
-            x = F.leaky_relu(self.cond1(torch.cat((energy, impactPoint, noise), 1)), 0.2, inplace=True)
+            ## conditioning on energy and noise
+            x = F.leaky_relu(self.cond1(torch.cat((energy, noise), 1)), 0.2, inplace=True)
             x = F.leaky_relu(self.cond2(x), 0.2, inplace=True)
 
             ## change size for the deconv2d network. Image is 10x10
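Since the diff is truncated here, only the seed of each generated layer is visible: the z-dimensional noise vector and the 1-d energy are concatenated (hence Linear(self.z+1, 100)) and mapped to a 10x10, ngf-channel image that the deconvolution stack then upsamples. A self-contained sketch of that seed step, with assumed sizes:

import torch
import torch.nn.functional as F

# Assumed sizes; the reshape to (ngf, 10, 10) follows the "Image is 10x10"
# comment above and cond2's 10*10*ngf output width.
batch, z, ngf = 4, 100, 32
noise = torch.randn(batch, z)
energy = torch.rand(batch, 1) * 90 + 10
cond1 = torch.nn.Linear(z + 1, 100)
cond2 = torch.nn.Linear(100, 10 * 10 * ngf)
x = F.leaky_relu(cond1(torch.cat((energy, noise), 1)), 0.2)
x = F.leaky_relu(cond2(x), 0.2).view(-1, ngf, 10, 10)
print(x.shape)   # torch.Size([4, 32, 10, 10]) -- one seed per sample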
