Commit: recent changes due to docker

EnginEren committed Nov 26, 2019
1 parent bfd1b79, commit 7898d49

Showing 6 changed files with 62 additions and 70 deletions.
2 changes: 1 addition & 1 deletion convert_to_hdf5/create_hdf5.py

@@ -104,7 +104,7 @@ def E0(strt,end):
 
 #Open HDF5 file for writing
-hf = h5py.File('../data/gamma-fullG-50GeV.hdf5', 'w')
+hf = h5py.File('../data/gamma-fullG-fixed50-10GeV.hdf5', 'w')
 grp = hf.create_group("30x30")
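For reference, a minimal sketch of inspecting the converted file. The group name comes from this hunk; the dataset names inside it ('energy', 'layers') are assumed from their usage in main.py below.

```python
import h5py

# Inspect the converted file; '30x30' is the group created above, the
# dataset names inside it are assumed from main.py's usage.
with h5py.File('../data/gamma-fullG-fixed50-10GeV.hdf5', 'r') as hf:
    grp = hf['30x30']
    print(list(grp.keys()))        # expected to include 'energy' and 'layers'
    print(grp['energy'].shape)     # one incident energy per shower
```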
2 changes: 1 addition & 1 deletion convert_to_hdf5/create_hdf5.sh

@@ -11,7 +11,7 @@
 cd /beegfs/desy/user/eren/WassersteinGAN/convert_to_hdf5
 
 # start 0
-python create_hdf5.py --ncpu 16 --rootfile ../data/calo_hits-50GeV.root --branch photonSIM
+python create_hdf5.py --ncpu 32 --rootfile ../data/calo_hits-10_50fixed-2.root --branch photonSIM
 
 exit 0;
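The flags above imply roughly the following argument parsing in create_hdf5.py; this is a sketch reconstructed from the invocation, not the script's actual code, and the real types, defaults, and help text may differ.

```python
import argparse

# Sketch of the CLI that the invocation above implies.
parser = argparse.ArgumentParser()
parser.add_argument('--ncpu', type=int, help='number of worker processes')
parser.add_argument('--rootfile', type=str, help='input ROOT file with calorimeter hits')
parser.add_argument('--branch', type=str, help='tree branch to read, e.g. photonSIM')
args = parser.parse_args()
```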
9 changes: 1 addition & 8 deletions Dockerfile → docker/Dockerfile

@@ -1,17 +1,10 @@
-FROM pytorch/pytorch:latest
-
-#Install dependencies
+FROM engineren/pytorch:test
 
 RUN pip install --upgrade pip
 
 COPY requirements.txt requirements.txt
 RUN pip install --upgrade --no-cache-dir -r requirements.txt && \
     rm requirements.txt
 
-RUN yum -y update \
-    && yum install -y \
-    git \
-    vim
-
 WORKDIR /home
2 changes: 2 additions & 0 deletions requirements.txt → docker/requirements.txt

@@ -7,3 +7,5 @@ scipy
 h5py
 jupyter
 torchsummary
+tensorflow
+tensorboardX
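tensorboardX (with TensorFlow available for the event-file backend) is presumably added for training-curve logging; a minimal usage sketch, not taken from this repository, with an illustrative log directory and tag:

```python
from tensorboardX import SummaryWriter

# Log a scalar per step; 'runs/wgan' and the tag are illustrative.
writer = SummaryWriter('runs/wgan')
for step in range(100):
    writer.add_scalar('loss/discriminator', 1.0 / (step + 1), step)
writer.close()
```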
50 changes: 22 additions & 28 deletions main.py
@@ -16,7 +16,7 @@
 import json
 from pathlib import Path
 import models.HDF5Dataset as H
-
+import numpy as np
 import models.dcgan as dcgan
 import models.mlp as mlp
@@ -57,15 +57,8 @@
 
 ## Load and make them iterable
-#loader_params = {'batch_size': opt.batchSize, 'shuffle': True, 'num_workers': 6}
-path = '/beegfs/desy/user/eren/WassersteinGAN/data/gamma-fullG.hdf5'
-#d = H.HDF5Dataset(path, '30x30/layers')
-#e = H.HDF5Dataset(path, '30x30/energy')
-#dataloader_layer = data.DataLoader(d, **loader_params)
-#dataloader_energy = data.DataLoader(e, **loader_params)
-
-#data_layer = iter(dataloader_layer)
-#data_energy = iter(dataloader_energy)
+path = '/beegfs/desy/user/eren/WassersteinGAN/data/gamma-fullG-fixed50-10GeV.hdf5'
 
 data = H.HDF5Dataset(path, '30x30')
 energies = data['energy'][:].reshape(len(data['energy']))
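The hunks below draw batches from a dataloader built on this dataset. A sketch of the wrapping, with the batch size assumed (the deleted loader_params line used opt.batchSize and six workers):

```python
import models.HDF5Dataset as H
from torch.utils import data

path = '/beegfs/desy/user/eren/WassersteinGAN/data/gamma-fullG-fixed50-10GeV.hdf5'
# Batch size here is illustrative; the deleted line used opt.batchSize.
loader_params = {'batch_size': 64, 'shuffle': True, 'num_workers': 6}
dataloader = data.DataLoader(H.HDF5Dataset(path, '30x30'), **loader_params)
layer, energy = next(iter(dataloader))   # unpacking as in the training loop below
```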
@@ -165,7 +158,7 @@ def weights_init(m):
 
 # train the discriminator Diters times
 if gen_iterations < 25 or gen_iterations % 500 == 0:
-    Diters = 50
+    Diters = 25
 else:
     Diters = opt.Diters
 j = 0
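For readability, the schedule this hunk retunes, written as a standalone helper (a sketch; main.py keeps the logic inline):

```python
def critic_iters(gen_iterations, default_iters):
    # WGAN heuristic: extra critic updates during early burn-in and on
    # every 500th generator iteration; this commit lowers 50 to 25.
    if gen_iterations < 25 or gen_iterations % 500 == 0:
        return 25
    return default_iters
```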
@@ -175,15 +168,6 @@ def weights_init(m):
 # clamp parameters to a cube
 for p in netD.parameters():
     p.data.clamp_(-0.01, 0.01)
-
-### input size matters. Reshape if we want 30x30
-#if opt.full :
-#    layer = data_layer.next()
-#else :
-#    tmp = data_layer.next() ## [Bs, 30, 30 , 30 ]
-#    layer = torch.sum(tmp, dim=1)
-#    layer = layer.unsqueeze(1) ## [Bs, 1, 30 , 30 ]
-
 
 layer, energy = iter(dataloader).next()
 layer = layer.unsqueeze(1) ## [Bs, 1, 30 , 30 ]
@@ -223,13 +207,16 @@ def weights_init(m):
 inputv_e = Variable(input_energy)
 
 
-
 #train with real images
 errD_real = netD(inputv_layer, inputv_e)
 errD_real.backward(one)
 
 # train with fake
 noise.resize_(batch_size, nz).normal_(0, 1)
-input_energy.resize_(batch_size, 1).uniform_(10, 100)
+inc_energy_label = [10, 50]
+energy_labelv = np.random.choice(inc_energy_label, (batch_size,1), p=[0.5, 0.5])
+energy_labelv = torch.from_numpy(energy_labelv).float()
+input_energy.resize_(batch_size, 1).copy_(energy_labelv)
 
 with torch.no_grad():
     if torch.cuda.is_available():
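The replacement samples the conditioning energy from the two fixed training points (10 and 50 GeV) instead of a continuous uniform range. A standalone illustration of the sampling:

```python
import numpy as np
import torch

batch_size = 4
# Draw incident-energy labels from the two fixed points with equal
# probability, exactly as in the hunk above.
labels = np.random.choice([10, 50], (batch_size, 1), p=[0.5, 0.5])
energy = torch.from_numpy(labels).float()
print(energy.squeeze(1))   # e.g. tensor([10., 50., 50., 10.])
```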
@@ -239,7 +226,7 @@ def weights_init(m):
     inputv_e = Variable(input_energy)
     noisev = Variable(noise) # totally freeze netG
 
-
+inputv_e = inputv_e * 100.00
 h = noisev * inputv_e
 
 errD_fake = netD(netG(h), inputv_e)
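The generator's input is thus the noise vector scaled elementwise by the (×100) energy label. A sketch of the broadcasting at work, with nz assumed to be 100 for illustration:

```python
import torch

noise = torch.randn(2, 100)                     # nz assumed 100
energy = torch.tensor([[10.], [50.]]) * 100.00  # scaled labels, shape [2, 1]
h = noise * energy                              # [2, 100] * [2, 1] broadcasts
print(h.shape)                                  # torch.Size([2, 100])
```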
@@ -257,14 +244,21 @@ def weights_init(m):
 # in case our last batch was the tail batch of the dataloader,
 # make sure we feed a full batch of noise
 noise.resize_(batch_size, nz).normal_(0, 1)
-input_energy.resize_(batch_size, 1).uniform_(10, 100)
-
-noisev = noise
-if torch.cuda.is_available():
-    noisev = noisev.cuda()
+inc_energy_label = [10, 50]
+energy_labelv = np.random.choice(inc_energy_label, (batch_size,1), p=[0.5, 0.5])
+energy_labelv = torch.from_numpy(energy_labelv).float()
+input_energy.resize_(batch_size, 1).copy_(energy_labelv)
+
+if torch.cuda.is_available():
+    inputv_e = Variable(input_energy.cuda())
+    noisev = Variable(noise.cuda())
+else :
+    inputv_e = Variable(input_energy)
+    noisev = Variable(noise)
 
+inputv_e = inputv_e * 100.00
 
 errG = netD(netG(noisev * inputv_e), inputv_e)
 errG.backward(one)
67 changes: 35 additions & 32 deletions models/dcgan.py
@@ -33,39 +33,41 @@ def __init__(self, isize, nc, ndf):
         self.bn3 = torch.nn.BatchNorm2d(ndf*2)
         #convolution
         self.conv4 = torch.nn.Conv2d(ndf*2, ndf, kernel_size=4, stride=2, padding=1, bias=False)
-
-        # Read-out layer : ndf * 2 * 2 input features, ndf output features
-        self.fc1 = torch.nn.Linear((ndf * 6 * 6)+1, 50)
-        self.fc2 = torch.nn.Linear(50, 25)
-        self.fc3 = torch.nn.Linear(25, 1)
+        ## batch-normalization
+        self.bn4 = torch.nn.BatchNorm2d(ndf)
+        #convolution
+        self.conv5 = torch.nn.Conv2d(ndf, 1, kernel_size=2, stride=2, padding=1, bias=False)
+        # Read-out layer : 4 * 4 input features, one output feature
+        self.fc = torch.nn.Linear((4 * 4)+1, 1)
 
     def forward(self, x, energy):
 
         x = F.leaky_relu(self.bn1(self.conv1(x)), 0.2, inplace=True) # 15 x 15
         x = F.leaky_relu(self.bn2(self.conv2(x)), 0.2, inplace=True) # 14 x 14
         x = F.leaky_relu(self.bn3(self.conv3(x)), 0.2, inplace=True) # 13 x 13
-        x = F.leaky_relu(self.conv4(x), 0.2, inplace=True) # 6x6
+        x = F.leaky_relu(self.bn4(self.conv4(x)), 0.2, inplace=True) # 6x6
+        x = F.leaky_relu(self.conv5(x), 0.2, inplace=True) # 4x4
 
-        #After series of convlutions --> size changes from (nc, 30, 30) to (ndf, 6, 6)
+        #After series of convolutions --> size changes from (nc, 30, 30) to (1, 4, 4)
 
-        x = x.view(-1, self.ndf * 6 * 6)
+        x = x.view(-1, 4 * 4)
         x = torch.cat((x, energy), 1)
 
-        # Size changes from (ndf, 30, 30) to (1, (ndf * 6 * 6) + 1)
+        # Size changes from (ndf, 30, 30) to (1, (4 * 4) + 1)
         #Recall that the -1 infers this dimension from the other given dimension
 
         # Read-out layer
-        x = F.leaky_relu(self.fc1(x), 0.2, inplace=True)
-        x = F.leaky_relu(self.fc2(x), 0.2, inplace=True)
-        x = self.fc3(x)
+        x = F.leaky_relu(self.fc(x), 0.2, inplace=True)
         x = x.mean(0)
         return x.view(1)
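The per-layer sizes quoted in the comments can be checked against the standard Conv2d output formula; a quick sketch:

```python
# Sanity-check the spatial sizes in the comments above:
# out = floor((in + 2*padding - kernel) / stride) + 1
def conv_out(n, k, s, p):
    return (n + 2 * p - k) // s + 1

print(conv_out(13, 4, 2, 1))  # conv4: 13 -> 6
print(conv_out(6, 2, 2, 1))   # conv5: 6 -> 4, hence the flattened 4*4
```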




class DCGAN_G(nn.Module):
    """
    generator component of WGAN

@@ -77,44 +79,45 @@ def __init__(self, nc, ngf, z):
         self.nc = nc
         self.z = z
 
-        ## linear projection
-        self.cond = torch.nn.Linear(self.z, 5*5*ngf*8)
-
         ## deconvolution
-        self.deconv1 = torch.nn.ConvTranspose2d(ngf*8, ngf*4, kernel_size=2, stride=3, padding=1, bias=False)
+        self.deconv1 = torch.nn.ConvTranspose2d(z, ngf*8, kernel_size=4, stride=1, padding=0, bias=False)
         ## batch-normalization
-        self.bn1 = torch.nn.BatchNorm2d(ngf*4)
+        self.bn1 = torch.nn.BatchNorm2d(ngf*8)
         ## deconvolution
-        self.deconv2 = torch.nn.ConvTranspose2d(ngf*4, ngf*2, kernel_size=2, stride=2, padding=1, bias=False)
+        self.deconv2 = torch.nn.ConvTranspose2d(ngf*8, ngf*4, kernel_size=4, stride=2, padding=1, bias=False)
         ## batch-normalization
-        self.bn2 = torch.nn.BatchNorm2d(ngf*2)
+        self.bn2 = torch.nn.BatchNorm2d(ngf*4)
         # deconvolution
-        self.deconv3 = torch.nn.ConvTranspose2d(ngf*2, ngf, kernel_size=6, stride=1, padding=1, bias=False)
+        self.deconv3 = torch.nn.ConvTranspose2d(ngf*4, ngf*2, kernel_size=4, stride=2, padding=1, bias=False)
         ## batch-normalization
-        self.bn3 = torch.nn.BatchNorm2d(ngf)
+        self.bn3 = torch.nn.BatchNorm2d(ngf*2)
         # deconvolution
-        self.deconv4 = torch.nn.ConvTranspose2d(ngf, 1, kernel_size=8, stride=1, padding=1, bias=False)
-
+        self.deconv4 = torch.nn.ConvTranspose2d(ngf*2, ngf, kernel_size=4, stride=2, padding=1, bias=False)
+        ## batch-normalization
+        self.bn4 = torch.nn.BatchNorm2d(ngf)
+        # deconvolution
+        self.deconv5 = torch.nn.ConvTranspose2d(ngf, 1, kernel_size=1, stride=1, padding=1, bias=False)
 
-    def forward(self, noise):
+    def forward(self, z):
+        z = z.view(-1,self.z,1,1)
 
-        layer = []
-        ## need to do generate N layers, hence the loop!
-        for i in range(self.nc):
-
-            #noise
-            x = F.leaky_relu(self.cond(noise), 0.2, inplace=True)
-
-            ## change size for deconv2d network. Image is 5x5
-            x = x.view(-1,self.ngf*8,5,5)
-
-            ## apply series of deconv2d and batch-norm
-            x = F.leaky_relu(self.bn1(self.deconv1(x, output_size=[x.size(0), x.size(1) , 12, 12])), 0.2, inplace=True)
-            x = F.leaky_relu(self.bn2(self.deconv2(x, output_size=[x.size(0), x.size(1) , 22, 22])), 0.2, inplace=True)
-            x = F.leaky_relu(self.bn3(self.deconv3(x, output_size=[x.size(0), x.size(1) , 25, 25])), 0.2, inplace=True)
-            x = F.relu(self.deconv4(x, output_size=[x.size(0), x.size(1) , 30, 30]))
+        ## apply series of deconv2d and batch-norm
+        x = F.leaky_relu(self.bn1(self.deconv1(z)), 0.2, inplace=True) # 4 x 4
+        x = F.leaky_relu(self.bn2(self.deconv2(x)), 0.2, inplace=True) # 8 x 8
+        x = F.leaky_relu(self.bn3(self.deconv3(x)), 0.2, inplace=True) # 16 x 16
+        x = F.leaky_relu(self.bn4(self.deconv4(x)), 0.2, inplace=True) # 32 x 32
+        x = F.relu(self.deconv5(x)) # 30 x 30
 
 ##Image is 30x30 now
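The new stack's per-layer sizes can likewise be verified with the ConvTranspose2d output formula; a quick sketch:

```python
# ConvTranspose2d (no output_padding, no dilation):
# out = (in - 1)*stride - 2*padding + kernel_size
def deconv_out(n, k, s, p):
    return (n - 1) * s - 2 * p + k

n = 1
for k, s, p in [(4, 1, 0), (4, 2, 1), (4, 2, 1), (4, 2, 1), (1, 1, 1)]:
    n = deconv_out(n, k, s, p)
    print(n)   # 4, 8, 16, 32, 30 -- matching the per-layer comments
```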
