diff --git a/wgan2-AC-lasagne.ipynb b/wgan2-AC-lasagne.ipynb
index 6e35596..a138e55 100644
--- a/wgan2-AC-lasagne.ipynb
+++ b/wgan2-AC-lasagne.ipynb
@@ -324,16 +324,16 @@
     "loss_D_real = output_D.mean()\n",
     "loss_D_fake = output_D_fake.mean()\n",
     "loss_D = loss_D_fake - loss_D_real\n",
-    "loss_D_gp = loss_D + 10 * grad_penalty + loss_C_real\n",
+    "loss_D_gp = loss_D + 10 * grad_penalty + 0.01*loss_C_real\n",
     "loss_G = -loss_D_fake + loss_C_fake\n",
     "\n",
     "\n",
     "params_netD = lasagne.layers.get_all_params(netD, trainable=True) \n",
     "params_netC = lasagne.layers.get_all_params(netC, trainable=True) \n",
     "params_netG = lasagne.layers.get_all_params(netG, trainable=True)\n",
-    "optimize_G = lasagne.updates.adam(loss_G, params_netG, learning_rate=lrG, beta1=0.2)\n",
+    "optimize_G = lasagne.updates.adam(loss_G, params_netG, learning_rate=lrG, beta1=0.)\n",
     "optimize_D = lasagne.updates.adam(loss_D_gp, params_netD+params_netC, \n",
-    "                                  learning_rate=lrD, beta1=0.2)\n",
+    "                                  learning_rate=lrD, beta1=0.)\n",
     "train_G_fn = theano.function([input_var_G], [loss_G], updates=optimize_G)\n",
     "train_D_fn = theano.function([input_var_D, input_var_G, input_var_Y, ϵ], \n",
     "    [loss_D_gp, loss_D, loss_D_real, loss_D_fake, loss_C_fake], \n",
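
For context, the patch makes two changes: it down-weights the auxiliary-classifier term in the critic objective (0.01*loss_C_real, so the classification loss no longer competes at full strength with the Wasserstein term and the 10x gradient penalty), and it sets Adam's beta1 to 0 for both optimizers, the setting used for WGAN-GP training in Gulrajani et al. (2017). Below is a minimal, self-contained sketch of the resulting loss weighting and optimizer setup, not the notebook's actual graph: the shared variable w and the placeholder losses stand in for the netD/netC networks, while the names loss_D, grad_penalty, loss_C_real, loss_D_gp, lrD and the lasagne.updates.adam call mirror the diff.

import theano
import theano.tensor as T
import lasagne

# Hypothetical one-parameter stand-in for the critic; the notebook instead
# collects parameters via lasagne.layers.get_all_params(netD/netC, trainable=True).
w = theano.shared(0.5, name='w')

loss_D = T.sqr(w)                            # placeholder Wasserstein critic loss
grad_penalty = T.sqr(T.grad(loss_D, w) - 1)  # placeholder (||grad|| - 1)^2 penalty
loss_C_real = T.sqr(w - 1.0)                 # placeholder auxiliary-classifier loss

# The diff's new weighting: gradient penalty at 10, AC term down-weighted to 0.01.
loss_D_gp = loss_D + 10 * grad_penalty + 0.01 * loss_C_real

# beta1=0. disables Adam's first-moment momentum, as in the WGAN-GP paper.
lrD = 1e-4
optimize_D = lasagne.updates.adam(loss_D_gp, [w], learning_rate=lrD, beta1=0.)
train_D_fn = theano.function([], loss_D_gp, updates=optimize_D)

print(train_D_fn())  # one critic update on the toy loss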