Generative Adversarial Networks (DCGAN, LSGAN, WGAN, WGAN-GP, SNGAN, RSGAN): a TensorFlow Implementation
[Reprint] MarTin Guo
Papers (their objectives are summarized right after this list):
DCGAN: Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks
WGAN: Wasserstein GAN
WGAN-GP: Improved Training of Wasserstein GANs
LSGAN: Least Squares Generative Adversarial Networks
SNGAN: Spectral normalization for generative adversarial networks
RSGAN: The relativistic discriminator: a key element missing from standard GAN
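For quick reference, the objectives these papers propose, matching the loss branches in the code below, can be written as follows. Here $\sigma$ denotes the sigmoid; SNGAN uses the standard GAN loss with a spectrally normalized discriminator; WGAN additionally clips D's weights to [-0.01, 0.01]; and this implementation uses gradient-penalty weight $\lambda = 10$:

\begin{aligned}
\text{GAN (DCGAN, SNGAN):}\quad & L_D = -\mathbb{E}_x[\log D(x)] - \mathbb{E}_z[\log(1 - D(G(z)))], & L_G &= -\mathbb{E}_z[\log D(G(z))] \\
\text{LSGAN:}\quad & L_D = \tfrac{1}{2}\mathbb{E}_x[(D(x)-1)^2] + \tfrac{1}{2}\mathbb{E}_z[D(G(z))^2], & L_G &= \tfrac{1}{2}\mathbb{E}_z[(D(G(z))-1)^2] \\
\text{WGAN:}\quad & L_D = \mathbb{E}_z[D(G(z))] - \mathbb{E}_x[D(x)], & L_G &= -\mathbb{E}_z[D(G(z))] \\
\text{WGAN-GP:}\quad & L_D = L_D^{\mathrm{WGAN}} + \lambda\,\mathbb{E}_{\hat{x}}\big[(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1)^2\big] & & \\
\text{RSGAN:}\quad & L_D = -\mathbb{E}[\log \sigma(D(x) - D(G(z)))], & L_G &= -\mathbb{E}[\log \sigma(D(G(z)) - D(x))]
\end{aligned}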
Samples generated by the different GANs at various iteration counts are compared in the figure below:
For the complete code, see GitHub:
https://github.com/MingtaoGuo/DCGAN_WGAN_WGAN-GP_LSGAN_SNGAN_RSGAN_RaSGAN_TensorFlow
import tensorflow as tf  # TensorFlow 1.x API (tf.placeholder / tf.Session)

# batchsize, img_H, img_W, img_C, epsilon and GAN_type are module-level
# settings; the Discriminator and Generator classes are defined elsewhere
# in the repository.

class GAN:
    # Architecture of generator and discriminator just like DCGAN.
    def __init__(self):
        self.Z = tf.placeholder("float", [batchsize, 100])
        self.img = tf.placeholder("float", [batchsize, img_H, img_W, img_C])
        D = Discriminator("discriminator")
        G = Generator("generator")
        self.fake_img = G(self.Z)
        if GAN_type == "DCGAN":
            # DCGAN, paper: Unsupervised Representation Learning with Deep
            # Convolutional Generative Adversarial Networks
            self.fake_logit = tf.nn.sigmoid(D(self.fake_img))
            self.real_logit = tf.nn.sigmoid(D(self.img, reuse=True))
            self.d_loss = -(tf.reduce_mean(tf.log(self.real_logit + epsilon)) +
                            tf.reduce_mean(tf.log(1 - self.fake_logit + epsilon)))
            self.g_loss = -tf.reduce_mean(tf.log(self.fake_logit + epsilon))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "WGAN":
            # WGAN, paper: Wasserstein GAN
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            self.d_loss = -tf.reduce_mean(self.real_logit) + tf.reduce_mean(self.fake_logit)
            self.g_loss = -tf.reduce_mean(self.fake_logit)
            # Weight clipping enforces the Lipschitz constraint; run these ops
            # after every discriminator update.
            self.clip = []
            for var in D.var:
                self.clip.append(tf.clip_by_value(var, -0.01, 0.01))
            self.opt_D = tf.train.RMSPropOptimizer(5e-5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.RMSPropOptimizer(5e-5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "WGAN-GP":
            # WGAN-GP, paper: Improved Training of Wasserstein GANs
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            # Gradient penalty evaluated on random interpolates x_hat between
            # real and generated samples.
            e = tf.random_uniform([batchsize, 1, 1, 1], 0, 1)
            x_hat = e * self.img + (1 - e) * self.fake_img
            grad = tf.gradients(D(x_hat, reuse=True), x_hat)[0]
            self.d_loss = tf.reduce_mean(self.fake_logit - self.real_logit) \
                + 10 * tf.reduce_mean(tf.square(tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2, 3])) - 1))
            self.g_loss = tf.reduce_mean(-self.fake_logit)
            self.opt_D = tf.train.AdamOptimizer(1e-4, beta1=0., beta2=0.9).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(1e-4, beta1=0., beta2=0.9).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "LSGAN":
            # LSGAN, paper: Least Squares Generative Adversarial Networks
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            self.d_loss = tf.reduce_mean(0.5 * tf.square(self.real_logit - 1) + 0.5 * tf.square(self.fake_logit))
            self.g_loss = tf.reduce_mean(0.5 * tf.square(self.fake_logit - 1))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "SNGAN":
            # SNGAN, paper: Spectral Normalization for Generative Adversarial
            # Networks; is_sn=True applies spectral normalization to D's weights.
            self.fake_logit = tf.nn.sigmoid(D(self.fake_img, is_sn=True))
            self.real_logit = tf.nn.sigmoid(D(self.img, reuse=True, is_sn=True))
            self.d_loss = -(tf.reduce_mean(tf.log(self.real_logit + epsilon) + tf.log(1 - self.fake_logit + epsilon)))
            self.g_loss = -tf.reduce_mean(tf.log(self.fake_logit + epsilon))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "RSGAN":
            # RSGAN, paper: The relativistic discriminator: a key element
            # missing from standard GAN
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            self.d_loss = -tf.reduce_mean(tf.log(tf.nn.sigmoid(self.real_logit - self.fake_logit) + epsilon))
            self.g_loss = -tf.reduce_mean(tf.log(tf.nn.sigmoid(self.fake_logit - self.real_logit) + epsilon))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "RaHingeGAN":
            # RaHingeGAN: relativistic average hinge loss, from the same paper
            # as RSGAN. d_tilde_* are the "relativistic average" logits.
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            d_tilde_r = self.real_logit - tf.reduce_mean(self.fake_logit, 0)
            d_tilde_f = self.fake_logit - tf.reduce_mean(self.real_logit, 0)
            self.d_loss = tf.reduce_mean(tf.maximum(0., 1. - d_tilde_r)) + tf.reduce_mean(tf.maximum(0., 1. + d_tilde_f))
            self.g_loss = tf.reduce_mean(tf.maximum(0., 1. - d_tilde_f)) + tf.reduce_mean(tf.maximum(0., 1. + d_tilde_r))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        elif GAN_type == "RSGAN-GP":
            # RSGAN with a WGAN-GP-style gradient penalty added to the D loss.
            self.fake_logit = D(self.fake_img)
            self.real_logit = D(self.img, reuse=True)
            e = tf.random_uniform([batchsize, 1, 1, 1], 0, 1)
            x_hat = e * self.img + (1 - e) * self.fake_img
            grad = tf.gradients(D(x_hat, reuse=True), x_hat)[0]
            self.d_loss = -tf.reduce_mean(tf.log(tf.nn.sigmoid(self.real_logit - self.fake_logit) + epsilon)) \
                + 10 * tf.reduce_mean(tf.square(tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2, 3])) - 1))
            self.g_loss = -tf.reduce_mean(tf.log(tf.nn.sigmoid(self.fake_logit - self.real_logit) + epsilon))
            self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)
            self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
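A minimal usage sketch (not the repo's actual training script; see the repository for that). It assumes the module-level settings above are defined and uses a hypothetical next_batch() helper that yields real images shaped [batchsize, img_H, img_W, img_C]:

import numpy as np

gan = GAN()
for step in range(10000):
    batch = next_batch()  # hypothetical data feeder, not part of the snippet above
    z = np.random.standard_normal([batchsize, 100])
    # Discriminator update; for WGAN, also run the clip ops to clamp D's weights.
    gan.sess.run(gan.opt_D, feed_dict={gan.img: batch, gan.Z: z})
    if GAN_type == "WGAN":
        gan.sess.run(gan.clip)
    # Generator update (the relativistic losses also need real images in the feed).
    gan.sess.run(gan.opt_G, feed_dict={gan.img: batch, gan.Z: z})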
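Note that the SNGAN branch only flips an is_sn=True flag; the actual spectral normalization lives in the repo's layer ops. As a rough illustration of what such a flag typically enables, here is a one-step power-iteration spectral norm in the style of the SNGAN paper (a sketch only; the names u, w_norm, and spectral_norm are illustrative, not the repo's exact code):

def spectral_norm(w, iteration=1):
    # Flatten the kernel to a 2-D matrix [prod(other dims), out_channels].
    w_shape = w.shape.as_list()
    w = tf.reshape(w, [-1, w_shape[-1]])
    # Persistent estimate of the leading singular vector, updated each step.
    u = tf.get_variable("u", [1, w_shape[-1]],
                        initializer=tf.truncated_normal_initializer(),
                        trainable=False)
    u_hat = u
    for _ in range(iteration):
        # One step of power iteration: v <- norm(u W^T), u <- norm(v W).
        v_hat = tf.nn.l2_normalize(tf.matmul(u_hat, tf.transpose(w)))
        u_hat = tf.nn.l2_normalize(tf.matmul(v_hat, w))
    # Estimated largest singular value sigma = v W u^T.
    sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
    with tf.control_dependencies([u.assign(u_hat)]):
        # Divide the kernel by sigma so its spectral norm is (approximately) 1.
        w_norm = tf.reshape(w / sigma, w_shape)
    return w_norm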
————————————————
Copyright notice: this article was originally written by the CSDN blogger "MarTin Guo".