#%%
import keras.backend as K
dtype = 'float32'
K.set_floatx(dtype)
# Keras' default epsilon is 1e-7, which is too small for float16: without
# widening it, divide-by-zero inside normalizations produces NaN predictions.
#K.set_epsilon(1e-4)
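# A minimal sketch of the float16 configuration the note above alludes to
# (illustrative values, not tuned; this script stays on float32):
#   K.set_floatx('float16')
#   K.set_epsilon(1e-4)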
import keras
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import random
from tqdm import tqdm
from keras.utils import np_utils
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Activation, LeakyReLU, Dropout, Input, Flatten, Reshape, Conv2DTranspose, UpSampling2D, Conv2D, BatchNormalization
from keras.datasets import mnist
from keras.optimizers import Adam
from keras.preprocessing.image import load_img, img_to_array, array_to_img, ImageDataGenerator
from IPython import display
#%%matplotlib inline
print(keras.__version__)
batch_size = 16
examples = 16     # number of fixed samples to visualize each epoch
latent_side = 128 # dimensionality of the latent vector
side = 96         # image width/height in pixels
channels = 3
lr = 0.0002
#%%
# fixed latent vectors, reused every plot so progress is comparable across epochs
seed = np.random.normal(size=[examples, latent_side])
#%%
train_data = []
filenames = set()
# Collect every image path across the face datasets.
for dataset_dir in ['D:\\Datasets\\Gelbooru-faces_96_cleaned',
                    'D:\\Datasets\\Gelbooru-faces_96-wb',
                    'D:\\Datasets\\Lewds-faces_96_cleaned',
                    'D:\\Datasets\\Pixiv-faces_96_cleaned']:
    for (root0, _, files) in os.walk(dataset_dir):
        for filename in files:
            filenames.add(os.path.join(root0, filename))
for filename in filenames:
    img_l = load_img(filename, target_size=[side, side])
    train_data.append(img_to_array(img_l) / 127.5 - 1)  # scale pixels to [-1, 1] for the tanh output
filenames.clear()
random.shuffle(train_data)
train_net = np.stack(train_data)
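# An out-of-core alternative (a sketch using the ImageDataGenerator imported
# above, not what this script does): stream batches from disk instead of
# holding every image in train_net. Assumes the face folders sit under a
# common parent directory, since flow_from_directory expects subdirectories.
#   datagen = ImageDataGenerator(preprocessing_function=lambda x: x / 127.5 - 1)
#   real_batches = datagen.flow_from_directory('D:\\Datasets',
#       target_size=(side, side), batch_size=batch_size, class_mode=None)
#   image_batch = next(real_batches)  # [batch_size, side, side, 3] in [-1, 1]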
#%%
# Generator: project the latent vector to a 16x16 feature map, then upsample
# to 96x96x3 with transposed convolutions (per-layer output sizes in comments).
generator = Sequential([
    Dense(16 * 16 * 64, input_dim=latent_side),
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Reshape([16, 16, 64]),  # start of the generating blocks
    Conv2DTranspose(64, 9),  #24
    LeakyReLU(alpha=0.2),
    # Block
    Conv2DTranspose(64, 9),  #32
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(64, 3, strides=(2, 2), padding='same'),  #64
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(64, 3),  #66
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(64, 3),  #68
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(32, 3),  #70
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(32, 3),  #72
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(32, 3),  #74
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(32, 3),  #76
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(32, 3),  #78
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(32, 3),  #80
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(16, 3),  #82
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(16, 3),  #84
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(16, 3),  #86
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(16, 3),  #88
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2DTranspose(3, 9, activation='tanh')])  #96
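# Sanity check: the transposed-convolution stack should upsample 16x16 all
# the way to side x side x channels (96x96x3 here); fails early if a kernel
# size in the stack is off.
assert generator.output_shape == (None, side, side, channels)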
discriminator = Sequential([
    Conv2D(32, 3, input_shape=[side, side, channels]),  #94
    LeakyReLU(alpha=0.2),
    Conv2D(32, 3, strides=(2, 2)),  #46
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2D(32, 3),  #44
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2D(64, 3, strides=(2, 2)),  #21
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2D(64, 3),  #19
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2D(64, 3),  #17
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2D(128, 3),  #15
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2D(128, 3, strides=(2, 2)),  #7
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2D(256, 3),  #5
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2D(256, 3),  #3
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Conv2D(512, 3),  #1
    BatchNormalization(),
    LeakyReLU(alpha=0.2),
    Flatten(),
    Dense(1, activation='sigmoid')])
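# Sanity check: the discriminator reduces a 96x96x3 image to a single
# real/fake probability.
assert discriminator.output_shape == (None, 1)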
#generator = load_model("generator.hdf5")
#discriminator = load_model("discriminator.hdf5")
g_opt = Adam(lr=lr, beta_1=0.5)
generator.compile(optimizer=g_opt, loss="binary_crossentropy")
generator.summary()
d_opt = Adam(lr=lr, beta_1=0.5)
discriminator.compile(optimizer=d_opt, loss="binary_crossentropy")
discriminator.summary()
gan_input = Input(shape=[latent_side])
x = generator(gan_input)
gan_output = discriminator(x)
gan = Model(inputs=gan_input, outputs=gan_output)
gan_opt = Adam(lr=lr, beta_1=0.5)
gan.compile(optimizer=gan_opt, loss="binary_crossentropy")
gan.summary()
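# Note: the training loop below re-compiles all three models every batch to
# apply the trainable flag. A common alternative (a sketch, not what this
# script does) is to freeze the discriminator once before compiling the
# combined model, which avoids the per-batch recompiles and preserves
# optimizer state across batches:
#   discriminator.trainable = False
#   gan.compile(optimizer=gan_opt, loss="binary_crossentropy")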
#%%
disc_losses = []
gan_losses = []
epochs = []
# 4x16 grid: a 4x4 block of generated samples on the left, loss curves on the right.
fig = plt.figure(constrained_layout=True, figsize=(16, 10))
gen_imgs_grid = fig.add_gridspec(4, 16)
fig_imgs_ax = []
for i in range(4):
    for j in range(4):
        fig_imgs_ax.append(fig.add_subplot(gen_imgs_grid[i, j]))
loss_ax = fig.add_subplot(gen_imgs_grid[:, 4:])
d_ax, = loss_ax.semilogy(disc_losses, label='d_loss')
g_ax, = loss_ax.semilogy(gan_losses, label='g_loss')
loss_ax.legend()
def plot_generated_images(epoch, generator):
    predictions = generator.predict(seed)
    for i in range(predictions.shape[0]):
        fig_imgs_ax[i].imshow(predictions[i, :, :, :] * 0.5 + 0.5)  # map [-1, 1] back to [0, 1]
        fig_imgs_ax[i].axis('off')
# free the Python-list copy of the data; train_net holds the stacked array
train_data.clear()
for e in range(0, 20000):
    print('Epoch %d' % e)
    # Train the discriminator on a mix of real and generated images, using
    # noisy/smoothed labels (real ~0.925, fake ~0.075).
    noise = np.random.normal(0, 1, [batch_size, latent_side])
    generated_images = generator.predict(noise)
    image_batch = train_net[np.random.randint(low=0, high=train_net.shape[0], size=batch_size)]
    X = np.concatenate([image_batch, generated_images])
    y_dis = np.zeros(2 * batch_size)
    y_dis[:batch_size] = np.random.normal(loc=0.925, scale=0.015, size=batch_size)
    y_dis[batch_size:2*batch_size] = np.random.normal(loc=0.075, scale=0.015, size=batch_size)
    discriminator.trainable = True
    discriminator.compile(optimizer=d_opt, loss="binary_crossentropy")
    generator.compile(optimizer=g_opt, loss="binary_crossentropy")
    gan.compile(optimizer=gan_opt, loss="binary_crossentropy")
    d_loss = discriminator.train_on_batch(X, y_dis)
    noise = np.random.normal(0, 1, [batch_size, latent_side])
    y_gen = np.ones(batch_size)
    # While training the chained GAN model, the discriminator's weights must
    # stay fixed; that is enforced by clearing the trainable flag and
    # recompiling, so only the generator is updated through the combined model.
    discriminator.trainable = False
    discriminator.compile(optimizer=d_opt, loss="binary_crossentropy")
    generator.compile(optimizer=g_opt, loss="binary_crossentropy")
    gan.compile(optimizer=gan_opt, loss="binary_crossentropy")
    g_loss = gan.train_on_batch(noise, y_gen)
    gan_losses.append(g_loss)
    disc_losses.append(d_loss)
    epochs.append(e)
    if e % 25 == 0:
        display.clear_output(wait=True)
        plot_generated_images(e, generator)
        d_ax.set_xdata(epochs)
        d_ax.set_ydata(disc_losses)
        g_ax.set_xdata(epochs)
        g_ax.set_ydata(gan_losses)
        fig.canvas.draw()
        fig.canvas.flush_events()
        plt.show()
        discriminator.save("discriminator.hdf5")
        generator.save("generator.hdf5")
        gan.save("gan.hdf5")
        fig.savefig('image_at_epoch_{:04d}.png'.format(e), dpi=300)
#%%
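# Sketch: sample a fresh grid of faces from the generator saved by the loop
# above ("generator.hdf5"); reuses plot_generated_images from this script.
#   gen = load_model("generator.hdf5")
#   plot_generated_images(0, gen)
#   plt.show()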