Skip to content

Instantly share code, notes, and snippets.

View MLWhiz's full-sized avatar
🤓
Focusing

Rahul Agarwal MLWhiz

🤓
Focusing
View GitHub Profile
# Preview the first five dataset images together with their array shapes,
# laid out on a shared matplotlib figure with the axis ticks hidden.
filenames = glob.glob('animeface-character-dataset/*/*.pn*')
plt.figure(figsize=(10, 8))
for slot, path in enumerate(filenames[:5], start=1):
    img = plt.imread(path, 0)
    plt.subplot(4, 5, slot)
    plt.imshow(img)
    # Show each image's (height, width, channels) as the subplot title.
    plt.title(img.shape)
    plt.xticks([])
    plt.yticks([])
plt.tight_layout()
# A function to normalize image pixels.
def norm_img(img):
    '''Scale pixel values from the [0, 255] range into [-1, 1].

    Input:
        img : original image as a numpy array (0-255 pixel values assumed).
    Output:
        normalized image as a numpy array with values in [-1, 1].
    '''
    return (img / 127.5) - 1
def denorm_img(img):
def gen_noise(batch_size, noise_shape):
    '''Draw a batch of noise vectors from the standard normal distribution.

    Input:
        batch_size : number of samples in the batch.
        noise_shape: shape tuple of a single noise vector (the latent
                     dimension is normally kept as 100).
    Output:
        numpy array of shape (batch_size,) + noise_shape sampled from N(0, 1).
    '''
    full_shape = (batch_size,) + noise_shape
    return np.random.normal(loc=0, scale=1, size=full_shape)
def get_gen_normal(noise_shape):
    ''' This function takes as input shape of the noise vector and creates the Keras generator architecture.
    '''
    # NOTE(review): this excerpt is truncated — only the first
    # transposed-convolution stage is visible; the remaining upsampling
    # stages, output activation, and the Model(...) return are missing
    # from this chunk.
    kernel_init = 'glorot_uniform'
    gen_input = Input(shape = noise_shape)
    # Transpose 2D conv layer 1.
    # 512 filters, 4x4 kernel, stride 1, "valid" padding — assumes the
    # noise input is spatially 1x1 so this yields a 4x4 map; TODO confirm
    # noise_shape against the caller.
    generator = Conv2DTranspose(filters = 512, kernel_size = (4,4), strides = (1,1), padding = "valid", data_format = "channels_last", kernel_initializer = kernel_init)(gen_input)
    # Batch normalization with momentum 0.5, then LeakyReLU(alpha=0.2).
    generator = BatchNormalization(momentum = 0.5)(generator)
    generator = LeakyReLU(0.2)(generator)
def get_disc_normal(image_shape=(64,64,3)):
    '''Build the Keras discriminator architecture for images of `image_shape`.'''
    # NOTE(review): this excerpt is truncated — later conv stages, the
    # flatten/dense output head, and the Model(...) return are not
    # visible in this chunk.
    # Dropout probability is defined here; where it is applied lies
    # outside the visible lines.
    dropout_prob = 0.4
    kernel_init = 'glorot_uniform'
    dis_input = Input(shape = image_shape)
    # Conv layer 1: 64 filters, 4x4 kernel, stride 2 ("same" padding)
    # followed by LeakyReLU(alpha=0.2).
    discriminator = Conv2D(filters = 64, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(dis_input)
    discriminator = LeakyReLU(0.2)(discriminator)
    # Conv layer 2: 128 filters, 4x4 kernel, stride 2 ("same" padding).
    discriminator = Conv2D(filters = 128, kernel_size = (4,4), strides = (2,2), padding = "same", data_format = "channels_last", kernel_initializer = kernel_init)(discriminator)
# Use a fixed noise vector to see how the GAN Images transition through time on a fixed noise.
# NOTE(review): `noise_shape` must be defined earlier in the full script —
# it is not visible in this excerpt.
fixed_noise = gen_noise(16,noise_shape)
# To keep Track of losses
# Per-step averages for the discriminator's fake-batch loss, real-batch
# loss, and the combined GAN (generator) loss.
avg_disc_fake_loss = []
avg_disc_real_loss = []
avg_GAN_loss = []
for step in range(num_steps):
# Generating GIF from PNGs: assemble the snapshots saved every 100 steps
# into a single animated GIF.
import imageio
# create a list of PNGs (zero-padded step number + "_image.png" suffix)
generated_images = [img_save_dir + str(x).zfill(4) + "_image.png"
                    for x in range(0, num_steps, 100)]
# Load every frame, then write them out as one animation.
images = [imageio.imread(filename) for filename in generated_images]
imageio.mimsave(img_save_dir + 'movie.gif', images)
from IPython.display import Image
with open(img_save_dir+'movie.gif','rb') as f:
# create a list of 20 PNGs to show
# NOTE(review): depends on `img_save_dir` and `num_steps` defined earlier
# in the full script (not visible in this excerpt).
generated_images = [img_save_dir+str(x).zfill(4)+"fixed_image.png" for x in range(0,num_steps,1500)]
print("Displaying generated images")
# You might need to change grid size and figure size here according to num images.
plt.figure(figsize=(16,20))
# 5x4 grid with no spacing between cells.
gs1 = gridspec.GridSpec(5, 4)
gs1.update(wspace=0, hspace=0)
for i,image in enumerate(generated_images):
    # NOTE(review): truncated excerpt — the loop variable `image` is never
    # used in the visible lines; the statements that read/show it
    # (presumably plt.imread/imshow) are missing from this chunk.
    ax1 = plt.subplot(gs1[i])
    ax1.set_aspect('equal')
# AIM: To Decrypt a text using MCMC approach. i.e. find decryption key which we will call cipher from now on.
import string
import math
import random
# This function takes as input a decryption key and creates a dict where each letter in the
# decryption key maps to an alphabet letter. For example, if the decryption key is "DGHJKL...."
# this function will create a dict like {D:A, G:B, H:C, ...}
def create_cipher_dict(cipher):
    # NOTE(review): truncated excerpt — the loop pairing each cipher
    # letter with the uppercase alphabet, and the return statement, are
    # missing from this chunk.
    cipher_dict = {}
    alphabet_list = list(string.ascii_uppercase)
# This function takes as input a path to a long text and creates scoring_params dict which contains the
# number of time each pair of alphabet appears together
# Ex. {'AB':234,'TH':2343,'CD':23 ..}
def create_scoring_params_dict(longtext_path):
scoring_params = {}
alphabet_list = list(string.ascii_uppercase)
with open(longtext_path) as fp:
for line in fp:
data = list(line.strip())
for i in range(len(data)-1):