# Build the GAN graph: a placeholder for real MNIST images, a placeholder for the
# 100-dimensional noise vector z, the generator, and two discriminator calls that
# share weights (reuse=True for the fake batch).
tf.reset_default_graph()
real_images = tf.placeholder(tf.float32, shape=[None, 784])
z = tf.placeholder(tf.float32, shape=[None, 100])
G = generator(z)
D_output_real, D_logits_real = discriminator(real_images)
D_output_fake, D_logits_fake = discriminator(G, reuse=True)
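These logits are what the GAN losses are built from, a step the listing does not show. Below is a minimal sketch of the usual approach: sigmoid cross-entropy losses with label smoothing on the real images and a separate Adam optimizer per network. The 0.9 smoothing factor and the 0.001 learning rate are assumptions, not taken from the original code.

def loss_func(logits, labels):
    # Sigmoid cross-entropy between discriminator logits and 0/1 labels.
    return tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))

# Discriminator: real images should score 1 (smoothed to 0.9), fakes should score 0.
D_real_loss = loss_func(D_logits_real, tf.ones_like(D_logits_real) * 0.9)
D_fake_loss = loss_func(D_logits_fake, tf.zeros_like(D_logits_fake))
D_loss = D_real_loss + D_fake_loss

# Generator: tries to make the discriminator output 1 on its fake images.
G_loss = loss_func(D_logits_fake, tf.ones_like(D_logits_fake))

# Train each network only on its own variables ('dis' vs. 'gen' scope).
tvars = tf.trainable_variables()
d_vars = [var for var in tvars if 'dis' in var.name]
g_vars = [var for var in tvars if 'gen' in var.name]
D_trainer = tf.train.AdamOptimizer(0.001).minimize(D_loss, var_list=d_vars)
G_trainer = tf.train.AdamOptimizer(0.001).minimize(G_loss, var_list=g_vars)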
# Generator: maps a 100-dim noise vector to a 784-pixel image in [-1, 1].
def generator(z, reuse=None):
    with tf.variable_scope('gen', reuse=reuse):
        hidden1 = tf.layers.dense(inputs=z, units=128, activation=tf.nn.leaky_relu)
        hidden2 = tf.layers.dense(inputs=hidden1, units=128, activation=tf.nn.leaky_relu)
        output = tf.layers.dense(inputs=hidden2, units=784, activation=tf.nn.tanh)
        return output

# Discriminator: the body was truncated in the source; it is reconstructed here to
# mirror the generator and return the (output, logits) pair the calling code expects.
def discriminator(X, reuse=None):
    with tf.variable_scope('dis', reuse=reuse):
        hidden1 = tf.layers.dense(inputs=X, units=128, activation=tf.nn.leaky_relu)
        hidden2 = tf.layers.dense(inputs=hidden1, units=128, activation=tf.nn.leaky_relu)
        logits = tf.layers.dense(hidden2, units=1)
        return tf.sigmoid(logits), logits
# Imports and data: MNIST is loaded with the TF 1.x tutorial helper.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data")
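A quick sanity check (not part of the original listing) confirms the shapes the later code depends on: the loader yields 55,000 flattened 28x28 training images with pixel values scaled to [0, 1].

print(mnist.train.num_examples)   # 55000
print(mnist.train.images.shape)   # (55000, 784)
print(mnist.test.images.shape)    # (10000, 784)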
# Evaluate the autoencoder on a few test images (requires an open session with the
# trained variables, since .eval() runs against the default session).
results = output_layer.eval(feed_dict={X: mnist.test.images[:num_test_images]})

# Compare the original images (top row) with their reconstructions (bottom row).
f, a = plt.subplots(2, 10, figsize=(20, 4))
for i in range(num_test_images):
    a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
    a[1][i].imshow(np.reshape(results[i], (28, 28)))
plt.show()
# Training loop for the autoencoder. The inner-loop body was cut off in the source;
# it is filled in here with the usual pattern: fetch a batch, run one train step.
num_epoch = 5
batch_size = 150
num_test_images = 10

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epoch):
        num_batches = mnist.train.num_examples // batch_size
        for iteration in range(num_batches):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(train, feed_dict={X: X_batch})
        train_loss = loss.eval(feed_dict={X: X_batch})
        print("epoch {} loss {}".format(epoch, train_loss))
# Reconstruction loss (mean squared error between output and input) and Adam optimizer.
loss = tf.reduce_mean(tf.square(output_layer - X))
optimizer = tf.train.AdamOptimizer(lr)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
# Input placeholder, He-style weight initialisation, and per-layer parameters.
X = tf.placeholder(tf.float32, shape=[None, num_inputs])
initializer = tf.variance_scaling_initializer()
w1 = tf.Variable(initializer([num_inputs, num_hid1]), dtype=tf.float32)
w2 = tf.Variable(initializer([num_hid1, num_hid2]), dtype=tf.float32)
w3 = tf.Variable(initializer([num_hid2, num_hid3]), dtype=tf.float32)
w4 = tf.Variable(initializer([num_hid3, num_output]), dtype=tf.float32)
b1 = tf.Variable(tf.zeros(num_hid1))
b2 = tf.Variable(tf.zeros(num_hid2))
# The listing was cut off here; the remaining biases and layers are reconstructed so
# that the output_layer used in the loss is defined.
b3 = tf.Variable(tf.zeros(num_hid3))
b4 = tf.Variable(tf.zeros(num_output))
hid_layer1 = actf(tf.matmul(X, w1) + b1)
hid_layer2 = actf(tf.matmul(hid_layer1, w2) + b2)
hid_layer3 = actf(tf.matmul(hid_layer2, w3) + b3)
output_layer = actf(tf.matmul(hid_layer3, w4) + b4)
# Stacked autoencoder dimensions: 784 -> 392 -> 196 -> 392 -> 784, ReLU throughout.
num_inputs = 784   # 28x28 pixels
num_hid1 = 392
num_hid2 = 196
num_hid3 = num_hid1
num_output = num_inputs
lr = 0.01          # learning rate
actf = tf.nn.relu  # activation function
# Imports for the autoencoder example; MNIST is loaded with one-hot labels here.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.layers import fully_connected

mnist = input_data.read_data_sets("/MNIST_data/", one_hot=True)
# Plot the training instance, the shifted targets, and the model's predictions.
plt.title("TESTING THE MODEL")
# TRAINING INSTANCE
plt.plot(train_inst[:-1], np.sin(train_inst[:-1]), "bo", markersize=15, alpha=0.5, label="TRAINING INST")
# TARGET TO PREDICT (the same sine wave, one time step ahead)
plt.plot(train_inst[1:], np.sin(train_inst[1:]), "ko", markersize=8, label="TARGET")
# MODEL PREDICTION
plt.plot(train_inst[1:], y_pred[0, :, 0], "r.", markersize=7, label="PREDICTIONS")
plt.legend()
plt.show()
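The y_pred array plotted above is produced elsewhere. As a rough sketch of where it comes from, assuming an RNN graph with an input placeholder X of shape [None, num_time_steps, 1], an output tensor outputs, a tf.train.Saver checkpoint, and a train_inst array of num_time_steps + 1 time points (all of these names and the checkpoint path are hypothetical here, not taken from the listing):

with tf.Session() as sess:
    saver.restore(sess, "./rnn_time_series_model")   # hypothetical checkpoint path
    # Feed the sine values at all but the last time point, shaped [1, num_time_steps, 1] ...
    X_new = np.sin(train_inst[:-1]).reshape(-1, num_time_steps, 1)
    # ... and read back the model's one-step-ahead predictions, same shape.
    y_pred = sess.run(outputs, feed_dict={X: X_new})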