TF v1 vs v2: the same MNIST digit classifier written twice, first against the TensorFlow 1.x graph/session API and then against the TensorFlow 2.x Keras API.
#!/usr/bin/python
""" Neural Network.
A 2-hidden-layer fully connected neural network (a.k.a. multilayer perceptron)
implementation with TensorFlow. This example uses the MNIST database
of handwritten digits (http://yann.lecun.com/exdb/mnist/).
Links:
    [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import print_function

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.1
num_steps = 500
batch_size = 128
display_step = 100

# Network Parameters
n_hidden_1 = 256  # 1st layer number of neurons
n_hidden_2 = 256  # 2nd layer number of neurons
num_input = 784   # MNIST data input (img shape: 28*28)
num_classes = 10  # MNIST total classes (0-9 digits)

# tf Graph input
X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder("float", [None, num_classes])

# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([num_classes]))
}

# Create model
def neural_net(x):
    # Hidden fully connected layer with 256 neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    # Hidden fully connected layer with 256 neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    # Output fully connected layer with a neuron for each class
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer

# Construct model
logits = neural_net(X)
prediction = tf.nn.softmax(logits)

# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize the variables (i.e. assign their default values)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
    print("Training now ...")
    for step in range(1, num_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy],
                                 feed_dict={X: batch_x, Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
    print("Training finished! Testing now ...")
    print("Accuracy (with no injections):",
          accuracy.eval({X: mnist.test.images[:256], Y: mnist.test.labels[:256]}))
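The file above runs only on a TensorFlow 1.x install: tensorflow.examples.tutorials.mnist was removed in 2.x, and placeholders and sessions are gone from the default API. A minimal sketch, assuming a TF 2.x install, of the usual stopgap for keeping v1-style graph code alive through the compat shim (the MNIST loader itself would still need replacing, e.g. with tf.keras.datasets.mnist):

# Sketch (not from the original gist): v1-style graph code on a TF 2.x install.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restore graph mode, sessions, and placeholders

x = tf.placeholder(tf.float32, [None, 784])
w = tf.Variable(tf.random_normal([784, 10]))
y = tf.matmul(x, w)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y, feed_dict={x: [[0.0] * 784]}).shape)  # -> (1, 10)

The second file below is the TF 2.x Keras equivalent of the same classifier.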
import tensorflow as tf
from tensorflow.keras import datasets, layers, models

# Load MNIST and scale pixel values to [0, 1]
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0

# Small fully connected classifier; the final Dense layer emits raw logits
model = models.Sequential([
    layers.Flatten(input_shape=(28, 28)),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.2),
    layers.Dense(10)
])

# from_logits=True because the model outputs unnormalized scores
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

model.fit(train_images, train_labels, epochs=5,
          validation_data=(test_images, test_labels))

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("Accuracy with the original dataset:", test_acc)