Created
June 13, 2019 01:52
-
-
Save EJSohn/4d2787ee4a9ee9a196ae2a37c0acee51 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| from tensorflow.examples.tutorials.mnist import input_data | |
| import tensorflow as tf | |
| import numpy as np | |
| from helper import batches # Helper function created in Mini-batching section | |
def print_epoch_stats(epoch_i, sess, last_features, last_labels):
    """Report the training cost and validation accuracy for one epoch.

    Relies on the module-level graph tensors ``cost``, ``accuracy``,
    ``features`` and ``labels``, plus the validation arrays
    ``valid_features`` / ``valid_labels`` defined at script level.

    Args:
        epoch_i: zero-based epoch index, for display only.
        sess: the active ``tf.Session``.
        last_features: features of the final batch of the epoch.
        last_labels: labels of the final batch of the epoch.
    """
    # Cost is evaluated on the last training batch; accuracy on the
    # full validation split.
    train_feed = {features: last_features, labels: last_labels}
    valid_feed = {features: valid_features, labels: valid_labels}
    epoch_cost = sess.run(cost, feed_dict=train_feed)
    epoch_valid_acc = sess.run(accuracy, feed_dict=valid_feed)
    print('Epoch: {:<4} - Cost: {:<8.3} Valid Accuracy: {:<5.3}'.format(
        epoch_i, epoch_cost, epoch_valid_acc))
# Network size: MNIST images are 28x28 grayscale, flattened to 784 inputs,
# classified into the 10 digit classes.
n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)

# Import MNIST data (downloaded/cached under the given directory).
mnist = input_data.read_data_sets('/datasets/ud730/mnist', one_hot=True)

# The features are already scaled and the data is shuffled
train_features = mnist.train.images
valid_features = mnist.validation.images
test_features = mnist.test.images
train_labels = mnist.train.labels.astype(np.float32)
valid_labels = mnist.validation.labels.astype(np.float32)
test_labels = mnist.test.labels.astype(np.float32)

# Features and Labels.  Batch dimension is left as None so any batch
# size can be fed at run time.
features = tf.placeholder(tf.float32, [None, n_input])
labels = tf.placeholder(tf.float32, [None, n_classes])

# Weights & bias, randomly initialized (single linear layer).
weights = tf.Variable(tf.random_normal([n_input, n_classes]))
bias = tf.Variable(tf.random_normal([n_classes]))

# Logits - xW + b  (softmax is applied inside the loss op below)
logits = tf.add(tf.matmul(features, weights), bias)

# Define loss and optimizer.  The learning rate is a placeholder so it
# could be varied per training step if desired.
learning_rate = tf.placeholder(tf.float32)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

# Calculate accuracy: fraction of samples whose argmax prediction
# matches the argmax of the one-hot label.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()

# Training hyperparameters.
batch_size = 128
epochs = 10
learn_rate = 0.001

# NOTE(review): batches() is assumed to return a list (not a one-shot
# generator), since train_batches is re-iterated once per epoch below --
# confirm against helper.py.
train_batches = batches(batch_size, train_features, train_labels)

with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch_i in range(epochs):

        # Loop over all batches
        for batch_features, batch_labels in train_batches:
            train_feed_dict = {
                features: batch_features,
                labels: batch_labels,
                learning_rate: learn_rate}
            sess.run(optimizer, feed_dict=train_feed_dict)

        # Print cost and validation accuracy of an epoch.  The loop
        # variables batch_features/batch_labels still hold the final
        # batch of this epoch.
        print_epoch_stats(epoch_i, sess, batch_features, batch_labels)

    # Calculate accuracy for test dataset
    test_accuracy = sess.run(
        accuracy,
        feed_dict={features: test_features, labels: test_labels})

    print('Test Accuracy: {}'.format(test_accuracy))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment