Google tensorflow tutorial 1 in a file with comments and stuff
import input_data
import tensorflow as tf
# http://www.tensorflow.org/tutorials/mnist/beginners/index.html
print '\033[93m' + 'Tutorial1 from:\nhttp://www.tensorflow.org/tutorials/mnist/beginners/index.html' + '\033[0m'
print 'MNIST INPUT DATA'
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
'PLACEHOLDERS'
# Make a placeholder / value to input on run
x = tf.placeholder("float", [None, 784])
# x is a 2D float tensor; None means the first dimension can be any length.
# Each MNIST image has been flattened into a 784-dimensional vector (28 * 28 pixels)
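# --- Illustration (not part of the tutorial): why 784? ---
# A minimal numpy sketch, on the assumption that numpy is available (input_data
# already depends on it): each 28x28 pixel image is flattened into one 784-long vector.
import numpy as np
example_image = np.zeros((28, 28))      # a blank stand-in image, purely illustrative
flattened = example_image.reshape(784)  # 28 * 28 = 784 values per image
print 'flattened image shape:', flattened.shape  # -> (784,)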
'VARIABLES'
# Make some variables (weights and biases)
W = tf.Variable(tf.zeros([784, 10]))
# The shape of W determines the shape of the output.
# We want 10 categories because there are 10 digits
b = tf.Variable(tf.zeros([10]))
# bias adds to the output
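# --- Illustration (not part of the tutorial): how the shapes combine ---
# A minimal numpy sketch of the same shape arithmetic the graph performs:
# (batch, 784) dot (784, 10) gives (batch, 10), and the length-10 bias broadcasts
# across the batch, so every image ends up with one score per digit class.
fake_batch = np.zeros((5, 784))  # a pretend batch of 5 flattened images
fake_W = np.zeros((784, 10))
fake_b = np.zeros(10)
print 'evidence shape:', (np.dot(fake_batch, fake_W) + fake_b).shape  # -> (5, 10)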
'MODEL'
# softmax regression
# evidence = the sum of [(Weight * input_data) + bias]
# softmax converts the evidence set into a probability distribution
# or in other words:
# softmax(x) = normalize(exp(x))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# matmul multiplies the x and W tensors together...
# adding bias as described above
# these two values contribute to the evidence tally
# Softmax normalises the evidence and converts the set to a probability distribution
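# --- Illustration (not part of the tutorial): softmax on a small vector ---
# A minimal numpy sketch of softmax(x) = normalize(exp(x)); the numbers are made up.
evidence_example = np.array([2.0, 1.0, 0.1])
softmax_example = np.exp(evidence_example) / np.sum(np.exp(evidence_example))
print 'softmax example:', softmax_example  # ~[0.66, 0.24, 0.10], sums to 1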
'TRAINING PREP'
y_ = tf.placeholder("float", [None, 10])
# y is the predicted probability distribution
# y_ is the true distribution (the one-hot labels fed in at run time)
# Cross Entropy measures the inefficiency of the predictions made by the model (cost)
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
# tf.log computes the log of each element of y; each log is multiplied by the
# corresponding element of y_, and reduce_sum adds everything up
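# --- Illustration (not part of the tutorial): cross entropy on one example ---
# A minimal numpy sketch with made-up numbers: a confident correct prediction gives
# a low cost, while a confident wrong prediction gives a high cost.
true_label = np.array([0.0, 1.0, 0.0])  # one-hot label for "class 1"
good_guess = np.array([0.05, 0.9, 0.05])
bad_guess = np.array([0.9, 0.05, 0.05])
print 'cost of good guess:', -np.sum(true_label * np.log(good_guess))  # ~0.11
print 'cost of bad guess: ', -np.sum(true_label * np.log(bad_guess))   # ~3.0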
# backpropagation algorithm
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# minimise the cost (inefficiency) in cross_entropy, with a learning rate of 0.01
print 'RUNNING'
init = tf.initialize_all_variables()  # get the variables (W and b) ready
sess = tf.Session()  # make a session
sess.run(init)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)  # get a batch of 100 random data points
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    # run the previously defined train_step and feed it data that replaces the placeholder data
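# --- Illustration (not part of the tutorial): what one batch looks like ---
# A minimal sketch using the mnist object defined above: each batch is 100 flattened
# images plus their 100 one-hot labels.
peek_xs, peek_ys = mnist.train.next_batch(100)
print 'batch images shape:', peek_xs.shape  # -> (100, 784)
print 'batch labels shape:', peek_ys.shape  # -> (100, 10)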
'EVALUATE THE MODEL'
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
# argmax gives the index of the highest entry in a tensor along some axis
# does the predicted digit match the actual label?
# the output of tf.equal is an array of booleans
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# the bool array is cast to a float array; the mean across all outputs gives the accuracy
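# --- Illustration (not part of the tutorial): the accuracy arithmetic in numpy ---
# A minimal sketch with made-up predictions: argmax picks each row's winning class,
# equality gives booleans, and the mean of those booleans (as floats) is the accuracy.
guessed = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
actual = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
matches = np.argmax(guessed, 1) == np.argmax(actual, 1)  # [True, False, True]
print 'illustrative accuracy:', np.mean(matches.astype('float'))  # -> 0.666...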
print '\033[93mACCURACY ON TEST DATA:'
print sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
# again, to run the session we feed in data for the placeholders: this time the test images and labels
print '\033[0m'