[mnist] tensorboard demo
# If TensorFlow fails with a CUDA library loading error, try setting:
#   export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64"
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def init_weights(shape, name):
    return tf.Variable(tf.random_normal(shape, stddev=0.01), name=name)
# Step 1 - Name the layers so they show up in the graph section of TensorBoard
def model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden):
    # Each tf.name_scope groups the ops below it into one node in the graph view
    with tf.name_scope("layer_1"):
        X = tf.nn.dropout(X, p_keep_input)
        h = tf.nn.relu(tf.matmul(X, w_h))
    with tf.name_scope("layer_2"):
        h = tf.nn.dropout(h, p_keep_hidden)
        h2 = tf.nn.relu(tf.matmul(h, w_h2))
    with tf.name_scope("layer_3"):
        h2 = tf.nn.dropout(h2, p_keep_hidden)
        return tf.matmul(h2, w_o)
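# Network shape: 784-d input -> 625 hidden -> 625 hidden -> 10 output logits,
# with dropout applied to the input and to both hidden layers.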
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
X = tf.placeholder("float", [None, 784], name="X")
Y = tf.placeholder("float", [None, 10], name="Y")
w_h = init_weights([784, 625], "w_h")
w_h2 = init_weights([625, 625], "w_h2")
w_o = init_weights([625, 10], "w_o")
# Step 2 - Add histogram summaries for the weights; these appear in the
# histograms section of TensorBoard as explorable distribution charts
tf.histogram_summary("w_h_summ", w_h)
tf.histogram_summary("w_h2_summ", w_h2)
tf.histogram_summary("w_o_summ", w_o)
p_keep_input = tf.placeholder("float", name="p_keep_input")
p_keep_hidden = tf.placeholder("float", name="p_keep_hidden")
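# The keep-probabilities are fed at run time: 0.8 (input) / 0.5 (hidden)
# while training, and 1.0 / 1.0 at evaluation so the full network is used.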
py_x = model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden)
# Step 3 - Wrap the cost function in tf.name_scope, give it a specific name,
# and log it to the events section of TensorBoard
with tf.name_scope("cost_function"):
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
    # RMSProp with learning rate 0.001 and decay 0.9
    train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
    # Add the cost to the scalar summaries that TensorBoard plots over time
    tf.scalar_summary("cost_function", cost)
# Step 4 - Wrap the accuracy calculation in tf.name_scope, give it a specific
# name, and log it to the events section of TensorBoard
with tf.name_scope("accuracy"):
    correct_pred = tf.equal(tf.argmax(Y, 1), tf.argmax(py_x, 1))  # count correct predictions
    acc_op = tf.reduce_mean(tf.cast(correct_pred, "float"))  # cast booleans to floats to average
    # Add the accuracy to the scalar summaries that TensorBoard plots over time
    tf.scalar_summary("accuracy", acc_op)
# Step 5 - Define where the TensorBoard logs get stored and write them from
# the active session
with tf.Session() as sess:
    # Create a log writer; view the results by running
    # 'tensorboard --logdir=logs' in a terminal and browsing to localhost:6006
    writer = tf.train.SummaryWriter("logs", sess.graph)
    # Merge all the summary ops defined above into a single op
    merged = tf.merge_all_summaries()
    tf.initialize_all_variables().run()
    for i in range(100):
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX) + 1, 128)):
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_input: 0.8, p_keep_hidden: 0.5})
        # Run 'merged' alongside the accuracy op to collect the summary data
        summary, acc = sess.run([merged, acc_op], feed_dict={X: teX, Y: teY,
                                                             p_keep_input: 1.0, p_keep_hidden: 1.0})
        # Write the summary data for this epoch to the TensorBoard log
        writer.add_summary(summary, i)
        print(i, acc)
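# Note: this gist targets the pre-1.0 summary API (tf.histogram_summary,
# tf.scalar_summary, etc.), which TensorFlow 1.x renamed. A minimal sketch of
# the equivalent calls, assuming you are running TensorFlow 1.x:
#
#   tf.summary.histogram("w_h_summ", w_h)                # was tf.histogram_summary
#   tf.summary.scalar("cost_function", cost)             # was tf.scalar_summary
#   merged = tf.summary.merge_all()                      # was tf.merge_all_summaries
#   writer = tf.summary.FileWriter("logs", sess.graph)   # was tf.train.SummaryWriter
#   tf.global_variables_initializer().run()              # was tf.initialize_all_variables
#   cost = tf.reduce_mean(                               # 1.x requires keyword args here
#       tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))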