input = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name="input") #Grayscale 28x28 images
labels = tf.placeholder(tf.float32, shape=(None, 10), name="labels") #One-hot labels for the 10 digit classes
...
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels), name="cost") #Mean cross-entropy over the batch
...
predictions = tf.nn.softmax(logits, name="predictions") #Convert logits to class probabilities
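#The named predictions tensor makes evaluation straightforward; a minimal sketch of an
#accuracy op (this op is an assumption, not shown in the original snippets):
correct = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1)) #True where the predicted class matches the label
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy") #Fraction of correct predictions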
with tf.Session() as session:
    #Restore Model
    saver = tf.train.Saver() #Create a saver (object to save/restore sessions)
    saver.restore(session, "/tmp/vggnet/vgg_net.ckpt") #Restore the session from a previously saved checkpoint
    #Now we test our restored model exactly as before
    batch_size = 100
    num_test_batches = int(len(test_images) / batch_size)
    total_accuracy = 0
    total_cost = 0
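    #The evaluation loop itself is not shown above; a sketch of one way to finish it, assuming
    #the cost op defined earlier and the accuracy op sketched after the predictions snippet:
    for step in range(num_test_batches):
        offset = step * batch_size
        batch_images = test_images[offset:(offset + batch_size), :]
        batch_labels = test_labels[offset:(offset + batch_size), :]
        batch_accuracy, batch_cost = session.run([accuracy, cost], feed_dict={input: batch_images, labels: batch_labels})
        total_accuracy += batch_accuracy
        total_cost += batch_cost
    print("Test accuracy: ", total_accuracy / num_test_batches) #Average accuracy across test batches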
saver = tf.train.Saver() #Create saver
num_steps = 1000
batch_size = 100
for step in range(num_steps):
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size) #Cycle through the training set one batch at a time
    batch_images = train_images[offset:(offset + batch_size), :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    feed_dict = {input: batch_images, labels: batch_labels}
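    #The snippet stops at building feed_dict; one plausible completion (assuming this loop
    #runs inside a tf.Session named session, as in the restore snippet above):
    _, batch_cost = session.run([optimizer, cost], feed_dict=feed_dict) #Run one optimization step
    if step % 100 == 0:
        print("Cost at step ", step, ": ", batch_cost)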
saver = tf.train.Saver() #Create a saver
save_path = saver.save(session, "/tmp/vggnet/vgg_net.ckpt") #Save the session to the given checkpoint path
print("Saved model at: ", save_path) #Confirm the saved location
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True) #Download/load MNIST with one-hot labels
train_images = np.reshape(mnist.train.images, (-1, 28, 28, 1)) #Reshape flat 784-vectors into 28x28x1 images
train_labels = mnist.train.labels
test_images = np.reshape(mnist.test.images, (-1, 28, 28, 1))
test_labels = mnist.test.labels
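#A quick sanity check on the reshaped arrays (the printed shapes reflect the standard
#MNIST train/test split used by this loader):
print(train_images.shape) #(55000, 28, 28, 1)
print(test_images.shape) #(10000, 28, 28, 1)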
learning_rate = 0.001
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) #Adam: gradient descent with adaptive per-parameter step sizes
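#minimize(cost) above is shorthand for two separate calls; the equivalent expansion in the
#same TF1 API looks like this (shown for illustration, not meant to run alongside the line above):
adam = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = adam.compute_gradients(cost) #List of (gradient, variable) pairs
optimizer = adam.apply_gradients(grads_and_vars) #Op that applies the parameter updates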
input = tf.placeholder(tf.float32, shape=(None, 28, 28, 1)) #28x28x1
padded_input = tf.image.resize_image_with_crop_or_pad(input, target_height=32, target_width=32) #Zero-pads 28x28 up to 32x32x1, which halves cleanly under repeated 2x2 pooling
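#A quick static-shape check on the padded tensor (uses only the tensors defined above):
print(padded_input.get_shape()) #(?, 32, 32, 1) -- batch dimension is unknown until fed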
velocity = 0 #No initial velocity. (Defined outside of the optimization loop)
...
momentum = 0.9
learning_rate = 0.01 #Some human-chosen learning rate
gradient_for_weight_1 = ... #Compute gradient
velocity = (momentum * velocity) - (gradient_for_weight_1 * learning_rate) #Velocity accumulates while successive gradients keep pointing the same way
weight_1 = weight_1 + velocity #Step by the velocity rather than the raw gradient
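#To make the momentum update concrete: a self-contained plain-Python sketch (not from the
#original) minimizing f(w) = w**2, whose gradient is 2*w:
w = 5.0
velocity = 0.0
momentum = 0.9
learning_rate = 0.01
for _ in range(100):
    gradient = 2 * w #Gradient of w**2 at the current w
    velocity = (momentum * velocity) - (gradient * learning_rate)
    w = w + velocity
print(w) #Close to 0, the minimum of w**2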
learning_rate = 0.01 #Some human-chosen learning rate
gradient_for_weight_1 = ... #Compute gradient
weight_1 = weight_1 + (-gradient_for_weight_1 * learning_rate) #The gradient points toward INCREASING cost, so we step in the opposite direction by negating it
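#The same toy problem with the plain update, for contrast with the momentum sketch above
#(again a self-contained illustration, not from the original):
w = 5.0
learning_rate = 0.01
for _ in range(100):
    gradient = 2 * w
    w = w + (-gradient * learning_rate)
print(w) #Also heads toward 0, but more slowly: each step uses only the current gradient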