Skip to content

Instantly share code, notes, and snippets.

@sub-mod
Created January 22, 2018 15:28
Show Gist options
  • Select an option

  • Save sub-mod/793f76d0e41e9d6035e01b7d3e5e1a2f to your computer and use it in GitHub Desktop.

Select an option

Save sub-mod/793f76d0e41e9d6035e01b7d3e5e1a2f to your computer and use it in GitHub Desktop.
tf-cnn.py
import os
import shutil
import sys

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
from tensorflow.python.util import compat
# ---------------------------------------------------------------------------
# Hyper-parameters and input setup.
# ---------------------------------------------------------------------------
learning_rate = 0.001
num_steps = 2000        # training iterations
batch_size = 128        # NOTE(review): the training loop below uses its own literal batch size
num_classes = 10        # MNIST digits 0-9
_dropout = 0.4          # NOTE(review): unused — the drop rate is fed via a placeholder instead
tf.reset_default_graph()
#sess = tf.InteractiveSession()
# Remove any previous export so the SavedModel builder can write a fresh one.
# (The original used the IPython shell escape `!rm -fr /workspace/cnn/2`,
# which is a SyntaxError in a plain .py file.)
shutil.rmtree("/workspace/cnn/2", ignore_errors=True)
# Labels must be integer class ids, NOT one-hot vectors: y_ below is a rank-1
# int64 placeholder and the loss uses sparse_softmax_cross_entropy_with_logits.
# (The original passed one_hot=True, which yields [N, 10] float labels and
# breaks both the y_ feed and the accuracy comparison.)
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
export_dir = "/workspace/cnn/2"
# serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
# feature_configs = {'x': tf.FixedLenFeature(shape=[784],dtype=tf.float32),}
# tf_example = tf.parse_example(serialized_tf_example,feature_configs)
# x = tf.identity(tf_example['x'], name='x')
x = tf.placeholder(tf.float32, [None, 784])   # flattened 28*28 grayscale images
#y_ = tf.placeholder(tf.float32, shape=[None, 10])
y_ = tf.placeholder(tf.int64, [None])         # integer class labels
dropout = tf.placeholder(tf.float32)          # drop *rate* (fraction of units dropped)
is_training = True                            # Python constant: the dropout layer is always live
# Reshape flat feature vectors to NHWC image tensors: [batch, 28, 28, 1].
inputX = tf.reshape(x, shape=[-1, 28, 28, 1])
# ---------------------------------------------------------------------------
# Model: two conv/pool stages, a dense layer with dropout, a 10-way logits
# layer, the training op, and string-lookup helpers for serving.
# NOTE(review): tf.layers.conv2d defaults to padding='valid', so the shapes
# below are smaller than the padding='same' shapes the original comments
# claimed (28x28 / 14x14 / 7x7).
# ---------------------------------------------------------------------------
# conv1: 32 filters, 5x5 kernel, ReLU -> [batch, 24, 24, 32]
conv1 = tf.layers.conv2d(inputX, 32, 5, activation=tf.nn.relu)
# 2x2 max pool, stride 2 -> [batch, 12, 12, 32]
pool1 = tf.layers.max_pooling2d(conv1, 2, 2)
# conv2: 64 filters, 3x3 kernel, ReLU -> [batch, 10, 10, 64]
conv2 = tf.layers.conv2d(pool1, 64, 3, activation=tf.nn.relu)
# 2x2 max pool, stride 2 -> [batch, 5, 5, 64]
pool2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Flatten to [batch, 5*5*64] for the fully connected layer.
pool2_flat = tf.contrib.layers.flatten(pool2)
# Dense layer, 1024 units.  NOTE(review): no activation is passed, so this
# layer is linear — tf.nn.relu was probably intended; left as-is.
fc1 = tf.layers.dense(pool2_flat, 1024)
# Dropout; the drop rate is supplied via the `dropout` placeholder at run
# time (0.5 during training, 0.0 for evaluation).
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Class logits: [batch, 10].
logits = tf.layers.dense(fc1, num_classes)
# Mean softmax cross-entropy over the batch against integer labels.
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# No global_step variable has been created, so get_global_step() returns None
# and minimize() simply skips the step counter.
train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step())
# Serving helpers: all 10 class indices ranked by logit, plus their string
# names.  (`range`, not Python-2-only `xrange`, so the script runs on Py3.)
values, indices = tf.nn.top_k(logits, 10)
table = tf.contrib.lookup.index_to_string_table_from_tensor(tf.constant([str(i) for i in range(10)]))
prediction_classes = table.lookup(tf.to_int64(indices))
# Accuracy: fraction of rows whose argmax over logits equals the int label.
with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(logits, 1), y_)
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

# Train, then report test accuracy.  (The paste had lost the indentation of
# both `with` bodies and used Python-2 `print` statements; both fixed.)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(num_steps):
        batch = mnist.train.next_batch(50)
        # if i % 100 == 0:
        #     train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], dropout: 0.0})
        #     print('step %d, training accuracy %g' % (i, train_accuracy))
        train_op.run(feed_dict={x: batch[0], y_: batch[1], dropout: 0.5})
    print("==================================")
    # `dropout` feeds tf.layers.dropout's *rate* (fraction to drop), so
    # evaluation must feed 0.0.  The original fed 1.0, which drops every
    # unit and destroys test accuracy (keep_prob semantics belong to the
    # older tf.nn.dropout API, not tf.layers.dropout).
    print('test accuracy %g' % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, dropout: 0.0}))
    print("==================================")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment