# @yujuwon, created May 11, 2017
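# Trains a small convolutional network on CIFAR-10 (python version of the
# dataset): each image is converted to grayscale, center-cropped to 24x24,
# and per-image standardized, then run through two convolution blocks (with
# max pooling and local response normalization) and two fully connected layers.
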
import numpy as np
import pickle
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
def unpickle(file):
    # CIFAR-10 batches are pickled dicts; encoding='latin1' keeps the string
    # keys ('data', 'labels', ...) readable under Python 3.
    with open(file, 'rb') as fo:
        return pickle.load(fo, encoding='latin1')

def clean(data):
    # Reshape flat rows into (N, 3, 32, 32), average the channels down to
    # grayscale, and center-crop each image to 24x24.
    imgs = data.reshape(data.shape[0], 3, 32, 32)
    grayscale_imgs = imgs.mean(1)
    cropped_imgs = grayscale_imgs[:, 4:28, 4:28]
    img_data = cropped_imgs.reshape(data.shape[0], -1)
    img_size = np.shape(img_data)[1]
    # Per-image standardization: subtract each image's mean and divide by its
    # standard deviation, floored at 1/sqrt(img_size) to avoid dividing by ~0.
    means = np.mean(img_data, axis=1)
    meansT = means.reshape(len(means), 1)
    stds = np.std(img_data, axis=1)
    stdsT = stds.reshape(len(stds), 1)
    adj_stds = np.maximum(stdsT, 1.0 / np.sqrt(img_size))
    normalized = (img_data - meansT) / adj_stds
    return normalized
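
# Illustrative sanity check (an addition, not part of the original gist): a
# flat CIFAR-10 row of 3*32*32 = 3072 values should come back cropped and
# standardized as a 24*24 = 576-dimensional vector.
_dummy = np.random.randint(0, 256, size=(2, 3 * 32 * 32)).astype(np.float64)
assert clean(_dummy).shape == (2, 24 * 24)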

def read_data(directory):
    # Class names live in batches.meta; the five training batches each hold
    # 10,000 rows of flat uint8 pixel data plus integer labels.
    names = unpickle('{}/batches.meta'.format(directory))['label_names']
    print('names', names)
    data, labels = [], []
    for i in range(1, 6):
        filename = '{}/data_batch_{}'.format(directory, i)
        batch_data = unpickle(filename)
        if len(data) > 0:
            data = np.vstack((data, batch_data['data']))
            labels = np.hstack((labels, batch_data['labels']))
        else:
            data = batch_data['data']
            labels = batch_data['labels']
    print(np.shape(data), np.shape(labels))
    data = clean(data)
    data = data.astype(np.float32)
    return names, data, labels

def conv_layer(x, W, b):
    # Convolution with stride 1 and SAME padding, plus bias, then ReLU.
    conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    conv_with_b = tf.nn.bias_add(conv, b)
    conv_out = tf.nn.relu(conv_with_b)
    return conv_out

def maxpool_layer(conv, k=2):
    # k x k max pooling with stride k halves each spatial dimension for k=2.
    return tf.nn.max_pool(conv, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')

def model():
    # The input arrives flattened; restore the 24x24 single-channel shape.
    x_reshaped = tf.reshape(x, shape=[-1, 24, 24, 1])
    # Block 1: conv to 24x24x64, pool down to 12x12x64, then local response
    # normalization.
    conv_out1 = conv_layer(x_reshaped, W1, b1)
    maxpool_out1 = maxpool_layer(conv_out1)
    norm1 = tf.nn.lrn(maxpool_out1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
    # Block 2: conv at 12x12x64, normalize, then pool down to 6x6x64, which
    # is why W3's input dimension is 6*6*64.
    conv_out2 = conv_layer(norm1, W2, b2)
    norm2 = tf.nn.lrn(conv_out2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
    maxpool_out2 = maxpool_layer(norm2)
    # Flatten and run through the fully connected layers.
    maxpool_reshaped = tf.reshape(maxpool_out2, [-1, W3.get_shape().as_list()[0]])
    local = tf.add(tf.matmul(maxpool_reshaped, W3), b3)
    local_out = tf.nn.relu(local)
    out = tf.add(tf.matmul(local_out, W_out), b_out)
    return out

names, data, labels = read_data('./cifar-10-batches-py')
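
# Illustrative use of the matplotlib import above (an addition, not part of
# the original gist): display the first cleaned 24x24 crop with its label.
plt.imshow(data[0].reshape(24, 24), cmap='gray')
plt.title(names[labels[0]])
plt.show()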

# Inputs are flattened 24x24 grayscale crops; targets are one-hot class vectors.
x = tf.placeholder(tf.float32, [None, 24 * 24])
y = tf.placeholder(tf.float32, [None, len(names)])

# Two 5x5 convolution kernels (64 filters each), a 1024-unit fully connected
# layer, and a final linear layer over the len(names) classes.
W1 = tf.Variable(tf.random_normal([5, 5, 1, 64]))
b1 = tf.Variable(tf.random_normal([64]))
W2 = tf.Variable(tf.random_normal([5, 5, 64, 64]))
b2 = tf.Variable(tf.random_normal([64]))
W3 = tf.Variable(tf.random_normal([6*6*64, 1024]))
b3 = tf.Variable(tf.random_normal([1024]))
W_out = tf.Variable(tf.random_normal([1024, len(names)]))
b_out = tf.Variable(tf.random_normal([len(names)]))

model_op = model()

# Softmax cross-entropy loss, Adam, and mean batch accuracy.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model_op, labels=y))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
correct_pred = tf.equal(tf.argmax(model_op, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Convert the integer labels to one-hot vectors once, up front.
    onehot_labels = tf.one_hot(labels, len(names), on_value=1., off_value=0., axis=-1)
    onehot_vals = sess.run(onehot_labels)
    batch_size = len(data) // 200
    print('batch size', batch_size)
    for j in range(0, 1000):
        # One epoch: step through the training set in contiguous batches and
        # track the average in-batch accuracy.
        avg_accuracy_val = 0
        batch_count = 0
        for i in range(0, len(data), batch_size):
            batch_data = data[i:i+batch_size, :]
            batch_onehot_vals = onehot_vals[i:i+batch_size, :]
            _, accuracy_val = sess.run([train_op, accuracy], feed_dict={x: batch_data, y: batch_onehot_vals})
            avg_accuracy_val += accuracy_val
            batch_count += 1
        avg_accuracy_val /= batch_count
        print('Epoch {}. Avg accuracy {}'.format(j, avg_accuracy_val))
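
    # Illustrative extension (an addition, not part of the original gist):
    # evaluate the trained network on the held-out CIFAR-10 test batch,
    # assuming 'test_batch' sits alongside the training batches.
    test_batch = unpickle('./cifar-10-batches-py/test_batch')
    test_data = clean(test_batch['data']).astype(np.float32)
    test_onehot_vals = sess.run(tf.one_hot(test_batch['labels'], len(names),
                                           on_value=1., off_value=0., axis=-1))
    test_accuracy_val = sess.run(accuracy, feed_dict={x: test_data, y: test_onehot_vals})
    print('Test accuracy {}'.format(test_accuracy_val))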