# All Imports
import pickle
import csv
import numpy as np
import matplotlib.pyplot as plt
import cv2
from math import floor, ceil
from sklearn.model_selection import ShuffleSplit
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelBinarizer
import tensorflow as tf
from tensorflow.contrib.layers import flatten
# load data
training_file = './traffic-signs-data/train.p'
testing_file = './traffic-signs-data/test.p'
sign_name_mapping_file = './signnames.csv'
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# Global Vars
X_train, y_train = train['features'], train['labels']
X_test, y_test = test['features'], test['labels']
TRAINING_SIZE = len(y_train)
TESTING_SIZE = len(y_test)
EPOCHS = 30
BATCH_SIZE = 128
DROPOUT = .5  # keep probability passed to tf.nn.dropout
TRAINING_STEPS = TRAINING_SIZE // BATCH_SIZE  # floor division drops the final partial batch
LABELS_SIZE = len(set([*y_train, *y_test]))  # 43 distinct sign classes
# tf Vars
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
# Classify over 43 classes of traffic sign
y = tf.placeholder(tf.float32, (None, LABELS_SIZE))
keep_prob = tf.placeholder(tf.float32)
# helper fns
def normalize_images(image_data):
    # Min-max scale pixel values from [0, 255] to [-0.5, 0.5]
    input_min = 0
    input_max = 255
    scaled_min = -0.5
    scaled_max = 0.5
    return scaled_min + (((image_data - input_min) * (scaled_max - scaled_min)) / (input_max - input_min))
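# Quick sanity check (illustrative, not part of the original gist): the scaling
# maps 0 -> -0.5, 255 -> 0.5, and mid-gray 128 -> ~0.002:
# normalize_images(np.array([0., 128., 255.]))  # -> array([-0.5, 0.00196..., 0.5])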
def preprocess(x_data, y_data):
    x_data = normalize_images(x_data)
    x_data, y_data = shuffle(x_data, y_data)
    # One-hot encode the labels; note this fits a fresh binarizer on every call
    label_binarizer = LabelBinarizer()
    y_data = label_binarizer.fit_transform(y_data)
    return x_data, y_data
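# Caveat (assumption, not in the original gist): calling fit_transform
# separately on train and test only yields consistent one-hot columns because
# both splits happen to contain all 43 classes. A safer sketch would share one
# binarizer fit on the union of labels:
#
# shared_binarizer = LabelBinarizer().fit([*train['labels'], *test['labels']])
# y_train_onehot = shared_binarizer.transform(train['labels'])
# y_test_onehot = shared_binarizer.transform(test['labels'])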
# define the network
def simplenet(x, dropout):
    # reshape is a no-op for (None, 32, 32, 3) inputs but makes the expected shape explicit
    conv1 = tf.reshape(x, (-1, 32, 32, 3))
    # 3x3 Convolution, pool, dropout, relu
    conv1_output_depth = 32
    conv1_weight = tf.Variable(tf.truncated_normal([3, 3, 3, conv1_output_depth]))
    conv1_bias = tf.Variable(tf.zeros(conv1_output_depth))
    conv1 = tf.nn.conv2d(conv1, conv1_weight, strides=[1, 1, 1, 1], padding='VALID')
    conv1 = tf.nn.bias_add(conv1, conv1_bias)
    conv1_pooled = tf.nn.max_pool(
        conv1,
        ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        padding='VALID')
    conv1_dropped = tf.nn.dropout(conv1_pooled, dropout)  # `dropout` is the keep probability
    conv1_activated = tf.nn.relu(conv1_dropped)
    # Flatten for fully connected layer
    conv1_flattened = flatten(conv1_activated)
    # fully connected layer 1, relu
    fc1_output_size = 128
    fc1_shape = (conv1_flattened.get_shape().as_list()[-1], fc1_output_size)
    fc1_weight = tf.Variable(tf.truncated_normal(shape=fc1_shape))
    fc1_bias = tf.Variable(tf.zeros(fc1_output_size))
    fc1 = tf.matmul(conv1_flattened, fc1_weight)
    fc1 = tf.nn.bias_add(fc1, fc1_bias)
    fc1_activated = tf.nn.relu(fc1)
    # fully connected layer 2
    fc2_output_size = 43
    fc2_weight = tf.Variable(tf.truncated_normal(shape=(fc1_output_size, fc2_output_size)))
    fc2_bias = tf.Variable(tf.zeros(fc2_output_size))
    fc2 = tf.matmul(fc1_activated, fc2_weight)
    fc2 = tf.nn.bias_add(fc2, fc2_bias)
    return fc2  # softmax applied later
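# Shape walk-through for one 32x32x3 input (derived from the layer sizes above):
#   3x3 conv, stride 1, VALID: (32 - 3) + 1 = 30   -> 30x30x32
#   2x2 max pool, stride 2:    30 // 2 = 15        -> 15x15x32
#   flatten:                   15 * 15 * 32 = 7200
#   fc1 -> 128 units, fc2 -> 43 logits (one per sign class)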
# preprocess training & test data
X_train, y_train = preprocess(X_train, y_train)
X_test, y_test = preprocess(X_test, y_test)
# define prediction, cost and optimizer
pred = simplenet(x, keep_prob)
# softmax_cross_entropy_with_logits expects unscaled logits, hence no softmax in the network
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(1e-4).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Train for EPOCHS full passes over the training set
    for epoch in range(EPOCHS):
        print("Epoch", epoch)
        for step in range(TRAINING_STEPS):
            step_idx_start = step * BATCH_SIZE
            step_idx_end = step_idx_start + BATCH_SIZE
            batch_x = X_train[step_idx_start:step_idx_end]
            batch_y = y_train[step_idx_start:step_idx_end]
            # Run optimization op (backprop)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                           keep_prob: DROPOUT})
            if step % 10 == 0:
                # Calculate batch loss and accuracy (dropout disabled via keep_prob=1)
                loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                                  y: batch_y,
                                                                  keep_prob: 1.})
                print("Iter " + str(step) + ", Minibatch Loss= " +
                      "{:.6f}".format(loss) + ", Training Accuracy= " +
                      "{:.5f}".format(acc))
    print("Optimization Finished!")
    # Calculate accuracy for test data
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={x: X_test,
                                        y: y_test,
                                        keep_prob: 1.}))
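# Optional sketch (assumption, not in the original gist): feeding the entire
# test set through one sess.run can exhaust memory on larger datasets. A
# minimal batched-accuracy helper, reusing the same x / y / keep_prob /
# accuracy tensors, could replace the single evaluation above:
def batched_accuracy(sess, X_data, y_data, batch_size=BATCH_SIZE):
    weighted_total = 0.0
    for start in range(0, len(X_data), batch_size):
        batch_x = X_data[start:start + batch_size]
        batch_y = y_data[start:start + batch_size]
        acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
        weighted_total += acc * len(batch_x)  # weight by actual batch length
    return weighted_total / len(X_data)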