# Libraries we will need
from __future__ import division, print_function
import numpy as np
import os
import tensorflow as tf  # TensorFlow 1.x API (placeholders, tf.layers, tf.contrib)
from skimage import io
import matplotlib.pyplot as plt
DATA_PATH = 'traffic/'
def load_data(data_dir):
    # Each subdirectory name is a class label; it holds .ppm images of that sign
    directories = [d for d in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, d))]
    labels = []
    images = []
    for d in directories:
        label_directory = os.path.join(data_dir, d)
        file_names = [os.path.join(label_directory, f)
                      for f in os.listdir(label_directory)
                      if f.endswith(".ppm")]
        for f in file_names:
            images.append(io.imread(f))
            labels.append(int(d))
    return images, np.array(labels)
from skimage import transform
from skimage.color import rgb2gray
# Load the dataset, then rescale every image to 28x28, convert to grayscale,
# and split the dataset into batches
images, labels = load_data(DATA_PATH)
images28 = [transform.resize(image, (28, 28)) for image in images]
images28 = np.array(images28)
images28 = rgb2gray(images28)
# vsplit/split require the number of images to be divisible by 25
images28_batches = np.vsplit(images28, 25)
labels_batches = np.split(labels, 25)
print(np.shape(images28_batches))
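# A quick visual sanity check -- a sketch added here, not part of the original
# gist: plot a few of the rescaled grayscale signs using the matplotlib import
# above. The sample size of 4 is an arbitrary choice.
for i, idx in enumerate(np.random.choice(len(images28), 4, replace=False)):
    plt.subplot(1, 4, i + 1)
    plt.imshow(images28[idx], cmap="gray")
    plt.title("label {}".format(labels[idx]))
    plt.axis('off')
plt.show()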
# Simple NN architecture
# Accuracy after 3 epochs: ~0.505
# Define placeholders for our images and labels
x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28])
y = tf.placeholder(dtype=tf.int32, shape=[None])
# Layer 1 -> flatten each image to a 1D vector
images_flat = tf.contrib.layers.flatten(x)
# Pass the flattened images to a fully connected layer with relu activations
logits = tf.contrib.layers.fully_connected(images_flat, 62, tf.nn.relu)
# Compute the loss
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
# Minimize it with an optimizer
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
# The predicted label for each example is the argmax of the 62 logits,
# which (after softmax) form a probability distribution over the labels
correct_pred = tf.argmax(logits, 1)
# Training accuracy: fraction of predictions that match the true labels
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(correct_pred, tf.int32), y), tf.float32))
print("images_flat: ", images_flat)
print("logits: ", logits)
print("loss: ", loss)
print("predicted_labels: ", correct_pred)
# Convolutional architecture
# Accuracy after 3 epochs: ~0.922
# Define placeholders for our images and labels
x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28])
y = tf.placeholder(dtype=tf.int32, shape=[None])
# Fix the graph-level seed for reproducible weight initialization.
# (tf.layers.conv2d below creates and manages its own weights, so the
# manual weight/bias variables are not needed.)
tf.set_random_seed(1234)
# This time we do not flatten the image but keep its 2D structure,
# adding a channels dimension for the conv layers
input_layer = tf.reshape(x, [-1, 28, 28, 1])
# Convolutional Layer #1
conv1 = tf.layers.conv2d(
    inputs=input_layer,
    filters=32,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu)
# Pooling Layer #1
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=64,
    kernel_size=[5, 5],
    padding="same",
    activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Dense Layer
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# training=True keeps dropout active; switch it off (or feed a flag) at eval time
dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=True)
# Logits Layer: a final dense layer over the pooled features gives 62 logits,
# i.e. (after softmax) a probability distribution over the labels
logits = tf.layers.dense(inputs=dropout, units=62)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_pred = tf.argmax(logits, 1)
# Training accuracy: fraction of predictions that match the true labels
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(correct_pred, tf.int32), y), tf.float32))
print("images_flat: ", images_flat) | |
print("logits: ", logits) | |
print("loss: ", loss) | |
print("predicted_labels: ", correct_pred) | |