MLP on TensorFlow
""" | |
From Jeremie :) | |
""" | |
from __future__ import print_function | |
import numpy | |
import tensorflow as tf | |
rng = numpy.random | |
# Parameters
learning_rate = 0.01
training_epochs = 100
display_step = 5
batch_size = 2
data_a = numpy.asarray([
    [0, 0, 0],
    [1, 1, 1],
    [2, 2, 2],
    [3, 3, 3],
    [4, 4, 4],
    [5, 5, 5],
    [6, 6, 6],
    [7, 7, 7],
    [8, 8, 8],
    [9, 9, 9],
])
data_y = numpy.asarray([
    [0, 1],
    [0, 1],
    [0, 1],
    [0, 1],
    [0, 1],
    [1, 0],
    [1, 0],
    [1, 0],
    [1, 0],
    [1, 0]
])
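# Labels are one-hot encoded: the first five samples (feature values 0-4)
# belong to class [0, 1], the last five (values 5-9) to class [1, 0].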
n_hidden_1 = 10  # 1st layer number of features
n_hidden_2 = 10  # 2nd layer number of features
n_input = 3      # three features per sample
n_classes = 2    # binary classification, one-hot encoded
X = tf.placeholder(tf.float32, [None, n_input])    # 3 features
Y = tf.placeholder(tf.float32, [None, n_classes])  # binary classification
def mlp(x, weights, biases):
    # Hidden layer with ReLU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
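# The weight shapes chain the layers together: (3 -> 10), (10 -> 10),
# (10 -> 2), matching n_input, n_hidden_1, n_hidden_2 and n_classes above.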
prediction = mlp(X, weights, biases)
# Softmax cross-entropy between the logits and the one-hot labels
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle: slide a window of batch_size samples over the data
    for step in range(training_epochs):
        offset = (step * batch_size) % (data_y.shape[0] - batch_size)
        batch_data = data_a[offset:(offset + batch_size), :]
        batch_labels = data_y[offset:(offset + batch_size)]
        feed_dict = {X: batch_data, Y: batch_labels}
        _, c = sess.run([optimizer, cost], feed_dict=feed_dict)
        if (step + 1) % display_step == 0:
            print("Epoch:", step + 1, "cost =", c)
    print("Optimization Finished!")
    # Test model
    correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({X: data_a, Y: data_y}))