Created May 19, 2017 19:33
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # pin the run to the second GPU

import tensorflow as tf
sess = tf.Session()

from keras import backend as K
K.set_session(sess)
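# Registering the session with the Keras backend lets Keras layers and raw
# TF ops build into, and run in, the same graph and session.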
img = tf.placeholder(tf.float32, shape=(None, 784))  # flattened 28x28 MNIST images

from keras.layers import Dense
from keras.objectives import categorical_crossentropy
from tensorflow.examples.tutorials.mnist import input_data
# Equivalent Keras version of the two hidden layers and output below:
# x = Dense(128, activation='relu')(img)
# x = Dense(128, activation='relu')(x)
# preds = Dense(10, activation='softmax')(x)
# Pure-TF version: two ReLU hidden layers and a softmax output.
dense1 = tf.layers.dense(inputs=img, units=128, activation=tf.nn.relu)
dense2 = tf.layers.dense(inputs=dense1, units=128, activation=tf.nn.relu)
preds = tf.layers.dense(inputs=dense2, units=10, activation=tf.nn.softmax)

labels = tf.placeholder(tf.float32, shape=(None, 10))
loss = tf.reduce_mean(categorical_crossentropy(labels, preds))
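# Keras's categorical_crossentropy expects probabilities, so feeding it the
# softmax output above is correct. A common pure-TF alternative (a sketch,
# not what this gist benchmarks) keeps the last layer linear and fuses the
# softmax into the loss for numerical stability:
#   logits = tf.layers.dense(inputs=dense2, units=10)  # no activation
#   loss = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))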
mnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# minimize() above completed the training graph; now initialize its variables.
sess.run(tf.global_variables_initializer())
import time

t0 = time.time()
with sess.as_default():
    for i in range(10000):
        batch = mnist_data.train.next_batch(50)
        train_step.run(feed_dict={img: batch[0], labels: batch[1]})
print(time.time() - t0)  # 16.266; native tf: 15.91
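# Each step above pushes a fresh 50x784 NumPy batch through feed_dict, which
# copies it from host memory to the device on every iteration; the experiment
# below removes that per-step transfer.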
### Try with pre-loading the data onto the card
import numpy as np

load_size = 50
num_loads = 10000

# Pull 10,000 batches of 50 out of the MNIST feed up front.
all_X = np.zeros((num_loads * load_size, 784))  # images
all_y = np.zeros((num_loads * load_size, 10))   # one-hot classes
for i in range(num_loads):
    batch = mnist_data.train.next_batch(load_size)
    all_X[i * load_size:(i + 1) * load_size, :] = batch[0]
    all_y[i * load_size:(i + 1) * load_size, :] = batch[1]

# Bake the whole dataset into the graph as constants and slice out one batch
# per step, so the only thing fed from the host is the scalar index ix.
tf_data_X = tf.constant(all_X, dtype=tf.float32)
tf_data_y = tf.constant(all_y, dtype=tf.float32)
ix = tf.placeholder(shape=(), dtype=tf.int32)
batch_X = tf.slice(tf_data_X, [load_size * ix, 0], [load_size, -1])
batch_y = tf.slice(tf_data_y, [load_size * ix, 0], [load_size, -1])
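# Note: tf.constant embeds the data in the GraphDef, which is capped at 2 GB;
# the float32 arrays here come to roughly 1.6 GB, so this just fits. Whether
# the constants actually end up on the GPU is decided by TF's placer; one way
# to check (an assumption, not something this gist does) is to build the
# session with device-placement logging:
#   sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))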
## Keras helpers, equivalent to the pure-TF layers below:
# x = Dense(128, activation='relu')(batch_X)
# x = Dense(128, activation='relu')(x)
# preds = Dense(10, activation='softmax')(x)
## Pure TF, now fed from the on-graph batch tensors rather than placeholders:
dense1 = tf.layers.dense(inputs=batch_X, units=128, activation=tf.nn.relu)
dense2 = tf.layers.dense(inputs=dense1, units=128, activation=tf.nn.relu)
preds = tf.layers.dense(inputs=dense2, units=10, activation=tf.nn.softmax)
loss = tf.reduce_mean(categorical_crossentropy(batch_y, preds))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
t0 = time.time()
with sess.as_default():
    sess.run(tf.global_variables_initializer())
    for i in range(10000):  # one pass over all of the pre-loaded batches
        b_x, b_y, step_update = sess.run([batch_X, batch_y, train_step],
                                         feed_dict={ix: i})
print(time.time() - t0)  # 5.821; native tf: 5.909
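# With the data resident in the graph, each step feeds only one scalar index
# instead of copying a 50x784 batch from host memory, and the run drops from
# ~16.3s to ~5.8s, roughly a 2.8x speedup over the feed_dict version.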