{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
"# Tensorflow MNIST- Intel i5-4210U" | |
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[name: \"/device:CPU:0\"\n",
      "device_type: \"CPU\"\n",
      "memory_limit: 268435456\n",
      "locality {\n",
      "}\n",
      "incarnation: 2983841888649849212\n",
      "]\n"
     ]
    }
   ],
   "source": [
    "import tensorflow\n",
    "from tensorflow.python.client import device_lib\n",
    "print(device_lib.list_local_devices())"
   ]
  },
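  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The next cell builds and trains a small convolutional network. Reading the shapes off the code below (SAME padding, stride-1 convolutions, 2x2 max-pooling):\n",
    "\n",
    "- input: 28x28x1 grayscale image, fed as a flat 784-vector and reshaped\n",
    "- conv 5x5, 16 filters + 2x2 max-pool: 28x28x1 -> 14x14x16\n",
    "- conv 5x5, 32 filters + 2x2 max-pool: 14x14x16 -> 7x7x32\n",
    "- flatten: 7 * 7 * 32 = 1568 features\n",
    "- fully connected 1568 -> 256 (ReLU), then 256 -> 10 logits, softmax over the 10 digit classes"
   ]
  },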
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-3-acd6a8829fd9>:14: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From c:\\users\\kishan maladkar\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From c:\\users\\kishan maladkar\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting data/MNIST/train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From c:\\users\\kishan maladkar\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting data/MNIST/train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From c:\\users\\kishan maladkar\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting data/MNIST/t10k-images-idx3-ubyte.gz\n",
      "Extracting data/MNIST/t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From c:\\users\\kishan maladkar\\appdata\\local\\programs\\python\\python36\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From <ipython-input-3-acd6a8829fd9>:148: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See @{tf.nn.softmax_cross_entropy_with_logits_v2}.\n",
      "\n",
      "Optimization Iteration: 1, Training Accuracy: 4.7%\n",
      "Optimization Iteration: 101, Training Accuracy: 81.2%\n",
      "Optimization Iteration: 201, Training Accuracy: 87.5%\n",
      "Optimization Iteration: 301, Training Accuracy: 85.9%\n",
      "Optimization Iteration: 401, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 501, Training Accuracy: 92.2%\n",
      "Optimization Iteration: 601, Training Accuracy: 90.6%\n",
      "Optimization Iteration: 701, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 801, Training Accuracy: 90.6%\n",
      "Optimization Iteration: 901, Training Accuracy: 89.1%\n",
      "Optimization Iteration: 1001, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 1101, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 1201, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 1301, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 1401, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 1501, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 1601, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 1701, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 1801, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 1901, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 2001, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 2101, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 2201, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 2301, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 2401, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 2501, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 2601, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 2701, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 2801, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 2901, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 3001, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 3101, Training Accuracy: 93.8%\n",
      "Optimization Iteration: 3201, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 3301, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 3401, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 3501, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 3601, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 3701, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 3801, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 3901, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 4001, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 4101, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 4201, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 4301, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 4401, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 4501, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 4601, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 4701, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 4801, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 4901, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 5001, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 5101, Training Accuracy: 92.2%\n",
      "Optimization Iteration: 5201, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 5301, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 5401, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 5501, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 5601, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 5701, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 5801, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 5901, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 6001, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 6101, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 6201, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 6301, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 6401, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 6501, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 6601, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 6701, Training Accuracy: 93.8%\n",
      "Optimization Iteration: 6801, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 6901, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 7001, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 7101, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 7201, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 7301, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 7401, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 7501, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 7601, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 7701, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 7801, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 7901, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 8001, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 8101, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 8201, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 8301, Training Accuracy: 96.9%\n",
      "Optimization Iteration: 8401, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 8501, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 8601, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 8701, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 8801, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 8901, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 9001, Training Accuracy: 95.3%\n",
      "Optimization Iteration: 9101, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 9201, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 9301, Training Accuracy: 100.0%\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Optimization Iteration: 9401, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 9501, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 9601, Training Accuracy: 98.4%\n",
      "Optimization Iteration: 9701, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 9801, Training Accuracy: 100.0%\n",
      "Optimization Iteration: 9901, Training Accuracy: 100.0%\n",
      "Time usage: 0:27:52\n"
     ]
    }
   ],
   "source": [
"import os\n", | |
"import sys\n", | |
"os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n", | |
"import numpy as np\n", | |
"import matplotlib.pyplot as plt \n", | |
"import tensorflow as tf\n", | |
"from sklearn.metrics import confusion_matrix\n", | |
"import time \n", | |
"from datetime import timedelta\n", | |
"import math\n", | |
"import pandas as pd\n", | |
"\n", | |
"from tensorflow.examples.tutorials.mnist import input_data\n", | |
"data = input_data.read_data_sets('data/MNIST/', one_hot=True)\n", | |
"\n", | |
"weight_matrix_size1 = 5 #5x5 pixcels\n", | |
"depth1 = 16 #16 depth\n", | |
"\n", | |
"weight_matrix_size2 = 5 #5x5 pixcels\n", | |
"depth2 = 32 #32 depth\n", | |
"\n", | |
"fully_conn_layer = 256 #neuros at end of fully connected layer\n", | |
"\n", | |
"#Data dimensions\n", | |
"\n", | |
"#We have an input image of 28 x 28 dimensions\n", | |
"img_size = 28\n", | |
"\n", | |
"# We have a one hot encoded matrix of length 28*28 = 784\n", | |
"img_size_flat = img_size * img_size\n", | |
"\n", | |
"#Shape of the image represented by\n", | |
"img_shape = (img_size,img_size)\n", | |
"\n", | |
"#Number of channels in the input image\n", | |
"num_channels = 1\n", | |
"\n", | |
"#Number of output classes to be trained on\n", | |
"num_classes = 10\n", | |
"\n", | |
"def weight_matrix(dimensions):\n", | |
" return tf.Variable(tf.truncated_normal(shape = dimensions, stddev=0.1))\n", | |
"def biases_matrix(length):\n", | |
" return tf.Variable(tf.constant(0.1,shape=[length]))\n", | |
"\n", | |
"#Helper functions for ConvNet\n", | |
"\n", | |
"def convolutional_layer(input, #The images\n", | |
" depth, #channels of the image\n", | |
" no_filters, #number of filters in the output\n", | |
" weight_matrix_size):\n", | |
" \n", | |
" dimensions = [weight_matrix_size,weight_matrix_size, depth, no_filters]\n", | |
" \n", | |
" weights = weight_matrix(dimensions)\n", | |
" \n", | |
" biases = biases_matrix(length=no_filters)\n", | |
" \n", | |
" layer = tf.nn.conv2d(input=input,\n", | |
" filter= weights,\n", | |
" strides=[1, 1, 1, 1], #stride 2\n", | |
" padding='SAME') #input size = output size\n", | |
" layer += biases\n", | |
" \n", | |
" layer = tf.nn.max_pool(value=layer,\n", | |
" ksize=[1, 2, 2, 1],\n", | |
" strides=[1, 2, 2, 1],\n", | |
" padding='SAME')\n", | |
" #Passing the pooled layer into ReLU Activation function\n", | |
" layer = tf.nn.relu(layer)\n", | |
" \n", | |
" return layer , weights\n", | |
"\n", | |
"# Helper function for Flattening the layer\n", | |
"\n", | |
"def flatten_layer(layer):\n", | |
" \n", | |
" layer_shape = layer.get_shape()\n", | |
" \n", | |
" num_features = layer_shape[1:4].num_elements()\n", | |
" \n", | |
" layer_flat = tf.reshape(layer,[-1,num_features])\n", | |
" \n", | |
" return layer_flat, num_features\n", | |
"\n", | |
"#Helper functions for activation and fully connected\n", | |
"\n", | |
"def fully_connected(input,num_inputs,\n", | |
" num_outputs,\n", | |
" use_relu = True):\n", | |
" weights = weight_matrix([num_inputs,num_outputs])\n", | |
" \n", | |
" biases = biases_matrix(length= num_outputs)\n", | |
" \n", | |
" layer = tf.matmul(input,weights) + biases\n", | |
" \n", | |
" if use_relu:\n", | |
" layer = tf.nn.relu(layer)\n", | |
" \n", | |
" return layer\n", | |
"\n", | |
"#Placeholder variables\n", | |
"\n", | |
"x = tf.placeholder(tf.float32,shape=[None,img_size_flat],name='x')\n", | |
"\n", | |
"x_image = tf.reshape(x, [-1,img_size,img_size,num_channels])\n", | |
"\n", | |
"y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name = 'y_true')\n", | |
"\n", | |
"y_true_cls = tf.argmax(y_true, axis=1)\n", | |
"\n", | |
"# Setting up the network\n", | |
"\n", | |
"layer_conv1 , weights_conv1 = convolutional_layer(input = x_image,\n", | |
" depth = num_channels,\n", | |
" weight_matrix_size = weight_matrix_size1,\n", | |
" no_filters = depth1)\n", | |
"\n", | |
"#layer_conv1 shape = (-1,14,14,16) and dtype = float32\n", | |
"\n", | |
"layer_conv2 , weights_conv2 = convolutional_layer(input = layer_conv1,\n", | |
" depth = depth1,\n", | |
" weight_matrix_size = weight_matrix_size2,\n", | |
" no_filters = depth2)\n", | |
"#layer_conv2 = shape=(?, 7, 7, 36) dtype=float32\n", | |
"\n", | |
"#Flattening the layer\n", | |
"\n", | |
"layer_flat , num_features = flatten_layer(layer_conv2)\n", | |
"\n", | |
"#Fully connected layers\n", | |
"\n", | |
"layer_fc1 = fully_connected(input = layer_flat,\n", | |
" num_inputs = num_features,\n", | |
" num_outputs = fully_conn_layer,\n", | |
" use_relu = True)\n", | |
"\n", | |
"layer_fc2 = fully_connected(input = layer_fc1,\n", | |
" num_inputs = fully_conn_layer,\n", | |
" num_outputs = num_classes,\n", | |
" use_relu = False)\n", | |
"\n", | |
"y_pred = tf.nn.softmax(layer_fc2)\n", | |
"\n", | |
"y_pred_cls = tf.argmax(y_pred , axis =1)\n", | |
"\n", | |
"cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,\n", | |
" labels=y_true)\n", | |
"cost = tf.reduce_mean(cross_entropy)\n", | |
"\n", | |
"#optimizing cost function\n", | |
"\n", | |
"optimizer = tf.train.AdamOptimizer(learning_rate= 1e-4).minimize(cost)\n", | |
"\n", | |
"correct_prediction = tf.equal(y_pred_cls, y_true_cls)\n", | |
"\n", | |
"accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n", | |
"\n", | |
"#TensorFlow session \n", | |
"config = tf.ConfigProto(\n", | |
" device_count = {'gpu': 0}\n", | |
" )\n", | |
"session = tf.Session(config=config)\n", | |
"\n", | |
"session.run(tf.global_variables_initializer())\n", | |
"\n", | |
"train_batch_size = 64\n", | |
"\n", | |
"total_iterations = 0\n", | |
"\n", | |
"accuracy_ = tf.summary.scalar('accuracy_value', accuracy)\n", | |
"loss_ = tf.summary.scalar('loss_value', cost)\n", | |
"\n", | |
"def optimize(num_iterations):\n", | |
" \n", | |
" global total_iterations\n", | |
"\n", | |
" start_time = time.time()\n", | |
" \n", | |
" summary_op = tf.summary.merge_all()\n", | |
"\n", | |
" file_writer = tf.summary.FileWriter('/path/to/logs', session.graph)\n", | |
" for i in range(total_iterations,\n", | |
" total_iterations + num_iterations):\n", | |
" \n", | |
" x_batch, y_true_batch = data.train.next_batch(train_batch_size)\n", | |
" \n", | |
" feed_dict_train = {x: x_batch,\n", | |
" y_true: y_true_batch}\n", | |
" \n", | |
" session.run(optimizer, feed_dict=feed_dict_train)\n", | |
" \n", | |
" acc_value = session.run(accuracy_, feed_dict=feed_dict_train)\n", | |
" loss_value = session.run(loss_, feed_dict=feed_dict_train)\n", | |
" file_writer.add_summary(acc_value, i)\n", | |
" file_writer.add_summary(loss_value, i)\n", | |
" \n", | |
" \n", | |
" if i % 100 == 0:\n", | |
" \n", | |
" acc = session.run(accuracy, feed_dict=feed_dict_train)\n", | |
" \n", | |
" msg = \"Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}\"\n", | |
" \n", | |
" print(msg.format(i + 1, acc))\n", | |
" \n", | |
" total_iterations += num_iterations\n", | |
" \n", | |
" \n", | |
" # Ending time.\n", | |
" end_time = time.time()\n", | |
"\n", | |
" # Difference between start and end-times.\n", | |
" time_dif = end_time - start_time\n", | |
"\n", | |
" # Print the time-usage.\n", | |
" print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif)))))\n", | |
" \n", | |
"optimize(num_iterations=10000)" | |
] | |
} | |
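  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The training loop above prints only per-batch accuracy. Below is a minimal evaluation sketch (assuming the `session`, `data`, `x`, `y_true` and `accuracy` objects from the cell above are still live): feed the 10,000-image MNIST test set in chunks and average the per-chunk accuracies, weighted by chunk size."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal test-set evaluation sketch; `test_batch_size` is an arbitrary choice.\n",
    "# Assumes `session`, `data`, `x`, `y_true` and `accuracy` from the cell above.\n",
    "test_batch_size = 256\n",
    "num_test = len(data.test.images)\n",
    "correct_sum = 0.0\n",
    "\n",
    "i = 0\n",
    "while i < num_test:\n",
    "    j = min(i + test_batch_size, num_test)\n",
    "    feed_dict_test = {x: data.test.images[i:j],\n",
    "                      y_true: data.test.labels[i:j]}\n",
    "    # `accuracy` is a per-batch mean, so weight it by the chunk size\n",
    "    correct_sum += session.run(accuracy, feed_dict=feed_dict_test) * (j - i)\n",
    "    i = j\n",
    "\n",
    "print(\"Accuracy on test set: {0:.1%}\".format(correct_sum / num_test))"
   ]
  }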
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}