Created
September 23, 2016 17:34
-
-
Save ypwhs/64b3c8c80157d502987bc59d5e6a1ac3 to your computer and use it in GitHub Desktop.
tflearn vs tensorflow
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{ | |
"cells": [ | |
{ | |
"cell_type": "code", | |
"execution_count": 1, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Extracting mnist/train-images-idx3-ubyte.gz\n" | |
] | |
}, | |
{ | |
"name": "stderr", | |
"output_type": "stream", | |
"text": [ | |
"/usr/local/Cellar/python/2.7.12/Frameworks/Python.framework/Versions/2.7/lib/python2.7/gzip.py:275: VisibleDeprecationWarning: converting an array with ndim > 0 to an index will result in an error in the future\n", | |
" chunk = self.extrabuf[offset: offset + size]\n", | |
"/usr/local/lib/python2.7/site-packages/tflearn/datasets/mnist.py:52: VisibleDeprecationWarning: converting an array with ndim > 0 to an index will result in an error in the future\n", | |
" data = data.reshape(num_images, rows, cols, 1)\n" | |
] | |
}, | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"Extracting mnist/train-labels-idx1-ubyte.gz\n", | |
"Extracting mnist/t10k-images-idx3-ubyte.gz\n", | |
"Extracting mnist/t10k-labels-idx1-ubyte.gz\n" | |
] | |
} | |
], | |
"source": [ | |
"# Load MNIST via tflearn's dataset helper (labels one-hot encoded).\n", | |
"import numpy as np\n", | |
"import tensorflow as tf\n", | |
"import tflearn.datasets.mnist as mnist\n", | |
"# X/Y: training images and labels; testX/testY: held-out test split.\n", | |
"X, Y, testX, testY = mnist.load_data(one_hot=True)" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 2, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [], | |
"source": [ | |
"batch_size = 128\n", | |
"image_size = 28\n", | |
"num_labels = 10\n", | |
"\n", | |
"def accuracy(predictions, labels):\n", | |
"    \"\"\"Percent of rows whose argmax prediction matches the argmax label.\"\"\"\n", | |
"    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])\n", | |
"\n", | |
"graph = tf.Graph()\n", | |
"with graph.as_default():\n", | |
"    # Input data. For the training data, we use a placeholder that will be fed\n", | |
"    # at run time with a training minibatch.\n", | |
"    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))\n", | |
"    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n", | |
"    tf_test_dataset = tf.constant(testX)\n", | |
"\n", | |
"    # Variables: two hidden layers of `hide` units plus an output layer.\n", | |
"    hide = 512\n", | |
"    w1 = tf.Variable(tf.truncated_normal([image_size*image_size, hide]))\n", | |
"    b1 = tf.Variable(tf.truncated_normal([hide]))\n", | |
"    w2 = tf.Variable(tf.truncated_normal([hide, hide]))\n", | |
"    b2 = tf.Variable(tf.truncated_normal([hide]))\n", | |
"    wo = tf.Variable(tf.truncated_normal([hide, num_labels]))\n", | |
"    bo = tf.Variable(tf.truncated_normal([num_labels]))\n", | |
"\n", | |
"    # Training computation. keep_prob < 1 applies dropout (training only);\n", | |
"    # pass keep_prob=1.0 for inference so evaluation is deterministic.\n", | |
"    def calc(out, keep_prob=0.8):\n", | |
"        out = tf.nn.dropout(tf.nn.relu(tf.matmul(out, w1) + b1), keep_prob)\n", | |
"        out = tf.nn.dropout(tf.nn.relu(tf.matmul(out, w2) + b2), keep_prob)\n", | |
"        out = tf.matmul(out, wo) + bo\n", | |
"        return out\n", | |
"\n", | |
"    train_out = calc(tf_train_dataset)\n", | |
"    # Keyword arguments keep this call valid on TF >= 1.0 as well as 0.x.\n", | |
"    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=train_out, labels=tf_train_labels)\n", | |
"\n", | |
"    loss = tf.reduce_mean(cross_entropy)\n", | |
"    # Optimizer. GradientDescentOptimizer(0.001) and AdamOptimizer(1e-4)\n", | |
"    # were also tried; RMSProp converged best here.\n", | |
"    optimizer = tf.train.RMSPropOptimizer(0.001).minimize(loss)\n", | |
"\n", | |
"    # Predictions for the training and test data.\n", | |
"    train_prediction = tf.nn.softmax(train_out)\n", | |
"    # BUG FIX: the original applied dropout (keep_prob 0.8) at test time,\n", | |
"    # randomly perturbing every evaluation; inference uses keep_prob=1.0.\n", | |
"    test_prediction = tf.nn.softmax(calc(tf_test_dataset, keep_prob=1.0))\n" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 3, | |
"metadata": { | |
"collapsed": false | |
}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"6865\n", | |
"Initialized\n", | |
"epoch: 0\t loss:2854.744629\t train: 15.6%\t test: 11.1%\n", | |
"epoch: 1\t loss:250.781906\t train: 76.6%\t test: 83.7%\n", | |
"epoch: 2\t loss:131.724091\t train: 82.8%\t test: 87.5%\n", | |
"epoch: 3\t loss:42.656372\t train: 89.1%\t test: 89.0%\n", | |
"epoch: 4\t loss:46.167946\t train: 93.0%\t test: 89.7%\n", | |
"epoch: 5\t loss:27.534512\t train: 89.8%\t test: 91.1%\n", | |
"epoch: 6\t loss:6.486470\t train: 90.6%\t test: 92.0%\n", | |
"epoch: 7\t loss:15.046724\t train: 93.8%\t test: 92.0%\n", | |
"epoch: 8\t loss:2.157014\t train: 98.4%\t test: 92.4%\n", | |
"epoch: 9\t loss:9.607678\t train: 93.0%\t test: 92.5%\n", | |
"epoch: 10\t loss:1.950438\t train: 96.9%\t test: 93.1%\n", | |
"epoch: 11\t loss:9.402186\t train: 94.5%\t test: 93.3%\n", | |
"epoch: 12\t loss:6.830947\t train: 95.3%\t test: 93.3%\n", | |
"epoch: 13\t loss:4.793999\t train: 95.3%\t test: 93.3%\n", | |
"epoch: 14\t loss:2.508249\t train: 96.9%\t test: 93.5%\n", | |
"epoch: 15\t loss:5.369869\t train: 96.9%\t test: 93.4%\n", | |
"epoch: 16\t loss:6.801050\t train: 95.3%\t test: 94.2%\n" | |
] | |
} | |
], | |
"source": [ | |
"n = X.shape[0]\n", | |
"# Explicit floor division: identical result on Python 2, and avoids a\n", | |
"# float step count (TypeError in range) if ever run under Python 3.\n", | |
"epoch = n // batch_size\n", | |
"num_steps = 16 * epoch + 1\n", | |
"print(num_steps)\n", | |
"\n", | |
"with tf.Session(graph=graph) as session:\n", | |
"    # NOTE: deprecated in TF >= 1.0; there use tf.global_variables_initializer().\n", | |
"    tf.initialize_all_variables().run()\n", | |
"    print(\"Initialized\")\n", | |
"    for step in range(num_steps):\n", | |
"        # Cycle through the training set in fixed-size minibatches.\n", | |
"        offset = (step * batch_size) % (n - batch_size)\n", | |
"        batch_data = X[offset:(offset + batch_size), :]\n", | |
"        batch_labels = Y[offset:(offset + batch_size), :]\n", | |
"        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n", | |
"        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n", | |
"        # Report once per epoch: minibatch loss, minibatch accuracy, full test accuracy.\n", | |
"        if (step % epoch == 0):\n", | |
"            train = accuracy(predictions, batch_labels)\n", | |
"            test = accuracy(test_prediction.eval(), testY)\n", | |
"            print(\"epoch: %d\\t loss:%f\\t train: %.1f%%\\t test: %.1f%%\" % (step // epoch, l, train, test))\n" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": null, | |
"metadata": { | |
"collapsed": true | |
}, | |
"outputs": [], | |
"source": [] | |
} | |
], | |
"metadata": { | |
"kernelspec": { | |
"display_name": "Python 2", | |
"language": "python", | |
"name": "python2" | |
}, | |
"language_info": { | |
"codemirror_mode": { | |
"name": "ipython", | |
"version": 2 | |
}, | |
"file_extension": ".py", | |
"mimetype": "text/x-python", | |
"name": "python", | |
"nbconvert_exporter": "python", | |
"pygments_lexer": "ipython2", | |
"version": "2.7.12" | |
} | |
}, | |
"nbformat": 4, | |
"nbformat_minor": 1 | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment