Skip to content

Instantly share code, notes, and snippets.

@cenkbircanoglu
Last active September 20, 2017 15:29
Show Gist options
  • Save cenkbircanoglu/5f3899cc3241d23296acb5aed9067ee6 to your computer and use it in GitHub Desktop.
# -*- coding: utf-8 -*-
""" Convolutional Neural Network for MNIST dataset classification task.
References:
Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""
from __future__ import division, print_function, absolute_import
import tflearn
from sklearn import svm
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization, l2_normalize
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist
import tensorflow as tf
model_name = "test.h5"  # NOTE(review): currently unused — nothing below saves or loads a model

# Load MNIST as flat 784-vectors with one-hot labels, then reshape images
# to NHWC (28x28x1) for the conv layers.
# To debug on a small subset, slice here, e.g. X, Y = X[:1000], Y[:1000].
X, Y, testX, testY = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])

with tf.Graph().as_default():
    # --- Build the convolutional network (LeNet-style) ---
    network = input_data(shape=[None, 28, 28, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    # Tap an L2-normalised 256-d embedding just before the classification
    # head; this tensor is extracted later for the SVM.
    embedding = l2_normalize(network, dim=1, name="embedding")
    network = fully_connected(network, 10, activation="softmax")
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', metric="accuracy",
                         name='target')

    # --- Train the CNN end-to-end on the softmax objective ---
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=1,
              validation_set=({'input': testX}, {'target': testY}),
              batch_size=256, run_id='convnet_mnist')

    # --- Extract embeddings and train an SVM on them ---
    # A second DNN wrapping the embedding tensor, sharing the trained
    # session, acts as a pure feature extractor (no retraining).
    m2 = tflearn.DNN(embedding, session=model.session)
    train_embeddings = m2.predict(X)
    print(train_embeddings.shape)

    # SVC expects integer class labels, so collapse the one-hot targets.
    clf = svm.SVC()
    clf.fit(train_embeddings, Y.argmax(axis=1))
    test_embeddings = m2.predict(testX)
    print(clf.score(test_embeddings, testY.argmax(axis=1)))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment