Created
July 8, 2018 17:34
-
-
Save adityajn105/815c94c7fa001d86016d75b9998166c1 to your computer and use it in GitHub Desktop.
Classification of the MNIST digits dataset with a CNN, using Keras
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Classify MNIST digits with a small convolutional network in Keras.

Builds a two-conv-layer CNN, trains it on the 60k-image MNIST training
split and reports categorical-cross-entropy loss and accuracy on the
10k-image test split.
"""
import numpy as np

from keras import losses, optimizers
from keras.datasets import mnist
from keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.utils import np_utils

# ---------------------------------------------------------------- model
model = Sequential()
# 28x28x1 grayscale input -> 64 feature maps via 5x5 kernels.
model.add(Conv2D(64, (5, 5), input_shape=(28, 28, 1),
                 data_format="channels_last", strides=(1, 1),
                 padding="valid", activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation="relu", padding="valid"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))          # regularize the conv features
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(10, activation="softmax"))  # one output unit per digit class

# NOTE(review): sgd and adam were tried during experimentation (see the
# results block at the bottom) but are NOT the compiled optimizer —
# Adadelta is. They are kept only as a record of those runs.
sgd = optimizers.SGD(lr=0.01, decay=0.001, momentum=0.9, nesterov=True)
adam = optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=None,
                       decay=0.001, amsgrad=True)

model.compile(loss=losses.categorical_crossentropy,
              optimizer=optimizers.Adadelta(),
              metrics=['accuracy'])
print(model.summary())

# ----------------------------------------------------------------- data
(trainData, trainLabels), (testData, testLabels) = mnist.load_data()

# Reshape to NHWC and normalize pixel values from [0, 255] to [0, 1].
trainData = trainData.reshape(len(trainData), 28, 28, 1).astype('float32') / 255
testData = testData.reshape(len(testData), 28, 28, 1).astype('float32') / 255

# One-hot encode the integer labels for categorical cross-entropy.
mTrainLabels = np_utils.to_categorical(trainLabels, 10)
mTestLabels = np_utils.to_categorical(testLabels, 10)

# ------------------------------------------------------- train/evaluate
batch_size = 128
epochs = 10
model.fit(trainData,
          mTrainLabels,
          validation_data=(testData, mTestLabels),
          batch_size=batch_size,
          verbose=1,
          epochs=epochs)

score = model.evaluate(testData, mTestLabels, verbose=1)
print("Test Loss: {}".format(score[0]))
print("Test Accuracy: {}".format(score[1]))

# Best of 10 epochs
"""
all activations are relu and output layer has softmax/ loss is categorical_crossentropy
0.9894 using conv(64,(5,5)),conv(64,(3,3)),128 hidden layers and sgd(lr=0.01,decay=0.001), batch_size=150
0.9935 using conv(64,(5,5)),conv(64,(3,3)),128 hidden layers and adam(lr=0.015,decay=0.002), batch_size=200, amsgrad=True
0.9923 using conv(64,(5,5)),conv(64,(3,3)),256,64 hidden layers and adam(lr=0.01,decay=0.001), batch_size=128, amsgrad=True
Time = 200s*10 epochs = 2000s = 33 min approx
0.9918 using conv(64,(5,5)),conv(64,(3,3)),256,64 hidden layers and Adadelta, batch_size=128
"""
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment