# https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function

import os

import numpy as np

from keras.datasets import mnist
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K

np.random.seed(1337)  # for reproducibility
batch_size = 128
nb_classes = 10              # digits 0-9
nb_epoch = 12
img_rows, img_cols = 28, 28  # MNIST images are 28x28 grayscale
nb_filters = 32              # convolution filters per Conv2D layer
pool_size = (2, 2)           # max-pooling window
kernel_size = (3, 3)         # convolution kernel size
# ==============================================================================
def build_model(X_train, Y_train, X_test, Y_test):
    """Build, compile and train the convnet. Relies on the module-level
    `input_shape` that is set during data loading below."""
    model = Sequential()
    model.add(Conv2D(nb_filters,
                     kernel_size,
                     padding='valid',
                     input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(nb_filters, kernel_size))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    model.fit(X_train, Y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              verbose=1,
              validation_data=(X_test, Y_test))
    return model
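# ------------------------------------------------------------------------------
# For reference (not in the original gist): a minimal sketch of the same
# architecture written with the Keras functional API. It is defined but never
# called; build_model() above remains the path this script actually uses.
def build_model_functional():
    from keras.layers import Input
    from keras.models import Model
    inputs = Input(shape=input_shape)
    x = Conv2D(nb_filters, kernel_size, padding='valid', activation='relu')(inputs)
    x = Conv2D(nb_filters, kernel_size, activation='relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(nb_classes, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model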
# ==============================================================================
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Reshape to the backend's expected image layout: channels-first for Theano,
# channels-last for TensorFlow.
if K.image_data_format() == 'channels_first':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Scale pixel values from [0, 255] to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# One-hot encode the integer labels for categorical_crossentropy.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
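# A quick illustration (not in the original gist) of what to_categorical does:
#     np_utils.to_categorical([0, 2], 3)
#     -> array([[ 1.,  0.,  0.],
#               [ 0.,  0.,  1.]])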
# ==============================================================================
# build or load model
if os.path.exists('model.json'):
    # The architecture was serialized to JSON and the weights to HDF5; a model
    # rebuilt from JSON must be compiled again before it can be evaluated.
    with open("model.json", "r") as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    model.load_weights("model.h5")
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    print('model loaded...')
else:
    model = build_model(X_train, Y_train, X_test, Y_test)
    # Persist the architecture and weights separately for the next run.
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights("model.h5")
    print('model saved...')
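# Note (not in the original gist): Keras can also bundle architecture and
# weights in a single file via model.save('model.h5') and
# keras.models.load_model(); the JSON-plus-weights split used here keeps the
# architecture human-readable.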
# ==============================================================================
# evaluate
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])      # categorical cross-entropy loss
print('Test accuracy:', score[1])
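
# ------------------------------------------------------------------------------
# Usage sketch (not in the original gist): predict the class of a single test
# image. X_test[:1] keeps the batch dimension that predict() expects.
probs = model.predict(X_test[:1], verbose=0)
print('Predicted digit:', np.argmax(probs[0]), '- true digit:', y_test[0])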
# Explicitly release the backend session; this avoids an exception that some
# TensorFlow versions raise during interpreter shutdown.
K.clear_session()