# Load the dataset
from keras.datasets import cifar100
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
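# CIFAR-100 ships as 50,000 training and 10,000 test images, 32x32 RGB,
# with integer labels in [0, 99]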
# Resize the inputs (InceptionV3 requires at least 75x75 images)
import cv2
import numpy as np
from keras.utils import np_utils

# image_size = 299  # InceptionV3's native input size
image_size = 75

def resize(data):
    return np.array([cv2.resize(img, dsize=(image_size, image_size)) for img in data])

x_train = resize(x_train)
y_train = np_utils.to_categorical(y_train)
x_test = resize(x_test)
y_test = np_utils.to_categorical(y_test)
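# Optional (not part of the original gist): InceptionV3's ImageNet weights
# were trained on inputs scaled to [-1, 1], so applying the matching
# preprocessing before fine-tuning is generally recommended:
# from keras.applications.inception_v3 import preprocess_input
# x_train = preprocess_input(x_train.astype('float32'))
# x_test = preprocess_input(x_test.astype('float32'))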
# Set up the model to train
from keras.applications import InceptionV3
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D

num_classes = 100

base_model = InceptionV3(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)

# Freeze the lower layers; keep BatchNormalization layers trainable so their
# scale/shift parameters can still adapt to the new data
for layer in model.layers[:249]:
    layer.trainable = False
    if layer.name.startswith('batch_normalization'):
        layer.trainable = True
for layer in model.layers[249:]:
    layer.trainable = True
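# Note (added): the cutoff at 249 matches the Keras fine-tuning example for
# InceptionV3, where layer 249 is the start of the top two inception blocks;
# only those blocks and the new classifier head are trained here.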
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
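# Optional (not part of the original gist): fine-tuning pretrained weights
# is often more stable with a smaller learning rate than Adam's default:
# from keras.optimizers import Adam
# model.compile(optimizer=Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])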
# Training settings and training
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ModelCheckpoint

data_generator = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=0,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    vertical_flip=False
)

early_stopping = EarlyStopping(monitor='val_loss', patience=5)
checkpoint = ModelCheckpoint(filepath='chpt.hdf5', monitor='val_loss', verbose=1, save_best_only=True)

history = model.fit_generator(
    data_generator.flow(x_train, y_train, batch_size=32),
    steps_per_epoch=x_train.shape[0] // 32,
    validation_data=(x_test, y_test),
    verbose=1,
    callbacks=[early_stopping, checkpoint],
    epochs=100
)
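# Note (added): data_generator.fit(x_train) is only required when
# featurewise_center, featurewise_std_normalization, or zca_whitening is
# enabled, so it is correctly omitted here. Also, steps_per_epoch of
# x_train.shape[0] // 32 drops the final partial batch each epoch.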
# Epoch at which early stopping triggered (stopped_epoch is 0 if training
# ran for all 100 epochs)
epochs = early_stopping.stopped_epoch
# Plot the training history
import matplotlib.pyplot as plt

def plot_history(history):
    # accuracy (keys are 'acc'/'val_acc' in Keras 2.2; newer versions use 'accuracy')
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend(['acc', 'val_acc'], loc='lower right')
    plt.savefig('acc.png')
    plt.clf()

    # loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(['loss', 'val_loss'], loc='lower right')
    plt.savefig('loss.png')

plot_history(history)
# Save the model
model.save('model.h5')
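# The saved model can later be restored with (not part of the original gist):
# from keras.models import load_model
# model = load_model('model.h5')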
# Save the training history
import json
with open('history.json', 'w') as f:
    json.dump(history.history, f)
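# The saved history can be reloaded for later analysis (added example):
# with open('history.json') as f:
#     history_dict = json.load(f)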
# Evaluate the final model on the test set
score = model.evaluate(x_test, y_test, verbose=1)
print('Loss:', score[0])
print('Accuracy:', score[1])