ResNet50 fine-tuning and saving the model as a Core ML model.
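The generators in the script read images with Keras's flow_from_directory, which expects one subdirectory per class under each data directory; the subdirectory names define the class indices (assigned in alphabetical order), and the labels file passed to the Core ML converter should list the class names in that same order. A hypothetical layout for the six-class setup used here (the folder names are placeholders, not part of the original gist):

    drive/Resnet/dataset/
        train/
            class_0/        # one folder per class; every image inside belongs to that class
            ...
            class_5/
        test/
            class_0/
            ...
            class_5/
    drive/Resnet/labels.txt  # six labels, one per line, in the generator's class order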
from keras.applications.resnet50 import ResNet50
from keras.models import Model, load_model
from keras.layers import Dense, GlobalAveragePooling2D, Input
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adadelta
import keras
import math, os, sys
import matplotlib.pyplot as plt
import coremltools


def get_model():
    input_tensor = Input(shape=(224, 224, 3))  # this assumes K.image_data_format() == 'channels_last'
    # create the base pre-trained model
    base_model = ResNet50(input_tensor=input_tensor, weights='imagenet', include_top=False)
    # freeze the convolutional base; only the new classification head is trained
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = GlobalAveragePooling2D(data_format='channels_last')(x)
    x = Dense(num_classes, activation='softmax')(x)
    updatedModel = Model(base_model.input, x)
    return updatedModel


def compile_model(compiledModel):
    compiledModel.compile(loss=keras.losses.categorical_crossentropy,
                          optimizer=Adadelta(),
                          metrics=['accuracy'])


def modelFitGenerator(fitModel):
    num_train_samples = sum([len(files) for r, d, files in os.walk(train_data_dir)])
    num_valid_samples = sum([len(files) for r, d, files in os.walk(validation_data_dir)])

    num_train_steps = math.floor(num_train_samples / batch_size)
    num_valid_steps = math.floor(num_valid_samples / batch_size)

    # augment only the training images; validation images are left untouched
    train_datagen = ImageDataGenerator(
        rotation_range=90,
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=0.4)
    test_datagen = ImageDataGenerator()

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=image_size,
        batch_size=batch_size,
        class_mode='categorical', shuffle=True
    )
    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=image_size,
        batch_size=batch_size,
        class_mode='categorical', shuffle=True
    )

    print("start history model")
    history = fitModel.fit_generator(
        train_generator,
        steps_per_epoch=num_train_steps,
        epochs=nb_epoch,
        validation_data=validation_generator,
        validation_steps=num_valid_steps)
    printGraph(history)


def printGraph(history):
    # summarize history for accuracy
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()


def saveCoreMLModel(kerasModel):
    coreml_model = coremltools.converters.keras.convert(kerasModel,
                                                        input_names=['input'],
                                                        output_names=['probs'],
                                                        image_input_names='input',
                                                        predicted_feature_name='predictedMoney',
                                                        class_labels='drive/Resnet/labels.txt')
    coreml_model.save('resnet50custom.mlmodel')
    print('CoreML model saved')


def main():
    model = get_model()
    compile_model(model)
    modelFitGenerator(model)
    saveCoreMLModel(model)


if __name__ == '__main__':
    # constants
    image_size = (224, 224)
    train_data_dir = 'drive/Resnet/dataset/train'
    validation_data_dir = 'drive/Resnet/dataset/test'
    nb_epoch = 50
    batch_size = 16
    num_classes = 6
    main()
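After training, the converted model can be sanity-checked from Python before it is dropped into an Xcode project. The sketch below is illustrative only: it assumes coremltools 3.x (the release line that still ships the Keras converter used above), that prediction runs on macOS (MLModel.predict is not supported elsewhere in that version), and it uses a placeholder image path.

    import coremltools
    from PIL import Image

    # load the converted model and inspect its interface
    mlmodel = coremltools.models.MLModel('resnet50custom.mlmodel')
    print(mlmodel.get_spec().description)  # shows the 'input' image and the 'probs'/'predictedMoney' outputs

    # run a single prediction on a placeholder image (macOS only)
    img = Image.open('sample.jpg').resize((224, 224))
    result = mlmodel.predict({'input': img})
    print(result['predictedMoney'])  # predicted class label
    print(result['probs'])           # label -> probability dictionary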