Skip to content

Instantly share code, notes, and snippets.

View dipanjanS's full-sized avatar
:octocat:

Dipanjan (DJ) Sarkar dipanjanS

:octocat:
View GitHub Profile
# Build the test-set iterator. Batch size 1 so every test image is yielded
# exactly once per pass; images are resized to the model's 192x192 input.
TEST_BATCH_SIZE = 1
IMG_DIMS = (192, 192)
# Explicit `classes` order pins the class-index mapping to these seven
# SETI signal categories (one sub-directory per class under TEST_DIR).
# NOTE(review): this call is truncated in this chunk — the closing paren
# (and likely `shuffle=False` for stable prediction order) is not visible.
test_generator = test_datagen.flow_from_directory(directory=TEST_DIR,
classes=['brightpixel', 'narrowband',
'narrowbanddrd', 'noise',
'squarepulsednarrowband', 'squiggle',
'squigglesquarepulsednarrowband'],
target_size=IMG_DIMS,
batch_size=TEST_BATCH_SIZE,
class_mode='categorical',
Epoch 1/100
88/88 [=======] - 139s 2s/step - loss: 2.3190 - categorical_accuracy: 0.2356 - val_loss: 1.9669 - val_categorical_accuracy: 0.2614
Epoch 2/100
88/88 [=======] - 118s 1s/step - loss: 1.9840 - categorical_accuracy: 0.3255 - val_loss: 1.7577 - val_categorical_accuracy: 0.3500
Epoch 3/100
88/88 [=======] - 118s 1s/step - loss: 1.7720 - categorical_accuracy: 0.3823 - val_loss: 1.6397 - val_categorical_accuracy: 0.3886
...
...
Epoch 498/500
88/88 [=======] - 118s 1s/step - loss: 0.3285 - categorical_accuracy: 0.8928 - val_loss: 0.3917 - val_categorical_accuracy: 0.8729
class EpochModelSaver(keras.callbacks.Callback):
    """Save the full model to an HDF5 checkpoint after every epoch.

    Files are named 'resnet50_finetune_full_seti_epoch_<N>.h5' with N
    1-based (epoch 0 is saved as epoch 1).
    """

    def on_epoch_end(self, epoch, logs=None):
        # FIX: `logs=None` replaces the mutable default `logs={}`; the dict
        # is unused here, but a mutable default argument is a Python
        # anti-pattern (shared across calls).
        self.model.save('resnet50_finetune_full_seti_epoch_{}.h5'.format(epoch + 1))


ms_epoch = EpochModelSaver()
# Append per-epoch metrics to CSV so an interrupted run's history survives.
csv_logger = keras.callbacks.CSVLogger('resnet50_finetune_full_seti_log.csv', append=True, separator=',')
# Train run 3 (full ResNet50 fine-tune); 5600 training images, so
# steps_per_epoch covers one full pass over the training set per epoch.
# NOTE(review): this call is truncated in this chunk — remaining keyword
# arguments (epochs, validation_data, callbacks, ...) are not visible.
history3 = model.fit_generator(
train_generator,
steps_per_epoch=math.ceil(5600 / TRAIN_BATCH_SIZE),
INPUT_SHAPE = (192, 192, 3)
# Load ResNet50 pre-trained on ImageNet, without the dense classifier head,
# so the convolutional base can be fine-tuned on the SETI spectrograms.
resnet = keras.applications.resnet50.ResNet50(include_top=False, weights='imagenet',
                                              input_shape=INPUT_SHAPE)
# Unfreeze the backbone as a whole, then every individual layer.
resnet.trainable = True
for layer in resnet.layers:
    # BUG FIX: the original re-set `resnet.trainable` (the model-level flag)
    # on every iteration instead of unfreezing the layer itself; this mirrors
    # the correct per-layer pattern used in the VGG19 snippet.
    layer.trainable = True
Epoch 1/100
88/88 [=======] - 147s 2s/step - loss: 2.0585 - categorical_accuracy: 0.1617 - val_loss: 1.8726 - val_categorical_accuracy: 0.3229
Epoch 2/100
88/88 [=======] - 137s 2s/step - loss: 1.8822 - categorical_accuracy: 0.2427 - val_loss: 1.7319 - val_categorical_accuracy: 0.4186
Epoch 3/100
88/88 [=======] - 137s 2s/step - loss: 1.7656 - categorical_accuracy: 0.3272 - val_loss: 1.5815 - val_categorical_accuracy: 0.4771
...
...
Epoch 98/100
88/88 [=======] - 137s 2s/step - loss: 0.4483 - categorical_accuracy: 0.8461 - val_loss: 0.4451 - val_categorical_accuracy: 0.8443
INPUT_SHAPE = (192, 192, 3)

# Pull in a VGG-19 backbone with ImageNet weights; dropping the dense
# classifier head (include_top=False) leaves just the conv base, which we
# fine-tune end-to-end on the SETI spectrogram images.
vgg = keras.applications.vgg19.VGG19(include_top=False,
                                     weights='imagenet',
                                     input_shape=INPUT_SHAPE)

# Unfreeze the model as a whole, then explicitly mark each individual
# layer trainable so no block remains frozen.
vgg.trainable = True
for conv_layer in vgg.layers:
    conv_layer.trainable = True
# Plot train vs. validation accuracy curves for the first training run.
history = history1
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
t = f.suptitle('Model Performance', fontsize=12)
f.subplots_adjust(top=0.85, wspace=0.3)
# History lists hold one entry per epoch; x-axis epochs are 1-based, so the
# upper bound is len(...)+1 for use with range().
max_epoch = len(history.history['categorical_accuracy'])+1
epoch_list = list(range(1,max_epoch))
ax1.plot(epoch_list, history.history['categorical_accuracy'], label='Train Accuracy')
ax1.plot(epoch_list, history.history['val_categorical_accuracy'], label='Validation Accuracy')
# One tick every 20 epochs. NOTE(review): snippet appears truncated here —
# the use of tick_range and the ax2 (loss) panel are not visible in this chunk.
tick_range = np.arange(0, max_epoch+1, 20)
Epoch 1/100
88/88 [=======] - 109s 1s/step - loss: 1.6604 - categorical_accuracy: 0.3503 - val_loss: 1.1411 - val_categorical_accuracy: 0.5714
Epoch 2/100
88/88 [=======] - 89s 1s/step - loss: 1.1973 - categorical_accuracy: 0.5357 - val_loss: 1.0442 - val_categorical_accuracy: 0.5857
Epoch 3/100
88/88 [=======] - 94s 1s/step - loss: 0.9871 - categorical_accuracy: 0.6165 - val_loss: 0.8190 - val_categorical_accuracy: 0.6657
...
...
Epoch 98/100
88/88 [=======] - 93s 1s/step - loss: 0.4372 - categorical_accuracy: 0.8487 - val_loss: 0.4311 - val_categorical_accuracy: 0.8543
class EpochModelSaver(keras.callbacks.Callback):
    """Save the full model to an HDF5 checkpoint after every epoch.

    Files are named 'vgg19_finetune_partial_seti_epoch_<N>.h5' with N
    1-based (epoch 0 is saved as epoch 1).
    """

    def on_epoch_end(self, epoch, logs=None):
        # FIX: `logs=None` replaces the mutable default `logs={}`; the dict
        # is unused here, but a mutable default argument is a Python
        # anti-pattern (shared across calls).
        self.model.save('vgg19_finetune_partial_seti_epoch_{}.h5'.format(epoch + 1))


ms_epoch = EpochModelSaver()
# Append per-epoch metrics to CSV so an interrupted run's history survives.
csv_logger = keras.callbacks.CSVLogger('vgg19_finetune_partial_seti_log.csv', append=True, separator=',')
# Train run 1 (partial VGG-19 fine-tune); 5600 training images, so
# steps_per_epoch covers one full pass over the training set per epoch.
# NOTE(review): this call is truncated in this chunk — remaining keyword
# arguments (epochs, validation_data, callbacks, ...) are not visible.
history1 = model.fit_generator(
train_generator,
steps_per_epoch=math.ceil(5600 / TRAIN_BATCH_SIZE),
INPUT_SHAPE = (192, 192, 3)
# Load VGG-19 pre-trained on ImageNet without the classifier head, for
# partial fine-tuning: only the last conv blocks will be unfrozen.
vgg = keras.applications.vgg19.VGG19(include_top=False, weights='imagenet',
input_shape=INPUT_SHAPE)
vgg.trainable = True
# Flag flipped to True once the loop reaches block4/block5 — presumably all
# layers from that point on get layer.trainable = set_trainable, freezing
# everything before block4_conv1. NOTE(review): loop body is truncated in
# this chunk — the statements under the `if` are not visible.
set_trainable = False
for layer in vgg.layers:
if layer.name in ['block5_conv1', 'block4_conv1']: