Cat and Dog Image Classifier
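A convolutional neural network that classifies images as cats or dogs, built with TensorFlow/Keras for the freeCodeCamp image-classification project.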
# imports
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.losses import SparseCategoricalCrossentropy, BinaryCrossentropy
import os
import numpy as np
import matplotlib.pyplot as plt
# Get project files
!wget https://cdn.freecodecamp.org/project-data/cats-and-dogs/cats_and_dogs.zip
!unzip cats_and_dogs.zip
PATH = 'cats_and_dogs'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
test_dir = os.path.join(PATH, 'test')
# Get number of files in each directory. The train and validation directories
# each have the subdirectories "dogs" and "cats".
total_train = sum([len(files) for r, d, files in os.walk(train_dir)])
total_val = sum([len(files) for r, d, files in os.walk(validation_dir)])
total_test = len(os.listdir(test_dir))
# Variables for pre-processing and training.
batch_size = 128
epochs = 20
IMG_HEIGHT = 150
IMG_WIDTH = 150
# 3
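# Create image generators that rescale pixel values from [0, 255] to [0, 1]
# and read images from the class subdirectories in batches.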
train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)
test_image_generator = ImageDataGenerator(rescale=1./255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                            directory=train_dir,
                                                            target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                            class_mode='binary')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
                                                               directory=validation_dir,
                                                               target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                               class_mode='binary')
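# The test images are unlabeled, so class_mode="input" is used and shuffle=False
# keeps the file order fixed so predictions can be matched back to each image.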
test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,
                                                          directory=PATH, classes=['test'],
                                                          target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                          class_mode="input", shuffle=False)
# 4
def plotImages(images_arr, probabilities=False):
    """Display a column of images; when probabilities are given, title each
    image with the predicted percentage of dog or cat."""
    fig, axes = plt.subplots(len(images_arr), 1, figsize=(5, len(images_arr) * 3))
    if probabilities is False:
        for img, ax in zip(images_arr, axes):
            ax.imshow(img)
            ax.axis('off')
    else:
        for img, probability, ax in zip(images_arr, probabilities, axes):
            ax.imshow(img)
            ax.axis('off')
            if probability > 0.5:
                ax.set_title("%.2f" % (probability * 100) + "% dog")
            else:
                ax.set_title("%.2f" % ((1 - probability) * 100) + "% cat")
    plt.show()
sample_training_images, _ = next(train_data_gen)
plotImages(sample_training_images[:5])
# 5
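# Recreate the training generator with random augmentation (rotations, shifts,
# shear, zoom, and horizontal flips) to reduce overfitting.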
train_image_generator = ImageDataGenerator(rescale=1./255,
                                           rotation_range=40,
                                           width_shift_range=0.2,
                                           height_shift_range=0.2,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)
# 6
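# Plot the same training image five times, each with a different random augmentation applied.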
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                            directory=train_dir,
                                                            target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                            class_mode='binary')
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
# 7
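# Build the CNN: three Conv2D + MaxPooling2D blocks, then Flatten and Dense layers.
# The final Dense(2) layer outputs raw logits, which pairs with
# SparseCategoricalCrossentropy(from_logits=True).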
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2,2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(2))
model.compile(optimizer='adam', metrics=['accuracy'], loss=SparseCategoricalCrossentropy(from_logits=True))
model.summary()
# 8
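# Train the model, drawing steps_per_epoch batches from the augmented training
# generator each epoch and evaluating on the validation generator.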
history = model.fit(train_data_gen,
                    steps_per_epoch=train_data_gen.samples // batch_size,
                    epochs=epochs,
                    validation_data=val_data_gen,
                    validation_steps=val_data_gen.samples // batch_size)
# 9
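# Plot training and validation accuracy and loss over the epochs.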
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# 10
test_images, _ = next(test_data_gen)
# Predict on the test set and convert the logits to dog-class probabilities
# (classes are ordered alphabetically, so cats = 0 and dogs = 1).
probabilities = tf.nn.softmax(model.predict(test_data_gen), axis=-1).numpy()[:, 1]
plotImages(test_images, probabilities=probabilities)