-
-
Save mgomes/fe71b80d409b5fe732bbc79ce15b428b to your computer and use it in GitHub Desktop.
import tensorflow as tf
import os
import zipfile
import urllib.request

# Stop training once this training accuracy is reached (used by myCallback).
DESIRED_ACCURACY = 0.999

# Download the happy-or-sad dataset with urllib instead of the `!wget` shell
# magic: `!wget` is IPython-only syntax and still fails with
# "/bin/sh: 1: wget: not found" on systems without the wget binary.
ZIP_PATH = "/tmp/happy-or-sad.zip"
if not os.path.exists(ZIP_PATH):
    urllib.request.urlretrieve(
        "https://storage.googleapis.com/laurencemoroney-blog.appspot.com/happy-or-sad.zip",
        ZIP_PATH,
    )

# Extract the images; `with` guarantees the archive handle is closed even if
# extraction raises.
with zipfile.ZipFile(ZIP_PATH, 'r') as zip_ref:
    zip_ref.extractall("/tmp/h-or-s")
class myCallback(tf.keras.callbacks.Callback):
    """Stop training as soon as training accuracy exceeds DESIRED_ACCURACY."""

    def on_epoch_end(self, epoch, logs=None):
        # `logs=None` avoids the mutable-default-argument pitfall of the
        # original `logs={}`. Tolerate a missing metric key: TF 1.x reports
        # 'acc' while TF 2.x reports 'accuracy'; the original
        # `logs.get('acc') > DESIRED_ACCURACY` raises TypeError (None > float)
        # when the key is absent.
        logs = logs or {}
        acc = logs.get('acc', logs.get('accuracy', 0.0))
        if acc > DESIRED_ACCURACY:
            print("\nReached 99.9% accuracy so cancelling training!")
            self.model.stop_training = True


callbacks = myCallback()
# Three conv/pool stages followed by a dense head; a single sigmoid unit
# because this is a binary (happy vs. sad) classification task on
# 150x150 RGB inputs.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(16, (3, 3), activation='relu',
                                 input_shape=(150, 150, 3)))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
from tensorflow.keras.optimizers import RMSprop

# `learning_rate` replaces the deprecated `lr` keyword, which newer Keras
# releases reject outright.
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(learning_rate=0.001),
              metrics=['acc'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Normalize pixel values from [0, 255] to [0, 1].
train_datagen = ImageDataGenerator(rescale=1 / 255)

# Stream batches of 10 images resized to 150x150, with binary labels inferred
# from the two class sub-directories under /tmp/h-or-s.
# Expected output: 'Found 80 images belonging to 2 classes'
train_generator = train_datagen.flow_from_directory(
    "/tmp/h-or-s",
    class_mode='binary',
    batch_size=10,
    target_size=(150, 150))
# 80 images at batch_size 10 means 8 batches per epoch; the original
# steps_per_epoch=2 consumed only 20 images per epoch. `model.fit` accepts
# generators directly — `fit_generator` is deprecated and removed in recent
# TensorFlow versions.
history = model.fit(
    train_generator,
    steps_per_epoch=8,
    epochs=15,
    verbose=1,
    callbacks=[callbacks])
Does this overfit?
I have not been able to get wget to work to download the images. I get "/bin/sh: 1: wget: not found" on executing the wget command. I have pip-installed it and I can see wget 3.2 installed in the python3.6 folder. I import wget but still can't get it to work on Jupyter. Wget works fine on Google Colab.
There are 80 images loaded in batches of 10, so steps_per_epoch should be 8:
history = model.fit_generator(
train_generator,
steps_per_epoch=8,
epochs=15,
verbose=1,
callbacks=[callbacks])
I have not been able to get wget to work to download the images. I get "/bin/sh: 1: wget: not found" on executing the wget command. I have pip-installed it and I can see wget 3.2 installed in the python3.6 folder. I import wget but still can't get it to work on Jupyter. Wget works fine on Google Colab.
I have a solution for this. Please go to the below link and directly download the happy/sad zip file onto your local computer.
Link - https://storage.googleapis.com/laurencemoroney-blog.appspot.com/happy-or-sad.zip
After that, you can make changes to your code as below:
path = r'Desktop\happy-or-sad.zip'
zip_ref = zipfile.ZipFile(path, 'r')
zip_ref.extractall("/tmp/h-or-s")
zip_ref.close()
Since I've saved the file on my Desktop, I've mentioned that folder. Rest of the code will remain the same.
Hope this helps. :)
Definitely overfits. Accuracy approx. 1.
@hassanharoon321 it's the dataset that is downloaded on line 7 of this script.