Last active
April 9, 2020 16:21
-
-
Save antojoseph/dd4472dd60a667f02c9014c9d3ace241 to your computer and use it in GitHub Desktop.
Simple Fashion-MNIST classification with a DNN
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import tensorflow as tf | |
print(tf.__version__) | |
class myCallback(tf.keras.callbacks.Callback):
    """Early-stopping callback: halts training once the epoch-end loss drops below 0.25."""

    def on_epoch_end(self, epoch, logs=None):
        # `logs=None` instead of the original mutable-default `logs={}`
        # (mutable defaults are shared across calls — classic Python pitfall).
        logs = logs or {}
        loss = logs.get('loss')
        # Guard: logs.get('loss') can be None on some callbacks/epochs;
        # the original `None < 0.25` comparison would raise TypeError.
        if loss is not None and loss < 0.25:
            # The original message claimed "Reached 75% accuracy", but the
            # condition tests *loss*, not accuracy — message fixed to match.
            print("\nLoss dropped below 0.25 so cancelling training!")
            self.model.stop_training = True
# Instantiate the early-stopping callback defined above.
callbacks = myCallback()

# Fashion-MNIST: 60k train / 10k test 28x28 grayscale images, 10 classes.
mnist = tf.keras.datasets.fashion_mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()

# Scale pixel values from [0, 255] to [0.0, 1.0]; normalized inputs
# help gradient descent converge.
training_images = training_images / 255.0
test_images = test_images / 255.0

# The first layer in the network should match the shape of the data.
# Our data is 28x28 images, and 28 layers of 28 neurons would be infeasible,
# so it makes more sense to 'flatten' that 28x28 into a 784x1 vector.
# Instead of writing all the code to handle that ourselves, we add the
# Flatten() layer at the beginning, and when the arrays are loaded into the
# model later, they'll automatically be flattened for us.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax),
])

# Fix: the original compile() tracked no metrics, so neither fit() nor
# evaluate() ever reported accuracy despite the script's comments about it.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(training_images, training_labels, epochs=10, callbacks=[callbacks])

# Held-out performance, then a look at one prediction.
model.evaluate(test_images, test_labels)
classifications = model.predict(test_images)
print(classifications[0])  # 10 softmax probabilities for the first test image
print(test_labels[0])      # ground-truth class index for comparison
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment