TensorFlow macOS acceleration
# Install TensorFlow for macOS from https://github.com/apple/tensorflow_macos
import argparse

import tensorflow as tf
from tensorflow.keras import datasets, layers
from tensorflow.python.compiler.mlcompute import mlcompute


def get_dataset(hparams):
  """Load CIFAR-10, scale pixels to [0, 1], and build batched tf.data pipelines."""
  (train_images, train_labels), (test_images,
                                 test_labels) = datasets.cifar10.load_data()
  train_images, test_images = train_images / 255.0, test_images / 255.0
  hparams.input_shape = train_images.shape[1:]
  train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
  train_ds = train_ds.shuffle(1000).batch(hparams.batch_size)
  test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
  test_ds = test_ds.batch(hparams.batch_size)
  return train_ds, test_ds


def get_model(hparams):
  """Build a small CNN classifier that outputs 10 logits per image."""
  inputs = tf.keras.Input(shape=hparams.input_shape, name='inputs')
  outputs = layers.Conv2D(64, (6, 6), activation='relu')(inputs)
  outputs = layers.MaxPooling2D((2, 2))(outputs)
  outputs = layers.Dropout(0.2)(outputs)
  outputs = layers.Conv2D(128, (3, 3), activation='relu')(outputs)
  outputs = layers.MaxPooling2D((2, 2))(outputs)
  outputs = layers.Dropout(0.2)(outputs)
  outputs = layers.Flatten()(outputs)
  outputs = layers.Dense(64, activation='relu')(outputs)
  outputs = layers.Dropout(0.2)(outputs)
  outputs = layers.Dense(32, activation='relu')(outputs)
  outputs = layers.Dense(10)(outputs)
  return tf.keras.Model(inputs=inputs, outputs=outputs)


def main(hparams):
  # Route ops to the Apple GPU via the ML Compute backend when requested.
  if hparams.use_gpu:
    mlcompute.set_mlc_device(device_name='gpu')
  train_ds, test_ds = get_dataset(hparams)
  model = get_model(hparams)
  model.summary()
  model.compile(
      optimizer=tf.keras.optimizers.Adam(hparams.learning_rate),
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=['accuracy'])
  model.fit(train_ds, epochs=hparams.epochs, validation_data=test_ds)
  test_loss, test_acc = model.evaluate(test_ds, verbose=2)
  print(f'Test loss: {test_loss:.04f}\tTest accuracy: {test_acc*100:.02f}')


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--batch_size', default=32, type=int)
  parser.add_argument('--learning_rate', default=0.001, type=float)
  parser.add_argument('--epochs', default=10, type=int)
  parser.add_argument('--use_gpu', action='store_true')
  main(parser.parse_args())
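To train with the ML Compute GPU backend, run the script with the --use_gpu flag; the file name below is just a placeholder for wherever this gist is saved, and any of the argparse defaults can be overridden the same way:

python train_cifar10.py --use_gpu --batch_size 128 --epochs 10

Without --use_gpu, tensorflow_macos falls back to its default device selection.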