@AFAgarap
Created May 16, 2019 09:35
Training loop for a variational autoencoder model.
import tensorflow as tf

# Assumes `vae`, `mse_loss_fn`, `optimizer`, `loss_metric`, and
# `train_dataset` are defined beforehand (see the sketch below).
epochs = 60
writer = tf.summary.create_file_writer('tmp')

with writer.as_default():
    with tf.summary.record_if(True):
        for epoch in range(epochs):
            for step, batch_features in enumerate(train_dataset):
                with tf.GradientTape() as tape:
                    z_mean, z_log_var, z = vae.encoder(tf.constant(batch_features))
                    reconstructed = vae.decoder(z)
                    # Reconstruction loss on the flattened images, plus the
                    # KL divergence term the model registers in `vae.losses`.
                    loss = mse_loss_fn(batch_features.numpy().reshape(-1, 784), reconstructed)
                    loss += sum(vae.losses)
                grads = tape.gradient(loss, vae.trainable_variables)
                optimizer.apply_gradients(zip(grads, vae.trainable_variables))
                loss_metric(loss)
            # Report the running mean loss every 10 epochs.
            if (epoch + 1) % 10 == 0:
                print('Epoch {}/{} : mean loss = {}'.format(epoch + 1, epochs, loss_metric.result()))
            tf.summary.scalar('loss', loss_metric.result(), step=step)
            tf.summary.image('original', batch_features, max_outputs=10, step=step)
            tf.summary.image('reconstructed', reconstructed.numpy().reshape(-1, 28, 28, 1), max_outputs=10, step=step)
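
The loop assumes a `vae` model whose encoder returns `(z_mean, z_log_var, z)` and registers a KL divergence term in `vae.losses`, plus a loss function, metric, optimizer, and an MNIST dataset. None of these are defined in the gist itself, so the following is a minimal sketch under those assumptions; the layer sizes, latent dimension, and hyperparameters are illustrative, not the author's.

class Encoder(tf.keras.layers.Layer):
    """Maps an image to (z_mean, z_log_var, z) and adds the KL term."""
    def __init__(self, latent_dim=2, hidden_dim=64):
        super().__init__()
        self.flatten = tf.keras.layers.Flatten()
        self.hidden = tf.keras.layers.Dense(hidden_dim, activation='relu')
        self.dense_mean = tf.keras.layers.Dense(latent_dim)
        self.dense_log_var = tf.keras.layers.Dense(latent_dim)

    def call(self, inputs):
        activation = self.hidden(self.flatten(inputs))
        z_mean = self.dense_mean(activation)
        z_log_var = self.dense_log_var(activation)
        # Reparameterization trick: z = mu + sigma * epsilon.
        epsilon = tf.random.normal(shape=tf.shape(z_mean))
        z = z_mean + tf.exp(0.5 * z_log_var) * epsilon
        # KL divergence between q(z|x) and the unit Gaussian prior,
        # surfaced through `vae.losses` in the training loop above.
        kl_loss = -0.5 * tf.reduce_mean(
            z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)
        self.add_loss(kl_loss)
        return z_mean, z_log_var, z

class Decoder(tf.keras.layers.Layer):
    """Maps a latent code back to a flattened 784-pixel image."""
    def __init__(self, original_dim=784, hidden_dim=64):
        super().__init__()
        self.hidden = tf.keras.layers.Dense(hidden_dim, activation='relu')
        self.output_layer = tf.keras.layers.Dense(original_dim, activation='sigmoid')

    def call(self, z):
        return self.output_layer(self.hidden(z))

class VAE(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()

    def call(self, inputs):
        _, _, z = self.encoder(inputs)
        return self.decoder(z)

vae = VAE()
mse_loss_fn = tf.keras.losses.MeanSquaredError()
loss_metric = tf.keras.metrics.Mean()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

# MNIST images scaled to [0, 1], shaped (batch, 28, 28, 1) as the
# `tf.summary.image` calls above expect.
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype('float32').reshape(-1, 28, 28, 1) / 255.0
train_dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(1024).batch(128)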