```python
# It only has two dependencies: numpy and tensorflow.
import numpy as np
import tensorflow as tf
from config import cfg  # local configuration module, not an external dependency

# Class defining a Convolutional Capsule
# consisting of multiple neuron layers.
```
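The file is cut off before the class body appears. As context for what a capsule layer computes, here is a minimal sketch of the squash nonlinearity conventionally applied to capsule output vectors (Sabour et al., 2017); it is a standard formulation, not code recovered from the truncated file:

```python
import tensorflow as tf

def squash(s, axis=-1, epsilon=1e-7):
    """v = (||s||^2 / (1 + ||s||^2)) * (s / ||s||): short vectors shrink
    toward zero, long vectors approach unit length."""
    squared_norm = tf.reduce_sum(tf.square(s), axis=axis, keepdims=True)
    scale = squared_norm / (1.0 + squared_norm)
    # epsilon keeps the division stable when a vector is all zeros
    return scale * s / tf.sqrt(squared_norm + epsilon)
```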
```python
"""TensorFlow 2.0 implementation of vanilla Autoencoder."""
import numpy as np
import tensorflow as tf

__author__ = "Abien Fred Agarap"

# fix the seeds for reproducible runs
np.random.seed(1)
tf.random.set_seed(1)

batch_size = 128
epochs = 10
```
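The hyperparameters above imply a batched dataset, but the input pipeline is not shown. A minimal sketch, assuming flattened MNIST digits (784-dimensional vectors), the usual choice for this kind of demo; `training_dataset` is an illustrative name:

```python
(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0  # scale pixels to [0, 1]
training_dataset = (tf.data.Dataset.from_tensor_slices(x_train)
                    .shuffle(buffer_size=1024)
                    .batch(batch_size))
```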
```python
class Encoder(tf.keras.layers.Layer):
    def __init__(self, intermediate_dim):
        super(Encoder, self).__init__()
        self.hidden_layer = tf.keras.layers.Dense(
            units=intermediate_dim,
            activation=tf.nn.relu,
            kernel_initializer='he_uniform'
        )
        self.output_layer = tf.keras.layers.Dense(
            units=intermediate_dim,
            activation=tf.nn.sigmoid
        )

    def call(self, input_features):
        # project the input to the latent code
        activation = self.hidden_layer(input_features)
        return self.output_layer(activation)
```
```python
class Decoder(tf.keras.layers.Layer):
    def __init__(self, intermediate_dim, original_dim):
        super(Decoder, self).__init__()
        self.hidden_layer = tf.keras.layers.Dense(
            units=intermediate_dim,
            activation=tf.nn.relu,
            kernel_initializer='he_uniform'
        )
        self.output_layer = tf.keras.layers.Dense(
            units=original_dim,
            activation=tf.nn.sigmoid
        )

    def call(self, code):
        # map the latent code back to the original feature space
        activation = self.hidden_layer(code)
        return self.output_layer(activation)
```
```python
class Autoencoder(tf.keras.Model):
    def __init__(self, intermediate_dim, original_dim):
        super(Autoencoder, self).__init__()
        self.encoder = Encoder(intermediate_dim=intermediate_dim)
        self.decoder = Decoder(intermediate_dim=intermediate_dim,
                               original_dim=original_dim)

    def call(self, input_features):
        code = self.encoder(input_features)  # compress
        reconstructed = self.decoder(code)   # reconstruct
        return reconstructed
```
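To sanity-check the wiring, the model can be instantiated and run on a dummy batch; the dimensions are illustrative, matching the flattened-MNIST assumption above:

```python
autoencoder = Autoencoder(intermediate_dim=64, original_dim=784)
dummy_batch = tf.zeros((batch_size, 784))
# the reconstruction has the same shape as the input
assert autoencoder(dummy_batch).shape == (batch_size, 784)
```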
```python
def loss(model, original):
    # mean squared error between the reconstruction and the input
    reconstruction_error = tf.reduce_mean(
        tf.square(tf.subtract(model(original), original)))
    return reconstruction_error
```
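This is plain mean squared error written out by hand. For reference, a sketch of the same quantity via the built-in Keras loss (for 2-D batches the two reductions agree):

```python
mse = tf.keras.losses.MeanSquaredError()

def loss_builtin(model, original):
    # equivalent to the hand-rolled loss above for 2-D inputs
    return mse(original, model(original))
```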
```python
def train(loss, model, opt, original):
    # record the forward pass so the tape can differentiate the loss
    with tf.GradientTape() as tape:
        reconstruction_error = loss(model, original)
    # compute gradients outside the recording context, then apply them
    gradients = tape.gradient(reconstruction_error, model.trainable_variables)
    opt.apply_gradients(zip(gradients, model.trainable_variables))
```
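A minimal outer loop tying the pieces together, assuming the `autoencoder` and `training_dataset` sketched above; the Adam learning rate is illustrative:

```python
opt = tf.keras.optimizers.Adam(learning_rate=1e-2)

for epoch in range(epochs):
    for batch_features in training_dataset:
        train(loss, autoencoder, opt, batch_features)
    # report the loss on the last batch of each epoch
    print('epoch {}: loss = {:.6f}'.format(
        epoch + 1, loss(autoencoder, batch_features).numpy()))
```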