AutoRec model architecture
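The snippet below builds the computation graph for AutoRec (Sedhain et al., "AutoRec: Autoencoders Meet Collaborative Filtering", WWW 2015): a single-hidden-layer autoencoder that takes a partially observed rating vector, encodes it with a sigmoid layer, and reconstructs it linearly. Writing M for the 0/1 mask of observed ratings, the graph minimizes the masked reconstruction error plus an L2 penalty on the weights, which in LaTeX notation is

\min_{V,\,W,\,\mu,\,b}\; \bigl\lVert (R - \hat{R}) \odot M \bigr\rVert_F^2
  \;+\; \frac{\lambda}{2}\Bigl( \lVert W \rVert_F^2 + \lVert V \rVert_F^2 \Bigr),
\qquad \hat{R} = \sigma(R V + \mu)\, W + b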
import tensorflow as tf


class AutoRec:
    """
    AutoRec model class: an autoencoder for rating prediction.
    """
    def prepare_model(self):
        """
        Build the AutoRec graph: placeholders, encoder/decoder parameters,
        masked reconstruction loss, L2 regularization, and optimizer.
        """
        # Batch of rating vectors and the 0/1 mask of observed entries.
        self.input_R = tf.compat.v1.placeholder(dtype=tf.float32,
                                                shape=[None, self.num_items],
                                                name="input_R")
        self.input_mask_R = tf.compat.v1.placeholder(dtype=tf.float32,
                                                     shape=[None, self.num_items],
                                                     name="input_mask_R")

        # Encoder weights V (items -> hidden) and decoder weights W (hidden -> items).
        V = tf.compat.v1.get_variable(name="V", initializer=tf.compat.v1.truncated_normal(
                                          shape=[self.num_items, self.hidden_neuron],
                                          mean=0, stddev=0.03), dtype=tf.float32)
        W = tf.compat.v1.get_variable(name="W", initializer=tf.compat.v1.truncated_normal(
                                          shape=[self.hidden_neuron, self.num_items],
                                          mean=0, stddev=0.03), dtype=tf.float32)
        # Encoder and decoder biases.
        mu = tf.compat.v1.get_variable(name="mu", initializer=tf.zeros(shape=self.hidden_neuron), dtype=tf.float32)
        b = tf.compat.v1.get_variable(name="b", initializer=tf.zeros(shape=self.num_items), dtype=tf.float32)

        # Encoder: sigmoid(R V + mu); decoder: h W + b reconstructs the ratings.
        pre_Encoder = tf.matmul(self.input_R, V) + mu
        self.Encoder = tf.nn.sigmoid(pre_Encoder)
        pre_Decoder = tf.matmul(self.Encoder, W) + b
        self.Decoder = tf.identity(pre_Decoder)

        # Reconstruction error on observed entries only, plus L2 weight regularization.
        pre_rec_cost = tf.multiply((self.input_R - self.Decoder), self.input_mask_R)
        rec_cost = tf.square(self.l2_norm(pre_rec_cost))
        pre_reg_cost = tf.square(self.l2_norm(W)) + tf.square(self.l2_norm(V))
        reg_cost = self.lambda_value * 0.5 * pre_reg_cost
        self.cost = rec_cost + reg_cost

        # Optimizer selection, with optional gradient clipping to [-5, 5].
        if self.optimizer_method == "Adam":
            optimizer = tf.compat.v1.train.AdamOptimizer(self.lr)
        elif self.optimizer_method == "RMSProp":
            optimizer = tf.compat.v1.train.RMSPropOptimizer(self.lr)
        else:
            raise ValueError("Optimizer Key ERROR")

        if self.grad_clip:
            gvs = optimizer.compute_gradients(self.cost)
            capped_gvs = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gvs]
            self.optimizer = optimizer.apply_gradients(capped_gvs, global_step=self.global_step)
        else:
            self.optimizer = optimizer.minimize(self.cost, global_step=self.global_step)

    def l2_norm(self, tensor):
        # Helper referenced above but not shown in the original gist; assumed to be a
        # standard Euclidean norm (squaring it above then yields a sum of squares).
        return tf.sqrt(tf.reduce_sum(tf.square(tensor)))
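For context, here is a minimal, hypothetical driver for the class above. The real project presumably sets num_items, hidden_neuron, lambda_value, lr, optimizer_method, grad_clip, and global_step in an __init__ and runs a full training loop; everything in this sketch (the AutoRecDemo wrapper, the hyperparameter values, and the random dummy batch) is an assumption for illustration only.

# Hypothetical driver; none of this appears in the original gist.
import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # placeholders require graph mode under TF 2.x


class AutoRecDemo(AutoRec):
    """Assumed wrapper supplying the attributes prepare_model() expects."""
    def __init__(self, num_items, hidden_neuron=500, lambda_value=1.0,
                 lr=1e-3, optimizer_method="Adam", grad_clip=False):
        self.num_items = num_items
        self.hidden_neuron = hidden_neuron
        self.lambda_value = lambda_value
        self.lr = lr
        self.optimizer_method = optimizer_method
        self.grad_clip = grad_clip
        self.global_step = tf.compat.v1.Variable(0, trainable=False, name="global_step")


model = AutoRecDemo(num_items=1682)  # e.g. MovieLens-100K item count (assumption)
model.prepare_model()

# One training step on a random batch: ratings plus a 0/1 mask of "observed" entries.
R = np.random.rand(32, model.num_items).astype(np.float32)
mask = (R > 0.5).astype(np.float32)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    _, cost = sess.run([model.optimizer, model.cost],
                       feed_dict={model.input_R: R, model.input_mask_R: mask})
    print("batch cost:", cost)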