Gradient Descent With Momentum, TensorFlow
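The function below wraps a plain TensorFlow optimizer to implement classical momentum. On each step the accumulator is refreshed as acumulador ← rate_momentum · acumulador + gradiente, and the variables are then moved against the accumulated direction, variable ← variable − learning_rate · acumulador.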
import tensorflow as tf  # TensorFlow 1.x graph API (tf.mul was renamed tf.multiply in 1.0)

def minimizar(optimizador, funcion_coste, acumulador, learning_rate_inicial_tf, rate_momentum_tf):
    # learning_rate_inicial_tf is not read here: the step size is the one
    # already embedded in `optimizador`, which apply_gradients uses below.
    grads_and_vars = optimizador.compute_gradients(funcion_coste)
    gradientes, variables = [], []
    accum_ops = []
    for grad, var in grads_and_vars:
        variables.append(var)
        if grad is None:
            # Keep the (None, var) pair; apply_gradients skips variables without gradients.
            gradientes.append(grad)
            continue
        # accumulator = momentum * accumulator + gradient
        accum_aux = tf.add(tf.multiply(rate_momentum_tf, acumulador), grad)
        accum_ops.append(tf.assign(acumulador, accum_aux))
        gradientes.append(accum_aux)
    # variable = variable - learning_rate * accumulator
    # Apply the update only after every accumulator assignment has run.
    with tf.control_dependencies(accum_ops):
        optimizar = optimizador.apply_gradients(zip(gradientes, variables))
    return optimizar
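
A minimal usage sketch, assuming the TensorFlow 1.x graph API; the names x, coste, paso, and sesion below are illustrative and not part of the gist. Note that minimizar never reads its learning-rate argument, so the step size is the one baked into the wrapped optimizer.

import tensorflow as tf

x = tf.Variable(5.0, name='x')   # single trainable variable
coste = tf.square(x)             # cost f(x) = x^2, minimum at x = 0

# Non-trainable accumulator with the same shape as x, initialised to zero.
acumulador = tf.Variable(0.0, trainable=False, name='acumulador')

learning_rate = tf.constant(0.1)
momentum = tf.constant(0.9)

# The learning rate lives inside the wrapped optimizer.
optimizador = tf.train.GradientDescentOptimizer(learning_rate)
paso = minimizar(optimizador, coste, acumulador, learning_rate, momentum)

with tf.Session() as sesion:
    sesion.run(tf.global_variables_initializer())
    for _ in range(100):
        sesion.run(paso)
    print(sesion.run(x))  # should approach 0

Because a single acumulador variable is passed in, the sketch uses one trainable variable; a model with several variables would need one accumulator per variable.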
The algorithm is explained at p.valienteverde.com