03 LR solution
# recall the training loop pattern
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

# 1. Batch of training data
# 1A: training inputs
x_values = tf.range(0, 1, 0.01)
# 1B: desired outputs or 'labels'
y_values = 3*x_values + 1 + tf.random_uniform(x_values.shape)
# 2. Model
# 2A: Variables that will be trained
A = tfe.Variable(0.0)
B = tfe.Variable(0.0)

# 2B: Prediction function combines training inputs, ops, and variables, generates prediction
def predict_y_values(x_values):
    return A*x_values + B
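# optional sanity check (an added sketch, not required by the pattern):
# eager execution runs ops immediately, so the untrained model can be
# inspected right here; with A = B = 0, every prediction is zero
print(predict_y_values(x_values)[:3].numpy())  # -> [0. 0. 0.]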
# mean squared error loss (an assumption; the original calls loss_function
# without defining it, and MSE is the standard choice for linear regression)
def loss_function(predicted_y_values, y_values):
    return tf.reduce_mean(tf.square(predicted_y_values - y_values))

# the training loop
for i in range(200):
    with tfe.GradientTape() as tape:
        # 3. Loss computes error between prediction and desired output
        predicted_y_values = predict_y_values(x_values)
        loss_value = loss_function(predicted_y_values, y_values)
    # 5. Metrics tell you how well your model is training
    if i % 20 == 0:
        print("Loss at step {:03d}: {:.3f}".format(i, loss_value))

    # 4. Update step
    # 4A: Gradient tells us which direction to change the variables to reduce loss
    gradient_A, gradient_B = tape.gradient(loss_value, [A, B])
    # 4B: Nudge the variables by a small step in the right direction
    A.assign_sub(gradient_A*0.1)
    B.assign_sub(gradient_B*0.1)
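After 200 steps the fitted line should sit close to the data. The noise from tf.random_uniform is uniform on [0, 1) with mean 0.5, and the intercept soaks up that constant offset, so A should head toward 3 while B heads toward roughly 1.5 rather than 1. A minimal check of the trained variables (a sketch, assuming the loop above has just run):

# inspect the trained parameters; the intercept absorbs the mean of the
# uniform noise (~0.5), so expect A near 3 and B near 1.5
print("A = {:.3f}, B = {:.3f}".format(A.numpy(), B.numpy()))

The two assign_sub calls in step 4B are gradient descent written out by hand. The same update could be delegated to TF 1.x's built-in optimizer (again a sketch; for this fixed step size it is equivalent to the manual version):

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
optimizer.apply_gradients(zip([gradient_A, gradient_B], [A, B]))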