@lmyslinski
Created November 11, 2016 19:13

# Linear regression with gradient descent in TensorFlow (0.x-era API).
# Assumes x_with_bias, y, learning_rate, training_steps, and losses are
# defined earlier (see the sketch after the snippet).
import numpy as np
import tensorflow as tf

with tf.Session() as sess:
    # Set up all the tensors, variables, and operations.
    input = tf.constant(x_with_bias)
    target = tf.constant(np.transpose([y]).astype(np.float32))
    weights = tf.Variable(tf.random_normal([2, 1], 0, 0.1))
    yhat = tf.matmul(input, weights)
    yerror = tf.sub(yhat, target)
    loss = tf.nn.l2_loss(yerror)
    update_weights = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    # Initialize variables once the full graph is built.
    tf.initialize_all_variables().run()
    for _ in range(training_steps):
        # Repeatedly run the operations, updating the TensorFlow variable.
        update_weights.run()
        losses.append(loss.eval())
    # Training is done; get the final values for the graphs.
    betas = weights.eval()
    yhat = yhat.eval()
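
The snippet relies on a few names defined outside the gist: x_with_bias, y, learning_rate, training_steps, and losses. Below is a minimal sketch of one way to set them up for a toy linear fit; the specific values and the data generation are assumptions for illustration, not part of the original.

import numpy as np

training_steps = 50      # assumed number of gradient-descent iterations
learning_rate = 0.002    # assumed step size
losses = []              # collects the loss value recorded at each step

# Toy regression data: y is roughly linear in x plus Gaussian noise.
x = np.random.normal(0.0, 1.0, 100).astype(np.float32)
y = 2.0 * x + np.random.normal(0.0, 0.3, 100).astype(np.float32)

# Append a bias column of ones so a single [2, 1] weights vector
# (slope and intercept) can be learned with one matrix multiply.
x_with_bias = np.array([(xi, 1.0) for xi in x]).astype(np.float32)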