import numpy as np
import tensorflow as tf

# Boston housing: 404 training and 102 test examples, 13 features each.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data()

# Standardize features and targets to zero mean and unit variance.
x_train_mean = x_train.mean(axis=0)
x_train_std = x_train.std(axis=0)
y_train_mean = y_train.mean()
y_train_std = y_train.std()
x_train = (x_train - x_train_mean) / x_train_std
y_train = (y_train - y_train_mean) / y_train_std

# Standardize the test set with the *training* statistics, so the test
# inputs match the distribution the model was trained on.
x_test = (x_test - x_train_mean) / x_train_std
y_test = (y_test - y_train_mean) / y_train_std
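
# Quick sanity check one might add (illustrative, uses only names defined
# above): each standardized training feature should have mean ~0, std ~1.
assert np.allclose(x_train.mean(axis=0), 0.0, atol=1e-6)
assert np.allclose(x_train.std(axis=0), 1.0, atol=1e-6)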

# Linear model: pred = x @ w. No bias term is needed because both the
# inputs and the targets are standardized to zero mean.
x = tf.placeholder(tf.float32, (None, 13), name='x')
y = tf.placeholder(tf.float32, (None, 1), name='y')
w = tf.Variable(tf.random_normal((13, 1)))
pred = tf.matmul(x, w)

# Mean squared error, minimized by plain gradient descent.
loss = tf.reduce_mean((y - pred)**2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_step = optimizer.minimize(loss)
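
# Optional reference point (a sketch, assuming NumPy's least-squares
# solver): with standardized data and no bias term, gradient descent on
# the MSE should converge toward the ordinary least-squares weights.
w_ols = np.linalg.lstsq(x_train, y_train, rcond=None)[0]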

# Full-batch training: every step runs on the whole training set.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(100):
        train_loss, _ = sess.run(
            [loss, train_step],
            feed_dict={
                x: x_train,
                y: y_train.reshape((-1, 1))
            }
        )
        print('step:{}, train_loss:{}'.format(step, train_loss))
    pred_ = sess.run(
        pred,
        feed_dict={x: x_test}
    )
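
# Map the standardized predictions back to the original price scale (a
# minimal sketch using the training statistics computed above).
pred_prices = pred_.ravel() * y_train_std + y_train_mean
true_prices = y_test * y_train_std + y_train_mean
print('test MSE (original units): {}'.format(np.mean((pred_prices - true_prices)**2)))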

# Mini-batch generator: shuffles the data once, then yields consecutive
# slices of batch_size examples (the last batch may be smaller).
def get_batches(x, y, batch_size, shuffle=True):
    n_data = len(x)
    indices = np.arange(n_data)
    if shuffle:
        np.random.shuffle(indices)
    x_shuffled = x[indices]
    y_shuffled = y[indices]
    for i in range(0, n_data, batch_size):
        x_batch = x_shuffled[i: i + batch_size]
        y_batch = y_shuffled[i: i + batch_size]
        yield x_batch, y_batch
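
# Quick usage check for the generator (illustrative): one full pass should
# yield every training example exactly once.
n_seen = sum(len(xb) for xb, _ in get_batches(x_train, y_train, 32))
assert n_seen == len(x_train)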

# Mini-batch training with the same graph: each epoch visits every
# training example once, in shuffled batches of BATCH_SIZE.
BATCH_SIZE = 32
step = 0
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(100):
        for x_batch, y_batch in get_batches(x_train, y_train, BATCH_SIZE):
            train_loss, _ = sess.run(
                [loss, train_step],
                feed_dict={
                    x: x_batch,
                    y: y_batch.reshape((-1, 1))
                }
            )
            print('step:{}, train_loss:{}'.format(step, train_loss))
            step += 1
    pred_ = sess.run(
        pred,
        feed_dict={x: x_test}
    )
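
# The graph/session API used above is TensorFlow 1.x. For reference, a
# minimal sketch of the same model in the Keras API (assumes TensorFlow
# 2.x, so it is left commented out here; `pred_keras` is illustrative):
# model = tf.keras.Sequential([
#     tf.keras.Input(shape=(13,)),
#     tf.keras.layers.Dense(1, use_bias=False),
# ])
# model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')
# model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=100, verbose=0)
# pred_keras = model.predict(x_test)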