@sj82516 · Last active July 2, 2017
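A TensorFlow (1.x) example of polynomial regression: the model is built term by term as a computation graph, each weight carries an L2 penalty, and gradient descent runs for 500 steps before matplotlib plots the noisy samples against the fitted curve.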
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import matplotlib
matplotlib.use('TkAgg')  # select the Tk backend before pyplot is imported
from matplotlib import pyplot as plt
# Hyperparameters
data_size = 100
normalize_lambda = 0.02  # L2 regularization strength
learning_rate = 0.001
learning_loop = 500
# Generate training data
def generate_dataset():
    x_data = np.linspace(-3, 3, data_size)
    # True function: y = 0.2x^3 + 1.3x, plus uniform noise in [-0.5, 0.5)
    y_data = 0.2 * x_data**3 + 1.3 * x_data + np.random.uniform(-0.5, 0.5, data_size)
    return x_data, y_data
# Build the computation graph
def multi_poly_linear_regression(power):
    x = tf.placeholder(tf.float32, shape=(None,), name="x")
    y = tf.placeholder(tf.float32, shape=(None,), name="y")
    y_pred = tf.Variable(tf.random_normal([1]), name="pred")
    loss = 0
    # Accumulate the polynomial terms W_i * x^i into y_pred
    for i in range(power):
        W = tf.Variable(tf.random_normal([1]), name='weight_%d' % i)
        y_pred = tf.add(tf.multiply(tf.pow(x, i), W), y_pred)
        # Add the L2 penalty for this weight
        loss += normalize_lambda * W ** 2
    # Mean squared error plus the regularization terms accumulated above
    loss += tf.reduce_sum(tf.pow(y_pred - y, 2)) / data_size
    return x, y, y_pred, loss
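For reference (my reading of the graph above, not part of the original gist), with power = 4 the function builds:

    y_pred(x) = b + W0*x^0 + W1*x^1 + W2*x^2 + W3*x^3
    loss      = (1/data_size) * sum((y_pred - y)^2) + normalize_lambda * (W0^2 + W1^2 + W2^2 + W3^2)

where b is the tf.random_normal([1]) variable named "pred". Since W0*x^0 is also a constant, b and W0 together act as a single effective bias, and b is the only parameter the regularizer does not touch.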
def run():
    x_batch, y_batch = generate_dataset()
    x, y, y_pred, loss = multi_poly_linear_regression(4)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for i in range(learning_loop):
            loss_val, _ = sess.run([loss, optimizer], {x: x_batch, y: y_batch})
        y_pred_batch = sess.run(y_pred, {x: x_batch})
    plt.figure(1)
    plt.scatter(x_batch, y_batch)
    plt.scatter(x_batch, y_pred_batch)
    plt.show()

if __name__ == '__main__':
    run()
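As a sanity check (an assumed addition, not part of the original gist), the learned curve can be compared against NumPy's closed-form least-squares fit on the same kind of data. np.polyfit does no regularization, so it should roughly recover the true coefficients up to noise, while the regularized TensorFlow fit will shrink them slightly:

    import numpy as np

    x_data = np.linspace(-3, 3, 100)
    y_data = 0.2 * x_data**3 + 1.3 * x_data + np.random.uniform(-0.5, 0.5, 100)
    # Degree-3 least-squares fit; coefficients come back highest power first
    coeffs = np.polyfit(x_data, y_data, 3)
    print(coeffs)  # roughly [0.2, 0.0, 1.3, 0.0] up to noise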