Created
March 15, 2022 23:04
-
-
Save sithu/bd555186df2085c49ce01da2f39889b0 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import tensorflow as tf | |
| import numpy as np | |
| import matplotlib.pyplot as plt | |
# Synthetic 1-D training data for binary classification:
# samples drawn near 5 get label 0, samples drawn near 2 get label 1.
x_label0 = np.random.normal(5, 1, 10)
# To experiment with an outlier, swap in:
# x_label0 = np.append(np.random.normal(5, 1,9), 20)
x_label1 = np.random.normal(2, 1, 10)
xs = np.concatenate((x_label0, x_label1))
labels = [0.] * len(x_label0) + [1.] * len(x_label1)
plt.scatter(xs, labels)
# Define the hyper-parameters, placeholders, and variables:
learning_rate = 0.001   # gradient-descent step size
training_epochs = 1000  # number of passes over the training set
X = tf.placeholder("float")  # input feature(s), fed at run time
Y = tf.placeholder("float")  # target label(s), 0. or 1.
# w[0] is the intercept, w[1] the slope of the linear model defined below.
w = tf.Variable([0., 0.], name="parameters")
def model(X, w):
    """Linear model: returns w[1] * X + w[0] (slope-intercept form)."""
    return w[1] * X + w[0]
# Define the cost function: sum of squared errors over the batch.
y_model = model(X, w)
cost = tf.reduce_sum(tf.square(Y-y_model))
# Set up the training op, and also introduce a couple ops to calculate some metrics, such as accuracy:
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# When the model response is greater than 0.5, it should be a positive label, and vice versa.
correct_prediction = tf.equal(Y, tf.to_float(tf.greater(y_model, 0.5)))
accuracy = tf.reduce_mean(tf.to_float(correct_prediction))
# Run the graph: initialize variables, train, then report final metrics.
# A context manager guarantees the session is released even if training
# raises (the original left a dangling session in that case).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Run the learning operation multiple times.
    for epoch in range(training_epochs):
        sess.run(train_op, feed_dict={X: xs, Y: labels})
        # Only evaluate the cost when we actually log it — the original
        # ran the (full-graph) cost evaluation on every epoch.
        if epoch % 100 == 0:
            current_cost = sess.run(cost, feed_dict={X: xs, Y: labels})
            print(epoch, current_cost)
    # Show some final metrics/results:
    w_val = sess.run(w)
    print('learned parameters', w_val)
    print('accuracy', sess.run(accuracy, feed_dict={X: xs, Y: labels}))
# Visualize the best-fit line over the training scatter plot.
line_xs = np.linspace(0, 10, 100)
plt.plot(line_xs, w_val[1] * line_xs + w_val[0])
plt.scatter(xs, labels)
plt.show()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment