Created
January 7, 2018 16:29
-
-
Save izzuddin91/7538b00d60c75bb3f76ea633e335d5b1 to your computer and use it in GitHub Desktop.
Low-level TensorFlow (1.x) training and classification example: a four-hidden-layer perceptron trained with gradient descent on a two-feature CSV dataset.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import tensorflow as tf | |
| # node1 = tf.constant(3.0, tf.float32) | |
| # node2 = tf.constant(4.0) | |
| # sess = tf.Session() | |
| # print(sess.run([node2, node1])) | |
| #when we finish the session, we need to close it in order to free up the space | |
| # sess.close() | |
| #another way of open n close the graph | |
| # with tf.Session() as sess: | |
| # output = sess.run([node1, node2]) | |
| # print(output) | |
| # a = tf.constant(5.0) | |
| # b = tf.constant(6.0) | |
| # c = a * b | |
| # sess = tf.Session() | |
| # File_writer = tf.summary.FileWriter('/Users/izzuddin/Desktop/Tensorflow/graph', sess.graph) | |
| # print(sess.run(c)) | |
| #PLACEHOLDER | |
| # a = tf.placeholder(tf.float32) | |
| # b = tf.placeholder(tf.float32) | |
| # adder_node = a + b | |
| # sess = tf.Session() | |
| # print(sess.run(adder_node, {a: [1,3], b: [2,4] })) | |
| # #VARIABLE - linear model | |
| # W = tf.Variable([.3], tf.float32) | |
| # b = tf.Variable([-.3], tf.float32) | |
| # x = tf.placeholder(tf.float32) | |
| # linear_model = W * x + b | |
| # y = tf.placeholder(tf.float32) | |
| # #Loss | |
| # squared_delta = tf.square(linear_model-y) | |
| # loss = tf.reduce_sum(squared_delta) | |
| # optimizer = tf.train.GradientDescentOptimizer(0.01) | |
| # train = optimizer.minimize(loss) | |
| # init = tf.global_variables_initializer() | |
| # sess = tf.Session() | |
| # sess.run(init) | |
| # # print(sess.run (loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}) ) | |
| # for i in range (10000): | |
| # sess.run(train, {x:[1,2,3,4], y:[0,-1,-2,-3]} ) | |
| # print(sess.run([W, b])) | |
| # import matplotlib.pyplot as plt | |
| # import numpy as np | |
| # import pandas as pd | |
| # from sklearn.preprocessing import LabelEncoder | |
| # from sklearn.utils import shuffle | |
| # from sklearn.model_selection import train_test_split | |
| #VARIABLE - quadratic model not working | |
| # a = tf.Variable([2.0], tf.float32) | |
| # b = tf.Variable([3.0], tf.float32) | |
| # c = tf.Variable([1.0], tf.float32) | |
| # x = tf.placeholder(tf.float32) | |
| # linear_model = a * x * x + b + c | |
| # y = tf.placeholder(tf.float32) | |
| # #Loss | |
| # squared_delta = tf.square(linear_model-y) | |
| # loss = tf.reduce_sum(squared_delta) | |
| # optimizer = tf.train.GradientDescentOptimizer(0.01) | |
| # train = optimizer.minimize(loss) | |
| # init = tf.global_variables_initializer() | |
| # sess = tf.Session() | |
| # sess.run(init) | |
| # # print(sess.run (loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}) ) | |
| # for i in range (1000): | |
| # sess.run(train, {x:[1,2,3,4], y:[3,7,13,21]} ) | |
| # print(sess.run([a, b, c])) | |
| import matplotlib.pyplot as plt | |
| import numpy as np | |
| import pandas as pd | |
| from sklearn.preprocessing import LabelEncoder | |
| from sklearn.utils import shuffle | |
| from sklearn.model_selection import train_test_split | |
# --- Data loading and preprocessing ---
# NOTE(review): path is machine-specific; parameterize before reuse.
df = pd.read_csv('/Users/izzuddin/Desktop/tensorflow-datasets.csv')
x = df[df.columns[0:2]].values  # feature matrix: first two CSV columns, shape (n_samples, 2)
y = df[df.columns[2]]           # raw class labels: third CSV column
# print(x, y)

# Encode the raw labels as integers 0..k-1.
encoder = LabelEncoder()
encoder.fit(y)
y = encoder.transform(y)
n_labels = len(y)  # total number of samples
# print(len(y))
n_unique_labels = len(np.unique(y))  # number of distinct classes

# One-hot encode the integer labels: row i gets a 1 in column y[i].
one_hot_encode = np.zeros((n_labels, n_unique_labels))
one_hot_encode[np.arange(n_labels), y] = 1
# print(n_labels, n_unique_labels)
# print(x, one_hot_encode)

# Shuffle with a fixed seed (reproducible) and split 80% train / 20% test.
X, Y = shuffle(x, one_hot_encode, random_state=1)
# print(X.shape[1],Y.shape)
train_x, test_x, train_y, test_y = train_test_split(X, Y, test_size=0.2, random_state=415)
# print(train_x)
# print(test_x)
# print(train_y)
# print(test_y)
# --- Hyperparameters and graph inputs ---
learning_rate = 0.3    # gradient-descent step size
training_epoch = 1000  # number of full-batch training iterations
cost_history = np.empty(shape=[1], dtype=float)  # accumulates per-epoch training loss
n_dim = X.shape[1]  # number of input features (2 CSV columns)
# print("n_ dim", n_dim)
# NOTE(review): hard-coded class count; n_unique_labels would generalize this —
# confirm the dataset always has exactly 2 classes.
n_class = 2
model_path = '/Users/izzuddin/Desktop/Tensorflow/output_model'  # checkpoint destination

# Width of each of the four hidden layers.
n_hidden_1 = 60
n_hidden_2 = 60
n_hidden_3 = 60
n_hidden_4 = 60

x = tf.placeholder(tf.float32, [None, n_dim])  # input features (batch, n_dim)
# NOTE(review): W and b below are never used — the network's parameters live in
# the `weights`/`biases` dicts defined later; these look like leftovers.
W = tf.Variable(tf.zeros([n_dim, n_class]))
b = tf.Variable(tf.zeros([n_class]))
y_ = tf.placeholder(tf.float32, [None, n_class])  # one-hot target labels (batch, n_class)
def multilayer_perception(x, weights, biases):
    """Forward pass of a four-hidden-layer perceptron.

    Hidden layers 1-3 use sigmoid activations, layer 4 uses ReLU,
    and the output layer is linear (returns raw logits).

    Args:
        x: input tensor of shape (batch, n_dim).
        weights: dict with keys 'h1'..'h4' and 'out' mapping to weight matrices.
        biases: dict with keys 'b1'..'b4' and 'out' mapping to bias vectors.

    Returns:
        Logits tensor of shape (batch, n_class).
    """
    activations = (tf.nn.sigmoid, tf.nn.sigmoid, tf.nn.sigmoid, tf.nn.relu)
    signal = x
    for idx, activate in enumerate(activations, start=1):
        affine = tf.add(tf.matmul(signal, weights['h%d' % idx]),
                        biases['b%d' % idx])
        signal = activate(affine)
    # Output layer: no activation — downstream loss applies softmax itself.
    return tf.matmul(signal, weights['out']) + biases['out']
# --- Network parameters ---
# All parameters are initialized from a truncated normal distribution
# (default mean 0.0, stddev 1.0).
weights = {
    'h1': tf.Variable(tf.truncated_normal([n_dim, n_hidden_1])),       # input -> hidden 1
    'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])),  # hidden 1 -> 2
    'h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3])),  # hidden 2 -> 3
    'h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4])),  # hidden 3 -> 4
    'out': tf.Variable(tf.truncated_normal([n_hidden_4, n_class]))     # hidden 4 -> logits
}
biases = {
    'b1': tf.Variable(tf.truncated_normal([n_hidden_1])),
    'b2': tf.Variable(tf.truncated_normal([n_hidden_2])),
    'b3': tf.Variable(tf.truncated_normal([n_hidden_3])),
    'b4': tf.Variable(tf.truncated_normal([n_hidden_4])),
    'out': tf.Variable(tf.truncated_normal([n_class]))
}
# --- Build the loss / training ops and start the session ---
init = tf.global_variables_initializer()  # initializes all tf.Variables defined above
saver = tf.train.Saver()                  # checkpoint writer for model_path
y = multilayer_perception(x, weights, biases)  # network logits (rebinds the label array `y`)
# Mean softmax cross-entropy between logits and one-hot targets.
cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
training_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
sess = tf.Session()
sess.run(init)
# --- Training loop ---
# Fix: build the evaluation ops ONCE, before the loop. The original created
# tf.equal/tf.reduce_mean (and tf.square over NumPy arrays) inside every
# epoch, adding new nodes to the graph each iteration — the graph grows
# without bound and each epoch gets slower. It also shadowed the `accuracy`
# op with its own scalar result.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

mse_history = []       # per-epoch test-set mean squared error
accuracy_history = []  # per-epoch training-set accuracy

for epoch in range(training_epoch):
    # One full-batch gradient-descent step, then record the training loss.
    sess.run(training_step, feed_dict={x: train_x, y_: train_y})
    cost = sess.run(cost_function, feed_dict={x: train_x, y_: train_y})
    cost_history = np.append(cost_history, cost)

    # Test-set MSE: pred_y and test_y are plain NumPy arrays, so compute the
    # error in NumPy instead of building fresh TF constant ops every epoch.
    pred_y = sess.run(y, feed_dict={x: test_x})
    mse_ = np.mean(np.square(pred_y - test_y))
    mse_history.append(mse_)

    # Training accuracy for this epoch (reuses the pre-built `accuracy` op).
    acc = sess.run(accuracy, feed_dict={x: train_x, y_: train_y})
    accuracy_history.append(acc)

    print('epoch: ', epoch, '-', 'cost: ', cost, " - MSE: ", mse_, " - Train Accuracy: ", acc)

# Persist the trained model to disk.
save_path = saver.save(sess, model_path)
print("Model saved in file: %s" % save_path)

# Plot the MSE and accuracy curves.
plt.plot(mse_history, 'r')
plt.show()
plt.plot(accuracy_history)
plt.show()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment