TensorFlow RNN time-series prediction
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plot
import pandas as pd
import csv
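
# Train a single-layer RNN (TensorFlow 1.x) on the time series in dataset.csv
# to predict each series shifted forward by `prediction_size` steps, then
# plot the prediction for a held-out row against the actual values.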
def load_data_points(filename):
    """Read a CSV file into a list of rows, each a list of single-element [float] points."""
    print("Opening CSV file")
    with open(filename) as csvfile:
        print("Creating CSV reader")
        reader = csv.reader(csvfile)
        print("Reading CSV")
        return [[[float(p)] for p in row] for row in reader]
flatten = lambda l: [item for sublist in l for item in sublist]

data_points = load_data_points('dataset.csv')
print("Loaded")
prediction_size = 10
num_test_rows = 1
num_data_rows = len(data_points) - num_test_rows
row_size = len(data_points[0]) - prediction_size
# Training data
data_rows = data_points[:-num_test_rows]
x_data_points = np.array([row[:-prediction_size] for row in data_rows]).reshape([-1, row_size, 1])
y_data_points = np.array([row[prediction_size:] for row in data_rows]).reshape([-1, row_size, 1])
# Test data
test_rows = data_points[-num_test_rows:]
x_test_points = np.array([row[:-prediction_size] for row in test_rows]).reshape([-1, row_size, 1])
y_test_points = np.array([row[prediction_size:] for row in test_rows]).reshape([-1, row_size, 1])
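
# Both the training and test arrays are shaped [rows, row_size, 1] to match
# the [None, row_size, 1] placeholders defined below.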
tf.reset_default_graph()

num_hidden = 100

x = tf.placeholder(tf.float32, [None, row_size, 1])
y = tf.placeholder(tf.float32, [None, row_size, 1])

basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=num_hidden, activation=tf.nn.relu)
rnn_outputs, _ = tf.nn.dynamic_rnn(basic_cell, x, dtype=tf.float32)
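
# dynamic_rnn emits one num_hidden-dimensional state per time step
# ([batch, row_size, num_hidden]); the dense layer below projects each
# state down to a single predicted value per time step.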
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, num_hidden])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, 1)
outputs = tf.reshape(stacked_outputs, [-1, row_size, 1])

loss = tf.reduce_sum(tf.square(outputs - y))  # sum of squared errors over all points

learning_rate = 0.001
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()

iterations = 1000
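
# Train on all training rows for `iterations` epochs, logging the loss every
# 100 epochs, then run the trained network on the held-out test input.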
with tf.Session() as sess:
    init.run()

    for ep in range(iterations):
        sess.run(training_op, feed_dict={x: x_data_points, y: y_data_points})
        if ep % 100 == 0:
            train_loss = loss.eval(feed_dict={x: x_data_points, y: y_data_points})
            print(ep, "\tLoss:", train_loss)

    y_pred = sess.run(stacked_outputs, feed_dict={x: x_test_points})
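
# Plot the test input (X), the true shifted series (Y), and the prediction.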
plot.rcParams["figure.figsize"] = (20, 10)
plot.title("Actual vs Predicted")
plot.plot(pd.Series(np.ravel(x_test_points)), 'g:', markersize=2, label="X")
plot.plot(pd.Series(np.ravel(y_test_points)), 'b--', markersize=2, label="Y")
plot.plot(pd.Series(np.ravel(y_pred)), 'r-', markersize=2, label="Predicted")
plot.legend(loc='upper left')
plot.xlabel("Time periods")
plot.tick_params(
    axis='y',
    which='both',
    left=False,
    right=False,
    labelleft=False)
plot.show()