df = df[df.symbol == 'GOOG']        # keep only the GOOG rows
df = df.drop(columns=['symbol'])    # the symbol column is now constant, so drop it
df.head()
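# For reference, the kind of loading step that precedes the filter above.
# The file name 'prices.csv' and its column layout are assumptions, not part
# of the original snippet.
import pandas as pd

df = pd.read_csv('prices.csv', parse_dates=['date'])  # hypothetical file name
df.head()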
## Finding all columns that contain NaN values:
dropping_list_all = []
for j in range(df.shape[1]):                 # iterate over every column
    if not df.iloc[:, j].notnull().all():    # True if the column has any NaN
        dropping_list_all.append(j)
        # print(df.iloc[:, j].unique())
dropping_list_all
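# The same check can be written without an explicit loop; an equivalent
# one-liner using pandas (same df as above):
dropping_list_all = [j for j, has_nan in enumerate(df.isnull().any()) if has_nan]

# Or, to get the column labels directly instead of positional indices:
nan_columns = df.columns[df.isnull().any()].tolist()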
###### Prediction ###############
lstm.eval()                            # evaluation mode: disables dropout, etc.
with torch.no_grad():                  # no gradients needed for inference
    train_predict = lstm(dataX.to(device))
data_predict = train_predict.cpu().numpy()   # predictions back on the CPU as NumPy
dataY_plot = dataY.detach().cpu().numpy()

## Inverse-normalize back to the original price scale
data_predict = scaler.inverse_transform(data_predict)
dataY_plot = scaler.inverse_transform(dataY_plot)  # un-scale the targets too, so the RMSE below compares like with like
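# A typical next step is to plot the predictions against the ground truth.
# Minimal sketch; 'train_size' (the index of the train/test boundary) is an
# assumption, not defined in the snippets above.
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 6))
plt.axvline(x=train_size, c='r', linestyle='--')  # train/test split (assumed variable)
plt.plot(dataY_plot, label='actual')
plt.plot(data_predict, label='predicted')
plt.legend()
plt.show()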
# Create the testing data set
# Create a new array containing scaled values from index 1543 to 2002
test_data = scaled_data[training_data_len - 60:, :]
# Create the data sets x_test and y_test
x_test = []
y_test = dataset[training_data_len:, :]   # unscaled closes for the test period
for i in range(60, len(test_data)):
    x_test.append(test_data[i-60:i, 0])   # each sample: the previous 60 scaled closes

# Convert the data to a numpy array
x_test = np.array(x_test)
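# The usual continuation: reshape for the LSTM, predict, and un-scale.
# Sketch only; assumes a trained Keras model named 'model' and the same
# 'scaler' that was fit on the close column.
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))  # (samples, timesteps, features)

predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)  # back to the original price scale

rmse = np.sqrt(np.mean((predictions - y_test) ** 2))  # error on the held-out closes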
import sklearn.preprocessing

def normalize_data(df):
    """Scale each OHLC column independently into the [0, 1] range."""
    min_max_scaler = sklearn.preprocessing.MinMaxScaler()
    # fit_transform refits the scaler on every call, so each column is scaled
    # by its own min/max; after the last call the scaler only remembers the
    # 'close' column's statistics.
    df['open'] = min_max_scaler.fit_transform(df.open.values.reshape(-1, 1))
    df['high'] = min_max_scaler.fit_transform(df.high.values.reshape(-1, 1))
    df['low'] = min_max_scaler.fit_transform(df.low.values.reshape(-1, 1))
    df['close'] = min_max_scaler.fit_transform(df['close'].values.reshape(-1, 1))
    return df
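# If the original prices must be recovered later (e.g., for the
# inverse_transform call in the prediction snippet), it is safer to keep one
# fitted scaler per column. A sketch of that variant:
from sklearn.preprocessing import MinMaxScaler

def normalize_data_keep_scalers(df):
    """Scale each OHLC column into [0, 1] and keep the fitted scalers."""
    scalers = {}
    for col in ['open', 'high', 'low', 'close']:
        scalers[col] = MinMaxScaler()
        df[col] = scalers[col].fit_transform(df[[col]]).ravel()
    return df, scalers

# Later, to undo the scaling for the close column:
# df['close'] = scalers['close'].inverse_transform(df[['close']]).ravel()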
# RMSE over the test window (the last len(testX) samples)
np.sqrt(((dataY_plot[-testX.size()[0]:] - data_predict[-testX.size()[0]:]) ** 2).mean())
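# Cross-check of the same quantity with scikit-learn:
from sklearn.metrics import mean_squared_error

n_test = testX.size()[0]
rmse = np.sqrt(mean_squared_error(dataY_plot[-n_test:], data_predict[-n_test:]))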
import math

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')

from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, GRU, Bidirectional
from keras.optimizers import SGD
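# With TensorFlow 2.x, the standalone keras.* paths above are usually written
# against the tensorflow.keras namespace instead:
# from tensorflow.keras.models import Sequential
# from tensorflow.keras.layers import Dense, LSTM, Dropout, GRU, Bidirectional
# from tensorflow.keras.optimizers import SGD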