Multivariate Time Series Forecasting with LSTMs in Keras

Tutorial: https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
Dataset: https://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data
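The scripts below expect the raw UCI data saved locally as raw.csv. A minimal fetch sketch follows; the direct download path is an assumption based on the dataset's historical UCI location and may have moved, in which case download the file manually from the dataset page linked above.

import urllib.request
# assumed historical UCI path for the Beijing PM2.5 file; verify against
# the dataset page above if this 404s
url = ('https://archive.ics.uci.edu/ml/machine-learning-databases/00381/'
       'PRSA_data_2010.1.1-2014.12.31.csv')
urllib.request.urlretrieve(url, 'raw.csv')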
from pandas import read_csv
from datetime import datetime

# load data, combining the four date-part columns into one datetime index
def parse(x):
    return datetime.strptime(x, '%Y %m %d %H')
dataset = read_csv('raw.csv', parse_dates=[['year', 'month', 'day', 'hour']], index_col=0, date_parser=parse)
dataset.drop('No', axis=1, inplace=True)
# manually specify column names
dataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
dataset.index.name = 'date'
# mark all NA values with 0
dataset['pollution'].fillna(0, inplace=True)
# drop the first 24 hours
dataset = dataset[24:]
# summarize first 5 rows
print(dataset.head(5))
# save to file
dataset.to_csv('pollution.csv')
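As a quick sanity check before modeling, each series can be plotted. A minimal sketch, assuming pollution.csv was written by the script above; the categorical wnd_dir column (index 4) is skipped:

from pandas import read_csv
from matplotlib import pyplot
dataset = read_csv('pollution.csv', header=0, index_col=0)
values = dataset.values
# plot each numeric column in its own subplot
groups = [0, 1, 2, 3, 5, 6, 7]
i = 1
pyplot.figure()
for group in groups:
    pyplot.subplot(len(groups), 1, i)
    pyplot.plot(values[:, group])
    pyplot.title(dataset.columns[group], y=0.5, loc='right')
    i += 1
pyplot.show()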
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler

# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols, names = list(), list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
    # put it all together
    agg = concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values introduced by the shifting
    if dropnan:
        agg.dropna(inplace=True)
    return agg

# load dataset
dataset = read_csv('pollution.csv', header=0, index_col=0)
values = dataset.values
# integer encode wind direction
encoder = LabelEncoder()
values[:,4] = encoder.fit_transform(values[:,4])
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning
reframed = series_to_supervised(scaled, 1, 1)
# drop the columns we do not want to predict, keeping only var1(t) (pollution)
reframed.drop(reframed.columns[[9,10,11,12,13,14,15]], axis=1, inplace=True)
print(reframed.head(5))
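A quick way to verify the framing is to run the function on a tiny made-up array; the toy values below are illustrative only:

from pandas import DataFrame
# toy input: two variables observed over five steps (illustrative values)
raw = DataFrame({'var1': [10, 20, 30, 40, 50], 'var2': [1, 2, 3, 4, 5]})
framed = series_to_supervised(raw.values, n_in=1, n_out=1)
# each surviving row pairs the previous step's observations
# (var1(t-1), var2(t-1)) with the current step (var1(t), var2(t))
print(framed)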
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM

# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols, names = list(), list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
    # put it all together
    agg = concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg

# load dataset
dataset = read_csv('pollution.csv', header=0, index_col=0)
values = dataset.values
# integer encode wind direction
encoder = LabelEncoder()
values[:,4] = encoder.fit_transform(values[:,4])
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# specify the number of lag hours
n_hours = 3
n_features = 8
# frame as supervised learning
reframed = series_to_supervised(scaled, n_hours, 1)
print(reframed.shape)

# split into train and test sets: first year for training, rest for testing
values = reframed.values
n_train_hours = 365 * 24
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# split into input and outputs
n_obs = n_hours * n_features
train_X, train_y = train[:, :n_obs], train[:, -n_features]
test_X, test_y = test[:, :n_obs], test[:, -n_features]
print(train_X.shape, len(train_X), train_y.shape)
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], n_hours, n_features))
test_X = test_X.reshape((test_X.shape[0], n_hours, n_features))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

# design network
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
# fit network
history = model.fit(train_X, train_y, epochs=50, batch_size=72, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# plot history
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()

# make a prediction
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], n_hours*n_features))
# invert scaling for forecast: the scaler was fit on all 8 columns,
# so rebuild an 8-column array before inverting
inv_yhat = concatenate((yhat, test_X[:, -7:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, -7:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
# calculate RMSE
rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
print('Test RMSE: %.3f' % rmse)
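To reuse the fitted network without retraining, both the model and the scaler need to be persisted, since forecasts cannot be un-scaled without the original scaler. A minimal sketch, assuming joblib is installed; the filenames are placeholders:

from joblib import dump
# Keras' native serialization for the network
model.save('pollution_lstm.h5')
# keep the fitted scaler alongside the model
dump(scaler, 'pollution_scaler.joblib')

# later, in a fresh session:
# from keras.models import load_model
# from joblib import load
# model = load_model('pollution_lstm.h5')
# scaler = load('pollution_scaler.joblib')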