import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
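# Optional (my addition, not in the original gist): fix the random seeds so the
# train/validation split and the weight initialization are reproducible.
np.random.seed(0)
torch.manual_seed(0)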
df = pd.read_csv('AirPassengers.csv', index_col=0)
f_ori = df.values
f = f_ori / 600  # scale the values to roughly the range [0, 1]
length_of_sequences = len(df)
width = 12  # window length (12 months)
data = []
target = []
for i in range(0, length_of_sequences - width + 1):
    data.append(f[i: i + width])
    target.append(f[i + width - 1])  # target is the final value of each window
X = np.array(data).reshape(-1, width, 1)
y = np.array(target).reshape(len(data), 1)
X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.1)
train = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train))
train_loader = DataLoader(train, batch_size=1, shuffle=False)
test = TensorDataset(torch.Tensor(X_validation), torch.Tensor(y_validation))
test_loader = DataLoader(test, batch_size=1, shuffle=False)
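# Quick sanity check (my addition, not part of the original gist): the LSTM below
# expects inputs of shape (batch, width, feature) and targets of shape (batch, 1).
print(X.shape, y.shape)  # expected: (133, 12, 1) (133, 1) for the 144-month AirPassengers data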
class LstmNet(nn.Module):
    def __init__(self, feature_num=1, hidden_size=50, num_layers=1, dropout=0.2):
        super().__init__()
        self.lstm = nn.LSTM(feature_num,
                            hidden_size,
                            num_layers,
                            batch_first=True,
                            dropout=dropout)
        self.linear = nn.Linear(hidden_size, 1)

    def forward(self, x, h0=None, l=None):
        # Pass x through the LSTM together with the initial state h0.
        # x is (batch_size, step_size, feature_num).
        x, h = self.lstm(x, h0)
        # Keep only the last step.
        # x is (batch_size, step_size, hidden_size).
        x = x[:, -1, :]
        # Feed the last step into the linear layer.
        x = self.linear(x)
        # Drop the trailing singleton dimension:
        # (batch_size, 1) -> (batch_size,)
        x = x.squeeze(-1)
        return x
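# Shape check on CPU before training (my addition, not in the original gist):
# a random batch of 4 windows should map to 4 scalar predictions.
_check = LstmNet(hidden_size=50, num_layers=2, dropout=0.2)
print(_check(torch.randn(4, width, 1)).shape)  # expected: torch.Size([4])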
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = LstmNet(hidden_size=50, num_layers=2, dropout=0.2).to(device)
optimizer = optim.Adam(net.parameters(), lr=0.0002, betas=(0.5, 0.999))
loss_f = nn.MSELoss()
for epoch in range(100):
    losses = []
    net.train()
    for x, y in tqdm(train_loader):
        x = x.to(device)
        y = y.to(device)
        y_pred = net(x)
        loss = loss_f(y_pred, y.float().squeeze(-1))
        net.zero_grad()
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    # print(loss.item())
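# The original gist builds test_loader but never uses it; this is a sketch
# (my addition) of how the held-out windows could be scored with the same MSE loss.
net.eval()
val_losses = []
with torch.no_grad():
    for x_v, y_v in test_loader:
        pred_v = net(x_v.to(device))
        val_losses.append(loss_f(pred_v, y_v.float().squeeze(-1).to(device)).item())
print('validation MSE:', np.mean(val_losses))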
# Take the first window of length `width`.
Z = X[:1]  # use X[:1] rather than X[0] to keep the shape (1, width, 1)
original = [f[i] for i in range(width)]
predicted = [None for i in range(width)]
net.eval()
z_ = Z[-1]  # shape=(width, 1)
for i in range(length_of_sequences - width + 1):
    y_ = net(torch.Tensor(z_.reshape(1, width, 1)).to(device)).item()
    # slide the window: drop the oldest value and append the new prediction
    z_ = np.delete(z_, 0)
    z_ = np.append(z_, y_)
    predicted.append(y_)
plt.plot(original, linestyle='dashed', color='black')
plt.plot(predicted, color='black')
# Predictions on the training data (one step per window, not autoregressive)
y_ = net(torch.Tensor(X).to(device)).to("cpu").detach().numpy()
predicted_ = np.array([None for i in range(width)])
y_ = np.concatenate([predicted_, y_.reshape(-1)], axis=0)
plt.plot(original, linestyle='dashed', color='black')
plt.plot(y_, color='black')
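# When run as a plain script (outside a notebook), the figures only appear after
# an explicit show() call; this line is my addition, not in the original gist.
plt.show()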