# Gist by @tsuchm, created 2020-06-09
# https://tonak-ai.hatenablog.com/entry/2019/03/26/124537
import torch
import torch.nn as nn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_vocab = 3   # vocabulary size (token ids 0..2; 0 is also the padding id)
n_layers = 1  # number of stacked LSTM layers
n_embed = 5   # embedding dimension
n_hidden = 5  # LSTM hidden-state dimension
n_labels = 3  # number of output labels
embedding = nn.Embedding(n_vocab, n_embed).to(device)
lstm = nn.LSTM(n_embed,
               n_hidden,
               num_layers=n_layers,
               batch_first=True).to(device)
linear = nn.Linear(n_hidden, n_labels).to(device)
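# Note (an aside, not in the original gist): pad_sequence below pads with 0,
# so nn.Embedding(n_vocab, n_embed, padding_idx=0) would keep the padding
# vector fixed at zero; as written, an embedding is learned for id 0 as well.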
#inputs = [[1,1,2,1,1,2,1],[1,1,2,1,1],[1,1,2]]  # sorted by length (would work with enforce_sorted=True)
inputs = [[1,1,2,1,1,2,1],[1,1,2],[1,1,2,1,1]]   # deliberately unsorted, so enforce_sorted=False is needed below
lengths = torch.tensor([len(e) for e in inputs])  # kept on CPU: pack_padded_sequence requires CPU lengths
inputs = nn.utils.rnn.pad_sequence([torch.tensor(e, device=device) for e in inputs],
                                   batch_first=True)
# print(inputs)
# print(lengths)
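# After padding (pad_sequence fills with 0), inputs is a (3, 7) LongTensor:
#   [[1, 1, 2, 1, 1, 2, 1],
#    [1, 1, 2, 0, 0, 0, 0],
#    [1, 1, 2, 1, 1, 0, 0]]
# and lengths is tensor([7, 3, 5]).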
embedded = embedding(inputs)
#print(embedded)
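# embedded has shape (batch, max_len, n_embed) = (3, 7, 5).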
packed = nn.utils.rnn.pack_padded_sequence(embedded,
                                           lengths,
                                           batch_first=True,
                                           enforce_sorted=False)
#print(packed)
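# packed is a PackedSequence: packed.data stacks only the non-padding timesteps
# (sum(lengths) = 15 rows of size n_embed), and packed.batch_sizes records how
# many sequences are still active at each step: tensor([3, 3, 3, 2, 2, 1, 1]).
# With enforce_sorted=False, PyTorch sorts by length internally and remembers
# the permutation, so later outputs come back in the original batch order.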
# Initial (h_0, c_0), each of shape (num_layers, batch, n_hidden).
hidden = (torch.randn(n_layers, len(lengths), n_hidden, device=device),
          torch.randn(n_layers, len(lengths), n_hidden, device=device))
# print(hidden[0].shape)
# print(hidden)
output, hidden = lstm(packed, hidden)
output, output_lengths = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
#print(output.shape)
#print(output_lengths)
# print(output)
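# output is back to a padded (batch, max_len, n_hidden) = (3, 7, 5) tensor and
# output_lengths equals the original lengths tensor([7, 3, 5]); positions past
# each sequence's length are zero-filled.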
# for i, j in enumerate(output_lengths):
#     print(output[i][j - 1])
# Select each sequence's output at its last valid timestep (index length - 1).
output = torch.stack([output[i][j] for i, j in enumerate(output_lengths - 1)])
print(output.shape)
print(output)
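# Sanity check (a minimal sketch, not in the original gist): for a single-layer,
# unidirectional LSTM fed a PackedSequence, hidden[0][-1] already holds each
# sequence's final hidden state in original batch order, so it should match the
# gathered outputs above.
assert torch.allclose(output, hidden[0][-1])

# The linear layer defined above is never used in the original gist; one way to
# finish the pipeline is to map the final hidden states to label logits:
logits = linear(output)
print(logits.shape)  # (batch, n_labels) = (3, 3)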