LSTM benchmark: tensorflow, eager, pytorch
TensorFlow, eager execution:
#!/usr/bin/env python
import tensorflow as tf
import tensorflow.contrib.eager as tfe

# use 1 CPU
conf = tf.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1)
tfe.enable_eager_execution(conf)

n_iter = 100
n_layers = 2
batch_size = 32
seq_len = 1000
input_dim = 7

# random input batch: (batch, time, features)
data = tf.random_uniform((batch_size, seq_len, input_dim))

# stack of LSTM cells, unrolled over time by dynamic_rnn
cells = [tf.contrib.rnn.LSTMCell(input_dim) for _ in range(n_layers)]
multicell = tf.contrib.rnn.MultiRNNCell(cells)

# forward pass only, repeated n_iter times
for _ in range(n_iter):
    tf.nn.dynamic_rnn(multicell, data, dtype=tf.float32)
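The script above runs the forward pass n_iter times but never records wall-clock time. A minimal sketch of how the eager loop could be timed, using only the standard library (any monotonic clock would do in place of time.perf_counter):

import time

start = time.perf_counter()
for _ in range(n_iter):
    tf.nn.dynamic_rnn(multicell, data, dtype=tf.float32)
# report average seconds per forward pass
print('%.4f s/iter' % ((time.perf_counter() - start) / n_iter))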
PyTorch:
#!/usr/bin/env python
import torch
import torch.nn as nn
import torch.autograd as autograd

n_iter = 100
n_layers = 2
batch_size = 32
seq_len = 1000
input_dim = 7

# random input batch: (batch, time, features)
x = autograd.Variable(torch.rand(batch_size, seq_len, input_dim))

# stacked LSTM; batch_first matches the TensorFlow input layout
lstm = nn.LSTM(input_dim, input_dim, n_layers, batch_first=True)

# forward pass only, repeated n_iter times
for _ in range(n_iter):
    lstm(x)
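Unlike the two TensorFlow scripts, the PyTorch version does not restrict execution to a single CPU thread, so the comparison may not be apples-to-apples. A sketch of restoring parity; torch.set_num_threads limits intra-op threading, and the optional torch.no_grad() context (PyTorch >= 0.4) skips autograd bookkeeping that the forward-only benchmark doesn't need:

torch.set_num_threads(1)  # match intra_op_parallelism_threads=1 above

with torch.no_grad():  # PyTorch >= 0.4; drop for older versions
    for _ in range(n_iter):
        lstm(x)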
TensorFlow, graph mode:
#!/usr/bin/env python
import numpy as np
import tensorflow as tf

# use 1 CPU
conf = tf.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1)

n_iter = 100
n_layers = 2
batch_size = 32
seq_len = 1000
input_dim = 7

# random input batch: (batch, time, features)
data = np.random.uniform(size=(batch_size, seq_len, input_dim))

# build the graph once, then execute it repeatedly in a session
x = tf.placeholder(tf.float32, shape=(batch_size, seq_len, input_dim))
cells = [tf.contrib.rnn.LSTMCell(input_dim) for _ in range(n_layers)]
multicell = tf.contrib.rnn.MultiRNNCell(cells)
rnn_outputs, final_state = tf.nn.dynamic_rnn(multicell, x, dtype=tf.float32)

init = tf.global_variables_initializer()
with tf.Session(config=conf) as sess:
    sess.run(init)
    for _ in range(n_iter):
        sess.run(rnn_outputs, {x: data})
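For the graph-mode variant, only the sess.run loop is worth timing: graph construction and variable initialization happen once and would otherwise skew a per-iteration average. A sketch, assuming the same session and tensors as above, with one warm-up run to absorb one-time setup costs:

import time

sess.run(rnn_outputs, {x: data})  # warm-up run, excluded from timing
start = time.perf_counter()
for _ in range(n_iter):
    sess.run(rnn_outputs, {x: data})
print('%.4f s/iter' % ((time.perf_counter() - start) / n_iter))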
I'd be curious to see the results of this