<eval_with_key>.0:5: UserWarning: RNN module weights are not part of single contiguous chunk of memory. This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters(). (Triggered internally at /scratch/dberard/dynamo38/pytorch/aten/src/ATen/native/cudnn/RNN.cpp:982.)
lstm = torch.lstm(permute, (zeros, zeros_1), [self_model_lstm_lstm_flat_weights_0_, self_model_lstm_lstm_flat_weights_1_, self_model_lstm_lstm_flat_weights_2_, self_model_lstm_lstm_flat_weights_3_, self_model_lstm_lstm_flat_weights_4_, self_model_lstm_lstm_flat_weights_5_, self_model_lstm_lstm_flat_weights_6_, self_model_lstm_lstm_flat_weights_7_, self_model_lstm_lstm_flat_weights_8_, self_model_lstm_lstm_flat_weights_9_, self_model_lstm_lstm_flat_weights_10_, self_model_lstm_lstm_flat_weights_11_, self_model_lstm_lstm_flat_weights_12_, self_model_lstm_lstm_flat_weights_13_, self_model_lstm_lstm_flat_weights_14_, self_model_lstm_lstm_flat_weights_15_], True, 2, 0.0, True, True, False); permute = zeros = zeros_1 = self_model_lstm_lstm_flat_weights_0_ = self_model_lstm_lstm_flat_weights_1_ = self_model_lstm_lstm_flat_weights_2_ = self_model_lstm_lstm_flat_weights_3_ = self_model_lstm_lstm_flat_weights_4_ = self_model_lstm_lstm_flat_weights_5_ = self_model_lstm_lstm_flat_weights_6_ = self_model_lstm_lstm_flat_weights_7_ = self_model_lstm_lstm_flat_weights_8_ = self_model_lstm_lstm_flat_weights_9_ = self_model_lstm_lstm_flat_weights_10_ = self_model_lstm_lstm_flat_weights_11_ = self_model_lstm_lstm_flat_weights_12_ = self_model_lstm_lstm_flat_weights_13_ = self_model_lstm_lstm_flat_weights_14_ = self_model_lstm_lstm_flat_weights_15_ = None
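For context, a minimal sketch of a module that would trace to a call of this shape. Every name and size below is an assumption; the only facts taken from the log are the trailing torch.lstm arguments (has_biases=True, num_layers=2, dropout=0.0, train=True, bidirectional=True, batch_first=False) and the 16 flat weight tensors (2 layers x 2 directions x 4 tensors each). Note that in the graph below even the permuted input and the zero states are placeholders, so that graph is presumably a pared-down extract around just the torch.lstm node. The UserWarning presumably fires because the graph is re-run with converted copies of the weights that no longer alias the module's compacted cuDNN buffer.

import torch
import torch._dynamo

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # 2-layer bidirectional LSTM with biases -> 16 entries in
        # lstm._flat_weights (self_model_lstm_lstm_flat_weights_0_..15_ above)
        self.lstm = torch.nn.LSTM(input_size=16, hidden_size=32,
                                  num_layers=2, bidirectional=True)

    def forward(self, x):
        x = x.permute(1, 0, 2)  # (batch, seq, feat) -> (seq, batch, feat): the %permute input
        # num_layers * num_directions = 4 initial states: %zeros and %zeros_1
        h0 = torch.zeros(4, x.shape[1], 32, device=x.device)
        c0 = torch.zeros(4, x.shape[1], 32, device=x.device)
        out, _ = self.lstm(x, (h0, c0))
        return out

model = Model().cuda()
gm, guards = torch._dynamo.export(model, torch.randn(8, 12, 16, device="cuda"))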
graph():
    %self_model_lstm_lstm_flat_weights_0_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_0_]
    %self_model_lstm_lstm_flat_weights_1_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_1_]
    %self_model_lstm_lstm_flat_weights_2_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_2_]
    %self_model_lstm_lstm_flat_weights_3_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_3_]
    %self_model_lstm_lstm_flat_weights_4_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_4_]
    %self_model_lstm_lstm_flat_weights_5_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_5_]
    %self_model_lstm_lstm_flat_weights_6_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_6_]
    %self_model_lstm_lstm_flat_weights_7_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_7_]
    %self_model_lstm_lstm_flat_weights_8_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_8_]
    %self_model_lstm_lstm_flat_weights_9_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_9_]
    %self_model_lstm_lstm_flat_weights_10_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_10_]
    %self_model_lstm_lstm_flat_weights_11_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_11_]
    %self_model_lstm_lstm_flat_weights_12_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_12_]
    %self_model_lstm_lstm_flat_weights_13_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_13_]
    %self_model_lstm_lstm_flat_weights_14_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_14_]
    %self_model_lstm_lstm_flat_weights_15_ : torch.Tensor [#users=1] = placeholder[target=self_model_lstm_lstm_flat_weights_15_]
    %permute : [#users=1] = placeholder[target=permute]
    %zeros : [#users=1] = placeholder[target=zeros]
    %zeros_1 : [#users=1] = placeholder[target=zeros_1]
    %lstm : [#users=1] = call_function[target=torch.lstm](args = (%permute, (%zeros, %zeros_1), [%self_model_lstm_lstm_flat_weights_0_, %self_model_lstm_lstm_flat_weights_1_, %self_model_lstm_lstm_flat_weights_2_, %self_model_lstm_lstm_flat_weights_3_, %self_model_lstm_lstm_flat_weights_4_, %self_model_lstm_lstm_flat_weights_5_, %self_model_lstm_lstm_flat_weights_6_, %self_model_lstm_lstm_flat_weights_7_, %self_model_lstm_lstm_flat_weights_8_, %self_model_lstm_lstm_flat_weights_9_, %self_model_lstm_lstm_flat_weights_10_, %self_model_lstm_lstm_flat_weights_11_, %self_model_lstm_lstm_flat_weights_12_, %self_model_lstm_lstm_flat_weights_13_, %self_model_lstm_lstm_flat_weights_14_, %self_model_lstm_lstm_flat_weights_15_], True, 2, 0.0, True, True, False), kwargs = {})
    return (lstm,)
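The two listings above are the same FX graph in two forms: the generated Python source (which is compiled and executed as "<eval_with_key>.0", hence the warning's location) and the node-level IR. Given a GraphModule gm, both can be reproduced:

print(gm.code)            # generated Python source, runs as "<eval_with_key>.0"
print(gm.graph)           # the "graph(): ..." node listing shown above
gm.graph.print_tabular()  # same nodes as a table (requires the tabulate package)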
~~~~~~~~LSTM sanity check (FakeTensors)
~~~~~~~~LSTM sanity check PASSED
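A sketch of what this FakeTensor sanity check presumably does (gm and real_args are assumed names; FakeTensorMode and from_tensor are the real APIs): convert every input, including the 16 flat weights, to FakeTensors and run the graph, which propagates shapes, dtypes, and devices without running real kernels.

from torch._subclasses.fake_tensor import FakeTensorMode

fake_mode = FakeTensorMode()
fake_args = [fake_mode.from_tensor(a) for a in real_args]
out = gm(*fake_args)  # dispatches through fake_mode; no real compute happens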
~~~~~~~~LSTM check with functional tensors
Traceback (most recent call last):
  File "repro3.py", line 63, in <module>
    traced_mod(*converted_args)
  File "/scratch/dberard/dynamo38/pytorch/torch/fx/graph_module.py", line 660, in call_wrapped
    return self._wrapped_call(self, *args, **kwargs)
  File "/scratch/dberard/dynamo38/pytorch/torch/fx/graph_module.py", line 279, in __call__
    raise e
  File "/scratch/dberard/dynamo38/pytorch/torch/fx/graph_module.py", line 269, in __call__
    return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc]
  File "/scratch/dberard/dynamo38/pytorch/torch/nn/modules/module.py", line 1482, in _call_impl
    return forward_call(*args, **kwargs)
  File "<eval_with_key>.0", line 5, in forward
  File "/scratch/dberard/dynamo38/pytorch/torch/_subclasses/fake_tensor.py", line 886, in __torch_dispatch__
    r = func(*args, **kwargs)
  File "/scratch/dberard/dynamo38/pytorch/torch/_ops.py", line 285, in __call__
    return self._op(*args, **kwargs or {})
RuntimeError: Attempted to set the storage of a tensor on device "meta" to a storage on different device "cuda:0". This is no longer allowed; the devices must match.
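The failing step at repro3.py:63 calls traced_mod(*converted_args), so converted_args are presumably the fake inputs additionally wrapped as functional tensors. A rough reconstruction, using PyTorch's private functionalization bindings as they existed around these December 2022 builds (reconstructed from the traceback, not taken from the gist):

converted_args = [torch._to_functional_tensor(a) for a in fake_args]
torch._enable_functionalization(reapply_views=True)
try:
    traced_mod(*converted_args)  # repro3.py line 63 -> RuntimeError above
finally:
    torch._disable_functionalization()

The error message suggests where the combination breaks: the cuDNN LSTM path performs a storage-level swap (a set_-style mutation) on one of its tensors, and under functionalization-over-FakeTensor that swap tries to install a real "cuda:0" storage into a tensor whose storage lives on "meta", which the device-match check rejects.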