root@t1v-n-621e873b-w-0:/workspaces/work# python3 pytorch/xla/test/test_dynamic_shape_backward_models.py
x_test_nonzero_dev.shape= torch.Size([<=80, 2])
y_test_nonzero_dev.shape= torch.Size([<=80])
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=79
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=102, function=is_same_shape: typeid(gradSymSizes).name()=N3c108ArrayRefINS_6SymIntEEE, typeid(shapeAsDimVector).name()=N3c108ArrayRefINS_6SymIntEEE
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=893, function=call_function: inputs 0:[], inputs[i].sym_sizes()=[]
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=915, function=call_function: has_post_hooks=false
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=919, function=call_function: about to run validate_outputs on fn.name()=BinaryCrossEntropyBackward0
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=102, function=is_same_shape: typeid(gradSymSizes).name()=N3c108ArrayRefINS_6SymIntEEE, typeid(shapeAsDimVector).name()=N3c108ArrayRefINS_6SymIntEEE
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=1, sci.is_symbolic()=1
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=79
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=893, function=call_function: inputs 0:[80], inputs[i].sym_sizes()=[<=80]
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=915, function=call_function: has_post_hooks=false
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=1, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=1
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=919, function=call_function: about to run validate_outputs on fn.name()=SqueezeBackward0
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=102, function=is_same_shape: typeid(gradSymSizes).name()=N3c108ArrayRefINS_6SymIntEEE, typeid(shapeAsDimVector).name()=N3c108ArrayRefINS_6SymIntEEE
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=1, sci.is_symbolic()=1
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=79
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=893, function=call_function: inputs 0:[80, 1], inputs[i].sym_sizes()=[<=80, 1]
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=915, function=call_function: has_post_hooks=false
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/generated/Functions.cpp, line=4618, function=apply:
xw32, file=torch_xla/csrc/tensor_methods.cpp, line=2194, function=sigmoid_backward:
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=919, function=call_function: about to run validate_outputs on fn.name()=SigmoidBackward0
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=102, function=is_same_shape: typeid(gradSymSizes).name()=N3c108ArrayRefINS_6SymIntEEE, typeid(shapeAsDimVector).name()=N3c108ArrayRefINS_6SymIntEEE
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=1
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=80, dim_node_1->getDynamicValue()=79
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=102, function=is_same_shape: typeid(gradSymSizes).name()=N3c108ArrayRefINS_6SymIntEEE, typeid(shapeAsDimVector).name()=N3c108ArrayRefINS_6SymIntEEE
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=1
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=80, dim_node_1->getDynamicValue()=79
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=809, function=validate_outputs: is_same_shape=0
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=111, function=is_expandable_to_shape: grad.is_nested()=0
xw32, file=/workspaces/work/pytorch/aten/src/ATen/ExpandUtils.h, line=504, function=is_expandable_to: ndim=2, target_dim=2
xw32, file=/workspaces/work/pytorch/aten/src/ATen/ExpandUtils.h, line=511, function=is_expandable_to: i=0, size=1, target=1
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=0, sci.is_symbolic()=0
xw32, file=/workspaces/work/pytorch/aten/src/ATen/ExpandUtils.h, line=516, function=is_expandable_to: succeeded for i=0
xw32, file=/workspaces/work/pytorch/aten/src/ATen/ExpandUtils.h, line=511, function=is_expandable_to: i=1, size=<=80, target=80
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=1, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=80
xw32, file=/workspaces/work/pytorch/c10/core/SymInt.cpp, line=99, function=operator==: is_symbolic()=1, sci.is_symbolic()=0
xw32, file=torch_xla/csrc/tensor.cpp, line=665, function=eq:
xw32, file=torch_xla/csrc/tensor.cpp, line=757, function=bool_:
xw32, file=torch_xla/csrc/ops/dynamic_ir.cpp, line=113, function=getDynamicValue: dim_node_0->getDynamicValue()=79, dim_node_1->getDynamicValue()=1
xw32, file=/workspaces/work/pytorch/aten/src/ATen/ExpandUtils.h, line=513, function=is_expandable_to: returning false for i=1
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/engine.cpp, line=813, function=validate_outputs: metadata.is_expandable_to_shape(grad) evaluates to false
xw32, file=/workspaces/work/pytorch/torch/csrc/autograd/input_metadata.h, line=131, function=incompatible_shape_error_message: grad.is_nested()=0
Traceback (most recent call last):
  File "pytorch/xla/test/test_dynamic_shape_backward_models.py", line 83, in <module>
    train(model, loss_fn=criterion, optimizer=optimizer)
  File "pytorch/xla/test/test_dynamic_shape_backward_models.py", line 70, in train
    loss.backward()
  File "/home/ptxla/.local/lib/python3.8/site-packages/torch/_tensor.py", line 484, in backward
    torch.autograd.backward(
  File "/home/ptxla/.local/lib/python3.8/site-packages/torch/autograd/__init__.py", line 197, in backward
    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
RuntimeError: Function SigmoidBackward0 returned an invalid gradient at index 0 - got [80, 1] but expected shape compatible with [<=80, 1]
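
What the trace shows: autograd's validate_outputs runs after each backward node. BinaryCrossEntropyBackward0 and SqueezeBackward0 pass, but SigmoidBackward0 returns a gradient whose first dimension is the static upper bound 80, while the recorded input metadata has the dynamic size <=80 (actual value 79), so both is_same_shape and the is_expandable_to fallback reject it.

For reference, below is a minimal sketch of the kind of script that triggers this failure. The exact contents of test_dynamic_shape_backward_models.py may differ; the Sequential model, BCELoss, SGD optimizer, and label clamping here are assumptions inferred from the backward nodes in the trace, and running it requires a torch_xla build with dynamic shapes enabled.

import torch
import torch_xla.core.xla_model as xm

device = xm.xla_device()
num_test_samples = 40

# Zero out one entry so torch.nonzero returns 79 of the <=80 possible rows,
# matching the getDynamicValue()=79 lines in the trace.
x_test = torch.ones(num_test_samples, 2, device=device)
x_test[0][0] = 0
y_test = torch.ones(num_test_samples * 2, device=device)
y_test[0] = 0

# On XLA, torch.nonzero produces an upper-bounded dynamic dimension,
# printed as torch.Size([<=80, 2]) and torch.Size([<=80]).
x_test_nonzero_dev = torch.nonzero(x_test.int()).float()            # [<=80, 2]
y_test_nonzero_dev = torch.nonzero(y_test.int()).float().squeeze()  # [<=80]
labels = y_test_nonzero_dev.clamp(max=1.0)  # keep BCE targets in [0, 1]

# Hypothetical model/loss/optimizer consistent with SigmoidBackward0,
# SqueezeBackward0, and BinaryCrossEntropyBackward0 appearing above.
model = torch.nn.Sequential(torch.nn.Linear(2, 1), torch.nn.Sigmoid()).to(device)
criterion = torch.nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

optimizer.zero_grad()
output = model(x_test_nonzero_dev).squeeze()  # [<=80, 1] -> [<=80]
loss = criterion(output, labels)
# Fails inside validate_outputs: SigmoidBackward0 produces a gradient with
# the static shape [80, 1] instead of the expected dynamic [<=80, 1].
loss.backward()
optimizer.step()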