def test_trace_size(self):
    def fn(x):
        return x.view(x.shape[1] * 2, x.size(0), 2)

    x = torch.randn(5, 2, 4, requires_grad=True)
    y = torch.randn(4, 8, 4)

    # Check that it behaves as expected
    traced_fn = torch.jit.trace(x)(fn)
    self.assertEqual(traced_fn(y), fn(y))
    self.assertEqual(traced_fn(x), fn(x))

    # Check that the trace looks ok
    trace, _ = torch.jit.get_trace_graph(fn, (x,))
    self.assertExpectedTrace(trace)
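A quick eager-mode sanity check of what `fn` computes for the two inputs (my own sketch, not part of the original gist): the view target depends on the input's sizes, which is exactly what forces the trace to record `size` ops instead of baking in constants.

import torch

def fn(x):
    return x.view(x.shape[1] * 2, x.size(0), 2)

x = torch.randn(5, 2, 4)
y = torch.randn(4, 8, 4)
assert fn(x).shape == (4, 5, 2)    # 2*2, 5, 2
assert fn(y).shape == (16, 4, 2)   # 8*2, 4, 2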
=====
running stage: 0 of 1
graph() {
%7 : Dynamic = prim::Store()
%1 : Long() = aten::size[dim=1](%7)
%2 : Dynamic = aten::mul[other={2}](%1)
%3 : Long() = aten::size[dim=0](%7)
%4 : Long() = prim::Constant[value={2}]()
%5 : Dynamic = aten::stack[dim=0](%2, %3, %4)
%6 : Dynamic = aten::view(%7, %5)
= prim::Load(%6)
return ();
}
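To make the graph easier to read, here is a rough eager-mode equivalent (my own reconstruction from the node listing, not generated output): each size is captured as a 0-dim Long tensor, the pieces are combined with `stack`, and the resulting shape tensor is handed to `view`. The `stack` over these scalar Long tensors is the node that fails in the second run below.

import torch

def traced_equivalent(inp):
    s1 = torch.tensor(inp.size(1))           # %1: aten::size[dim=1]
    doubled = s1 * 2                          # %2: aten::mul[other={2}]
    s0 = torch.tensor(inp.size(0))            # %3: aten::size[dim=0]
    two = torch.tensor(2)                     # %4: prim::Constant[value={2}]
    shape = torch.stack((doubled, s0, two))   # %5: aten::stack[dim=0]
    return inp.view(*shape.tolist())          # %6: aten::view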
0 = Store
1 = size 0
2 = mul move(1)
3 = size 0
4 = Constant
5 = stack move(2), move(3), move(4)
6 = view move(0), move(5)
= Load move(6)
executing 0: 0 = Store
pop reg[0];
0x7fbd000e1e90
executing 1: 1 = size 0
push reg[0];
(1,.,.) =
3.1580 -0.8736 1.3865 1.4566
0.3330 -1.8784 -0.5407 0.4879
-0.2758 1.5137 -0.2111 1.8952
-0.5660 0.0936 0.0163 -1.5740
0.0979 -0.4209 -1.0568 -0.5242
0.8876 0.6669 1.4653 1.2939
-2.1600 -0.9112 0.2039 -0.0287
-0.8024 -0.8119 -0.6031 -0.9115
(2,.,.) =
-1.9934 -0.2218 1.1089 0.4606
1.6974 0.2270 1.7200 -0.2040
-0.3356 -1.4493 -0.1912 -0.0708
0.6092 1.2009 -0.1737 0.7159
-0.2119 0.9687 -0.7972 -0.5198
-0.2389 0.7222 0.2283 1.2084
-0.1505 -0.0949 1.3746 0.6525
-1.3448 -0.2770 -0.2190 -0.2331
(3,.,.) =
0.1825 -0.5591 1.3220 -0.1320
0.1523 0.2832 -1.0018 -1.4634
0.6359 -0.8019 -0.9232 0.3543
-0.6966 -0.1955 0.3845 0.2277
-0.2403 1.3721 0.3951 -0.5223
-0.5406 0.4327 0.1002 -0.8430
-0.2169 -0.0946 -0.8139 -1.9160
0.1520 1.1471 -0.9900 -0.8709
(4,.,.) =
0.4399 1.4242 1.1418 -0.7383
0.7817 -0.4334 -1.6740 0.0904
1.2376 1.1928 0.7885 -0.9496
-1.5112 -1.4846 1.0594 0.1604
0.4687 -0.3111 0.8251 -1.1349
-1.4605 1.3864 -1.2217 -0.4275
-1.9310 -0.6847 0.2256 1.1233
0.5109 0.5698 0.6596 0.4295
[ CPUDoubleTensor{4,8,4} ]
pop reg[1];
0x7fbd00513bf0
executing 2: 2 = mul move(1)
push reg[1];
8
[ CPULongTensor{} ]
pop reg[2];
0x7fbd004900a0
executing 3: 3 = size 0
push reg[0];
(1,.,.) =
3.1580 -0.8736 1.3865 1.4566
0.3330 -1.8784 -0.5407 0.4879
-0.2758 1.5137 -0.2111 1.8952
-0.5660 0.0936 0.0163 -1.5740
0.0979 -0.4209 -1.0568 -0.5242
0.8876 0.6669 1.4653 1.2939
-2.1600 -0.9112 0.2039 -0.0287
-0.8024 -0.8119 -0.6031 -0.9115
(2,.,.) =
-1.9934 -0.2218 1.1089 0.4606
1.6974 0.2270 1.7200 -0.2040
-0.3356 -1.4493 -0.1912 -0.0708
0.6092 1.2009 -0.1737 0.7159
-0.2119 0.9687 -0.7972 -0.5198
-0.2389 0.7222 0.2283 1.2084
-0.1505 -0.0949 1.3746 0.6525
-1.3448 -0.2770 -0.2190 -0.2331
(3,.,.) =
0.1825 -0.5591 1.3220 -0.1320
0.1523 0.2832 -1.0018 -1.4634
0.6359 -0.8019 -0.9232 0.3543
-0.6966 -0.1955 0.3845 0.2277
-0.2403 1.3721 0.3951 -0.5223
-0.5406 0.4327 0.1002 -0.8430
-0.2169 -0.0946 -0.8139 -1.9160
0.1520 1.1471 -0.9900 -0.8709
(4,.,.) =
0.4399 1.4242 1.1418 -0.7383
0.7817 -0.4334 -1.6740 0.0904
1.2376 1.1928 0.7885 -0.9496
-1.5112 -1.4846 1.0594 0.1604
0.4687 -0.3111 0.8251 -1.1349
-1.4605 1.3864 -1.2217 -0.4275
-1.9310 -0.6847 0.2256 1.1233
0.5109 0.5698 0.6596 0.4295
[ CPUDoubleTensor{4,8,4} ]
pop reg[3];
0x7fbd00513bf0
executing 4: 4 = Constant
pop reg[4];
0x7fbd00619ef0
executing 5: 5 = stack move(2), move(3), move(4)
push reg[2];
16
[ CPULongTensor{} ]
push reg[3];
4
[ CPULongTensor{} ]
push reg[4];
2
[ CPULongTensor{} ]
pop reg[5];
0x7fbd00448cb0
executing 6: 6 = view move(0), move(5)
push reg[0];
(1,.,.) =
3.1580 -0.8736 1.3865 1.4566
0.3330 -1.8784 -0.5407 0.4879
-0.2758 1.5137 -0.2111 1.8952
-0.5660 0.0936 0.0163 -1.5740
0.0979 -0.4209 -1.0568 -0.5242
0.8876 0.6669 1.4653 1.2939
-2.1600 -0.9112 0.2039 -0.0287
-0.8024 -0.8119 -0.6031 -0.9115
(2,.,.) =
-1.9934 -0.2218 1.1089 0.4606
1.6974 0.2270 1.7200 -0.2040
-0.3356 -1.4493 -0.1912 -0.0708
0.6092 1.2009 -0.1737 0.7159
-0.2119 0.9687 -0.7972 -0.5198
-0.2389 0.7222 0.2283 1.2084
-0.1505 -0.0949 1.3746 0.6525
-1.3448 -0.2770 -0.2190 -0.2331
(3,.,.) =
0.1825 -0.5591 1.3220 -0.1320
0.1523 0.2832 -1.0018 -1.4634
0.6359 -0.8019 -0.9232 0.3543
-0.6966 -0.1955 0.3845 0.2277
-0.2403 1.3721 0.3951 -0.5223
-0.5406 0.4327 0.1002 -0.8430
-0.2169 -0.0946 -0.8139 -1.9160
0.1520 1.1471 -0.9900 -0.8709
(4,.,.) =
0.4399 1.4242 1.1418 -0.7383
0.7817 -0.4334 -1.6740 0.0904
1.2376 1.1928 0.7885 -0.9496
-1.5112 -1.4846 1.0594 0.1604
0.4687 -0.3111 0.8251 -1.1349
-1.4605 1.3864 -1.2217 -0.4275
-1.9310 -0.6847 0.2256 1.1233
0.5109 0.5698 0.6596 0.4295
[ CPUDoubleTensor{4,8,4} ]
push reg[5];
16
4
2
[ CPULongTensor{3} ]
pop reg[6];
0x7fbd00513bf0
executing 7: = Load move(6)
push reg[6];
(1,.,.) =
3.1580 -0.8736
1.3865 1.4566
0.3330 -1.8784
-0.5407 0.4879
(2,.,.) =
-0.2758 1.5137
-0.2111 1.8952
-0.5660 0.0936
0.0163 -1.5740
(3,.,.) =
0.0979 -0.4209
-1.0568 -0.5242
0.8876 0.6669
1.4653 1.2939
(4,.,.) =
-2.1600 -0.9112
0.2039 -0.0287
-0.8024 -0.8119
-0.6031 -0.9115
(5,.,.) =
-1.9934 -0.2218
1.1089 0.4606
1.6974 0.2270
1.7200 -0.2040
(6,.,.) =
-0.3356 -1.4493
-0.1912 -0.0708
0.6092 1.2009
-0.1737 0.7159
(7,.,.) =
-0.2119 0.9687
-0.7972 -0.5198
-0.2389 0.7222
0.2283 1.2084
(8,.,.) =
-0.1505 -0.0949
1.3746 0.6525
-1.3448 -0.2770
-0.2190 -0.2331
(9,.,.) =
0.1825 -0.5591
1.3220 -0.1320
0.1523 0.2832
-1.0018 -1.4634
(10,.,.) =
0.6359 -0.8019
-0.9232 0.3543
-0.6966 -0.1955
0.3845 0.2277
(11,.,.) =
-0.2403 1.3721
0.3951 -0.5223
-0.5406 0.4327
0.1002 -0.8430
(12,.,.) =
-0.2169 -0.0946
-0.8139 -1.9160
0.1520 1.1471
-0.9900 -0.8709
(13,.,.) =
0.4399 1.4242
1.1418 -0.7383
0.7817 -0.4334
-1.6740 0.0904
(14,.,.) =
1.2376 1.1928
0.7885 -0.9496
-1.5112 -1.4846
1.0594 0.1604
(15,.,.) =
0.4687 -0.3111
0.8251 -1.1349
-1.4605 1.3864
-1.2217 -0.4275
(16,.,.) =
-1.9310 -0.6847
0.2256 1.1233
0.5109 0.5698
0.6596 0.4295
[ CPUDoubleTensor{16,4,2} ]
running stage: 0 of 1
graph() {
%7 : Dynamic = prim::Store()
%1 : Long() = aten::size[dim=1](%7)
%2 : Long() = aten::mul[other={2}](%1)
%3 : Long() = aten::size[dim=0](%7)
%4 : Long() = prim::Constant[value={2}]()
%5 : Dynamic = aten::stack[dim=0](%2, %3, %4)
%6 : Double(4, 5, 2) = aten::view(%7, %5)
= prim::Load(%6)
return ();
}
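Note the difference from the first graph: the graph printed for the second call now carries concrete type annotations, with `%2` typed `Long()` instead of `Dynamic` and `%6` annotated `Double(4, 5, 2)`, the shape expected for `x`. The annotation checks out (my own arithmetic, not from the log):

x_shape = (5, 2, 4)
target = (x_shape[1] * 2, x_shape[0], 2)
assert target == (4, 5, 2)             # matches the Double(4, 5, 2) annotation on %6
assert 4 * 5 * 2 == 5 * 2 * 4 == 40    # same element count, so the view is legal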
0 = Store
1 = size 0
2 = mul move(1)
3 = size 0
4 = Constant
5 = stack move(2), move(3), move(4)
6 = view move(0), move(5)
= Load move(6)
executing 0: 0 = Store
pop reg[0];
0x7fbd000e0d50
executing 1: 1 = size 0
push reg[0];
(1,.,.) =
-0.0883 0.3420 0.4112 1.0051
-0.1117 -0.5988 -0.0982 -0.3511
(2,.,.) =
0.7209 -0.2169 -1.0427 0.2448
-0.9887 -0.5196 0.6585 0.6406
(3,.,.) =
0.7839 0.6573 -1.6348 -0.6108
0.6003 -0.8769 0.9649 -0.1926
(4,.,.) =
0.6152 0.9267 1.5431 -0.5705
1.6676 0.4235 -1.5994 1.8092
(5,.,.) =
0.9826 0.4800 2.4168 -0.1189
0.5960 -0.5618 -0.2084 0.3178
[ Variable[CPUDoubleType]{5,2,4} ]
pop reg[1];
0x7fbd00619750
executing 2: 2 = mul move(1)
push reg[1];
2
[ CPULongTensor{} ]
pop reg[2];
0x7fbd0061c330
executing 3: 3 = size 0
push reg[0];
(1,.,.) =
-0.0883 0.3420 0.4112 1.0051
-0.1117 -0.5988 -0.0982 -0.3511
(2,.,.) =
0.7209 -0.2169 -1.0427 0.2448
-0.9887 -0.5196 0.6585 0.6406
(3,.,.) =
0.7839 0.6573 -1.6348 -0.6108
0.6003 -0.8769 0.9649 -0.1926
(4,.,.) =
0.6152 0.9267 1.5431 -0.5705
1.6676 0.4235 -1.5994 1.8092
(5,.,.) =
0.9826 0.4800 2.4168 -0.1189
0.5960 -0.5618 -0.2084 0.3178
[ Variable[CPUDoubleType]{5,2,4} ]
pop reg[3];
0x7fbd00513bf0
executing 4: 4 = Constant
pop reg[4];
0x7fbd0061e4f0
executing 5: 5 = stack move(2), move(3), move(4)
push reg[2];
4
[ CPULongTensor{} ]
push reg[3];
5
[ CPULongTensor{} ]
push reg[4];
2
[ Variable[CPULongType]{} ]
E
======================================================================
ERROR: test_trace_size (__main__.TestJit)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test/test_jit.py", line 786, in test_trace_size
self.assertEqual(traced_fn(x), fn(x))
RuntimeError: Expected a Tensor of type CPULongType but found a type Variable[CPULongType] for sequence element 2 in sequence argument at position #1 'tensors' (tensor_list_checked_cast at ../src/ATen/Utils.h:45)
frame #0: std::__1::vector<THLongTensor*, std::__1::allocator<THLongTensor*> > at::tensor_list_checked_cast<at::CPULongTensor, at::Tensor, THLongTensor>(at::ArrayRef<at::Tensor>, char const*, int) + 705 (0x1162d0f91 in libATen.dylib)
frame #1: at::CPULongType::_cat(at::ArrayRef<at::Tensor>, long long) const + 187 (0x1162d111b in libATen.dylib)
frame #2: at::_cat(at::ArrayRef<at::Tensor>, long long) + 110 (0x11606565e in libATen.dylib)
frame #3: at::native::cat(at::ArrayRef<at::Tensor>, long long) + 137 (0x1160655d9 in libATen.dylib)
frame #4: at::Type::cat(at::ArrayRef<at::Tensor>, long long) const + 64 (0x11636ddf0 in libATen.dylib)
frame #5: at::cat(at::ArrayRef<at::Tensor>, long long) + 110 (0x11607196e in libATen.dylib)
frame #6: at::native::stack(at::ArrayRef<at::Tensor>, long long) + 284 (0x1160718bc in libATen.dylib)
frame #7: at::Type::stack(at::ArrayRef<at::Tensor>, long long) const + 64 (0x116375e70 in libATen.dylib)
frame #8: at::stack(at::ArrayRef<at::Tensor>, long long) + 110 (0x11288e52e in _C.cpython-36m-darwin.so)
frame #9: torch::jit::(anonymous namespace)::$_963::operator()(torch::jit::Node*) const::'lambda'(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&)::operator()(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&) const + 175 (0x112890ccf in _C.cpython-36m-darwin.so)
frame #10: int std::__1::__invoke_void_return_wrapper<int>::__call<torch::jit::(anonymous namespace)::$_963::operator()(torch::jit::Node*) const::'lambda'(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&)&, std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&>(torch::jit::(anonymous namespace)::$_963::operator()(torch::jit::Node*) const::'lambda'(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&)&&&, std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&&&) + 77 (0x112890c0d in _C.cpython-36m-darwin.so)
frame #11: std::__1::__function::__func<torch::jit::(anonymous namespace)::$_963::operator()(torch::jit::Node*) const::'lambda'(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&), std::__1::allocator<torch::jit::(anonymous namespace)::$_963::operator()(torch::jit::Node*) const::'lambda'(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&)>, int (std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&)>::operator()(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&) + 57 (0x112890b09 in _C.cpython-36m-darwin.so)
frame #12: std::__1::function<int (std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&)>::operator()(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&) const + 142 (0x111a1995e in _C.cpython-36m-darwin.so)
frame #13: torch::jit::InterpreterStateImpl::runOneStage(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&) + 691 (0x1119e5e83 in _C.cpython-36m-darwin.so)
frame #14: torch::jit::InterpreterState::runOneStage(std::__1::vector<at::Tensor, std::__1::allocator<at::Tensor> >&) + 40 (0x1119e5bc8 in _C.cpython-36m-darwin.so)
frame #15: torch::jit::GraphExecutorImpl::runFallback(torch::jit::variable_tensor_list) + 101 (0x111ab7935 in _C.cpython-36m-darwin.so)
frame #16: torch::jit::GraphExecutorImpl::run(torch::jit::variable_tensor_list) + 2206 (0x111ab5f4e in _C.cpython-36m-darwin.so)
frame #17: torch::jit::GraphExecutor::run(torch::jit::variable_tensor_list&&) + 94 (0x111ab566e in _C.cpython-36m-darwin.so)
frame #18: torch::jit::initJITBindings(_object*)::$_8::operator()(torch::jit::GraphExecutor&, pybind11::args) const + 148 (0x1119e05f4 in _C.cpython-36m-darwin.so)
frame #19: pybind11::object pybind11::detail::argument_loader<torch::jit::GraphExecutor&, pybind11::args>::call_impl<pybind11::object, torch::jit::initJITBindings(_object*)::$_8&, 0ul, 1ul, pybind11::detail::void_type>(torch::jit::initJITBindings(_object*)::$_8&&&, pybind11::detail::index_sequence<0ul, 1ul>, pybind11::detail::void_type&&) + 195 (0x1119e0523 in _C.cpython-36m-darwin.so)
frame #20: std::__1::enable_if<!(std::is_void<pybind11::object>::value), pybind11::object>::type pybind11::detail::argument_loader<torch::jit::GraphExecutor&, pybind11::args>::call<pybind11::object, pybind11::detail::void_type, torch::jit::initJITBindings(_object*)::$_8&>(torch::jit::initJITBindings(_object*)::$_8&&&) + 56 (0x1119dfbe8 in _C.cpython-36m-darwin.so)
frame #21: void pybind11::cpp_function::initialize<torch::jit::initJITBindings(_object*)::$_8, pybind11::object, torch::jit::GraphExecutor&, pybind11::args, pybind11::name, pybind11::is_method, pybind11::sibling>(torch::jit::initJITBindings(_object*)::$_8&&, pybind11::object (*)(torch::jit::GraphExecutor&, pybind11::args), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::'lambda'(pybind11::detail::function_call&)::operator()(pybind11::detail::function_call&) const + 225 (0x1119dfa21 in _C.cpython-36m-darwin.so)
frame #22: void pybind11::cpp_function::initialize<torch::jit::initJITBindings(_object*)::$_8, pybind11::object, torch::jit::GraphExecutor&, pybind11::args, pybind11::name, pybind11::is_method, pybind11::sibling>(torch::jit::initJITBindings(_object*)::$_8&&, pybind11::object (*)(torch::jit::GraphExecutor&, pybind11::args), pybind11::name const&, pybind11::is_method const&, pybind11::sibling const&)::'lambda'(pybind11::detail::function_call&)::__invoke(pybind11::detail::function_call&) + 24 (0x1119df928 in _C.cpython-36m-darwin.so)
frame #23: pybind11::cpp_function::dispatcher(_object*, _object*, _object*) + 6919 (0x1117b91a7 in _C.cpython-36m-darwin.so)
frame #24: _PyCFunction_FastCallDict + 183 (0x10f8fccc7 in libpython3.6m.dylib)
frame #25: _PyObject_FastCallDict + 326 (0x10f8b0fa6 in libpython3.6m.dylib)
frame #26: _PyObject_Call_Prepend + 149 (0x10f8b10b5 in libpython3.6m.dylib)
frame #27: PyObject_Call + 97 (0x10f8b0d51 in libpython3.6m.dylib)
frame #28: slot_tp_call + 117 (0x10f914285 in libpython3.6m.dylib)
frame #29: _PyObject_FastCallDict + 257 (0x10f8b0f61 in libpython3.6m.dylib)
frame #30: call_function + 184 (0x10f980658 in libpython3.6m.dylib)
frame #31: _PyEval_EvalFrameDefault + 24936 (0x10f97d0b8 in libpython3.6m.dylib)
frame #32: fast_function + 488 (0x10f981e78 in libpython3.6m.dylib)
frame #33: call_function + 401 (0x10f980731 in libpython3.6m.dylib)
frame #34: _PyEval_EvalFrameDefault + 24936 (0x10f97d0b8 in libpython3.6m.dylib)
frame #35: _PyEval_EvalCodeWithName + 3436 (0x10f98159c in libpython3.6m.dylib)
frame #36: _PyFunction_FastCallDict + 891 (0x10f9822bb in libpython3.6m.dylib)
frame #37: _PyObject_FastCallDict + 307 (0x10f8b0f93 in libpython3.6m.dylib)
frame #38: _PyObject_Call_Prepend + 149 (0x10f8b10b5 in libpython3.6m.dylib)
frame #39: PyObject_Call + 97 (0x10f8b0d51 in libpython3.6m.dylib)
frame #40: _PyEval_EvalFrameDefault + 25540 (0x10f97d314 in libpython3.6m.dylib)
frame #41: _PyEval_EvalCodeWithName + 3436 (0x10f98159c in libpython3.6m.dylib)
frame #42: _PyFunction_FastCallDict + 891 (0x10f9822bb in libpython3.6m.dylib)
frame #43: _PyObject_FastCallDict + 307 (0x10f8b0f93 in libpython3.6m.dylib)
frame #44: _PyObject_Call_Prepend + 149 (0x10f8b10b5 in libpython3.6m.dylib)
frame #45: PyObject_Call + 97 (0x10f8b0d51 in libpython3.6m.dylib)
frame #46: slot_tp_call + 117 (0x10f914285 in libpython3.6m.dylib)
frame #47: _PyObject_FastCallDict + 257 (0x10f8b0f61 in libpython3.6m.dylib)
frame #48: call_function + 184 (0x10f980658 in libpython3.6m.dylib)
frame #49: _PyEval_EvalFrameDefault + 24936 (0x10f97d0b8 in libpython3.6m.dylib)
frame #50: _PyEval_EvalCodeWithName + 3436 (0x10f98159c in libpython3.6m.dylib)
frame #51: _PyFunction_FastCallDict + 891 (0x10f9822bb in libpython3.6m.dylib)
frame #52: _PyObject_FastCallDict + 307 (0x10f8b0f93 in libpython3.6m.dylib)
frame #53: _PyObject_Call_Prepend + 149 (0x10f8b10b5 in libpython3.6m.dylib)
frame #54: PyObject_Call + 97 (0x10f8b0d51 in libpython3.6m.dylib)
frame #55: _PyEval_EvalFrameDefault + 25540 (0x10f97d314 in libpython3.6m.dylib)
frame #56: _PyEval_EvalCodeWithName + 3436 (0x10f98159c in libpython3.6m.dylib)
frame #57: _PyFunction_FastCallDict + 891 (0x10f9822bb in libpython3.6m.dylib)
frame #58: _PyObject_FastCallDict + 307 (0x10f8b0f93 in libpython3.6m.dylib)
frame #59: _PyObject_Call_Prepend + 149 (0x10f8b10b5 in libpython3.6m.dylib)
frame #60: PyObject_Call + 97 (0x10f8b0d51 in libpython3.6m.dylib)
frame #61: slot_tp_call + 117 (0x10f914285 in libpython3.6m.dylib)
frame #62: _PyObject_FastCallDict + 257 (0x10f8b0f61 in libpython3.6m.dylib)
frame #63: call_function + 184 (0x10f980658 in libpython3.6m.dylib)
----------------------------------------------------------------------
Ran 1 test in 0.024s
FAILED (errors=1)
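Reading the failure: when the second run reaches `stack`, the first two operands popped are plain `CPULongTensor`s (the `4` and `5` above) but the constant `2` arrives as a `Variable[CPULongType]`, and ATen's `tensor_list_checked_cast` insists that every element of the tensor list share one concrete type. A minimal Python sketch of that invariant (illustrative stand-in only; the real check is the C++ template in `../src/ATen/Utils.h` named in the error):

def tensor_list_checked_cast(tensors, expected="CPULongType"):
    # Hypothetical stand-in for the ATen helper of the same name:
    # every (value, type_name) pair must already have the expected type.
    for i, (value, type_name) in enumerate(tensors):
        if type_name != expected:
            raise RuntimeError(
                "Expected a Tensor of type {} but found a type {} for "
                "sequence element {}".format(expected, type_name, i))

# Operands of the failing stack, per the log above:
operands = [(4, "CPULongType"), (5, "CPULongType"), (2, "Variable[CPULongType]")]
tensor_list_checked_cast(operands)  # raises on element 2, mirroring the test error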