@soulitzer
Created June 19, 2023 20:12
fx graph after inlining module call
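A minimal sketch (not the exact test harness used below) of how graphs like these can be captured: compile a function that calls an nn.Module with a custom Dynamo backend that prints the captured FX graph via print_tabular(). The backend name, module choice, and input shapes here are illustrative assumptions.

    import torch

    def print_graph_backend(gm: torch.fx.GraphModule, example_inputs):
        # Print the opcode/name/target/args/kwargs table seen in the log below.
        gm.graph.print_tabular()
        # Run the captured graph eagerly.
        return gm.forward

    m = torch.nn.Linear(4, 4)  # illustrative module; the tests sweep many nn modules

    @torch.compile(backend=print_graph_backend, fullgraph=True)
    def fn(x):
        # The module call is inlined, so weight/bias show up as get_attr nodes
        # and the functional op (e.g. linear) as a call_function node.
        return m(x)

    fn(torch.randn(2, 4))
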
test_dynamo_inline_module_nn_AdaptiveAvgPool1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------- ------------------------------------------------------------------- ------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function adaptive_avg_pool1d <built-in method adaptive_avg_pool1d of type object at 0x103ccc7e8> (l_args_0_, 3) {}
output output output ((adaptive_avg_pool1d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_AdaptiveAvgPool2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------- --------------------------------------------- ------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function adaptive_avg_pool2d <function adaptive_avg_pool2d at 0x10513a9d0> (l_args_0_, 3) {}
output output output ((adaptive_avg_pool2d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_AdaptiveAvgPool3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------- --------------------------------------------- ------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function adaptive_avg_pool3d <function adaptive_avg_pool3d at 0x10513aa60> (l_args_0_, 3) {}
output output output ((adaptive_avg_pool3d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_AdaptiveMaxPool1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------- ------------------------------------------------------ ------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function adaptive_max_pool1d <function boolean_dispatch.<locals>.fn at 0x10513a5e0> (l_args_0_, 3, False) {}
output output output ((adaptive_max_pool1d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_AdaptiveMaxPool2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------- ------------------------------------------------------ ------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function adaptive_max_pool2d <function boolean_dispatch.<locals>.fn at 0x10513a790> (l_args_0_, 3, False) {}
output output output ((adaptive_max_pool2d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_AdaptiveMaxPool3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------- ------------------------------------------------------ ------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function adaptive_max_pool3d <function boolean_dispatch.<locals>.fn at 0x10513a940> (l_args_0_, 3, False) {}
output output output ((adaptive_max_pool3d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_AvgPool1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ---------------------------------------------------------- ------------------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function avg_pool1d <built-in method avg_pool1d of type object at 0x103ccc7e8> (l_args_0_, (2,), (2,), (0,), False, True) {}
output output output ((avg_pool1d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_AvgPool2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ------------------------------ ------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function avg_pool2d <built-in function avg_pool2d> (l_args_0_, (2, 2), (2, 2), 0, False, True, None) {}
output output output ((avg_pool2d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_AvgPool3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ------------------------------ ------------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function avg_pool3d <built-in function avg_pool3d> (l_args_0_, (2, 2, 2), (2, 2, 2), 0, False, True, None) {}
output output output ((avg_pool3d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_BatchNorm1d_eval_mode_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_BatchNorm1d_train_mode_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_BatchNorm2d_eval_mode_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_BatchNorm2d_train_mode_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_BatchNorm3d_eval_mode_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_BatchNorm3d_train_mode_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_Bilinear_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- -------------------------------------------------------- -------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function bilinear <built-in method bilinear of type object at 0x103ccc7e8> (l_args_0_, l_args_1_, l__m___weight, l__m___bias) {}
output output output ((bilinear,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_CELU_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ------------------------------ ----------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function celu <function celu at 0x10513e280> (l_args_0_, 2.0, False) {}
output output output ((celu,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ConstantPad1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- ---------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1), 'constant', 2) {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ConstantPad2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- ---------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1, 1, 1), 'constant', 3) {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ConstantPad3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- ---------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1, 1, 1, 1, 1), 'constant', 3) {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Conv1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- ------------------------------------------------------ ------------------------------------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function conv1d <built-in method conv1d of type object at 0x103ccc7e8> (l_args_0_, l__m___weight, l__m___bias, (1,), (0,), (1,), 1) {}
output output output ((conv1d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Conv2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- ------------------------------------------------------ ------------------------------------------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function conv2d <built-in method conv2d of type object at 0x103ccc7e8> (l_args_0_, l__m___weight, l__m___bias, (1, 1), (0, 0), (1, 1), 1) {}
output output output ((conv2d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Conv3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- ------------------------------------------------------ --------------------------------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function conv3d <built-in method conv3d of type object at 0x103ccc7e8> (l_args_0_, l__m___weight, l__m___bias, (1, 1, 1), (0, 0, 0), (1, 1, 1), 1) {}
output output output ((conv3d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ConvTranspose1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------- ---------------------------------------------------------------- ------------------------------------------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function conv_transpose1d <built-in method conv_transpose1d of type object at 0x103ccc7e8> (l_args_0_, l__m___weight, l__m___bias, (1,), (0,), (0,), 1, (1,)) {}
output output output ((conv_transpose1d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ConvTranspose2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------- ---------------------------------------------------------------- -------------------------------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function conv_transpose2d <built-in method conv_transpose2d of type object at 0x103ccc7e8> (l_args_0_, l__m___weight, l__m___bias, (1, 1), (0, 0), (0, 0), 1, (1, 1)) {}
output output output ((conv_transpose2d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ConvTranspose3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------- ---------------------------------------------------------------- -------------------------------------------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function conv_transpose3d <built-in method conv_transpose3d of type object at 0x103ccc7e8> (l_args_0_, l__m___weight, l__m___bias, (1, 1, 1), (0, 0, 0), (0, 0, 0), 1, (1, 1, 1)) {}
output output output ((conv_transpose3d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_CrossEntropyLoss_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- --------------------------------------- ---------------------- ----------------------------------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
call_function cross_entropy <function cross_entropy at 0x105140430> (l_args_0_, l_args_1_) {'weight': None, 'ignore_index': -100, 'reduction': 'sum', 'label_smoothing': 0.0}
output output output ((cross_entropy,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ELU_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------------- ----------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function elu <function elu at 0x10513e160> (l_args_0_, 2.0, False) {}
output output output ((elu,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Embedding_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- ----------------------------------- --------------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
call_function embedding <function embedding at 0x10513ec10> (l_args_0_, l__m___weight, None, None, 2.0, False, False) {}
output output output ((embedding,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_FractionalMaxPool2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------------- ------------------------------------------------------ -------------------------------------------- -------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m____random_samples L__m____random_samples () {}
call_function fractional_max_pool2d <function boolean_dispatch.<locals>.fn at 0x1051369d0> (l_args_0_, (2, 2), None, (0.5, 0.5), False) {'_random_samples': l__m____random_samples}
output output output ((fractional_max_pool2d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_FractionalMaxPool3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------------- ------------------------------------------------------ ---------------------------------------------------- -------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m____random_samples L__m____random_samples () {}
call_function fractional_max_pool3d <function boolean_dispatch.<locals>.fn at 0x105136b80> (l_args_0_, (2, 2, 2), None, (0.5, 0.5, 0.5), False) {'_random_samples': l__m____random_samples}
output output output ((fractional_max_pool3d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_GELU_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ------------------------ ------------ -----------------------
placeholder l_args_0_ L_args_0_ () {}
call_function gelu <built-in function gelu> (l_args_0_,) {'approximate': 'none'}
output output output ((gelu,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_GLU_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------------- --------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function glu <function glu at 0x10513af70> (l_args_0_, -1) {}
output output output ((glu,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_GRUCell_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------- -------------------------------------------------------- -------------------------------------------------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
call_method unsqueeze unsqueeze (l_args_0_, 0) {}
call_method unsqueeze_1 unsqueeze (l_args_1_, 0) {}
get_attr l__m___weight_ih L__m___weight_ih () {}
get_attr l__m___weight_hh L__m___weight_hh () {}
get_attr l__m___bias_ih L__m___bias_ih () {}
get_attr l__m___bias_hh L__m___bias_hh () {}
call_function gru_cell <built-in method gru_cell of type object at 0x103ccc7e8> (unsqueeze, unsqueeze_1, l__m___weight_ih, l__m___weight_hh, l__m___bias_ih, l__m___bias_hh) {}
call_method squeeze squeeze (gru_cell, 0) {}
output output output ((squeeze,),) {}
inline_call []
stats [('calls_captured', 4), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_GRU_eval_mode_cpu_float32 (__main__.TestTempCPU) ...
unimplemented [('TorchDynamo purposely graph breaks on RNN, GRU, LSTMs', 1)]
expected failure
test_dynamo_inline_module_nn_GRU_train_mode_cpu_float32 (__main__.TestTempCPU) ...
unimplemented [('TorchDynamo purposely graph breaks on RNN, GRU, LSTMs', 1)]
expected failure
test_dynamo_inline_module_nn_GaussianNLLLoss_cpu_float32 (__main__.TestTempCPU) ...
inline_call [('data dependent operator: aten._local_scalar_dense.default', 1)]
unimplemented [('data dependent operator: aten._local_scalar_dense.default', 1)]
expected failure
test_dynamo_inline_module_nn_GroupNorm_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- ------------------------------------ ------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function group_norm <function group_norm at 0x105140040> (l_args_0_, 3, l__m___weight, l__m___bias, 0.001) {}
output output output ((group_norm,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Hardshrink_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ---------------------------------------------------------- ---------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function hardshrink <built-in method hardshrink of type object at 0x103ccc7e8> (l_args_0_, 2.0) {}
output output output ((hardshrink,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Hardswish_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------------------- ------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function hardswish <function hardswish at 0x10513eaf0> (l_args_0_, False) {}
output output output ((hardswish,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Hardtanh_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ---------------------------------- ----------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function hardtanh <function hardtanh at 0x10513e040> (l_args_0_, -1.0, 1.0, False) {}
output output output ((hardtanh,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_InstanceNorm1d_eval_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- --------------------------------------- ----------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function instance_norm <function instance_norm at 0x10513eee0> (l_args_0_, None, None, None, None, True, 0.3, 0.001) {}
output output output ((instance_norm,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_InstanceNorm1d_train_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- --------------------------------------- ----------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function instance_norm <function instance_norm at 0x10513eee0> (l_args_0_, None, None, None, None, True, 0.3, 0.001) {}
output output output ((instance_norm,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_InstanceNorm2d_eval_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- --------------------------------------- ----------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function instance_norm <function instance_norm at 0x10513eee0> (l_args_0_, None, None, None, None, True, 0.3, 0.001) {}
output output output ((instance_norm,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_InstanceNorm2d_train_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- --------------------------------------- ----------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function instance_norm <function instance_norm at 0x10513eee0> (l_args_0_, None, None, None, None, True, 0.3, 0.001) {}
output output output ((instance_norm,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_InstanceNorm3d_eval_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- --------------------------------------- ----------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function instance_norm <function instance_norm at 0x10513eee0> (l_args_0_, None, None, None, None, True, 0.3, 0.001) {}
output output output ((instance_norm,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_InstanceNorm3d_train_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- --------------------------------------- ----------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function instance_norm <function instance_norm at 0x10513eee0> (l_args_0_, None, None, None, None, True, 0.3, 0.001) {}
output output output ((instance_norm,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_L1Loss_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- --------------------------------- ---------------------- ---------------------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
call_function l1_loss <function l1_loss at 0x105140700> (l_args_0_, l_args_1_) {'reduction': 'mean'}
output output output ((l1_loss,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_LPPool1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------------------- -------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function lp_pool1d <function lp_pool1d at 0x10513a430> (l_args_0_, 1.5, 2, None, False) {}
output output output ((lp_pool1d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_LPPool2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------------------- ----------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function lp_pool2d <function lp_pool2d at 0x10513a3a0> (l_args_0_, 2.0, 2, 2, False) {}
output output output ((lp_pool2d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_LSTMCell_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------- --------------------------------------------------------- ----------------------------------------------------------------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_0_ L_args_1_0_ () {}
placeholder l_args_1_1_ L_args_1_1_ () {}
call_method unsqueeze unsqueeze (l_args_0_, 0) {}
call_method unsqueeze_1 unsqueeze (l_args_1_0_, 0) {}
call_method unsqueeze_2 unsqueeze (l_args_1_1_, 0) {}
get_attr l__m___weight_ih L__m___weight_ih () {}
get_attr l__m___weight_hh L__m___weight_hh () {}
get_attr l__m___bias_ih L__m___bias_ih () {}
get_attr l__m___bias_hh L__m___bias_hh () {}
call_function lstm_cell <built-in method lstm_cell of type object at 0x103ccc7e8> (unsqueeze, (unsqueeze_1, unsqueeze_2), l__m___weight_ih, l__m___weight_hh, l__m___bias_ih, l__m___bias_hh) {}
call_function getitem <built-in function getitem> (lstm_cell, 0) {}
call_function getitem_1 <built-in function getitem> (lstm_cell, 1) {}
call_method squeeze squeeze (getitem, 0) {}
call_method squeeze_1 squeeze (getitem_1, 0) {}
output output output ((squeeze, squeeze_1),) {}
inline_call []
stats [('calls_captured', 8), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_LSTM_eval_mode_cpu_float32 (__main__.TestTempCPU) ...
unimplemented [('TorchDynamo purposely graph breaks on RNN, GRU, LSTMs', 1)]
expected failure
test_dynamo_inline_module_nn_LSTM_train_mode_cpu_float32 (__main__.TestTempCPU) ...
unimplemented [('TorchDynamo purposely graph breaks on RNN, GRU, LSTMs', 1)]
expected failure
test_dynamo_inline_module_nn_LayerNorm_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- ------------------------------------ ---------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function layer_norm <function layer_norm at 0x10513ef70> (l_args_0_, (5,), l__m___weight, l__m___bias, 0.001) {}
output output output ((layer_norm,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_LazyConv1d_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_LazyConv2d_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_LazyConv3d_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_LazyConvTranspose1d_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_LazyConvTranspose2d_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_LazyConvTranspose3d_cpu_float32 (__main__.TestTempCPU) ... skipped 'skipping lazy module'
test_dynamo_inline_module_nn_LeakyReLU_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ------------------------------------ ------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function leaky_relu <function leaky_relu at 0x10513e310> (l_args_0_, 0.01, False) {}
output output output ((leaky_relu,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Linear_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------------- -------------------------- --------------------------------------------- --------
placeholder l_kwargs_input_ L_kwargs_input_ () {}
get_attr l__m___weight L__m___weight () {}
get_attr l__m___bias L__m___bias () {}
call_function linear <built-in function linear> (l_kwargs_input_, l__m___weight, l__m___bias) {}
output output output ((linear,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_LocalResponseNorm_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------- --------------------------------------------- --------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function local_response_norm <function local_response_norm at 0x1051400d0> (l_args_0_, 3, 0.0001, 0.75, 1.0) {}
output output output ((local_response_norm,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_LogSigmoid_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ----------- ------------------------------- ----------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function log_sigmoid <built-in function log_sigmoid> (l_args_0_,) {}
output output output ((log_sigmoid,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_LogSoftmax_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ----------- ------------------------------------- ----------------- ------------------
placeholder l_args_0_ L_args_0_ () {}
call_function log_softmax <function log_softmax at 0x10513e790> (l_args_0_, 1) {'_stacklevel': 5}
output output output ((log_softmax,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_MaxPool1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ------------------------------------------------------ ----------------------- ---------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
call_function max_pool1d <function boolean_dispatch.<locals>.fn at 0x105136d30> (l_args_0_, 4, 4, 0, 1) {'ceil_mode': False, 'return_indices': False}
output output output ((max_pool1d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_MaxPool2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ------------------------------------------------------ -------------------------------------- ---------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
call_function max_pool2d <function boolean_dispatch.<locals>.fn at 0x105136ee0> (l_args_0_, (3, 3), (2, 2), (1, 1), 1) {'ceil_mode': False, 'return_indices': False}
output output output ((max_pool2d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_MaxPool3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ------------------------------------------------------ --------------------------------------- ---------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
call_function max_pool3d <function boolean_dispatch.<locals>.fn at 0x10513a0d0> (l_args_0_, (2, 2, 2), (2, 2, 2), 0, 1) {'ceil_mode': False, 'return_indices': False}
output output output ((max_pool3d,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Mish_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ------------------------------ ------------ ------------------
placeholder l_args_0_ L_args_0_ () {}
call_function mish <function mish at 0x10513ea60> (l_args_0_,) {'inplace': False}
output output output ((mish,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_MultiheadAttention_eval_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------------------- ------------------------------------------------------ -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ----------------------------------------------------------------------------------------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
placeholder l_args_2_ L_args_2_ () {}
call_function _canonical_mask <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'key_padding_mask', 'other_type': None, 'other_name': 'attn_mask', 'target_type': torch.float32}
call_function _canonical_mask_1 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'attn_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_function getattr_1 <built-in function getattr> (l_args_0_, 'is_nested') {}
call_function getattr_2 <built-in function getattr> (l_args_1_, 'is_nested') {}
call_function getattr_3 <built-in function getattr> (l_args_2_, 'is_nested') {}
get_attr l__m___in_proj_weight L__m___in_proj_weight () {}
get_attr l__m___in_proj_bias L__m___in_proj_bias () {}
get_attr l__m___bias_k L__m___bias_k () {}
get_attr l__m___bias_v L__m___bias_v () {}
get_attr l__m___out_proj_weight L__m___out_proj_weight () {}
get_attr l__m___out_proj_bias L__m___out_proj_bias () {}
call_function multi_head_attention_forward <function multi_head_attention_forward at 0x1051cf310> (l_args_0_, l_args_1_, l_args_2_, 3, 3, l__m___in_proj_weight, l__m___in_proj_bias, l__m___bias_k, l__m___bias_v, True, 0.0, l__m___out_proj_weight, l__m___out_proj_bias) {'training': False, 'key_padding_mask': None, 'need_weights': True, 'attn_mask': None, 'average_attn_weights': True, 'is_causal': False}
call_function getitem <built-in function getitem> (multi_head_attention_forward, 0) {}
call_function getitem_1 <built-in function getitem> (multi_head_attention_forward, 1) {}
output output output ((getitem, getitem_1),) {}
inline_call []
stats [('calls_captured', 8), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_MultiheadAttention_train_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------------------- ------------------------------------------------------ -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
placeholder l_args_2_ L_args_2_ () {}
call_function _canonical_mask <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'key_padding_mask', 'other_type': None, 'other_name': 'attn_mask', 'target_type': torch.float32}
call_function _canonical_mask_1 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'attn_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_function getattr_1 <built-in function getattr> (l_args_0_, 'is_nested') {}
call_function getattr_2 <built-in function getattr> (l_args_1_, 'is_nested') {}
call_function getattr_3 <built-in function getattr> (l_args_2_, 'is_nested') {}
get_attr l__m___in_proj_weight L__m___in_proj_weight () {}
get_attr l__m___in_proj_bias L__m___in_proj_bias () {}
get_attr l__m___bias_k L__m___bias_k () {}
get_attr l__m___bias_v L__m___bias_v () {}
get_attr l__m___out_proj_weight L__m___out_proj_weight () {}
get_attr l__m___out_proj_bias L__m___out_proj_bias () {}
call_function multi_head_attention_forward <function multi_head_attention_forward at 0x1051cf310> (l_args_0_, l_args_1_, l_args_2_, 3, 3, l__m___in_proj_weight, l__m___in_proj_bias, l__m___bias_k, l__m___bias_v, True, 0.0, l__m___out_proj_weight, l__m___out_proj_bias) {'training': True, 'key_padding_mask': None, 'need_weights': True, 'attn_mask': None, 'average_attn_weights': True, 'is_causal': False}
call_function getitem <built-in function getitem> (multi_head_attention_forward, 0) {}
call_function getitem_1 <built-in function getitem> (multi_head_attention_forward, 1) {}
output output output ((getitem, getitem_1),) {}
inline_call []
stats [('calls_captured', 8), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_NLLLoss_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ---------------------------------- ---------------------- -----------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
call_function nll_loss <function nll_loss at 0x1051401f0> (l_args_0_, l_args_1_) {'weight': None, 'ignore_index': -100, 'reduction': 'mean'}
output output output ((nll_loss,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_PReLU_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------- ----------------------------------------------------- -------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
get_attr l__m___weight L__m___weight () {}
call_function prelu <built-in method prelu of type object at 0x103ccc7e8> (l_args_0_, l__m___weight) {}
output output output ((prelu,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_RNNCell_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------------- ------------------------------------------------------------- -------------------------------------------------------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
call_method unsqueeze unsqueeze (l_args_0_, 0) {}
call_method unsqueeze_1 unsqueeze (l_args_1_, 0) {}
get_attr l__m___weight_ih L__m___weight_ih () {}
get_attr l__m___weight_hh L__m___weight_hh () {}
get_attr l__m___bias_ih L__m___bias_ih () {}
get_attr l__m___bias_hh L__m___bias_hh () {}
call_function rnn_tanh_cell <built-in method rnn_tanh_cell of type object at 0x103ccc7e8> (unsqueeze, unsqueeze_1, l__m___weight_ih, l__m___weight_hh, l__m___bias_ih, l__m___bias_hh) {}
call_method squeeze squeeze (rnn_tanh_cell, 0) {}
output output output ((squeeze,),) {}
inline_call []
stats [('calls_captured', 4), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_RNN_eval_mode_cpu_float32 (__main__.TestTempCPU) ...
unimplemented [('TorchDynamo purposely graph breaks on RNN, GRU, LSTMs', 1)]
expected failure
test_dynamo_inline_module_nn_RNN_train_mode_cpu_float32 (__main__.TestTempCPU) ...
unimplemented [('TorchDynamo purposely graph breaks on RNN, GRU, LSTMs', 1)]
expected failure
test_dynamo_inline_module_nn_ReLU6_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ---------------------------------- ---------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function hardtanh <function hardtanh at 0x10513e040> (l_args_0_, 0.0, 6.0, False) {}
output output output ((hardtanh,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ReLU_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ------------------------------ ------------ ------------------
placeholder l_args_0_ L_args_0_ () {}
call_function relu <function relu at 0x10513aee0> (l_args_0_,) {'inplace': False}
output output output ((relu,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ReflectionPad1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- ------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1), 'reflect') {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ReflectionPad2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- ------------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1, 1, 1), 'reflect') {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ReflectionPad3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- ------------------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1, 1, 1, 1, 1), 'reflect') {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ReplicationPad1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- -------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1), 'replicate') {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ReplicationPad2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- -------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1, 1, 1), 'replicate') {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ReplicationPad3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- -------------------------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1, 1, 1, 1, 1), 'replicate') {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_SELU_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ------------------------------ ------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function selu <function selu at 0x10513e1f0> (l_args_0_, False) {}
output output output ((selu,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_SiLU_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ------------------------------ ------------ ------------------
placeholder l_args_0_ L_args_0_ () {}
call_function silu <function silu at 0x10513e9d0> (l_args_0_,) {'inplace': False}
output output output ((silu,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Sigmoid_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ------------------------------------------------------- ------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function sigmoid <built-in method sigmoid of type object at 0x103ccc7e8> (l_args_0_,) {}
output output output ((sigmoid,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Softmax2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- --------------------------------- --------------- ------------------
placeholder l_args_0_ L_args_0_ () {}
call_function softmax <function softmax at 0x10513e670> (l_args_0_, -3) {'_stacklevel': 5}
output output output ((softmax,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Softmax_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- --------------------------------- -------------- ------------------
placeholder l_args_0_ L_args_0_ () {}
call_function softmax <function softmax at 0x10513e670> (l_args_0_, 1) {'_stacklevel': 5}
output output output ((softmax,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Softmin_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- --------------------------------- -------------- ------------------
placeholder l_args_0_ L_args_0_ () {}
call_function softmin <function softmin at 0x10513e5e0> (l_args_0_, 1) {'_stacklevel': 5}
output output output ((softmin,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Softplus_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ---------------------------- ------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function softplus <built-in function softplus> (l_args_0_, 1, 20) {}
output output output ((softplus,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Softshrink_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ------------------------------ ---------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function softshrink <built-in function softshrink> (l_args_0_, 0.5) {}
output output output ((softshrink,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Softsign_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ---------------------------------- -------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function softsign <function softsign at 0x10513e4c0> (l_args_0_,) {}
output output output ((softsign,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Tanh_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ---------------------------------------------------- ------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function tanh <built-in method tanh of type object at 0x103ccc7e8> (l_args_0_,) {}
output output output ((tanh,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Tanhshrink_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ------------------------------------ ---------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function tanhshrink <function tanhshrink at 0x10513e430> (l_args_0_,) {}
output output output ((tanhshrink,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_Threshold_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ---------- ------------------------------------ ---------------------------- --------
placeholder l_args_0_ L_args_0_ () {}
call_function _threshold <function _threshold at 0x10513ae50> (l_args_0_, 2.0, 1.0, False) {}
output output output ((_threshold,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_TransformerDecoderLayer_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------------------- ------------------------------------ ------------------------------------------------------------ ----------------------------------------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
call_module l__m___self_attn L__m___self_attn (l_args_0_, l_args_0_, l_args_0_) {'attn_mask': None, 'key_padding_mask': None, 'is_causal': False, 'need_weights': False}
call_function getitem <built-in function getitem> (l__m___self_attn, 0) {}
call_function dropout <function dropout at 0x10513aaf0> (getitem, 0.0, True, False) {}
call_function add <built-in function add> (l_args_0_, dropout) {}
get_attr l__m___norm1_weight L__m___norm1_weight () {}
get_attr l__m___norm1_bias L__m___norm1_bias () {}
call_function layer_norm <function layer_norm at 0x10513ef70> (add, (4,), l__m___norm1_weight, l__m___norm1_bias, 1e-05) {}
call_module l__m___multihead_attn L__m___multihead_attn (layer_norm, l_args_1_, l_args_1_) {'attn_mask': None, 'key_padding_mask': None, 'is_causal': False, 'need_weights': False}
call_function getitem_1 <built-in function getitem> (l__m___multihead_attn, 0) {}
call_function dropout_1 <function dropout at 0x10513aaf0> (getitem_1, 0.0, True, False) {}
call_function add_1 <built-in function add> (layer_norm, dropout_1) {}
get_attr l__m___norm2_weight L__m___norm2_weight () {}
get_attr l__m___norm2_bias L__m___norm2_bias () {}
call_function layer_norm_1 <function layer_norm at 0x10513ef70> (add_1, (4,), l__m___norm2_weight, l__m___norm2_bias, 1e-05) {}
get_attr l__m___linear1_weight L__m___linear1_weight () {}
get_attr l__m___linear1_bias L__m___linear1_bias () {}
call_function linear <built-in function linear> (layer_norm_1, l__m___linear1_weight, l__m___linear1_bias) {}
call_function relu <function relu at 0x10513aee0> (linear,) {}
call_function dropout_2 <function dropout at 0x10513aaf0> (relu, 0.0, True, False) {}
get_attr l__m___linear2_weight L__m___linear2_weight () {}
get_attr l__m___linear2_bias L__m___linear2_bias () {}
call_function linear_1 <built-in function linear> (dropout_2, l__m___linear2_weight, l__m___linear2_bias) {}
call_function dropout_3 <function dropout at 0x10513aaf0> (linear_1, 0.0, True, False) {}
call_function add_2 <built-in function add> (layer_norm_1, dropout_3) {}
get_attr l__m___norm3_weight L__m___norm3_weight () {}
get_attr l__m___norm3_bias L__m___norm3_bias () {}
call_function layer_norm_2 <function layer_norm at 0x10513ef70> (add_2, (4,), l__m___norm3_weight, l__m___norm3_bias, 1e-05) {}
output output output ((layer_norm_2,),) {}
inline_call [('comparison TensorVariable() <built-in function is_not> TensorVariable()', 2)]
stats [('calls_captured', 17), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_TransformerEncoderLayer_eval_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------------------- ----------------------------------------- ------------------------------------------------------------ ---------------------------------------------------------------------------------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
call_function _canonical_mask <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'src_mask', 'target_type': torch.float32}
call_function _canonical_mask_1 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_module l__m___self_attn L__m___self_attn (l_args_0_, l_args_0_, l_args_0_) {'attn_mask': None, 'key_padding_mask': None, 'need_weights': False, 'is_causal': False}
call_function getitem <built-in function getitem> (l__m___self_attn, 0) {}
call_function dropout <function dropout at 0x10513aaf0> (getitem, 0.0, False, False) {}
call_function add <built-in function add> (l_args_0_, dropout) {}
get_attr l__m___norm1_weight L__m___norm1_weight () {}
get_attr l__m___norm1_bias L__m___norm1_bias () {}
call_function layer_norm <function layer_norm at 0x10513ef70> (add, (4,), l__m___norm1_weight, l__m___norm1_bias, 1e-05) {}
get_attr l__m___linear1_weight L__m___linear1_weight () {}
get_attr l__m___linear1_bias L__m___linear1_bias () {}
call_function linear <built-in function linear> (layer_norm, l__m___linear1_weight, l__m___linear1_bias) {}
call_function relu <function relu at 0x10513aee0> (linear,) {}
call_function dropout_1 <function dropout at 0x10513aaf0> (relu, 0.0, False, False) {}
get_attr l__m___linear2_weight L__m___linear2_weight () {}
get_attr l__m___linear2_bias L__m___linear2_bias () {}
call_function linear_1 <built-in function linear> (dropout_1, l__m___linear2_weight, l__m___linear2_bias) {}
call_function dropout_2 <function dropout at 0x10513aaf0> (linear_1, 0.0, False, False) {}
call_function add_1 <built-in function add> (layer_norm, dropout_2) {}
get_attr l__m___norm2_weight L__m___norm2_weight () {}
get_attr l__m___norm2_bias L__m___norm2_bias () {}
call_function layer_norm_1 <function layer_norm at 0x10513ef70> (add_1, (4,), l__m___norm2_weight, l__m___norm2_bias, 1e-05) {}
output output output ((layer_norm_1,),) {}
inline_call [('comparison TensorVariable() <built-in function is_not> TensorVariable()', 1)]
stats [('calls_captured', 14), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_TransformerEncoderLayer_train_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------------------- ----------------------------------------- ------------------------------------------------------------ ---------------------------------------------------------------------------------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
call_function _canonical_mask <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'src_mask', 'target_type': torch.float32}
call_function _canonical_mask_1 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_module l__m___self_attn L__m___self_attn (l_args_0_, l_args_0_, l_args_0_) {'attn_mask': None, 'key_padding_mask': None, 'need_weights': False, 'is_causal': False}
call_function getitem <built-in function getitem> (l__m___self_attn, 0) {}
call_function dropout <function dropout at 0x10513aaf0> (getitem, 0.0, True, False) {}
call_function add <built-in function add> (l_args_0_, dropout) {}
get_attr l__m___norm1_weight L__m___norm1_weight () {}
get_attr l__m___norm1_bias L__m___norm1_bias () {}
call_function layer_norm <function layer_norm at 0x10513ef70> (add, (4,), l__m___norm1_weight, l__m___norm1_bias, 1e-05) {}
get_attr l__m___linear1_weight L__m___linear1_weight () {}
get_attr l__m___linear1_bias L__m___linear1_bias () {}
call_function linear <built-in function linear> (layer_norm, l__m___linear1_weight, l__m___linear1_bias) {}
call_function relu <function relu at 0x10513aee0> (linear,) {}
call_function dropout_1 <function dropout at 0x10513aaf0> (relu, 0.0, True, False) {}
get_attr l__m___linear2_weight L__m___linear2_weight () {}
get_attr l__m___linear2_bias L__m___linear2_bias () {}
call_function linear_1 <built-in function linear> (dropout_1, l__m___linear2_weight, l__m___linear2_bias) {}
call_function dropout_2 <function dropout at 0x10513aaf0> (linear_1, 0.0, True, False) {}
call_function add_1 <built-in function add> (layer_norm, dropout_2) {}
get_attr l__m___norm2_weight L__m___norm2_weight () {}
get_attr l__m___norm2_bias L__m___norm2_bias () {}
call_function layer_norm_1 <function layer_norm at 0x10513ef70> (add_1, (4,), l__m___norm2_weight, l__m___norm2_bias, 1e-05) {}
output output output ((layer_norm_1,),) {}
inline_call [('comparison TensorVariable() <built-in function is_not> TensorVariable()', 1)]
stats [('calls_captured', 14), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_TransformerEncoder_eval_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------------------ ----------------------------------------- ------------------------------------------------------------------------------ ---------------------------------------------------------------------------------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
call_function _canonical_mask <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'mask', 'target_type': torch.float32}
call_function _canonical_mask_1 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_function _canonical_mask_2 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'src_mask', 'target_type': torch.float32}
call_function _canonical_mask_3 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_module l__m___layers_0_self_attn L__m___layers_0_self_attn (l_args_0_, l_args_0_, l_args_0_) {'attn_mask': None, 'key_padding_mask': None, 'need_weights': False, 'is_causal': False}
call_function getitem <built-in function getitem> (l__m___layers_0_self_attn, 0) {}
call_function dropout <function dropout at 0x10513aaf0> (getitem, 0.0, False, False) {}
call_function add <built-in function add> (l_args_0_, dropout) {}
get_attr l__m___layers_0_norm1_weight L__m___layers_0_norm1_weight () {}
get_attr l__m___layers_0_norm1_bias L__m___layers_0_norm1_bias () {}
call_function layer_norm <function layer_norm at 0x10513ef70> (add, (4,), l__m___layers_0_norm1_weight, l__m___layers_0_norm1_bias, 1e-05) {}
get_attr l__m___layers_0_linear1_weight L__m___layers_0_linear1_weight () {}
get_attr l__m___layers_0_linear1_bias L__m___layers_0_linear1_bias () {}
call_function linear <built-in function linear> (layer_norm, l__m___layers_0_linear1_weight, l__m___layers_0_linear1_bias) {}
call_function relu <function relu at 0x10513aee0> (linear,) {}
call_function dropout_1 <function dropout at 0x10513aaf0> (relu, 0.0, False, False) {}
get_attr l__m___layers_0_linear2_weight L__m___layers_0_linear2_weight () {}
get_attr l__m___layers_0_linear2_bias L__m___layers_0_linear2_bias () {}
call_function linear_1 <built-in function linear> (dropout_1, l__m___layers_0_linear2_weight, l__m___layers_0_linear2_bias) {}
call_function dropout_2 <function dropout at 0x10513aaf0> (linear_1, 0.0, False, False) {}
call_function add_1 <built-in function add> (layer_norm, dropout_2) {}
get_attr l__m___layers_0_norm2_weight L__m___layers_0_norm2_weight () {}
get_attr l__m___layers_0_norm2_bias L__m___layers_0_norm2_bias () {}
call_function layer_norm_1 <function layer_norm at 0x10513ef70> (add_1, (4,), l__m___layers_0_norm2_weight, l__m___layers_0_norm2_bias, 1e-05) {}
call_function _canonical_mask_6 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'src_mask', 'target_type': torch.float32}
call_function _canonical_mask_7 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_module l__m___layers_1_self_attn L__m___layers_1_self_attn (layer_norm_1, layer_norm_1, layer_norm_1) {'attn_mask': None, 'key_padding_mask': None, 'need_weights': False, 'is_causal': False}
call_function getitem_1 <built-in function getitem> (l__m___layers_1_self_attn, 0) {}
call_function dropout_3 <function dropout at 0x10513aaf0> (getitem_1, 0.0, False, False) {}
call_function add_2 <built-in function add> (layer_norm_1, dropout_3) {}
get_attr l__m___layers_1_norm1_weight L__m___layers_1_norm1_weight () {}
get_attr l__m___layers_1_norm1_bias L__m___layers_1_norm1_bias () {}
call_function layer_norm_2 <function layer_norm at 0x10513ef70> (add_2, (4,), l__m___layers_1_norm1_weight, l__m___layers_1_norm1_bias, 1e-05) {}
get_attr l__m___layers_1_linear1_weight L__m___layers_1_linear1_weight () {}
get_attr l__m___layers_1_linear1_bias L__m___layers_1_linear1_bias () {}
call_function linear_2 <built-in function linear> (layer_norm_2, l__m___layers_1_linear1_weight, l__m___layers_1_linear1_bias) {}
call_function relu_1 <function relu at 0x10513aee0> (linear_2,) {}
call_function dropout_4 <function dropout at 0x10513aaf0> (relu_1, 0.0, False, False) {}
get_attr l__m___layers_1_linear2_weight L__m___layers_1_linear2_weight () {}
get_attr l__m___layers_1_linear2_bias L__m___layers_1_linear2_bias () {}
call_function linear_3 <built-in function linear> (dropout_4, l__m___layers_1_linear2_weight, l__m___layers_1_linear2_bias) {}
call_function dropout_5 <function dropout at 0x10513aaf0> (linear_3, 0.0, False, False) {}
call_function add_3 <built-in function add> (layer_norm_2, dropout_5) {}
get_attr l__m___layers_1_norm2_weight L__m___layers_1_norm2_weight () {}
get_attr l__m___layers_1_norm2_bias L__m___layers_1_norm2_bias () {}
call_function layer_norm_3 <function layer_norm at 0x10513ef70> (add_3, (4,), l__m___layers_1_norm2_weight, l__m___layers_1_norm2_bias, 1e-05) {}
output output output ((layer_norm_3,),) {}
inline_call [('comparison TensorVariable() <built-in function is_not> TensorVariable()', 2)]
stats [('calls_captured', 30), ('unique_graphs', 1)]
unexpected success
test_dynamo_inline_module_nn_TransformerEncoder_train_mode_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------------------ ----------------------------------------- ------------------------------------------------------------------------------ ---------------------------------------------------------------------------------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
call_function _canonical_mask <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'mask', 'target_type': torch.float32}
call_function _canonical_mask_1 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_function _canonical_mask_2 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'src_mask', 'target_type': torch.float32}
call_function _canonical_mask_3 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_module l__m___layers_0_self_attn L__m___layers_0_self_attn (l_args_0_, l_args_0_, l_args_0_) {'attn_mask': None, 'key_padding_mask': None, 'need_weights': False, 'is_causal': False}
call_function getitem <built-in function getitem> (l__m___layers_0_self_attn, 0) {}
call_function dropout <function dropout at 0x10513aaf0> (getitem, 0.0, True, False) {}
call_function add <built-in function add> (l_args_0_, dropout) {}
get_attr l__m___layers_0_norm1_weight L__m___layers_0_norm1_weight () {}
get_attr l__m___layers_0_norm1_bias L__m___layers_0_norm1_bias () {}
call_function layer_norm <function layer_norm at 0x10513ef70> (add, (4,), l__m___layers_0_norm1_weight, l__m___layers_0_norm1_bias, 1e-05) {}
get_attr l__m___layers_0_linear1_weight L__m___layers_0_linear1_weight () {}
get_attr l__m___layers_0_linear1_bias L__m___layers_0_linear1_bias () {}
call_function linear <built-in function linear> (layer_norm, l__m___layers_0_linear1_weight, l__m___layers_0_linear1_bias) {}
call_function relu <function relu at 0x10513aee0> (linear,) {}
call_function dropout_1 <function dropout at 0x10513aaf0> (relu, 0.0, True, False) {}
get_attr l__m___layers_0_linear2_weight L__m___layers_0_linear2_weight () {}
get_attr l__m___layers_0_linear2_bias L__m___layers_0_linear2_bias () {}
call_function linear_1 <built-in function linear> (dropout_1, l__m___layers_0_linear2_weight, l__m___layers_0_linear2_bias) {}
call_function dropout_2 <function dropout at 0x10513aaf0> (linear_1, 0.0, True, False) {}
call_function add_1 <built-in function add> (layer_norm, dropout_2) {}
get_attr l__m___layers_0_norm2_weight L__m___layers_0_norm2_weight () {}
get_attr l__m___layers_0_norm2_bias L__m___layers_0_norm2_bias () {}
call_function layer_norm_1 <function layer_norm at 0x10513ef70> (add_1, (4,), l__m___layers_0_norm2_weight, l__m___layers_0_norm2_bias, 1e-05) {}
call_function _canonical_mask_6 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'src_mask', 'target_type': torch.float32}
call_function _canonical_mask_7 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_module l__m___layers_1_self_attn L__m___layers_1_self_attn (layer_norm_1, layer_norm_1, layer_norm_1) {'attn_mask': None, 'key_padding_mask': None, 'need_weights': False, 'is_causal': False}
call_function getitem_1 <built-in function getitem> (l__m___layers_1_self_attn, 0) {}
call_function dropout_3 <function dropout at 0x10513aaf0> (getitem_1, 0.0, True, False) {}
call_function add_2 <built-in function add> (layer_norm_1, dropout_3) {}
get_attr l__m___layers_1_norm1_weight L__m___layers_1_norm1_weight () {}
get_attr l__m___layers_1_norm1_bias L__m___layers_1_norm1_bias () {}
call_function layer_norm_2 <function layer_norm at 0x10513ef70> (add_2, (4,), l__m___layers_1_norm1_weight, l__m___layers_1_norm1_bias, 1e-05) {}
get_attr l__m___layers_1_linear1_weight L__m___layers_1_linear1_weight () {}
get_attr l__m___layers_1_linear1_bias L__m___layers_1_linear1_bias () {}
call_function linear_2 <built-in function linear> (layer_norm_2, l__m___layers_1_linear1_weight, l__m___layers_1_linear1_bias) {}
call_function relu_1 <function relu at 0x10513aee0> (linear_2,) {}
call_function dropout_4 <function dropout at 0x10513aaf0> (relu_1, 0.0, True, False) {}
get_attr l__m___layers_1_linear2_weight L__m___layers_1_linear2_weight () {}
get_attr l__m___layers_1_linear2_bias L__m___layers_1_linear2_bias () {}
call_function linear_3 <built-in function linear> (dropout_4, l__m___layers_1_linear2_weight, l__m___layers_1_linear2_bias) {}
call_function dropout_5 <function dropout at 0x10513aaf0> (linear_3, 0.0, True, False) {}
call_function add_3 <built-in function add> (layer_norm_2, dropout_5) {}
get_attr l__m___layers_1_norm2_weight L__m___layers_1_norm2_weight () {}
get_attr l__m___layers_1_norm2_bias L__m___layers_1_norm2_bias () {}
call_function layer_norm_3 <function layer_norm at 0x10513ef70> (add_3, (4,), l__m___layers_1_norm2_weight, l__m___layers_1_norm2_bias, 1e-05) {}
output output output ((layer_norm_3,),) {}
inline_call [('comparison TensorVariable() <built-in function is_not> TensorVariable()', 2)]
stats [('calls_captured', 30), ('unique_graphs', 1)]
unexpected success
test_dynamo_inline_module_nn_Transformer_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- ------------------------------------------------------ ------------------------------------------------------ -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ----------------------------------------------------------------------------------------------------------------------------------------
placeholder l_args_0_ L_args_0_ () {}
placeholder l_args_1_ L_args_1_ () {}
call_function _canonical_mask <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'mask', 'target_type': torch.float32}
call_function _canonical_mask_1 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_function _canonical_mask_2 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_key_padding_mask', 'other_type': None, 'other_name': 'src_mask', 'target_type': torch.float32}
call_function _canonical_mask_3 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'src_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
get_attr l__m___encoder_layers_0_norm1_weight L__m___encoder_layers_0_norm1_weight () {}
get_attr l__m___encoder_layers_0_norm1_bias L__m___encoder_layers_0_norm1_bias () {}
call_function layer_norm <function layer_norm at 0x10513ef70> (l_args_0_, (4,), l__m___encoder_layers_0_norm1_weight, l__m___encoder_layers_0_norm1_bias, 1e-05) {}
call_function _canonical_mask_4 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'key_padding_mask', 'other_type': None, 'other_name': 'attn_mask', 'target_type': torch.float32}
call_function _canonical_mask_5 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'attn_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_function getattr_1 <built-in function getattr> (layer_norm, 'is_nested') {}
call_function getattr_2 <built-in function getattr> (layer_norm, 'is_nested') {}
call_function getattr_3 <built-in function getattr> (layer_norm, 'is_nested') {}
get_attr l__m___encoder_layers_0_self_attn_in_proj_weight L__m___encoder_layers_0_self_attn_in_proj_weight () {}
get_attr l__m___encoder_layers_0_self_attn_in_proj_bias L__m___encoder_layers_0_self_attn_in_proj_bias () {}
get_attr l__m___encoder_layers_0_self_attn_out_proj_weight L__m___encoder_layers_0_self_attn_out_proj_weight () {}
get_attr l__m___encoder_layers_0_self_attn_out_proj_bias L__m___encoder_layers_0_self_attn_out_proj_bias () {}
call_function multi_head_attention_forward <function multi_head_attention_forward at 0x1051cf310> (layer_norm, layer_norm, layer_norm, 4, 2, l__m___encoder_layers_0_self_attn_in_proj_weight, l__m___encoder_layers_0_self_attn_in_proj_bias, None, None, False, 0.0, l__m___encoder_layers_0_self_attn_out_proj_weight, l__m___encoder_layers_0_self_attn_out_proj_bias) {'training': True, 'key_padding_mask': None, 'need_weights': False, 'attn_mask': None, 'average_attn_weights': True, 'is_causal': False}
call_function getitem <built-in function getitem> (multi_head_attention_forward, 0) {}
call_function dropout <function dropout at 0x10513aaf0> (getitem, 0.0, True, False) {}
call_function add <built-in function add> (l_args_0_, dropout) {}
get_attr l__m___encoder_layers_0_norm2_weight L__m___encoder_layers_0_norm2_weight () {}
get_attr l__m___encoder_layers_0_norm2_bias L__m___encoder_layers_0_norm2_bias () {}
call_function layer_norm_1 <function layer_norm at 0x10513ef70> (add, (4,), l__m___encoder_layers_0_norm2_weight, l__m___encoder_layers_0_norm2_bias, 1e-05) {}
get_attr l__m___encoder_layers_0_linear1_weight L__m___encoder_layers_0_linear1_weight () {}
get_attr l__m___encoder_layers_0_linear1_bias L__m___encoder_layers_0_linear1_bias () {}
call_function linear <built-in function linear> (layer_norm_1, l__m___encoder_layers_0_linear1_weight, l__m___encoder_layers_0_linear1_bias) {}
call_function relu <function relu at 0x10513aee0> (linear,) {}
call_function dropout_1 <function dropout at 0x10513aaf0> (relu, 0.0, True, False) {}
get_attr l__m___encoder_layers_0_linear2_weight L__m___encoder_layers_0_linear2_weight () {}
get_attr l__m___encoder_layers_0_linear2_bias L__m___encoder_layers_0_linear2_bias () {}
call_function linear_1 <built-in function linear> (dropout_1, l__m___encoder_layers_0_linear2_weight, l__m___encoder_layers_0_linear2_bias) {}
call_function dropout_2 <function dropout at 0x10513aaf0> (linear_1, 0.0, True, False) {}
call_function add_1 <built-in function add> (add, dropout_2) {}
get_attr l__m___encoder_norm_weight L__m___encoder_norm_weight () {}
get_attr l__m___encoder_norm_bias L__m___encoder_norm_bias () {}
call_function layer_norm_2 <function layer_norm at 0x10513ef70> (add_1, (4,), l__m___encoder_norm_weight, l__m___encoder_norm_bias, 1e-05) {}
get_attr l__m___decoder_layers_0_norm1_weight L__m___decoder_layers_0_norm1_weight () {}
get_attr l__m___decoder_layers_0_norm1_bias L__m___decoder_layers_0_norm1_bias () {}
call_function layer_norm_3 <function layer_norm at 0x10513ef70> (l_args_1_, (4,), l__m___decoder_layers_0_norm1_weight, l__m___decoder_layers_0_norm1_bias, 1e-05) {}
call_function _canonical_mask_6 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'key_padding_mask', 'other_type': None, 'other_name': 'attn_mask', 'target_type': torch.float32}
call_function _canonical_mask_7 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'attn_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_function getattr_4 <built-in function getattr> (layer_norm_3, 'is_nested') {}
call_function getattr_5 <built-in function getattr> (layer_norm_3, 'is_nested') {}
call_function getattr_6 <built-in function getattr> (layer_norm_3, 'is_nested') {}
get_attr l__m___decoder_layers_0_self_attn_in_proj_weight L__m___decoder_layers_0_self_attn_in_proj_weight () {}
get_attr l__m___decoder_layers_0_self_attn_in_proj_bias L__m___decoder_layers_0_self_attn_in_proj_bias () {}
get_attr l__m___decoder_layers_0_self_attn_out_proj_weight L__m___decoder_layers_0_self_attn_out_proj_weight () {}
get_attr l__m___decoder_layers_0_self_attn_out_proj_bias L__m___decoder_layers_0_self_attn_out_proj_bias () {}
call_function multi_head_attention_forward_1 <function multi_head_attention_forward at 0x1051cf310> (layer_norm_3, layer_norm_3, layer_norm_3, 4, 2, l__m___decoder_layers_0_self_attn_in_proj_weight, l__m___decoder_layers_0_self_attn_in_proj_bias, None, None, False, 0.0, l__m___decoder_layers_0_self_attn_out_proj_weight, l__m___decoder_layers_0_self_attn_out_proj_bias) {'training': True, 'key_padding_mask': None, 'need_weights': False, 'attn_mask': None, 'average_attn_weights': True, 'is_causal': False}
call_function getitem_1 <built-in function getitem> (multi_head_attention_forward_1, 0) {}
call_function dropout_3 <function dropout at 0x10513aaf0> (getitem_1, 0.0, True, False) {}
call_function add_2 <built-in function add> (l_args_1_, dropout_3) {}
get_attr l__m___decoder_layers_0_norm2_weight L__m___decoder_layers_0_norm2_weight () {}
get_attr l__m___decoder_layers_0_norm2_bias L__m___decoder_layers_0_norm2_bias () {}
call_function layer_norm_4 <function layer_norm at 0x10513ef70> (add_2, (4,), l__m___decoder_layers_0_norm2_weight, l__m___decoder_layers_0_norm2_bias, 1e-05) {}
call_function _canonical_mask_8 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'key_padding_mask', 'other_type': None, 'other_name': 'attn_mask', 'target_type': torch.float32}
call_function _canonical_mask_9 <function _canonical_mask at 0x1051cf1f0> () {'mask': None, 'mask_name': 'attn_mask', 'other_type': None, 'other_name': '', 'target_type': torch.float32, 'check_other': False}
call_function getattr_7 <built-in function getattr> (layer_norm_4, 'is_nested') {}
call_function getattr_8 <built-in function getattr> (layer_norm_2, 'is_nested') {}
call_function getattr_9 <built-in function getattr> (layer_norm_2, 'is_nested') {}
get_attr l__m___decoder_layers_0_multihead_attn_in_proj_weight L__m___decoder_layers_0_multihead_attn_in_proj_weight () {}
get_attr l__m___decoder_layers_0_multihead_attn_in_proj_bias L__m___decoder_layers_0_multihead_attn_in_proj_bias () {}
get_attr l__m___decoder_layers_0_multihead_attn_out_proj_weight L__m___decoder_layers_0_multihead_attn_out_proj_weight () {}
get_attr l__m___decoder_layers_0_multihead_attn_out_proj_bias L__m___decoder_layers_0_multihead_attn_out_proj_bias () {}
call_function multi_head_attention_forward_2 <function multi_head_attention_forward at 0x1051cf310> (layer_norm_4, layer_norm_2, layer_norm_2, 4, 2, l__m___decoder_layers_0_multihead_attn_in_proj_weight, l__m___decoder_layers_0_multihead_attn_in_proj_bias, None, None, False, 0.0, l__m___decoder_layers_0_multihead_attn_out_proj_weight, l__m___decoder_layers_0_multihead_attn_out_proj_bias) {'training': True, 'key_padding_mask': None, 'need_weights': False, 'attn_mask': None, 'average_attn_weights': True, 'is_causal': False}
call_function getitem_2 <built-in function getitem> (multi_head_attention_forward_2, 0) {}
call_function dropout_4 <function dropout at 0x10513aaf0> (getitem_2, 0.0, True, False) {}
call_function add_3 <built-in function add> (add_2, dropout_4) {}
get_attr l__m___decoder_layers_0_norm3_weight L__m___decoder_layers_0_norm3_weight () {}
get_attr l__m___decoder_layers_0_norm3_bias L__m___decoder_layers_0_norm3_bias () {}
call_function layer_norm_5 <function layer_norm at 0x10513ef70> (add_3, (4,), l__m___decoder_layers_0_norm3_weight, l__m___decoder_layers_0_norm3_bias, 1e-05) {}
get_attr l__m___decoder_layers_0_linear1_weight L__m___decoder_layers_0_linear1_weight () {}
get_attr l__m___decoder_layers_0_linear1_bias L__m___decoder_layers_0_linear1_bias () {}
call_function linear_2 <built-in function linear> (layer_norm_5, l__m___decoder_layers_0_linear1_weight, l__m___decoder_layers_0_linear1_bias) {}
call_function relu_1 <function relu at 0x10513aee0> (linear_2,) {}
call_function dropout_5 <function dropout at 0x10513aaf0> (relu_1, 0.0, True, False) {}
get_attr l__m___decoder_layers_0_linear2_weight L__m___decoder_layers_0_linear2_weight () {}
get_attr l__m___decoder_layers_0_linear2_bias L__m___decoder_layers_0_linear2_bias () {}
call_function linear_3 <built-in function linear> (dropout_5, l__m___decoder_layers_0_linear2_weight, l__m___decoder_layers_0_linear2_bias) {}
call_function dropout_6 <function dropout at 0x10513aaf0> (linear_3, 0.0, True, False) {}
call_function add_4 <built-in function add> (add_3, dropout_6) {}
get_attr l__m___decoder_norm_weight L__m___decoder_norm_weight () {}
get_attr l__m___decoder_norm_bias L__m___decoder_norm_bias () {}
call_function layer_norm_6 <function layer_norm at 0x10513ef70> (add_4, (4,), l__m___decoder_norm_weight, l__m___decoder_norm_bias, 1e-05) {}
output output output ((layer_norm_6,),) {}
inline_call []
stats [('calls_captured', 50), ('unique_graphs', 1)]
unexpected success
test_dynamo_inline_module_nn_ZeroPad1d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- ------------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1), 'constant', 0.0) {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ZeroPad2d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- ------------------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1, 1, 1), 'constant', 0.0) {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
test_dynamo_inline_module_nn_ZeroPad3d_cpu_float32 (__main__.TestTempCPU) ...
opcode name target args kwargs
------------- --------- ----------------------- ------------------------------------------------ --------
placeholder l_args_0_ L_args_0_ () {}
call_function pad <built-in function pad> (l_args_0_, (1, 1, 1, 1, 1, 1), 'constant', 0.0) {}
output output output ((pad,),) {}
inline_call []
stats [('calls_captured', 1), ('unique_graphs', 1)]
ok
----------------------------------------------------------------------
Ran 105 tests in 1.530s
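
For reference, a minimal sketch of how tabular graph dumps like the ones above can be reproduced, assuming a recent PyTorch build where torch.compile and torch.fx.Graph.print_tabular are available (the module and input shape below are placeholders for illustration, not taken from the test suite):

    import torch
    import torch.nn as nn

    def print_graph_backend(gm: torch.fx.GraphModule, example_inputs):
        # Dynamo hands the captured graph to the backend; print_tabular() emits
        # the same opcode / name / target / args / kwargs columns shown above.
        gm.graph.print_tabular()
        return gm.forward  # run the captured graph unchanged

    m = nn.ZeroPad2d(1)                        # placeholder module for illustration
    opt_m = torch.compile(m, backend=print_graph_backend)
    opt_m(torch.randn(2, 3, 8, 8))             # triggers capture and prints the graph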