Created February 4, 2025 02:37
Generated code being checked in backends/arm/test/models/test_conformer.py, test case test_conformer_tosa_MI.
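For context, a dump like the ones below can be produced by exporting the torchaudio Conformer used by the test and lowering it to the ExecuTorch edge dialect. The sketch that follows is an illustration only: the model configuration, the input sizes, and the omission of the Arm/TOSA partitioning step (which is what introduces the lowered_module_* attributes and executorch_call_delegate nodes in the dumps) are assumptions, not copied from test_conformer.py.

import torch
import torchaudio
from executorch.exir import to_edge

# Hypothetical Conformer configuration; the real test's sizes may differ.
model = torchaudio.models.Conformer(
    input_dim=80,
    num_heads=4,
    ffn_dim=128,
    num_layers=2,
    depthwise_conv_kernel_size=31,
).eval()

# The dumps assert max(lengths) == 97, so use a sequence length of 97 here.
lengths = torch.full((1,), 97, dtype=torch.int32)
inputs = (torch.randn(1, 97, 80), lengths)

# Export to an ExportedProgram, then convert to the edge dialect.
exported = torch.export.export(model, inputs)
edge = to_edge(exported)

# Print the edge-dialect graph; the checked-in code additionally delegates
# subgraphs to the Arm (TOSA) backend before this point.
edge.exported_program().graph_module.print_readable()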
new code
def forward(self, b_conformer_layers_0_conv_module_sequential_3_num_batches_tracked, b_conformer_layers_1_conv_module_sequential_3_num_batches_tracked, input, lengths):
    input_1 = input
    aten_arange_start_step = executorch_exir_dialects_edge__ops_aten_arange_start_step(0, 97, dtype = torch.int32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
    scalar_tensor = torch.ops.aten.scalar_tensor.default(-inf, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
    scalar_tensor_1 = torch.ops.aten.scalar_tensor.default(-inf, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
    lowered_module_0 = self.lowered_module_0
    lowered_module_1 = self.lowered_module_1
    lowered_module_2 = self.lowered_module_2
    lowered_module_3 = self.lowered_module_3
    aten_max_default = executorch_exir_dialects_edge__ops_aten_max_default(lengths)
    lowered_module_4 = self.lowered_module_4
    executorch_call_delegate_4 = torch.ops.higher_order.executorch_call_delegate(lowered_module_4, input_1, lengths, aten_arange_start_step); lowered_module_4 = input_1 = aten_arange_start_step = None
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(aten_max_default); aten_max_default = None
    eq_1 = _local_scalar_dense == 97
    _assert_scalar_9 = torch.ops.aten._assert_scalar.default(eq_1, "Runtime assertion failed for expression Eq(u0, 97) on node 'eq'"); eq_1 = _assert_scalar_9 = None
    le_3 = _local_scalar_dense <= 97
    _assert_scalar_8 = torch.ops.aten._assert_scalar.default(le_3, "Runtime assertion failed for expression u0 <= 97 on node 'le'"); le_3 = _assert_scalar_8 = None
    ge_3 = _local_scalar_dense >= 97
    _assert_scalar_7 = torch.ops.aten._assert_scalar.default(ge_3, "Runtime assertion failed for expression u0 >= 97 on node 'ge_1'"); ge_3 = _assert_scalar_7 = None
    eq = _local_scalar_dense == 97
    _assert_scalar_6 = torch.ops.aten._assert_scalar.default(eq, "Runtime assertion failed for expression Eq(u0, 97) on node 'eq_1'"); eq = _assert_scalar_6 = None
    le_2 = _local_scalar_dense <= 97
    _assert_scalar_5 = torch.ops.aten._assert_scalar.default(le_2, "Runtime assertion failed for expression u0 <= 97 on node 'le_1'"); le_2 = _assert_scalar_5 = None
    ge_2 = _local_scalar_dense >= 97
    _assert_scalar_4 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u0 >= 97 on node 'ge_2'"); ge_2 = _assert_scalar_4 = None
    le_1 = _local_scalar_dense <= 97
    _assert_scalar_3 = torch.ops.aten._assert_scalar.default(le_1, "Runtime assertion failed for expression u4 <= 97 on node 'le_2'"); le_1 = _assert_scalar_3 = None
    ge_1 = _local_scalar_dense >= 97
    _assert_scalar_2 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u4 >= 97 on node 'ge_3'"); ge_1 = _assert_scalar_2 = None
    le = _local_scalar_dense <= 97
    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(le, "Runtime assertion failed for expression u4 <= 97 on node 'le_3'"); le = _assert_scalar_1 = None
    ge = _local_scalar_dense >= 97
    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u4 >= 97 on node 'ge_4'"); ge = _assert_scalar = None
    sym_constrain_range_for_size = torch.ops.aten.sym_constrain_range_for_size.default(_local_scalar_dense); _local_scalar_dense = sym_constrain_range_for_size = None
    getitem_21 = executorch_call_delegate_4[0]
    getitem_22 = executorch_call_delegate_4[1]
    getitem_23 = executorch_call_delegate_4[2]
    getitem_24 = executorch_call_delegate_4[3]
    getitem_25 = executorch_call_delegate_4[4]; executorch_call_delegate_4 = None
    aten_full_like_default = executorch_exir_dialects_edge__ops_aten_full_like_default(getitem_21, 0, dtype = torch.float32, pin_memory = False, memory_format = torch.preserve_format)
    aten_full_like_default_2 = executorch_exir_dialects_edge__ops_aten_full_like_default(getitem_21, 0, dtype = torch.float32, pin_memory = False, memory_format = torch.preserve_format)
    aten_mul_scalar = executorch_exir_dialects_edge__ops_aten_mul_Scalar(getitem_24, 0.7071067811865476); getitem_24 = None
    aten_mul_scalar_1 = executorch_exir_dialects_edge__ops_aten_mul_Scalar(getitem_25, 0.7071067811865476); getitem_25 = None
    aten_where_self = executorch_exir_dialects_edge__ops_aten_where_self(getitem_21, scalar_tensor, aten_full_like_default); scalar_tensor = aten_full_like_default = None
    aten_where_self_2 = executorch_exir_dialects_edge__ops_aten_where_self(getitem_21, scalar_tensor_1, aten_full_like_default_2); getitem_21 = scalar_tensor_1 = aten_full_like_default_2 = None
    executorch_call_delegate_2 = torch.ops.higher_order.executorch_call_delegate(lowered_module_2, aten_where_self, aten_mul_scalar, aten_mul_scalar_1); lowered_module_2 = aten_where_self = aten_mul_scalar = aten_mul_scalar_1 = None
    getitem_15 = executorch_call_delegate_2[0]
    getitem_16 = executorch_call_delegate_2[1]; executorch_call_delegate_2 = None
    aten_eq_scalar = executorch_exir_dialects_edge__ops_aten_eq_Scalar(getitem_15, -inf); getitem_15 = None
    aten_full_like_default_1 = executorch_exir_dialects_edge__ops_aten_full_like_default(getitem_16, 0, pin_memory = False, memory_format = torch.preserve_format)
    aten_logical_not_default = executorch_exir_dialects_edge__ops_aten_logical_not_default(aten_eq_scalar); aten_eq_scalar = None
    aten_any_dim = executorch_exir_dialects_edge__ops_aten_any_dim(aten_logical_not_default, -1, True); aten_logical_not_default = None
    aten_logical_not_default_1 = executorch_exir_dialects_edge__ops_aten_logical_not_default(aten_any_dim); aten_any_dim = None
    aten_where_self_1 = executorch_exir_dialects_edge__ops_aten_where_self(aten_logical_not_default_1, aten_full_like_default_1, getitem_16); aten_logical_not_default_1 = aten_full_like_default_1 = getitem_16 = None
    executorch_call_delegate_3 = torch.ops.higher_order.executorch_call_delegate(lowered_module_3, getitem_23, aten_where_self_1, getitem_22); lowered_module_3 = getitem_23 = aten_where_self_1 = getitem_22 = None
    getitem_17 = executorch_call_delegate_3[0]
    getitem_18 = executorch_call_delegate_3[1]
    getitem_19 = executorch_call_delegate_3[2]
    getitem_20 = executorch_call_delegate_3[3]; executorch_call_delegate_3 = None
    aten_mul_scalar_2 = executorch_exir_dialects_edge__ops_aten_mul_Scalar(getitem_19, 0.7071067811865476); getitem_19 = None
    aten_mul_scalar_3 = executorch_exir_dialects_edge__ops_aten_mul_Scalar(getitem_20, 0.7071067811865476); getitem_20 = None
    executorch_call_delegate = torch.ops.higher_order.executorch_call_delegate(lowered_module_0, aten_where_self_2, aten_mul_scalar_2, aten_mul_scalar_3); lowered_module_0 = aten_where_self_2 = aten_mul_scalar_2 = aten_mul_scalar_3 = None
    getitem_12 = executorch_call_delegate[0]
    getitem_13 = executorch_call_delegate[1]; executorch_call_delegate = None
    aten_eq_scalar_1 = executorch_exir_dialects_edge__ops_aten_eq_Scalar(getitem_12, -inf); getitem_12 = None
    aten_full_like_default_3 = executorch_exir_dialects_edge__ops_aten_full_like_default(getitem_13, 0, pin_memory = False, memory_format = torch.preserve_format)
    aten_logical_not_default_2 = executorch_exir_dialects_edge__ops_aten_logical_not_default(aten_eq_scalar_1); aten_eq_scalar_1 = None
    aten_any_dim_1 = executorch_exir_dialects_edge__ops_aten_any_dim(aten_logical_not_default_2, -1, True); aten_logical_not_default_2 = None
    aten_logical_not_default_3 = executorch_exir_dialects_edge__ops_aten_logical_not_default(aten_any_dim_1); aten_any_dim_1 = None
    aten_where_self_3 = executorch_exir_dialects_edge__ops_aten_where_self(aten_logical_not_default_3, aten_full_like_default_3, getitem_13); aten_logical_not_default_3 = aten_full_like_default_3 = getitem_13 = None
    executorch_call_delegate_1 = torch.ops.higher_order.executorch_call_delegate(lowered_module_1, getitem_18, aten_where_self_3, getitem_17); lowered_module_1 = getitem_18 = aten_where_self_3 = getitem_17 = None
    getitem_14 = executorch_call_delegate_1[0]; executorch_call_delegate_1 = None
    return (getitem_14, lengths)
old code
def forward(self, b_conformer_layers_0_conv_module_sequential_3_num_batches_tracked, b_conformer_layers_1_conv_module_sequential_3_num_batches_tracked, input, lengths):
    input_1 = input
    aten_arange_start_step = executorch_exir_dialects_edge__ops_aten_arange_start_step(0, 97, dtype = torch.int32, layout = torch.strided, device = device(type='cpu'), pin_memory = False)
    scalar_tensor = torch.ops.aten.scalar_tensor.default(-inf, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
    scalar_tensor_1 = torch.ops.aten.scalar_tensor.default(-inf, dtype = torch.float32, layout = torch.strided, device = device(type='cpu'))
    lowered_module_0 = self.lowered_module_0
    lowered_module_1 = self.lowered_module_1
    lowered_module_2 = self.lowered_module_2
    lowered_module_3 = self.lowered_module_3
    aten_max_default = executorch_exir_dialects_edge__ops_aten_max_default(lengths)
    lowered_module_4 = self.lowered_module_4
    executorch_call_delegate_4 = torch.ops.higher_order.executorch_call_delegate(lowered_module_4, input_1, lengths, aten_arange_start_step); lowered_module_4 = input_1 = aten_arange_start_step = None
    _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(aten_max_default); aten_max_default = None
    eq_3 = _local_scalar_dense == 97
    _assert_scalar_11 = torch.ops.aten._assert_scalar.default(eq_3, "Runtime assertion failed for expression Eq(u0, 97) on node 'eq'"); eq_3 = _assert_scalar_11 = None
    le_3 = _local_scalar_dense <= 97
    _assert_scalar_10 = torch.ops.aten._assert_scalar.default(le_3, "Runtime assertion failed for expression u0 <= 97 on node 'le'"); le_3 = _assert_scalar_10 = None
    ge_3 = _local_scalar_dense >= 97
    _assert_scalar_9 = torch.ops.aten._assert_scalar.default(ge_3, "Runtime assertion failed for expression u0 >= 97 on node 'ge_1'"); ge_3 = _assert_scalar_9 = None
    eq_2 = _local_scalar_dense == 97
    _assert_scalar_8 = torch.ops.aten._assert_scalar.default(eq_2, "Runtime assertion failed for expression Eq(u0, 97) on node 'eq_1'"); eq_2 = _assert_scalar_8 = None
    le_2 = _local_scalar_dense <= 97
    _assert_scalar_7 = torch.ops.aten._assert_scalar.default(le_2, "Runtime assertion failed for expression u0 <= 97 on node 'le_1'"); le_2 = _assert_scalar_7 = None
    ge_2 = _local_scalar_dense >= 97
    _assert_scalar_6 = torch.ops.aten._assert_scalar.default(ge_2, "Runtime assertion failed for expression u0 >= 97 on node 'ge_2'"); ge_2 = _assert_scalar_6 = None
    eq_1 = _local_scalar_dense == 97
    _assert_scalar_5 = torch.ops.aten._assert_scalar.default(eq_1, "Runtime assertion failed for expression Eq(u0, 97) on node 'eq_4'"); eq_1 = _assert_scalar_5 = None
    le_1 = _local_scalar_dense <= 97
    _assert_scalar_4 = torch.ops.aten._assert_scalar.default(le_1, "Runtime assertion failed for expression u0 <= 97 on node 'le_2'"); le_1 = _assert_scalar_4 = None
    ge_1 = _local_scalar_dense >= 97
    _assert_scalar_3 = torch.ops.aten._assert_scalar.default(ge_1, "Runtime assertion failed for expression u0 >= 97 on node 'ge_3'"); ge_1 = _assert_scalar_3 = None
    eq = _local_scalar_dense == 97
    _assert_scalar_2 = torch.ops.aten._assert_scalar.default(eq, "Runtime assertion failed for expression Eq(u0, 97) on node 'eq_5'"); eq = _assert_scalar_2 = None
    le = _local_scalar_dense <= 97
    _assert_scalar_1 = torch.ops.aten._assert_scalar.default(le, "Runtime assertion failed for expression u0 <= 97 on node 'le_3'"); le = _assert_scalar_1 = None
    ge = _local_scalar_dense >= 97
    _assert_scalar = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u0 >= 97 on node 'ge_4'"); ge = _assert_scalar = None
    sym_constrain_range_for_size = torch.ops.aten.sym_constrain_range_for_size.default(_local_scalar_dense); _local_scalar_dense = sym_constrain_range_for_size = None
    getitem_21 = executorch_call_delegate_4[0]
    getitem_22 = executorch_call_delegate_4[1]
    getitem_23 = executorch_call_delegate_4[2]
    getitem_24 = executorch_call_delegate_4[3]
    getitem_25 = executorch_call_delegate_4[4]; executorch_call_delegate_4 = None
    aten_full_like_default = executorch_exir_dialects_edge__ops_aten_full_like_default(getitem_21, 0, dtype = torch.float32, pin_memory = False, memory_format = torch.preserve_format)
    aten_full_like_default_2 = executorch_exir_dialects_edge__ops_aten_full_like_default(getitem_21, 0, dtype = torch.float32, pin_memory = False, memory_format = torch.preserve_format)
    aten_mul_scalar = executorch_exir_dialects_edge__ops_aten_mul_Scalar(getitem_24, 0.7071067811865476); getitem_24 = None
    aten_mul_scalar_1 = executorch_exir_dialects_edge__ops_aten_mul_Scalar(getitem_25, 0.7071067811865476); getitem_25 = None
    aten_where_self = executorch_exir_dialects_edge__ops_aten_where_self(getitem_21, scalar_tensor, aten_full_like_default); scalar_tensor = aten_full_like_default = None
    aten_where_self_2 = executorch_exir_dialects_edge__ops_aten_where_self(getitem_21, scalar_tensor_1, aten_full_like_default_2); getitem_21 = scalar_tensor_1 = aten_full_like_default_2 = None
    executorch_call_delegate_2 = torch.ops.higher_order.executorch_call_delegate(lowered_module_2, aten_where_self, aten_mul_scalar, aten_mul_scalar_1); lowered_module_2 = aten_where_self = aten_mul_scalar = aten_mul_scalar_1 = None
    getitem_15 = executorch_call_delegate_2[0]
    getitem_16 = executorch_call_delegate_2[1]; executorch_call_delegate_2 = None
    aten_eq_scalar = executorch_exir_dialects_edge__ops_aten_eq_Scalar(getitem_15, -inf); getitem_15 = None
    aten_full_like_default_1 = executorch_exir_dialects_edge__ops_aten_full_like_default(getitem_16, 0, pin_memory = False, memory_format = torch.preserve_format)
    aten_logical_not_default = executorch_exir_dialects_edge__ops_aten_logical_not_default(aten_eq_scalar); aten_eq_scalar = None
    aten_any_dim = executorch_exir_dialects_edge__ops_aten_any_dim(aten_logical_not_default, -1, True); aten_logical_not_default = None
    aten_logical_not_default_1 = executorch_exir_dialects_edge__ops_aten_logical_not_default(aten_any_dim); aten_any_dim = None
    aten_where_self_1 = executorch_exir_dialects_edge__ops_aten_where_self(aten_logical_not_default_1, aten_full_like_default_1, getitem_16); aten_logical_not_default_1 = aten_full_like_default_1 = getitem_16 = None
    executorch_call_delegate_3 = torch.ops.higher_order.executorch_call_delegate(lowered_module_3, getitem_23, aten_where_self_1, getitem_22); lowered_module_3 = getitem_23 = aten_where_self_1 = getitem_22 = None
    getitem_17 = executorch_call_delegate_3[0]
    getitem_18 = executorch_call_delegate_3[1]
    getitem_19 = executorch_call_delegate_3[2]
    getitem_20 = executorch_call_delegate_3[3]; executorch_call_delegate_3 = None
    aten_mul_scalar_2 = executorch_exir_dialects_edge__ops_aten_mul_Scalar(getitem_19, 0.7071067811865476); getitem_19 = None
    aten_mul_scalar_3 = executorch_exir_dialects_edge__ops_aten_mul_Scalar(getitem_20, 0.7071067811865476); getitem_20 = None
    executorch_call_delegate = torch.ops.higher_order.executorch_call_delegate(lowered_module_0, aten_where_self_2, aten_mul_scalar_2, aten_mul_scalar_3); lowered_module_0 = aten_where_self_2 = aten_mul_scalar_2 = aten_mul_scalar_3 = None
    getitem_12 = executorch_call_delegate[0]
    getitem_13 = executorch_call_delegate[1]; executorch_call_delegate = None
    aten_eq_scalar_1 = executorch_exir_dialects_edge__ops_aten_eq_Scalar(getitem_12, -inf); getitem_12 = None
    aten_full_like_default_3 = executorch_exir_dialects_edge__ops_aten_full_like_default(getitem_13, 0, pin_memory = False, memory_format = torch.preserve_format)
    aten_logical_not_default_2 = executorch_exir_dialects_edge__ops_aten_logical_not_default(aten_eq_scalar_1); aten_eq_scalar_1 = None
    aten_any_dim_1 = executorch_exir_dialects_edge__ops_aten_any_dim(aten_logical_not_default_2, -1, True); aten_logical_not_default_2 = None
    aten_logical_not_default_3 = executorch_exir_dialects_edge__ops_aten_logical_not_default(aten_any_dim_1); aten_any_dim_1 = None
    aten_where_self_3 = executorch_exir_dialects_edge__ops_aten_where_self(aten_logical_not_default_3, aten_full_like_default_3, getitem_13); aten_logical_not_default_3 = aten_full_like_default_3 = getitem_13 = None
    executorch_call_delegate_1 = torch.ops.higher_order.executorch_call_delegate(lowered_module_1, getitem_18, aten_where_self_3, getitem_17); lowered_module_1 = getitem_18 = aten_where_self_3 = getitem_17 = None
    getitem_14 = executorch_call_delegate_1[0]; executorch_call_delegate_1 = None
    return (getitem_14, lengths)