-
-
Save AmosLewis/21d951947609c60ce967dc2e6b6dd748 to your computer and use it in GitHub Desktop.
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "ElementwiseAtenWhereSelfModule"} {
  func.func @forward(%arg0: tensor<1x1x5x5xi1> loc(unknown), %arg1: tensor<1x12x5x5xf32> loc(unknown), %arg2: tensor<?xf32> loc(unknown)) -> tensor<1x12x5x5xf32> {
    %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32> loc(#loc1)
    return %0 : tensor<1x12x5x5xf32> loc(#loc0)
  } loc(#loc0)
} loc(#loc0)
#loc1 = loc("/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/test_suite/elementwise.py":150:15)
(mlir_venv) nod% python -m e2e_testing.main -f 'ElementwiseAtenWhereSelf' --config=tosa -v
Compiling ElementwiseAtenWhereSelfModule_basic...
FAIL - "ElementwiseAtenWhereSelfModule_basic"
Unexpected outcome summary:
****** Failed tests - 1 tests
FAIL - "ElementwiseAtenWhereSelfModule_basic"
Compilation error: Traceback (most recent call last):
File "/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/framework.py", line 290, in compile_and_run_test
compiled = config.compile(test.program_factory())
File "/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/configs/tosa_backend.py", line 35, in compile
return self.backend.compile(module)
File "/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/tosa_backends/linalg_on_tensors.py", line 57, in compile
run_pipeline_with_repro_report(
File "/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir/compiler_utils.py", line 73, in run_pipeline_with_repro_report
raise TorchMlirCompilerError(trimmed_message) from None
torch_mlir.compiler_utils.TorchMlirCompilerError: Lowering TOSA to Linalg-on-Tensors failed with the following diagnostics:
error: 'linalg.generic' op expected indexing_map #2 to have 4 dim(s) to match the number of loops
note: see current operation:
%2 = "linalg.generic"(%1, %arg1, %arg2, %0) ({
^bb0(%arg3: i1, %arg4: f32, %arg5: f32, %arg6: f32):
%3 = "arith.select"(%arg3, %arg4, %arg5) : (i1, f32, f32) -> f32
"linalg.yield"(%3) : (f32) -> ()
}) {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<() -> ()>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"], operand_segment_sizes = array<i32: 3, 1>} : (tensor<1x5x5xi1>, tensor<1x12x5x5xf32>, tensor, tensor<1x12x5x5xf32>) -> tensor<1x12x5x5xf32>
Error can be reproduced with:
$ torch-mlir-opt -pass-pipeline='func.func(tosa-to-tensor),func.func(tosa-to-linalg),func.func(tosa-to-arith)' /tmp/ElementwiseAtenWhereSelfModule.mlir
Add '-mlir-print-ir-after-all -mlir-disable-threading' to get the IR dump for debugging purpose.
Summary:
Failed: 1
(mlir_venv) nod% torch-mlir-opt -pass-pipeline='func.func(tosa-to-tensor),func.func(tosa-to-linalg),func.func(tosa-to-arith)' /tmp/ElementwiseAtenWhereSelfModule.mlir -mlir-print-ir-after-all -mlir-disable-threading
// -----// IR Dump After TosaToTensor (tosa-to-tensor) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/test_suite/elementwise.py:150:15: error: 'linalg.generic' op expected indexing_map #2 to have 4 dim(s) to match the number of loops
return torch.ops.aten.where(a, b, c)
^
/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/test_suite/elementwise.py:150:15: note: see current operation:
%2 = "linalg.generic"(%1, %arg1, %arg2, %0) ({
^bb0(%arg3: i1, %arg4: f32, %arg5: f32, %arg6: f32):
%3 = "arith.select"(%arg3, %arg4, %arg5) : (i1, f32, f32) -> f32
"linalg.yield"(%3) : (f32) -> ()
}) {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<() -> ()>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"], operand_segment_sizes = array<i32: 3, 1>} : (tensor<1x5x5xi1>, tensor<1x12x5x5xf32>, tensor, tensor<1x12x5x5xf32>) -> tensor<1x12x5x5xf32>
// -----// IR Dump After TosaToLinalg Failed (tosa-to-linalg) //----- //
"func.func"() ({
^bb0(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor):
%0 = "linalg.init_tensor"() {static_sizes = [1, 12, 5, 5]} : () -> tensor<1x12x5x5xf32>
%1 = "tensor.collapse_shape"(%arg0) {reassociation = [[0, 1], [2], [3]]} : (tensor<1x1x5x5xi1>) -> tensor<1x5x5xi1>
%2 = "linalg.generic"(%1, %arg1, %arg2, %0) ({
^bb0(%arg3: i1, %arg4: f32, %arg5: f32, %arg6: f32):
%3 = "arith.select"(%arg3, %arg4, %arg5) : (i1, f32, f32) -> f32
"linalg.yield"(%3) : (f32) -> ()
}) {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<() -> ()>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"], operand_segment_sizes = array<i32: 3, 1>} : (tensor<1x5x5xi1>, tensor<1x12x5x5xf32>, tensor, tensor<1x12x5x5xf32>) -> tensor<1x12x5x5xf32>
"func.return"(%2) : (tensor<1x12x5x5xf32>) -> ()
}) {function_type = (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor) -> tensor<1x12x5x5xf32>, sym_name = "forward"} : () -> ()
(mlir_venv) nod%
(mlir_venv) nod% torch-mlir-opt -pass-pipeline='func.func(tosa-to-tensor),func.func(tosa-to-linalg),func.func(tosa-to-arith)' /tmp/ElementwiseAtenWhereSelfModule.mlir -mlir-print-ir-after-all -mlir-disable-threading
// -----// IR Dump After TosaToTensor (tosa-to-tensor) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
// -----// IR Dump After TosaToLinalg (tosa-to-linalg) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor) -> tensor<1x12x5x5xf32> {
%0 = linalg.init_tensor [1, 12, 5, 5] : tensor<1x12x5x5xf32>
%1 = tensor.collapse_shape %arg0 [[0, 1], [2], [3]] : tensor<1x1x5x5xi1> into tensor<1x5x5xi1>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> ()>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%1, %arg1, %arg2 : tensor<1x5x5xi1>, tensor<1x12x5x5xf32>, tensor) outs(%0 : tensor<1x12x5x5xf32>) {
^bb0(%arg3: i1, %arg4: f32, %arg5: f32, %arg6: f32):
%3 = arith.select %arg3, %arg4, %arg5 : f32
linalg.yield %3 : f32
} -> tensor<1x12x5x5xf32>
return %2 : tensor<1x12x5x5xf32>
}
// -----// IR Dump After TosaToArith (tosa-to-arith) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor) -> tensor<1x12x5x5xf32> {
%0 = linalg.init_tensor [1, 12, 5, 5] : tensor<1x12x5x5xf32>
%1 = tensor.collapse_shape %arg0 [[0, 1], [2], [3]] : tensor<1x1x5x5xi1> into tensor<1x5x5xi1>
%2 = linalg.generic {indexing_maps = [affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>, affine_map<(d0, d1, d2, d3) -> ()>, affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%1, %arg1, %arg2 : tensor<1x5x5xi1>, tensor<1x12x5x5xf32>, tensor) outs(%0 : tensor<1x12x5x5xf32>) {
^bb0(%arg3: i1, %arg4: f32, %arg5: f32, %arg6: f32):
%3 = arith.select %arg3, %arg4, %arg5 : f32
linalg.yield %3 : f32
} -> tensor<1x12x5x5xf32>
return %2 : tensor<1x12x5x5xf32>
}
#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map2 = affine_map<(d0, d1, d2, d3) -> ()>
module attributes {torch.debug_module_name = "ElementwiseAtenWhereSelfModule"} {
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor) -> tensor<1x12x5x5xf32> {
%0 = linalg.init_tensor [1, 12, 5, 5] : tensor<1x12x5x5xf32>
%1 = tensor.collapse_shape %arg0 [[0, 1], [2], [3]] : tensor<1x1x5x5xi1> into tensor<1x5x5xi1>
%2 = linalg.generic {indexing_maps = [#map0, #map1, #map2, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%1, %arg1, %arg2 : tensor<1x5x5xi1>, tensor<1x12x5x5xf32>, tensor) outs(%0 : tensor<1x12x5x5xf32>) {
^bb0(%arg3: i1, %arg4: f32, %arg5: f32, %arg6: f32):
%3 = arith.select %arg3, %arg4, %arg5 : f32
linalg.yield %3 : f32
} -> tensor<1x12x5x5xf32>
return %2 : tensor<1x12x5x5xf32>
}
}
(mlir_venv) nod%
(mlir_venv) nod% torch-mlir-opt -pass-pipeline='func.func(tosa-to-linalg)' /tmp/ElementwiseAtenWhereSelfModule.mlir | externals/llvm-project/mlir/utils/generate-test-checks.py
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
// The script is designed to make adding checks to
// a test case fast, it is *not* designed to be authoritative
// about what constitutes a good test! The CHECK should be
// minimized and named to reflect the test intent.
// CHECK-LABEL: func.func @forward(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<1x1x5x5xi1>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<1x12x5x5xf32>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<f32>) -> tensor<1x12x5x5xf32> {
// CHECK: %[[VAL_3:.*]] = linalg.init_tensor [1, 12, 5, 5] : tensor<1x12x5x5xf32>
// CHECK: %[[VAL_4:.*]] = tensor.collapse_shape %[[VAL_0]] {{\[\[}}0, 1], [2], [3]] : tensor<1x1x5x5xi1> into tensor<1x5x5xi1>
// CHECK: %[[VAL_5:.*]] = linalg.generic {indexing_maps = [#map0, #map1, #map2, #map1], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[VAL_4]], %[[VAL_1]], %[[VAL_2]] : tensor<1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<f32>) outs(%[[VAL_3]] : tensor<1x12x5x5xf32>) {
// CHECK: ^bb0(%[[VAL_6:.*]]: i1, %[[VAL_7:.*]]: f32, %[[VAL_8:.*]]: f32, %[[VAL_9:.*]]: f32):
// CHECK: %[[VAL_10:.*]] = arith.select %[[VAL_6]], %[[VAL_7]], %[[VAL_8]] : f32
// CHECK: linalg.yield %[[VAL_10]] : f32
// CHECK: } -> tensor<1x12x5x5xf32>
// CHECK: return %[[VAL_11:.*]] : tensor<1x12x5x5xf32>
// CHECK: }
(mlir_venv) nod%
(mlir_venv) nod% torch-mlir-opt -pass-pipeline='torch-backend-to-tosa-backend-pipeline' /tmp/ElementwiseAtenWhereSelfModule.mlir -mlir-print-ir-after-all -mlir-pretty-debuginfo -mlir-disable-threading
// -----// IR Dump After ConvertTorchToTosa (convert-torch-to-tosa) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor<?xf32>) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
// -----// IR Dump After TosaMakeBroadcastable (tosa-make-broadcastable) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor<?xf32>) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor<?xf32>) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
// -----// IR Dump After CSE (cse) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor<?xf32>) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
// -----// IR Dump After FuncBackendTypeConversion (torch-func-backend-type-conversion) //----- //
module attributes {torch.debug_module_name = "ElementwiseAtenWhereSelfModule"} {
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor<?xf32>) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
}
// -----// IR Dump After Canonicalizer (canonicalize) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor<?xf32>) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
// -----// IR Dump After FinalizingBackendTypeConversion (torch-finalizing-backend-type-conversion) //----- //
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor<?xf32>) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
// -----// IR Dump After VerifyTosaBackendContract (torch-verify-tosa-backend-contract) //----- //
module attributes {torch.debug_module_name = "ElementwiseAtenWhereSelfModule"} {
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor<?xf32>) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
}
module attributes {torch.debug_module_name = "ElementwiseAtenWhereSelfModule"} {
func.func @forward(%arg0: tensor<1x1x5x5xi1>, %arg1: tensor<1x12x5x5xf32>, %arg2: tensor<?xf32>) -> tensor<1x12x5x5xf32> {
%0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32>
return %0 : tensor<1x12x5x5xf32>
}
}
(mlir_venv) nod%