func.func @forward(%arg0: !torch.vtensor<[1,128],si64>) -> !torch.vtensor<[1,128,1],f32> {
  %int1 = torch.constant.int 1
  %int32 = torch.constant.int 32
  %int128 = torch.constant.int 128
  %float1.000000e00 = torch.constant.float 1.000000e+00
  %0 = torch.vtensor.literal(dense<0.000000e+00> : tensor<2xf32>) : !torch.vtensor<[2],f32>
  %1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2x32xf32>) : !torch.vtensor<[2,32],f32>
  %2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32xf32>) : !torch.vtensor<[32,32],f32>
  %3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x37xf32>) : !torch.vtensor<[32,37],f32>
  %4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<37x32xf32>) : !torch.vtensor<[37,32],f32>
  // ... (file truncated)
➜ SHARK git:(main) ✗ torch-mlir-opt -pass-pipeline='builtin.module(torch-backend-to-tosa-backend-pipeline)' /tmp/_lambda.mlir -mlir-print-ir-after-all -mlir-disable-threading --debug
Args: /home/chi/src/ubuntu20/shark/torch-mlir/build/bin/torch-mlir-opt -pass-pipeline=builtin.module(torch-backend-to-tosa-backend-pipeline) /tmp/_lambda.mlir -mlir-print-ir-after-all -mlir-disable-threading --debug
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementTypeInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemRefLayoutAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ElementsAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TypedAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolOpInterface)
func.func @torch.aten.gather(%arg0: !torch.vtensor<[12,128,512],f32>, %arg1: !torch.vtensor<[1,128,128],si64>) -> !torch.vtensor<[12,128,128],f32> {
  %int-1 = torch.constant.int -1
  %false = torch.constant.bool false
  %0 = torch.aten.gather %arg0, %int-1, %arg1, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[1,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32>
  return %0 : !torch.vtensor<[12,128,128],f32>
}
func.func @torch.aten.gather(%arg0: !torch.vtensor<[1,4,3],f32>, %arg1: !torch.vtensor<[1,4,2,3],si64>) -> !torch.vtensor<[1,4,2],f32> {
  %int-1 = torch.constant.int -1
  %false = torch.constant.bool false
  %0 = torch.aten.gather %arg0, %int-1, %arg1, %false : !torch.vtensor<[1,4,3],f32>, !torch.int, !torch.vtensor<[1,4,2,3],si64>, !torch.bool -> !torch.vtensor<[1,4,2],f32>
  return %0 : !torch.vtensor<[1,4,2],f32>
}
module {
  func.func @torch.aten.gather(%arg0: !torch.vtensor<[1,4,3],f32>, %arg1: !torch.vtensor<[1,4,2,3],i32>) -> !torch.vtensor<[1,4,2],f32> {
    %0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,4,3],f32> -> tensor<1x4x3xf32>
    // %0 = torch.tensor([[[ 1,  2,  3],
    //                     [ 4,  5,  6],
    //                     [ 7,  8,  9],
    //                     [10, 11, 12]]])  # shape 1*4*3
    %1 = torch_c.to_builtin_tensor %arg1 : !torch.vtensor<[1,4,2,3],i32> -> tensor<1x4x2x3xi32>
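Note that the index operand here is i32 with shape 1x4x2x3: one coordinate per input dimension stacked along the trailing axis (gather_nd style), rather than the rank-3 si64 index that torch.aten.gather itself takes. A rough NumPy sketch of that indexing scheme, with made-up index values, just to show how the shapes fit together (not the actual lowering code):

import numpy as np

inp = np.arange(1, 13, dtype=np.float32).reshape(1, 4, 3)   # the 1*4*3 example tensor above
# Hypothetical coordinate index: the last axis holds (d0, d1, d2) for each output element.
idx = np.zeros((1, 4, 2, 3), dtype=np.int32)
idx[..., 1] = np.arange(4).reshape(1, 4, 1)                  # dim-1 coordinate = row number
idx[..., 2] = np.array([0, 2])                                # dim-2 coordinate = column to pick

out = inp[idx[..., 0], idx[..., 1], idx[..., 2]]              # gather_nd-style lookup
print(out.shape)  # (1, 4, 2)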
value1 (built via tosa::getConstTensor):
  vec:   [0,0,0,0,0,0,0]
  shape: 1*4*2*1
  tensor([[
    [
      [0], [0]
    ],
    [
      [0], [0]
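The zero vector above (shape 1*4*2*1) looks like the dim-0 coordinate plane of that expanded index: the input's leading dimension is 1, so every gathered element gets coordinate 0 there. A hedged NumPy sketch of how such a coordinate index could be assembled from constant planes plus the original gather index (illustrative only, not the torch-mlir implementation):

import numpy as np

orig_idx = np.array([[[0, 2], [1, 0], [2, 2], [0, 1]]], dtype=np.int32)  # hypothetical 1x4x2 gather index
B, R, K = orig_idx.shape                                                  # 1, 4, 2

d0 = np.zeros((B, R, K, 1), dtype=np.int32)               # dim-0 coordinates: all zeros (the constant above)
d1 = np.broadcast_to(np.arange(R, dtype=np.int32).reshape(1, R, 1, 1), (B, R, K, 1))  # dim-1 coordinates
d2 = orig_idx.reshape(B, R, K, 1)                          # dim-2 coordinates: the user-supplied index

coord_idx = np.concatenate([d0, d1, d2], axis=-1)          # 1x4x2x3, matching the i32 operand above
print(coord_idx.shape)  # (1, 4, 2, 3)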
func.func @torch.aten.gather(%arg0: !torch.vtensor<[1,4,3],f32>, %arg1: !torch.vtensor<[1,4,2],si64>) -> !torch.vtensor<[1,4,2],f32> {
  %int-1 = torch.constant.int -1
  %false = torch.constant.bool false
  %0 = torch.aten.gather %arg0, %int-1, %arg1, %false : !torch.vtensor<[1,4,3],f32>, !torch.int, !torch.vtensor<[1,4,2],si64>, !torch.bool -> !torch.vtensor<[1,4,2],f32>
  return %0 : !torch.vtensor<[1,4,2],f32>
}
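For reference, the eager PyTorch semantics this test case encodes: the output takes the shape of the index, and each element selects along dim=-1 of the input. Using the 1*4*3 literal from the annotation above and a made-up index:

import torch

inp = torch.arange(1., 13.).reshape(1, 4, 3)            # [[[1,2,3],[4,5,6],[7,8,9],[10,11,12]]]
idx = torch.tensor([[[0, 2], [1, 0], [2, 2], [0, 1]]])   # hypothetical 1x4x2 int64 index
out = torch.gather(inp, -1, idx)
print(out.shape)  # torch.Size([1, 4, 2])
print(out)        # [[[ 1.,  3.], [ 5.,  4.], [ 9.,  9.], [10., 11.]]]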
func.func @torch.aten.gather(%arg0: !torch.vtensor<[12,128,512],f32>, %arg1: !torch.vtensor<[1,128,128],si64>) -> !torch.vtensor<[12,128,128],f32> {
  %int-1 = torch.constant.int -1
  %false = torch.constant.bool false
  %0 = torch.aten.gather %arg0, %int-1, %arg1, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[1,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32>
  return %0 : !torch.vtensor<[12,128,128],f32>
}
#loc = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: !torch.vtensor<[1,128],si64> loc(unknown)) -> !torch.vtensor<[1,2],f32> {
    %int0 = torch.constant.int 0 loc(#loc1)
    %int1 = torch.constant.int 1 loc(#loc2)
    %true = torch.constant.bool true loc(#loc3)
    %float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc3)
    %none = torch.constant.none loc(#loc)
    %int11 = torch.constant.int 11 loc(#loc4)
    %false = torch.constant.bool false loc(#loc5)
➜ deberta git:(main) ✗ torch-mlir-opt -pass-pipeline='builtin.module(torch-backend-to-tosa-backend-pipeline)' /tmp/_lambda.mlir --mlir-print-ir-after-failure -mlir-disable-threading
<eval_with_key>.2:8:54: error: failed to legalize operation 'torch.constant.int'
<eval_with_key>.2:8:54: note: see current operation: %1 = "torch.constant.int"() {value = 0 : i64} : () -> !torch.int
// -----// IR Dump After FinalizingBackendTypeConversion Failed (torch-finalizing-backend-type-conversion) //----- //
func.func @forward(%arg0: tensor<1x128xi64>) -> tensor<1x2xf32> {
  %0 = "tosa.const"() {value = dense<[[65536, 512, 1]]> : tensor<1x3xi32>} : () -> tensor<1x3xi32>
  %int0 = torch.constant.int 0
  %1 = "tosa.const"() {value = dense_resource<__elided__> : tensor<2x768xf32>} : () -> tensor<2x768xf32>
  %2 = "tosa.const"() {value = dense_resource<__elided__> : tensor<768x768xf32>} : () -> tensor<768x768xf32>
  %3 = "tosa.const"() {value = dense_resource<__elided__> : tensor<768xf32>} : () -> tensor<768xf32>