➜ torch-mlir git:(int64_max) ✗ torch-mlir-opt -pass-pipeline='builtin.module(torchscript-module-to-torch-backend-pipeline{backend-legal-ops=torch.aten.flatten.using_ints,torch.aten.native_layer_norm,torch.aten.linear})' ./t5small/test_torchscript.mlir -mlir-print-ir-after-failure -mlir-disable-threading
./t5small/test_torchscript.mlir:13:12: error: unsupported by backend contract: tensor with unknown rank
%134 = torch.aten.new_zeros %arg2, %133, %int4, %int0, %cpu, %false : !torch.tensor, !torch.list<int>, !torch.int, !torch.int, !torch.Device, !torch.bool -> !torch.tensor
       ^
./t5small/test_torchscript.mlir:13:12: note: see current operation: %11 = "torch.tensor_static_info_cast"(%10) : (!torch.vtensor<[1,4],si64>) -> !torch.vtensor<*,si64>
./t5small/test_torchscript.mlir:13:12: note: this is likely due to a missing transfer function in abstract_interp_lib_gen.py
// -----// IR Dump After LowerToBackendContract Failed (torch-lower-to-backend-contract) //----- //
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: !torch.vtensor<[1,15],si64>, %arg1: !torch.vtensor<[1,4],si64>) -> !torch.vtensor<*,si64> {
    %int1 = torch.constant.int 1
    %int0 = torch.constant.int 0
    %false = torch.constant.bool false
    %int4 = torch.constant.int 4
    %none = torch.constant.none
    %int-1 = torch.constant.int -1
    %int-100 = torch.constant.int -100
    %int9223372036854775807 = torch.constant.int 9223372036854775807
    %cpu = torch.constant.device "cpu"
    %0 = torch.prim.ListConstruct %int1, %int4 : (!torch.int, !torch.int) -> !torch.list<int>
    %1 = torch.aten.zeros %0, %int4, %int0, %cpu, %false : !torch.list<int>, !torch.int, !torch.int, !torch.Device, !torch.bool -> !torch.vtensor<[1,4],si64>
    %2 = torch.tensor_static_info_cast %1 : !torch.vtensor<[1,4],si64> to !torch.vtensor<*,si64>
    %3 = torch.copy.to_tensor %2 : !torch.tensor<*,si64>
    %4 = torch.aten.slice.Tensor %arg1, %int1, %int0, %int-1, %int1 : !torch.vtensor<[1,4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,3],si64>
    %5 = torch.aten.clone %4, %none : !torch.vtensor<[1,3],si64>, !torch.none -> !torch.vtensor<[1,3],si64>
    %6 = torch.aten.slice.Tensor %3, %int1, %int1, %int9223372036854775807, %int1 : !torch.tensor<*,si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,3],si64>
    %7 = torch.aten.arange.start_step %int1, %int4, %int1, %none, %none, %none, %none : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[3],si64>
    %8 = torch.copy.to_vtensor %3 : !torch.vtensor<*,si64>
    %9 = torch.prim.ListConstruct %7 : (!torch.vtensor<[3],si64>) -> !torch.list<optional<vtensor>>
    %10 = torch.aten._index_put_impl %8, %9, %5, %false, %false : !torch.vtensor<*,si64>, !torch.list<optional<vtensor>>, !torch.vtensor<[1,3],si64>, !torch.bool, !torch.bool -> !torch.vtensor<[1,4],si64>
    %11 = torch.tensor_static_info_cast %10 : !torch.vtensor<[1,4],si64> to !torch.vtensor<*,si64>
    torch.overwrite.tensor.contents %11 overwrites %3 : !torch.vtensor<*,si64>, !torch.tensor<*,si64>
    %12 = torch.aten.slice.Tensor %3, %int1, %int0, %int1, %int1 : !torch.tensor<*,si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1],si64>
    %13 = torch.aten.squeeze.dim %12, %int1 : !torch.tensor<[1,1],si64>, !torch.int -> !torch.tensor<[1],si64>
    %14 = torch.tensor_static_info_cast %13 : !torch.tensor<[1],si64> to !torch.tensor<*,si64>
    %15 = torch.copy.to_vtensor %3 : !torch.vtensor<*,si64>
    %16 = torch.aten.eq.Scalar %15, %int-100 : !torch.vtensor<*,si64>, !torch.int -> !torch.vtensor<[1,4],i1>
    %17 = torch.copy.to_vtensor %3 : !torch.vtensor<*,si64>
    %18 = torch.prim.ListConstruct : () -> !torch.list<int>
    %19 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64>
    %20 = torch.aten.broadcast_to %19, %18 : !torch.vtensor<[],si64>, !torch.list<int> -> !torch.vtensor<[],si64>
    %21 = torch.aten.where.self %16, %20, %17 : !torch.vtensor<[1,4],i1>, !torch.vtensor<[],si64>, !torch.vtensor<*,si64> -> !torch.vtensor<[1,4],si64>
    %22 = torch.tensor_static_info_cast %21 : !torch.vtensor<[1,4],si64> to !torch.vtensor<*,si64>
    torch.overwrite.tensor.contents %22 overwrites %3 : !torch.vtensor<*,si64>, !torch.tensor<*,si64>
    %23 = torch.copy.to_vtensor %14 : !torch.vtensor<*,si64>
    return %23 : !torch.vtensor<*,si64>
  }
}
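
The diagnostics above point at the root cause: `torch.aten.new_zeros` produces a tensor whose rank is never refined (`!torch.vtensor<*,si64>`), so the module violates the backend contract's requirement that every tensor have a known rank, and `torch-lower-to-backend-contract` fails. As the note suggests, the usual remedy is to give the op a shape transfer function in abstract_interp_lib_gen.py so shape inference can propagate the static `[1,4]` shape. Below is a minimal sketch of what such a function might look like, assuming torch-mlir's conventions for that file (`〇` stands for `.` and `〡` separates the op name from the function kind); the parameter list mirrors the ATen schema for `new_zeros` and should be verified against the registered op before upstreaming:

# Hypothetical shape transfer function for aten.new_zeros, following the
# conventions of abstract_interp_lib_gen.py (a sketch, not upstream code).
def aten〇new_zeros〡shape(self: List[int], size: List[int],
                           dtype: Optional[int] = None,
                           layout: Optional[int] = None,
                           device: Optional[device] = None,
                           pin_memory: Optional[bool] = None) -> List[int]:
    # new_zeros ignores self's shape; the result shape is exactly the
    # requested `size` list (the !torch.list<int> operand in the IR above).
    return size

After adding the function, the abstract interpretation library has to be regenerated (in upstream torch-mlir this is done with build_tools/update_abstract_interp_lib.sh) and torch-mlir-opt rebuilt; a corresponding dtype transfer function may also be needed if the result dtype is not already inferred.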