Skip to content

Instantly share code, notes, and snippets.

// Minimal repro: scatter a [1,3] si64 tensor of fill values into a [1,4] si64
// input at the dim-1 positions named by %index, via torch.aten._index_put_impl.
func.func @torch.aten._index_put_impl(%input: !torch.vtensor<[1,4],si64>, %index: !torch.vtensor<[3],si64>, %fillValues: !torch.vtensor<[1,3],si64>) -> !torch.vtensor<[1,4],si64>{
%false = torch.constant.bool false
%none = torch.constant.none
// Indices list is (none, %index): the leading 'none' leaves dim 0 untouched
// (select-all), so %index addresses positions along dim 1.
%indices = torch.prim.ListConstruct %none, %index : (!torch.none, !torch.vtensor<[3],si64>) -> !torch.list<optional<vtensor>>
// The two trailing booleans are presumably the accumulate/unsafe flags of
// aten::_index_put_impl (both false: overwrite, checked) --
// NOTE(review): confirm against the op's registered schema.
%out = torch.aten._index_put_impl %input, %indices, %fillValues, %false, %false : !torch.vtensor<[1,4],si64>, !torch.list<optional<vtensor>>, !torch.vtensor<[1,3],si64>, !torch.bool, !torch.bool -> !torch.vtensor<[1,4],si64>
return %out : !torch.vtensor<[1,4],si64>
}
#loc = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
func.func @forward(%arg0: !torch.vtensor<[1,15],si64> loc(unknown), %arg1: !torch.vtensor<[1,4],si64> loc(unknown)) -> !torch.vtensor<[1,4,32128],f32> {
%int512 = torch.constant.int 512 loc(#loc1)
%int0 = torch.constant.int 0 loc(#loc2)
%int1 = torch.constant.int 1 loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc4)
%true = torch.constant.bool true loc(#loc5)
%int4 = torch.constant.int 4 loc(#loc6)
%false = torch.constant.bool false loc(#loc7)
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
class Test(torch.nn.Module):
def __init__(self):
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
class Test(torch.nn.Module):
def __init__(self):
super().__init__()
➜ torch-mlir git:(int64_max) ✗ torch-mlir-opt -pass-pipeline='builtin.module(torchscript-module-to-torch-backend-pipeline{backend-legal-ops=torch.aten.flatten.using_ints,torch.aten.native_layer_norm,torch.aten.linear})' ./t5small/test_torchscript.mlir -mlir-print-ir-after-failure -mlir-disable-threading
./t5small/test_torchscript.mlir:13:12: error: unsupported by backend contract: tensor with unknown rank
%134 = torch.aten.new_zeros %arg2, %133, %int4, %int0, %cpu, %false : !torch.tensor, !torch.list<int>, !torch.int, !torch.int, !torch.Device, !torch.bool -> !torch.tensor
^
./t5small/test_torchscript.mlir:13:12: note: see current operation: %11 = "torch.tensor_static_info_cast"(%10) : (!torch.vtensor<[1,4],si64>) -> !torch.vtensor<*,si64>
./t5small/test_torchscript.mlir:13:12: note: this is likely due to a missing transfer function in abstract_interp_lib_gen.py
// -----// IR Dump After LowerToBackendContract Failed (torch-lower-to-backend-contract) //----- //
module attributes {torch.debug_m
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
class Test(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_ids, decoder_input_ids):
# pip install transformers==4.26.0
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
class HfMaskedLM(torch.nn.Module):
module attributes {torch.debug_module_name = "_lambda"} {
func.func private @__torch__.torch.fx.graph_module._lambda.__code_getter(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda">) -> !torch.str {
%133 = torch.prim.GetAttr %arg0["_code"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.str
return %133 : !torch.str
}
func.func private @__torch__.torch.fx.graph_module._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda">, %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[1,15],si64>}, %arg2: !torch.tensor {torch.type_bound = !torch.vtensor<[1,4],si64>}) -> !torch.tensor {
%int6 = torch.constant.int 6
%true_0 = torch.constant.bool true
%float-3.402820e38 = torch.constant.float -3.4028234663852886E+38
%int-100 = torch.constant.int -100
module attributes {torch.debug_module_name = "_lambda"} {
func.func private @__torch__.torch.fx.graph_module._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda">, %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[1,15],si64>}, %arg2: !torch.tensor {torch.type_bound = !torch.vtensor<[1,4],si64>}) -> !torch.tensor {
%none_1 = torch.constant.none
%int-1 = torch.constant.int -1
%false = torch.constant.bool false
%cpu = torch.constant.device "cpu"
%int1 = torch.constant.int 1
%int4 = torch.constant.int 4
%int0 = torch.constant.int 0
%int-100 = torch.constant.int -100
module attributes {torch.debug_module_name = "_lambda"} {
func.func private @__torch__.torch.fx.graph_module._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda">, %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[1,15],si64>}, %arg2: !torch.tensor {torch.type_bound = !torch.vtensor<[1,4],si64>}) -> !torch.tensor {
%none_1 = torch.constant.none
%int-1 = torch.constant.int -1
%false = torch.constant.bool false
%cpu = torch.constant.device "cpu"
%int1 = torch.constant.int 1
%int4 = torch.constant.int 4
%int0 = torch.constant.int 0
%int-100 = torch.constant.int -100