// RUN: stablehlo-translate --interpret -split-input-file %s
func.func @scatter_op_test() {
  %inputs = stablehlo.constant dense<[[0, 1, 2, 3]]> : tensor<1x4xi64>
  %scatter_indices = stablehlo.constant dense<[[0, 1], [0, 2], [0, 3]]> : tensor<3x2xi64>
  %updates = stablehlo.constant dense<[[4], [5], [6]]> : tensor<3x1xi64>
  %result = "stablehlo.scatter"(%inputs, %scatter_indices, %updates) ({
  ^bb0(%arg0: tensor<i64>, %arg1: tensor<i64>):
    stablehlo.return %arg1 : tensor<i64>  // overwrite: keep the update value
  }) {
    // Attribute block truncated in the source; a plausible completion for these shapes:
    scatter_dimension_numbers = #stablehlo.scatter<update_window_dims = [1], inserted_window_dims = [0], scatter_dims_to_operand_dims = [0, 1], index_vector_dim = 1>,
    indices_are_sorted = false,
    unique_indices = false
  } : (tensor<1x4xi64>, tensor<3x2xi64>, tensor<3x1xi64>) -> tensor<1x4xi64>
  check.expect_eq_const %result, dense<[[0, 4, 5, 6]]> : tensor<1x4xi64>
  func.return
}
spec_scatter.txt
size(inputs) = size(updates) = size(results) = 1 = N
input = inputs[0]
update = updates[0]
result = results[0]
// %input:
// [[0, 1, 2, 3]]
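To make the spec pseudocode concrete, here is a small Python sketch (not part of the original gist) that replays this scatter by hand: each row of %scatter_indices is a full coordinate into %input, and the matching row of %updates overwrites the element there, which is exactly what returning %arg1 from the update computation does.

import torch

inputs = torch.tensor([[0, 1, 2, 3]])
scatter_indices = torch.tensor([[0, 1], [0, 2], [0, 3]])
updates = torch.tensor([[4], [5], [6]])

result = inputs.clone()
for k in range(scatter_indices.shape[0]):
    i, j = scatter_indices[k]      # full coordinate into the 1x4 operand
    result[i, j] = updates[k, 0]   # overwrite, matching `stablehlo.return %arg1`

print(result)  # tensor([[0, 4, 5, 6]])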
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
def prepare_sentence_tokens(hf_model: str, sentence: str):
    tokenizer = AutoTokenizer.from_pretrained(hf_model)
    # Encode to token ids and add a batch dimension.
    return torch.tensor([tokenizer.encode(sentence)])
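The imports above only pay off once the model is traced and lowered. Below is a minimal sketch of the usual wiring, with caveats: the checkpoint name is a placeholder, the decomposition list is illustrative, and output_type="stablehlo" requires a recent torch-mlir (older releases spell it "mhlo").

hf_model = "prajjwal1/bert-tiny"  # placeholder; any HF sequence classifier works
tokens = prepare_sentence_tokens(hf_model, "this is a test sentence")

model = AutoModelForSequenceClassification.from_pretrained(
    hf_model, num_labels=2, torchscript=True)
model.eval()

# Trace to an FX graph, decomposing ops the backend cannot handle directly.
fx_g = make_fx(
    lambda x: model(x)[0],
    decomposition_table=get_decompositions([torch.ops.aten.native_layer_norm]),
)(tokens)

# Lower the traced graph; the torch-dialect dumps below are what such a
# pipeline prints on the way to StableHLO.
module = torch_mlir.compile(fx_g, [tokens], output_type="stablehlo", use_tracing=True)
with tempfile.NamedTemporaryFile(suffix=".mlir", delete=False) as f:
    f.write(str(module).encode())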
#loc = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: !torch.vtensor<[1,128],si64> loc(unknown)) -> !torch.vtensor<[1,2],f32> {
    %int128 = torch.constant.int 128 loc(#loc1)
    %int127 = torch.constant.int 127 loc(#loc2)
    %int1 = torch.constant.int 1 loc(#loc3)
    %true = torch.constant.bool true loc(#loc4)
    %int0 = torch.constant.int 0 loc(#loc5)
    %int2 = torch.constant.int 2 loc(#loc6)
    %none = torch.constant.none loc(#loc)
#loc = loc(unknown)
module attributes {torch.debug_module_name = "HuggingFaceLanguage"} {
  func.func @forward(%arg0: !torch.vtensor<[?,?],si64> loc(unknown)) -> !torch.vtensor<[?,2],f32> {
    %int768 = torch.constant.int 768 loc(#loc1)
    %true = torch.constant.bool true loc(#loc1)
    %float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc2)
    %none = torch.constant.none loc(#loc)
    %int0 = torch.constant.int 0 loc(#loc3)
    %int1 = torch.constant.int 1 loc(#loc3)
    %false = torch.constant.bool false loc(#loc4)
# pip install transformers==4.26.0
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
class HfMaskedLM(torch.nn.Module):
    # Body truncated in the source; a minimal plausible completion follows.
    def __init__(self, model_name: str):
        super().__init__()
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name, torchscript=True)
        self.model.eval()

    def forward(self, input_ids, decoder_input_ids):
        return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)[0]
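A hedged usage sketch for the wrapper (the checkpoint name is an assumption; any HF seq2seq checkpoint works):

hf_model = "t5-small"  # assumption: any seq2seq checkpoint
tokenizer = AutoTokenizer.from_pretrained(hf_model)
model = HfMaskedLM(hf_model)

input_ids = tokenizer("translate English to German: hello", return_tensors="pt").input_ids
# Seed the decoder with its start token; required for a single forward pass.
decoder_input_ids = torch.tensor([[model.model.config.decoder_start_token_id]])
logits = model(input_ids, decoder_input_ids)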
// Lowering test: aten._index_put_impl with a None index (select all rows) and
// a scalar fill value.
func.func @torch.aten._index_put_impl(%input: !torch.vtensor<[1,4],si64>, %index: !torch.vtensor<[3],si64>, %fillValues: !torch.vtensor<[],si64>) -> !torch.vtensor<[1,4],si64> {
  %false = torch.constant.bool false
  %none = torch.constant.none
  %indices = torch.prim.ListConstruct %none, %index : (!torch.none, !torch.vtensor<[3],si64>) -> !torch.list<optional<vtensor>>
  %out = torch.aten._index_put_impl %input, %indices, %fillValues, %false, %false : !torch.vtensor<[1,4],si64>, !torch.list<optional<vtensor>>, !torch.vtensor<[],si64>, !torch.bool, !torch.bool -> !torch.vtensor<[1,4],si64>
  return %out : !torch.vtensor<[1,4],si64>
}
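In eager PyTorch this test corresponds to advanced-index assignment; a small sketch with concrete values (the None in the ListConstruct plays the role of the `:` below):

import torch

x = torch.tensor([[0, 1, 2, 3]])
index = torch.tensor([1, 2, 3])
x[:, index] = torch.tensor(9)  # scalar fill broadcasts over the selection
print(x)  # tensor([[0, 9, 9, 9]])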
// Lowering test: elementwise less-than-or-equal with broadcasting along the
// last dimension.
func.func @torch.aten.le.Tensor(%arg0: !torch.vtensor<[1,4,4],si64>, %arg1: !torch.vtensor<[1,4,1],si64>) -> !torch.vtensor<[1,4,4],i1> {
  %0 = torch.aten.le.Tensor %arg0, %arg1 : !torch.vtensor<[1,4,4],si64>, !torch.vtensor<[1,4,1],si64> -> !torch.vtensor<[1,4,4],i1>
  return %0 : !torch.vtensor<[1,4,4],i1>
}
// Lowering test: elementwise absolute value.
func.func @torch.aten.abs(%arg0: !torch.vtensor<[15,15],si64>) -> !torch.vtensor<[15,15],si64> {
  %0 = torch.aten.abs %arg0 : !torch.vtensor<[15,15],si64> -> !torch.vtensor<[15,15],si64>
  return %0 : !torch.vtensor<[15,15],si64>
}
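The last two tests are plain elementwise ops; in eager PyTorch, with shapes mirroring the test signatures:

import torch

a = torch.randint(-5, 5, (1, 4, 4))
b = torch.randint(-5, 5, (1, 4, 1))
le = torch.le(a, b)  # b broadcasts along the last dim; result is bool (1, 4, 4)
absval = torch.abs(torch.randint(-5, 5, (15, 15)))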