#map0 = affine_map<(d0) -> (0)>
#map1 = affine_map<(d0) -> (d0)>
#map2 = affine_map<(d0) -> ()>
#map3 = affine_map<() -> ()>
#map4 = affine_map<(d0, d1) -> ()>
#map5 = affine_map<(d0, d1) -> (d0, d1)>
#map6 = affine_map<(d0, d1) -> (d0, 0)>
#map7 = affine_map<(d0, d1) -> (0, d1)>
#map8 = affine_map<(d0, d1) -> (d1, d0)>
#map9 = affine_map<(d0, d1) -> (d1)>
// -----// IR Dump After DropShapeCalculations (torch-drop-shape-calculations) //----- //
func.func @forward(%arg0: !torch.vtensor<[2,4,64,64],f16>, %arg1: !torch.vtensor<[1],f16>, %arg2: !torch.vtensor<[2,77,768],f16>) -> !torch.vtensor<[2,4,64,64],f16> {
%int160 = torch.constant.int 160
%float1.600000e02 = torch.constant.float 1.600000e+02
%str = torch.constant.str "AssertionError: "
%int4 = torch.constant.int 4
%float0.000000e00 = torch.constant.float 0.000000e+00
%0 = torch.vtensor.literal(dense<0.079056941504209485> : tensor<f64>) : !torch.vtensor<[],f64>
%1 = torch.vtensor.literal(dense<0.11180339887498948> : tensor<f64>) : !torch.vtensor<[],f64>
%2 = torch.vtensor.literal(dense<0.15811388300841897> : tensor<f64>) : !torch.vtensor<[],f64>
torch-mlir-opt: /home/prashant/torch-mlir/externals/llvm-project/mlir/lib/IR/BuiltinAttributes.cpp:1031: static mlir::DenseElementsAttr mlir::DenseElementsAttr::get(mlir::ShapedType, ArrayRef<mlir::Attribute>): Assertion `intAttr.getType() == eltType && "expected integer attribute type to equal element type"' failed.
PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace.
Stack dump:
0. Program arguments: torch-mlir-opt -pass-pipeline=torch-backend-to-tosa-backend-pipeline xyz.mlir
#0 0x0000557fff3cedd3 llvm::sys::PrintStackTrace(llvm::raw_ostream&, int) (/home/prashant/torch-mlir/build/bin/torch-mlir-opt+0x187cdd3)
#1 0x0000557fff3ccd4e llvm::sys::RunSignalHandlers() (/home/prashant/torch-mlir/build/bin/torch-mlir-opt+0x187ad4e)
#2 0x0000557fff3cf16a SignalHandler(int) Signals.cpp:0:0
#3 0x00007f745f34c520 (/lib/x86_64-linux-gnu/libc.so.6+0x42520)
#4 0x00007f745f3a0a7c __pthread_kill_implementation ./nptl/./nptl/pthread_kill.c:44:76
#5 0x00007f74
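
The assertion fires inside DenseElementsAttr::get when the element attributes handed to it do not match the shaped type's element type. Given the IR above, a plausible trigger is the mix of f64 vtensor.literal constants inside an otherwise f16 graph; the sketch below only illustrates that dtype mix (a hypothesis for illustration, not a repro of the torch-mlir-opt crash):

import torch

# %arg0 above is an f16 tensor, while %0..%2 are f64 scalar literals.
x = torch.randn(2, 4, 64, 64, dtype=torch.float16)
scale = torch.tensor(0.15811388300841897, dtype=torch.float64)
# PyTorch keeps the result f16 here; a lowering that folds the f64
# constant into an f16 tensor attribute would trip the assertion above.
y = x * scale
print(y.dtype)  # torch.float16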
# Stable Diffusion lowering script (gist file truncated): text encoder,
# VAE, UNet, schedulers, and the SHARK/torch-mlir tooling used to compile them.
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler
import torch
from PIL import Image
from diffusers import LMSDiscreteScheduler
from tqdm.auto import tqdm
from shark.shark_inference import SharkInference
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import torch_mlir
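
For context, these imports are typically combined as sketched below in the torch-mlir/SHARK flow: trace with make_fx, decompose unsupported ops, then compile. The function name, the example decomposition, and the placeholders are assumptions for illustration, not the gist's actual script:

def compile_to_linalg(model, example_input):
    # Trace the model into an FX graph, decomposing ops that
    # torch-mlir cannot ingest directly (the list here is illustrative).
    fx_graph = make_fx(
        model,
        decomposition_table=get_decompositions([torch.ops.aten.native_layer_norm]),
    )(example_input)
    # Lower the traced graph to torch-mlir's linalg-on-tensors contract.
    return torch_mlir.compile(fx_graph, [example_input],
                              output_type="linalg-on-tensors")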
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
func.func @forward(%arg0: !torch.vtensor<[1,4,64,64],f32> loc(unknown)) -> !torch.vtensor<[1,3,512,512],f32> {
%int1048576 = torch.constant.int 1048576 loc(#loc1)
%int262144 = torch.constant.int 262144 loc(#loc2)
%int4 = torch.constant.int 4 loc(#loc3)
%int2097152 = torch.constant.int 2097152 loc(#loc4)
%int8 = torch.constant.int 8 loc(#loc5)
%int524288 = torch.constant.int 524288 loc(#loc6)
%int65536 = torch.constant.int 65536 loc(#loc7)
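
The _lambda module above has Stable Diffusion's VAE-decode signature: [1,4,64,64] latents in, a [1,3,512,512] image out. A hypothetical way to exercise that path with the diffusers imports from the script (the model id and the output-field access are assumptions):

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
latents = torch.randn(1, 4, 64, 64)
with torch.no_grad():
    image = vae.decode(latents).sample  # expected shape: [1, 3, 512, 512]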
# Causal-LM tokenization helper (gist file truncated).
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
def prepare_sentence_tokens(hf_model: str, sentence: str):
    tokenizer = AutoTokenizer.from_pretrained(hf_model)
    # Return a batched [1, seq_len] tensor of token ids (body reconstructed
    # from the truncated gist; this is the usual torch-mlir example helper).
    return torch.tensor([tokenizer.encode(sentence)])
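
A hypothetical use of the helper; the model id and prompt below are placeholders, not values from the gist:

hf_model = "gpt2"  # placeholder model id
tokens = prepare_sentence_tokens(hf_model, "The quick brown fox")
model = AutoModelForCausalLM.from_pretrained(hf_model, return_dict=False)
logits = model(tokens)[0]  # with return_dict=False, the first output is the logits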
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d1, d0)>
#map2 = affine_map<(d0, d1) -> (d1)>
#map3 = affine_map<(d0, d1, d2) -> (d1)>
#map4 = affine_map<(d0, d1, d2) -> (d0)>
#map5 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map6 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map7 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map8 = affine_map<(d0, d1) -> (d0, 0)>
#map9 = affine_map<(d0, d1) -> (0)>
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d1, d0)>
#map2 = affine_map<(d0, d1) -> (0, d1)>
#map3 = affine_map<(d0, d1) -> (d1)>
#map4 = affine_map<(d0, d1, d2) -> (d1)>
#map5 = affine_map<(d0, d1, d2) -> (d0)>
#map6 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map7 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map8 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map9 = affine_map<(d0, d1) -> (0, 0)>
#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d1)>
#map2 = affine_map<(d0, d1, d2, d3) -> (0, d1, d2, d3)>
#map3 = affine_map<(d0, d1) -> (d1)>
#map4 = affine_map<(d0, d1) -> (d0, d1)>
#map5 = affine_map<(d0, d1) -> (d1, d0)>
module attributes {torch.debug_module_name = "VisionModule"} {
func.func @forward(%arg0: tensor<1x3x224x224xf32>) -> tensor<1x1000xf32> {
%false = arith.constant false
%cst = arith.constant dense_resource<__elided__> : tensor<1000xf32>
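
The VisionModule dump carries the standard ImageNet classifier signature, [1,3,224,224] in and [1,1000] out. One hypothetical way to produce such a module (the network choice is an assumption; the gist does not name it):

import torch
import torchvision

model = torchvision.models.resnet18(weights="IMAGENET1K_V1").eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000])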