Prashant Kumar (pashu123)
(file truncated)
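// Truncated MLIR dump: only the shared affine indexing maps survive the cut,
// including broadcast ((d0) -> (0), (d0, d1) -> (d0, 0)) and transpose
// ((d0, d1) -> (d1, d0)) patterns used by linalg ops in the elided body.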
#map0 = affine_map<(d0) -> (0)>
#map1 = affine_map<(d0) -> (d0)>
#map2 = affine_map<(d0) -> ()>
#map3 = affine_map<() -> ()>
#map4 = affine_map<(d0, d1) -> ()>
#map5 = affine_map<(d0, d1) -> (d0, d1)>
#map6 = affine_map<(d0, d1) -> (d0, 0)>
#map7 = affine_map<(d0, d1) -> (0, d1)>
#map8 = affine_map<(d0, d1) -> (d1, d0)>
#map9 = affine_map<(d0, d1) -> (d1)>
# Stable Diffusion inference via SHARK and torch-mlir; the script is truncated after its imports.
from transformers import CLIPTextModel, CLIPTokenizer            # CLIP text encoder + tokenizer
from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler
import torch
from PIL import Image
from diffusers import LMSDiscreteScheduler                       # sampler for the denoising loop
from tqdm.auto import tqdm
from shark.shark_inference import SharkInference                 # SHARK runtime wrapper
from torch.fx.experimental.proxy_tensor import make_fx           # trace callables into FX graphs
from torch._decomp import get_decompositions                     # decompose aten ops before lowering
import torch_mlir                                                # FX/TorchScript -> MLIR lowering
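The script is cut off after the imports. As context, here is a minimal sketch of how these pieces are typically wired together; everything past the imports is an assumption for illustration (checkpoint name, decomposition list, guidance scale, and the SharkInference call are placeholders, not taken from the gist). The `torch.debug_module_name = "_lambda"` in the dumps below suggests the traced callable was a lambda much like `step` here.

# Hypothetical sketch (not from the gist): trace a guided UNet step with make_fx,
# lower it with torch-mlir, and hand the result to SHARK.
unet = UNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet")  # placeholder checkpoint
unet.eval()

latents = torch.randn(1, 4, 64, 64)     # one latent image
timestep = torch.tensor([1.0])
text_emb = torch.randn(2, 77, 768)      # [uncond, cond] CLIP embeddings

def step(latents, t, emb):
    # Duplicate the latent for classifier-free guidance, then recombine.
    latent_in = torch.cat([latents] * 2)
    noise = unet(latent_in, t, encoder_hidden_states=emb, return_dict=False)[0]
    uncond, cond = noise.chunk(2)
    return uncond + 7.5 * (cond - uncond)   # placeholder guidance scale

# Trace to an FX graph, decomposing aten ops torch-mlir cannot lower directly
# (the op list here is only illustrative).
fx_g = make_fx(step, decomposition_table=get_decompositions(
    [torch.ops.aten.native_layer_norm]))(latents, timestep, text_emb)

# Lower to linalg-on-tensors, the dialect seen in the last dump on this page;
# OutputType.TORCH stops at the torch-dialect form seen in the other dumps.
module = torch_mlir.compile(
    fx_g, (latents, timestep, text_emb),
    output_type=torch_mlir.OutputType.LINALG_ON_TENSORS)

# Compile through SHARK; the constructor/compile API here is an assumption.
shark_module = SharkInference(module, device="cpu")
shark_module.compile()

The [1,4,64,64] latent / [2,77,768] embedding signature of `step` matches the first fp16 torch-dialect dump below.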
(file truncated)
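// Torch-dialect dump, fp16: apparently the UNet denoising step, taking a
// [1,4,64,64] latent, a [1] timestep, [2,77,768] CLIP embeddings, and a scalar,
// and returning the updated [1,4,64,64] latent.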
#map0 = affine_map<() -> ()>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map2 = affine_map<(d0, d1, d2, d3) -> ()>
#map3 = affine_map<(d0) -> (0)>
#map4 = affine_map<(d0) -> (d0)>
#map5 = affine_map<(d0) -> ()>
#map6 = affine_map<(d0, d1) -> ()>
#map7 = affine_map<(d0, d1) -> (d0, d1)>
#map8 = affine_map<(d0, d1) -> (d0, 0)>
#map9 = affine_map<(d0, d1) -> (0, d1)>
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: !torch.vtensor<[1,4,64,64],f16>, %arg1: !torch.vtensor<[1],f16>, %arg2: !torch.vtensor<[2,77,768],f16>, %arg3: !torch.vtensor<[],f32>) -> !torch.vtensor<[1,4,64,64],f16> {
    %int64 = torch.constant.int 64
    %int320 = torch.constant.int 320
    %int2 = torch.constant.int 2
    %int40960 = torch.constant.int 40960
    %int4096 = torch.constant.int 4096
    %int10 = torch.constant.int 10
    %int32 = torch.constant.int 32
    %int640 = torch.constant.int 640
(file truncated)
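// Torch-dialect dump, fp32 inputs with an fp16 result: apparently an SD 2.x-style
// UNet step (1024-dim text embeddings, 96x96 latents, i.e. 768x768 images).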
#map0 = affine_map<(d0) -> (0)>
#map1 = affine_map<(d0) -> (d0)>
#map2 = affine_map<(d0) -> ()>
#map3 = affine_map<() -> ()>
#map4 = affine_map<(d0, d1) -> ()>
#map5 = affine_map<(d0, d1) -> (d0, d1)>
#map6 = affine_map<(d0, d1) -> (d0, 0)>
#map7 = affine_map<(d0, d1) -> (0, d1)>
#map8 = affine_map<(d0, d1) -> (d1, d0)>
#map9 = affine_map<(d0, d1) -> (d1)>
func.func @forward(%arg0: !torch.vtensor<[2,4,96,96],f32>, %arg1: !torch.vtensor<[2],si64>, %arg2: !torch.vtensor<[2,77,1024],f32>) -> !torch.vtensor<[2,4,96,96],f16> {
  %int160 = torch.constant.int 160
  %0 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f64>) : !torch.vtensor<[],f64>
  %1 = torch.vtensor.literal(dense<9.9999999999999995E-7> : tensor<f64>) : !torch.vtensor<[],f64>
  %2 = torch.vtensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.vtensor<[],f64>
  %3 = torch.vtensor.literal(dense<160> : tensor<si64>) : !torch.vtensor<[],si64>
  %4 = torch.vtensor.literal(dense<-9.2103403719761836> : tensor<f64>) : !torch.vtensor<[],f64>
  %5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1280x320xf32>) : !torch.vtensor<[1280,320],f32>
  %6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<320x4x3x3xf32>) : !torch.vtensor<[320,4,3,3],f32>
  %7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<640x320x3x3xf32>) : !torch.vtensor<[640,320,3,3],f32>
(file truncated)
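// Torch-dialect dump, fp16: apparently the CLIP text encoder, mapping [1,77]
// token ids to [1,77,1024] embeddings; the 49408x1024 literal is the
// token-embedding table.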
#map = affine_map<(d0) -> (d0)>
#map1 = affine_map<(d0) -> ()>
#map2 = affine_map<() -> ()>
#map3 = affine_map<(d0, d1) -> ()>
#map4 = affine_map<(d0, d1) -> (d0, d1)>
#map5 = affine_map<(d0, d1) -> (d0, 0)>
#map6 = affine_map<(d0, d1) -> (0, d1)>
#map7 = affine_map<(d0, d1) -> (d1, d0)>
#map8 = affine_map<(d0, d1) -> (d1)>
#map9 = affine_map<(d0, d1, d2, d3) -> ()>
func.func @forward(%arg0: !torch.vtensor<[1,77],si64>) -> !torch.vtensor<[1,77,1024],f16> {
  %0 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f64>) : !torch.vtensor<[],f64>
  %1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x77xsi64>) : !torch.vtensor<[1,77],si64>
  %2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<49408x1024xf16>) : !torch.vtensor<[49408,1024],f16>
  %3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<77x1024xf16>) : !torch.vtensor<[77,1024],f16>
  %4 = torch.vtensor.literal(dense<-6.550400e+04> : tensor<f32>) : !torch.vtensor<[],f32>
  %5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf16>) : !torch.vtensor<[1024,1024],f16>
  %6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf16>) : !torch.vtensor<[4096,1024],f16>
  %7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf16>) : !torch.vtensor<[4096],f16>
  %8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf16>) :
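// Apparently the same text encoder at an earlier stage: the raw TorchScript
// import, with parameters still read through torch.prim.GetAttr instead of
// inlined literals.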
module attributes {torch.debug_module_name = "_lambda"} {
  func.func private @__torch__.torch.fx.graph_module._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda">, %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[1,77],si64>}) -> !torch.tensor {
    %199 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,77],si64>
    %200 = torch.prim.GetAttr %arg0["_param_constant195"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.tensor
    %201 = torch.prim.GetAttr %arg0["_param_constant194"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.tensor
    %202 = torch.prim.GetAttr %arg0["_param_constant193"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.tensor
    %203 = torch.prim.GetAttr %arg0["_param_constant192"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.tensor
    %204 = torch.prim.GetAttr %arg0["_param_constant191"] : !torch.nn.Module<"__torch__.torch.fx.g
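// Linalg-on-tensors dump, fp16: the visible portion elementwise-exponentiates a
// [10,9216,9216] tensor, apparently the start of a softmax over attention scores
// (note the [10,9216,1] buffer allocated for the reduction).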
#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, 0)>
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: tensor<10x9216x9216xf16>, %arg1: tensor<1xf16>, %arg2: tensor<2x77x1024xf16>, %arg3: tensor<f32>) -> tensor<10x9216x9216xf16> {
    %cst = arith.constant 0.000000e+00 : f16
    %0 = tensor.empty() : tensor<10x9216x9216xf16>
    %1 = tensor.empty() : tensor<10x9216x1xf16>
    %2 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg0 : tensor<10x9216x9216xf16>) outs(%0 : tensor<10x9216x9216xf16>) {
    ^bb0(%in: f16, %out: f16):
      %6 = math.exp %in : f16