Skip to content

Instantly share code, notes, and snippets.

View pashu123's full-sized avatar
😇
Working from home

Prashant Kumar pashu123

😇
Working from home
View GitHub Profile
This file has been truncated, but you can view the full file.
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
func.func private @__torch__.torch.fx.graph_module._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> loc(unknown), %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[2,4,64,64],f32>} loc(unknown), %arg2: !torch.tensor {torch.type_bound = !torch.vtensor<[],si64>} loc(unknown), %arg3: !torch.tensor {torch.type_bound = !torch.vtensor<[2,77,768],f32>} loc(unknown)) -> !torch.tensor {
%3919 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[2,4,64,64],f32> loc(#loc0)
%3920 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[],si64> loc(#loc0)
%3921 = torch.tensor_static_info_cast %arg3 : !torch.tensor to !torch.tensor<[2,77,768],f32> loc(#loc0)
%3922 = torch.prim.GetAttr %arg0["_param_constant365"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.tensor loc(#loc0)
%3923 = torch.prim.GetAttr %arg0["_param_constant3
# Pull the quantized Stable Diffusion artifacts from the SHARK tank bucket.
# download_torch_model returns (MLIR module, entry-point name, sample inputs,
# golden outputs) — inferred from the unpacked names; confirm against
# shark_downloader if reusing.
import torch
import numpy as np
from shark.shark_inference import SharkInference
from shark.shark_importer import SharkImporter
from shark.shark_downloader import download_torch_model

MODEL_NAME = "stable_diff_quant"
TANK_URL = "gs://shark_tank/prashant_nod"

mlir_model, func_name, inputs, golden_out = download_torch_model(
    MODEL_NAME, tank_url=TANK_URL
)
# Download the quantized Stable Diffusion model and build a Vulkan-backed
# SharkInference module over the linalg-dialect MLIR.
import numpy as np
from shark.shark_inference import SharkInference
from shark.shark_importer import SharkImporter
from shark.shark_downloader import download_torch_model

MODEL_NAME = "stable_diff_quant"
TANK_URL = "gs://shark_tank/prashant_nod"

mlir_model, func_name, inputs, golden_out = download_torch_model(
    MODEL_NAME, tank_url=TANK_URL
)

# Target the Vulkan device; the downloaded module is in the linalg dialect.
shark_module = SharkInference(
    mlir_model, func_name, mlir_dialect="linalg", device="vulkan"
)
This file has been truncated, but you can view the full file.
#map0 = affine_map<() -> ()>
#map1 = affine_map<(d0) -> (0)>
#map2 = affine_map<(d0) -> (d0)>
#map3 = affine_map<(d0) -> ()>
#map4 = affine_map<(d0, d1) -> (d0, 0)>
#map5 = affine_map<(d0, d1) -> (0, d1)>
#map6 = affine_map<(d0, d1) -> (d0, d1)>
#map7 = affine_map<(d0, d1) -> ()>
#map8 = affine_map<(d0, d1) -> (d1, d0)>
#map9 = affine_map<(d0, d1) -> (d1)>
# Fetch the fp16 ResNet-50 (torch export) from the SHARK tank and wrap it
# in a SharkInference module (default device).
import numpy as np
from shark.shark_inference import SharkInference
from shark.shark_importer import SharkImporter
from shark.shark_downloader import download_torch_model

MODEL_NAME = "resnet_50_fp16_torch"
TANK_URL = "gs://shark_tank/prashant_nod"

mlir_model, func_name, inputs, golden_out = download_torch_model(
    MODEL_NAME, tank_url=TANK_URL
)

shark_module = SharkInference(mlir_model, func_name, mlir_dialect="linalg")
# Fetch the older fp16 ResNet-50 artifact from the SHARK tank and wrap it
# in a SharkInference module (default device).
import numpy as np
from shark.shark_inference import SharkInference
from shark.shark_importer import SharkImporter
from shark.shark_downloader import download_torch_model

MODEL_NAME = "resnet_50_fp16_old"
TANK_URL = "gs://shark_tank/prashant_nod"

mlir_model, func_name, inputs, golden_out = download_torch_model(
    MODEL_NAME, tank_url=TANK_URL
)

shark_module = SharkInference(mlir_model, func_name, mlir_dialect="linalg")
This file has been truncated, but you can view the full file.
#map0 = affine_map<(d0) -> (0)>
#map1 = affine_map<(d0) -> (d0)>
#map2 = affine_map<(d0) -> ()>
#map3 = affine_map<() -> ()>
#map4 = affine_map<(d0, d1) -> ()>
#map5 = affine_map<(d0, d1) -> (d0, d1)>
#map6 = affine_map<(d0, d1) -> (d0, 0)>
#map7 = affine_map<(d0, d1) -> (0, d1)>
#map8 = affine_map<(d0, d1) -> (d1, d0)>
#map9 = affine_map<(d0, d1) -> (d1)>
stable_diff_f16_elided.mlir:1730:12: error: failed to legalize operation 'vector.transfer_read' that was explicitly marked illegal
%261 = linalg.generic {indexing_maps = [#map16, #map16, #map16], iterator_types = ["parallel", "parallel", "parallel"]} ins(%expanded_770, %209 : tensor<2x4096x320xf16>, tensor<2x4096x320xf16>) outs(%156 : tensor<2x4096x320xf16>) {
^
stable_diff_f16_elided.mlir:25:3: note: called from
func.func @forward(%arg0: tensor<2x4x64x64xf16>, %arg1: tensor<1xf16>, %arg2: tensor<2x77x768xf16>) -> tensor<2x4x64x64xf16> {
^
stable_diff_f16_elided.mlir:1730:12: note: see current operation: %710 = "vector.transfer_read"(%58, %709, %45) {in_bounds = [true], operand_segment_sizes = array<i32: 1, 1, 1, 0>, permutation_map = affine_map<(d0) -> (d0)>} : (memref<320xf16>, index, f16) -> vector<16xf16> loc(callsite("stable_diff_f16_elided.mlir":1730:12 at "stable_diff_f16_elided.mlir":25:3))
%261 = linalg.generic {indexing_maps = [#map16, #map16, #map16], iterator_types = ["paral
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d1, d0)>
#map2 = affine_map<(d0, d1) -> (d1)>
#map3 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module {
func.func @forward(%arg0: tensor<2x4096x320xf16>, %arg1: tensor<2x4096x320xf16>) -> tensor<2x4096x320xf16> {
%cst = arith.constant 0.000000e+00 : f16
%cst_0 = arith.constant 0.000000e+00 : f16
%0 = tensor.empty() : tensor<2x4096x320xf16>
%1 = linalg.fill ins(%cst : f16) outs(%0 : tensor<2x4096x320xf16>) -> tensor<2x4096x320xf16>
This file has been truncated, but you can view the full file.
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
func.func @forward(%arg0: !torch.vtensor<[2,4,64,64],f16> loc(unknown), %arg1: !torch.vtensor<[],si64> loc(unknown), %arg2: !torch.vtensor<[2,77,768],f16> loc(unknown)) -> !torch.vtensor<[2,4,64,64],f16> {
%int64 = torch.constant.int 64 loc(#loc1)
%int320 = torch.constant.int 320 loc(#loc1)
%int2 = torch.constant.int 2 loc(#loc1)
%int40960 = torch.constant.int 40960 loc(#loc1)
%int4096 = torch.constant.int 4096 loc(#loc1)
%int10 = torch.constant.int 10 loc(#loc1)
%int32 = torch.constant.int 32 loc(#loc1)