graph():
    %arg0_1 : [#users=1] = placeholder[target=arg0_1]
    %arg1_1 : [#users=1] = placeholder[target=arg1_1]
    %arg2_1 : [#users=52] = placeholder[target=arg2_1]
    %arg3_1 : [#users=1] = placeholder[target=arg3_1]
    %expand : [#users=1] = call_function[target=torch.ops.aten.expand](args = (%arg1_1, [2]), kwargs = {})
    %arange : [#users=1] = call_function[target=torch.ops.aten.arange](args = (0, 128), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
    %mul : [#users=1] = call_function[target=torch.ops.aten.mul](args = (%arange, -9.210340371976184), kwargs = {})
    %div : [#users=1] = call_function[target=torch.ops.aten.div](args = (%mul, 128), kwargs = {})
    %exp : [#users=1] = call_function[target=torch.ops.aten.exp](args = (%div,), kwargs = {})
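The constant -9.210340371976184 in the trace above is -ln(10000), so this prefix of the graph is the standard sinusoidal timestep-embedding frequency computation, with the scalar timestep (%arg1_1) broadcast to a batch of 2 by aten.expand. A minimal PyTorch sketch of the same ops, assuming half_dim = 128 (variable names are illustrative, not taken from the trace):

import math
import torch

half_dim = 128
# %arange -> %mul -> %div -> %exp: exp(arange(0, 128) * -ln(10000) / 128)
freqs = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -math.log(10000.0) / half_dim)
# %expand: broadcast the scalar timestep to the batch dimension
timestep = torch.tensor([1.0])
t = timestep.expand(2)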
# Lint as: python3
"""SHARK Importer"""
import sys
import tempfile
import os
# List of the supported frontends.
supported_frontends = {
    "tensorflow",
#map = affine_map<(d0, d1, d2, d3) -> (d1)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map2 = affine_map<(d0, d1, d2, d3) -> (0, d1, d2, d3)>
#map3 = affine_map<(d0, d1, d2, d3) -> (d0, d1, 0, 0)>
#map4 = affine_map<(d0, d1, d2, d3) -> (0, d1, 0, 0)>
#map5 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map6 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map7 = affine_map<(d0, d1) -> (d0, d1)>
#map8 = affine_map<(d0, d1) -> (d1, d0)>
#map9 = affine_map<(d0, d1, d2) -> (0, d1, d2)>
// -----// IR Dump Before IREEImportPublic (iree-import-public) //----- //
#loc2 = loc("/home/prashant/test.mlir":3:22)
#loc3 = loc("/home/prashant/test.mlir":3:53)
#loc10 = loc("/home/prashant/test.mlir":10:10)
#loc11 = loc("/home/prashant/test.mlir":10:20)
#loc15 = loc("/home/prashant/test.mlir":15:10)
#loc16 = loc("/home/prashant/test.mlir":15:20)
#loc17 = loc("/home/prashant/test.mlir":15:32)
#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module attributes {torch.debug_module_name = "_lambda"} {
#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
module attributes {torch.debug_module_name = "_lambda"} {
func.func @forward(%arg0: tensor<10x4096x64xf16>, %arg1: tensor<10x64x4096xf16>) -> tensor<10x4096x4096xf16> {
%cst = arith.constant 0.000000e+00 : f16
%cst_0 = arith.constant 1.250000e-01 : f16
%0 = tensor.empty() : tensor<10x4096x4096xf16>
%1 = linalg.fill ins(%cst : f16) outs(%0 : tensor<10x4096x4096xf16>) -> tensor<10x4096x4096xf16>
%2 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<10x4096x64xf16>, tensor<10x64x4096xf16>) outs(%1 : tensor<10x4096x4096xf16>) -> tensor<10x4096x4096xf16>
%3 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel"]} ins(%2 : tensor<10x4096x4096xf16>) outs(%0 : tensor<10x4096x4096xf16>) {
^bb0(%in: f16, %out: f16):
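The forward function above performs a half-precision batched matmul over [10, 4096, 64] x [10, 64, 4096] operands; the 1.250000e-01 constant is presumably applied by the truncated linalg.generic as a scale, i.e. attention-score scaling with head_dim = 64. A rough PyTorch equivalent under that reading (float32 here so the sketch runs anywhere; the MLIR uses f16):

import torch

q = torch.randn(10, 4096, 64)
k = torch.randn(10, 64, 4096)
# linalg.fill + linalg.batch_matmul, then the elementwise 0.125 scale (0.125 == 1/sqrt(64))
scores = torch.bmm(q, k) * 0.125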
// -----// IR Dump After ConvertToSPIRV Failed (iree-convert-to-spirv) //----- //
module attributes {spirv.target_env = #spirv.target_env<#spirv.vce<v1.6, [Shader, Float64, Float16, Int64, Int16, Int8, StorageBuffer16BitAccess, StorageUniform16, StoragePushConstant16, StorageBuffer8BitAccess, UniformAndStorageBuffer8BitAccess, StoragePushConstant8, GroupNonUniform, GroupNonUniformVote, GroupNonUniformArithmetic, GroupNonUniformBallot, GroupNonUniformShuffle, GroupNonUniformShuffleRelative, GroupNonUniformClustered, GroupNonUniformQuad, VariablePointers, VariablePointersStorageBuffer], [SPV_KHR_16bit_storage, SPV_KHR_8bit_storage, SPV_KHR_storage_buffer_storage_class, SPV_KHR_variable_pointers]>, api=Vulkan, AMD:DiscreteGPU, #spirv.resource_limits<max_compute_shared_memory_size = 65536, max_compute_workgroup_invocations = 1024, max_compute_workgroup_size = [1024, 1024, 1024], subgroup_size = 64, min_subgroup_size = 32, max_subgroup_size = 64, cooperative_matrix_properties_nv = []>>} {
spirv.GlobalVariable @_
#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d1, 0)>
module attributes {torch.debug_module_name = "_lambda"} {
func.func @forward(%arg0: tensor<10x9216x9216xf16>, %arg1: tensor<1xf16>, %arg2: tensor<2x77x1024xf16>, %arg3: tensor<f32>) -> tensor<10x9216x9216xf16> {
%cst = arith.constant 0.000000e+00 : f16
%0 = tensor.empty() : tensor<10x9216x9216xf16>
%1 = tensor.empty() : tensor<10x9216x1xf16>
%2 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel"]} ins(%arg0 : tensor<10x9216x9216xf16>) outs(%0 : tensor<10x9216x9216xf16>) {
^bb0(%in: f16, %out: f16):
%6 = math.exp %in : f16
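The [10, 9216, 1] temporary together with the elementwise math.exp suggests, though the dump is truncated here, a softmax over the last dimension of the [10, 9216, 9216] tensor, with #map1 broadcasting the reduced sum back. A hedged PyTorch sketch of that reading (shapes shrunk so it is cheap to run; the dump uses 10x9216x9216 f16):

import torch

x = torch.randn(2, 16, 16)
e = torch.exp(x)                 # the math.exp generic (#map -> #map)
s = e.sum(dim=-1, keepdim=True)  # reduction into the 10x9216x1-style temporary
y = e / s                        # broadcast divide via #map1: (d0, d1, d2) -> (d0, d1, 0)
assert torch.allclose(y, torch.softmax(x, dim=-1), atol=1e-6)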
module attributes {torch.debug_module_name = "_lambda"} {
func.func private @__torch__.torch.fx.graph_module._lambda.forward(%arg0: !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda">, %arg1: !torch.tensor {torch.type_bound = !torch.vtensor<[1,77],si64>}) -> !torch.tensor {
%199 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,77],si64>
%200 = torch.prim.GetAttr %arg0["_param_constant195"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.tensor
%201 = torch.prim.GetAttr %arg0["_param_constant194"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.tensor
%202 = torch.prim.GetAttr %arg0["_param_constant193"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.tensor
%203 = torch.prim.GetAttr %arg0["_param_constant192"] : !torch.nn.Module<"__torch__.torch.fx.graph_module._lambda"> -> !torch.tensor
%204 = torch.prim.GetAttr %arg0["_param_constant191"] : !torch.nn.Module<"__torch__.torch.fx.g
func.func @forward(%arg0: !torch.vtensor<[1,77],si64>) -> !torch.vtensor<[1,77,1024],f16> {
%0 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f64>) : !torch.vtensor<[],f64>
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x77xsi64>) : !torch.vtensor<[1,77],si64>
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<49408x1024xf16>) : !torch.vtensor<[49408,1024],f16>
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<77x1024xf16>) : !torch.vtensor<[77,1024],f16>
%4 = torch.vtensor.literal(dense<-6.550400e+04> : tensor<f32>) : !torch.vtensor<[],f32>
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf16>) : !torch.vtensor<[1024,1024],f16>
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf16>) : !torch.vtensor<[4096,1024],f16>
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf16>) : !torch.vtensor<[4096],f16>
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf16>) :