Skip to content

Instantly share code, notes, and snippets.

View pashu123's full-sized avatar
😇
Working from home

Prashant Kumar pashu123

😇
Working from home
View GitHub Profile
@pashu123
pashu123 / minilm_canonicalize.mlir
Created June 22, 2022 13:24
MiniLM module after canonicalization.
module attributes {torch.debug_module_name = "MiniLMSequenceClassification"} {
func.func @forward(%arg0: !torch.vtensor<[1,128],si32>, %arg1: !torch.vtensor<[1,128],si32>, %arg2: !torch.vtensor<[1,128],si32>) -> !torch.vtensor<[1,2],f32> {
%int1 = torch.constant.int 1
%none = torch.constant.none
%true = torch.constant.bool true
%float1.000000e00 = torch.constant.float 1.000000e+00
%int128 = torch.constant.int 128
%int0 = torch.constant.int 0
%int9223372036854775807 = torch.constant.int 9223372036854775807
%float9.999990e-13 = torch.constant.float 9.9999999999999998E-13
This file has been truncated, but you can view the full file.
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d0, 0)>
#map2 = affine_map<(d0, d1) -> ()>
#map3 = affine_map<(d0, d1) -> (d1, d0)>
#map4 = affine_map<(d0, d1) -> (d1)>
#map5 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (0, d1, 0, d3, 0, 0, 0, 0)>
#map6 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d0, d1, d2, d3, d4, d5, d6, d7)>
#map7 = affine_map<(d0, d1, d2, d3) -> (d1)>
#map8 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map9 = affine_map<(d0, d1, d2, d3) -> (d0, d1, 0, 0)>
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_torch_model
import numpy as np

# Fetch the "v_diffusion" Torch model from the SHARK model repository.
# download_torch_model returns the MLIR module, its entry-function name,
# sample inputs, and reference ("golden") outputs.
mlir_model, func_name, inputs, golden_out = download_torch_model("v_diffusion")

# Wrap the downloaded linalg-dialect MLIR in a CPU inference module.
shark_module = SharkInference(mlir_model,
                              func_name,
                              device="cpu",
                              mlir_dialect="linalg")
#!/bin/bash
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
# Simple script that does a CMake configure of this project as an external
# LLVM project so it can be tested in isolation to larger assemblies.
# This is meant for CI's and project maintainers.
This file has been truncated, but you can view the full file.
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d0, 0)>
#map2 = affine_map<(d0, d1) -> ()>
#map3 = affine_map<(d0, d1) -> (d1, d0)>
#map4 = affine_map<(d0, d1) -> (d1)>
#map5 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (0, d1, 0, d3, 0, 0, 0, 0)>
#map6 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7) -> (d0, d1, d2, d3, d4, d5, d6, d7)>
#map7 = affine_map<(d0, d1, d2, d3) -> (d1)>
#map8 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map9 = affine_map<(d0, d1, d2, d3) -> (d0, d1, 0, 0)>
module attributes {torch.debug_module_name = "MaskedFillScalarDefaultModule"} {
// Masked fill with a scalar default: every element of %arg0 whose
// corresponding entry in the boolean mask %arg1 is true is replaced
// with the constant 0.5; all other elements pass through unchanged.
func.func @forward(%arg0: !torch.vtensor<[2,3],f32>, %arg1: !torch.vtensor<[2,3],i1>) -> !torch.vtensor<[2,3],f32> {
// Fill value as a Torch float constant.
%float5.000000e-01 = torch.constant.float 5.000000e-01
%none = torch.constant.none
%false = torch.constant.bool false
// Materialize the scalar 0.5 as a rank-0 tensor (dtype/device = none,
// requires_grad = false) ...
%0 = torch.aten.tensor.float %float5.000000e-01, %none, %none, %false : !torch.float, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[],f32>
// ... then extract it back out as a !torch.float scalar for masked_fill.
%1 = torch.aten.Float.Tensor %0 : !torch.vtensor<[],f32> -> !torch.float
// Elementwise select: where %arg1 is true, use 0.5; otherwise keep %arg0.
%2 = torch.aten.masked_fill.Scalar %arg0, %arg1, %1 : !torch.vtensor<[2,3],f32>, !torch.vtensor<[2,3],i1>, !torch.float -> !torch.vtensor<[2,3],f32>
return %2 : !torch.vtensor<[2,3],f32>
}
from PIL import Image
import requests
from transformers import AutoModelForMaskedLM, AutoTokenizer
import torch
from shark.shark_inference import SharkInference
from shark.shark_importer import SharkImporter
from iree.compiler import tf as tfc
from iree.compiler import compile_str
from iree import runtime as ireert
#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d1)>
#map2 = affine_map<(d0, d1, d2, d3) -> (0, d1, d2, d3)>
#map3 = affine_map<(d0, d1) -> (d1)>
#map4 = affine_map<(d0, d1) -> (d0, d1)>
#map5 = affine_map<(d0, d1) -> (d1, d0)>
module attributes {torch.debug_module_name = "VisionModule"} {
func.func @forward(%arg0: tensor<1x3x224x224xf32>) -> tensor<1x1000xf32> {
%false = arith.constant false
%cst = arith.constant dense_resource<__elided__> : tensor<1000xf32>
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d1, d0)>
#map2 = affine_map<(d0, d1) -> (0, d1)>
#map3 = affine_map<(d0, d1) -> (d1)>
#map4 = affine_map<(d0, d1, d2) -> (d1)>
#map5 = affine_map<(d0, d1, d2) -> (d0)>
#map6 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map7 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map8 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map9 = affine_map<(d0, d1) -> (0, 0)>
#map0 = affine_map<(d0, d1) -> (d0, d1)>
#map1 = affine_map<(d0, d1) -> (d1, d0)>
#map2 = affine_map<(d0, d1) -> (d1)>
#map3 = affine_map<(d0, d1, d2) -> (d1)>
#map4 = affine_map<(d0, d1, d2) -> (d0)>
#map5 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map6 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map7 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map8 = affine_map<(d0, d1) -> (d0, 0)>
#map9 = affine_map<(d0, d1) -> (0)>