AmosLewis / ElementwiseAtenWhereSelfModule.mlir
Last active October 6, 2022 17:48
ElementwiseAtenWhereSelfModule.mlir
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "ElementwiseAtenWhereSelfModule"} {
  func.func @forward(%arg0: tensor<1x1x5x5xi1> loc(unknown), %arg1: tensor<1x12x5x5xf32> loc(unknown), %arg2: tensor<?xf32> loc(unknown)) -> tensor<1x12x5x5xf32> {
    %0 = "tosa.select"(%arg0, %arg1, %arg2) : (tensor<1x1x5x5xi1>, tensor<1x12x5x5xf32>, tensor<?xf32>) -> tensor<1x12x5x5xf32> loc(#loc1)
    return %0 : tensor<1x12x5x5xf32> loc(#loc0)
  } loc(#loc0)
} loc(#loc0)
#loc1 = loc("/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/test_suite/elementwise.py":150:15)
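The gist shows only the lowered TOSA IR. A minimal sketch of the kind of PyTorch module that produces it, reconstructed here from the tensor shapes alone (an assumption, not copied from the torch-mlir e2e test suite):

import torch

class ElementwiseAtenWhereSelfModule(torch.nn.Module):
    def forward(self, cond, a, b):
        # torch.where on a boolean condition maps to aten.where.self,
        # which the TOSA backend lowers to the tosa.select seen above.
        return torch.where(cond, a, b)

out = ElementwiseAtenWhereSelfModule()(
    torch.zeros(1, 1, 5, 5, dtype=torch.bool),  # tensor<1x1x5x5xi1>
    torch.rand(1, 12, 5, 5),                    # tensor<1x12x5x5xf32>
    torch.rand(5))                              # tensor<?xf32> (dynamic dim)
print(out.shape)  # torch.Size([1, 12, 5, 5]) after broadcasting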
AmosLewis / ElementwiseWhereSelfModule.mlir
Created October 3, 2022 21:42
ElementwiseWhereSelfModule.mlir
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "ElementwiseWhereSelfModule"} {
  func.func @forward(%arg0: tensor<1x1x1xf32> loc(unknown), %arg1: tensor<1x1xf32> loc(unknown), %arg2: tensor<1xf32> loc(unknown)) -> tensor<1x1x1xf32> {
    %cst = arith.constant dense<5.000000e-01> : tensor<1x1x1xf32> loc(#loc1)
    %0 = "tosa.greater"(%arg0, %cst) : (tensor<1x1x1xf32>, tensor<1x1x1xf32>) -> tensor<1x1x1xi1> loc(#loc1)
    %1 = "tosa.select"(%0, %arg1, %arg2) : (tensor<1x1x1xi1>, tensor<1x1xf32>, tensor<1xf32>) -> tensor<1x1x1xf32> loc(#loc2)
    return %1 : tensor<1x1x1xf32> loc(#loc0)
  } loc(#loc0)
} loc(#loc0)
#loc1 = loc("/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/test_suite/elementwise.py":150:27)
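As with the previous gist, a hedged reconstruction from the shapes and the dense<5.000000e-01> splat constant (an assumption, not the test suite's exact source): here the condition is itself computed by a comparison, which is why a tosa.greater feeds the tosa.select:

import torch

class ElementwiseWhereSelfModule(torch.nn.Module):
    def forward(self, a, b, c):
        # a > 0.5 lowers to tosa.greater against a splat 0.5 constant;
        # torch.where then lowers to tosa.select.
        return torch.where(a > 0.5, b, c)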
AmosLewis / gpttosa_debug.txt
Created September 29, 2022 00:31
gpttosadebug
➜ SHARK git:(gpt) ✗ torch-mlir-opt -pass-pipeline='torch-backend-to-tosa-backend-pipeline' /tmp/_lambda.mlir -mlir-print-ir-after-all -mlir-pretty-debuginfo -mlir-disable-threading
// -----// IR Dump After ConvertTorchToTosa (convert-torch-to-tosa) //----- //
func.func @forward(%arg0: !torch.vtensor<[1,5],si64>) -> !torch.vtensor<[1,5,50257],f32> {
  %0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,5],si64> -> tensor<1x5xi64>
  %int5 = torch.constant.int 5
  %1 = torch_c.to_i64 %int5
  %int1 = torch.constant.int 1
  %2 = torch_c.to_i64 %int1
  %true = torch.constant.bool true
  %float0.000000e00 = torch.constant.float 0.000000e+00
AmosLewis / gpt2tosa_tmp.mlir
Created September 26, 2022 23:23
gpttosatmp
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: !torch.vtensor<[1,5],si64> loc(unknown)) -> !torch.vtensor<[1,5,50257],f32> {
    %int5 = torch.constant.int 5 loc(#loc1)
    %int1 = torch.constant.int 1 loc(#loc2)
    %true = torch.constant.bool true loc(#loc3)
    %float0.000000e00 = torch.constant.float 0.000000e+00 loc(#loc4)
    %0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
    %1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
    %2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
AmosLewis / gpt2linalg.py
Last active September 26, 2022 19:39
gpt2_linalg
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
def prepare_sentence_tokens(hf_model: str, sentence: str):
    tokenizer = AutoTokenizer.from_pretrained(hf_model)
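The preview cuts off inside prepare_sentence_tokens. A hedged sketch of how such a script typically continues, inferred from the imports above (make_fx, get_decompositions, torch_mlir) rather than from the gist's hidden remainder; the model name, prompt, and decomposition list are illustrative assumptions:

import torch
import torch_mlir
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
from transformers import AutoTokenizer, AutoModelForCausalLM

def prepare_sentence_tokens(hf_model: str, sentence: str):
    tokenizer = AutoTokenizer.from_pretrained(hf_model)
    return torch.tensor([tokenizer.encode(sentence)])

model = AutoModelForCausalLM.from_pretrained("gpt2", return_dict=False)
tokens = prepare_sentence_tokens("gpt2", "The quick brown fox")
# Trace to an FX graph, decomposing ops the backend cannot handle directly
# (the decomposition chosen here is only an example).
fx_g = make_fx(
    lambda x: model(x)[0],
    decomposition_table=get_decompositions([torch.ops.aten.embedding_dense_backward]),
)(tokens)
module = torch_mlir.compile(fx_g, tokens,
                            output_type=torch_mlir.OutputType.LINALG_ON_TENSORS)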
AmosLewis / bloom_symbolic_graph.txt
Last active September 26, 2022 16:34
bloomsymbolic
# from torch.fx import symbolic_trace
# # Symbolic tracing frontend - captures the semantics of the module
# symbolic_traced : torch.fx.GraphModule = symbolic_trace(fx_g)
# # High-level intermediate representation (IR) - Graph representation
# print(symbolic_traced.graph)
graph():
    %arg0_1 : [#users=2] = placeholder[target=arg0_1]
    %view : [#users=1] = call_function[target=torch.ops.aten.view](args = (%arg0_1, [-1, 128]), kwargs = {})
    %_param_constant0 : [#users=1] = get_attr[target=_param_constant0]
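The commented-out lines above describe the torch.fx symbolic-tracing workflow. A self-contained toy version (a hypothetical module, not the bloom graph) that prints the same kind of node dump:

import torch
from torch.fx import symbolic_trace

class Toy(torch.nn.Module):
    def forward(self, x):
        return x.view(-1, 128)

# symbolic_trace captures the module's semantics as a torch.fx.GraphModule.
traced: torch.fx.GraphModule = symbolic_trace(Toy())
print(traced.graph)  # placeholder / call_function nodes like the dump above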
AmosLewis / gpt2tosa.py
Last active May 15, 2023 20:59
gpt2tosa.py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
def prepare_sentence_tokens(hf_model: str, sentence: str):
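The preview stops at the helper's signature. A hedged guess at the remainder, continuing from the imports above (the gist itself is truncated here; "gpt2" and the prompt are placeholders): the same tokenizing helper, followed by compilation to the TOSA backend instead of linalg:

model = AutoModelForCausalLM.from_pretrained("gpt2", return_dict=False)
tokens = prepare_sentence_tokens("gpt2", "Hello")
module = torch_mlir.compile(model, tokens,
                            output_type=torch_mlir.OutputType.TOSA)
print(module)  # torch/TOSA IR of the kind shown in the .mlir gists above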
AmosLewis / tmp.mlir
Created September 22, 2022 01:54
tmp.mlir
func.func @torch.aten.Int.Tensor$zero_rank(%arg0: !torch.vtensor<[],ui8>) -> !torch.int {
  %0 = torch.aten.Int.Tensor %arg0 : !torch.vtensor<[],ui8> -> !torch.int
  return %0 : !torch.int
}
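For reference, the PyTorch-level construct this test exercises (the usual mapping, stated here as an assumption): calling int() on a zero-rank tensor emits aten.Int.Tensor.

import torch

x = torch.tensor(7, dtype=torch.uint8)  # zero-rank ui8 tensor, like !torch.vtensor<[],ui8>
print(int(x))  # 7: int() on a 0-d tensor lowers as torch.aten.Int.Tensor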
AmosLewis / bloom_fp16.py
Last active September 26, 2022 20:03
bloom_fp16.py
import torch
from transformers import AutoModelForSequenceClassification
class HuggingFaceLanguage(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.model = AutoModelForSequenceClassification.from_pretrained(
            "bigscience/bloom-560m",  # The pretrained model.
            num_labels=2,  # The number of output labels: 2 for binary classification.
            output_attentions=False,  # Whether the model returns attention weights.
AmosLewis / ReduceSumDimInt_debug.txt
Last active September 14, 2022 23:18
debug ReduceSumDimInt
https://github.com/llvm/torch-mlir/blob/6c1dea1c0ff22efb7119f6453655b8b38b52e506/lib/Conversion/TorchToLinalg/Reduction.cpp#L409
-> changed to:
result = convertScalarToDtype(rewriter, loc, result, mlir::IntegerType::get(op->getContext(), 64));
➜ SHARK git:(bloom) ✗ python tank/bloom_model.py
Some weights of BloomForSequenceClassification were not initialized from the model checkpoint at bigscience/bloom-560m and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
/home/chi/src/ubuntu20/shark/SHARK/shark.venv/lib/python3.10/site-packages/torch/jit/_check.py:181: UserWarning: The TorchScript type system doesn't support instance-level annotations on empty non-base types in `__init__`. Instead, either 1) use a type annotation in the class body, or 2) wrap the type in `torch.jit.Attribute`.
warnings.warn("The TorchScript type system doesn't support "
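A minimal PyTorch-level illustration of why the convertScalarToDtype fix above targets i64 (a reconstruction from the gist title, not the gist's own repro): integer sums are type-promoted to 64-bit, so the lowered reduction's result type must be widened to match.

import torch

t = torch.ones(2, 3, dtype=torch.int32)
s = torch.ops.aten.sum.dim_IntList(t, [1])  # reduce over dim 1
print(s.dtype)  # torch.int64: PyTorch promotes integer sums to 64-bit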