

@AmosLewis
AmosLewis / bloom_genshark_debug.txt
Created September 14, 2022 15:25
bloom_genshark_debug.txt
Args: ./../torch-mlir/build/bin/torch-mlir-opt -pass-pipeline=torch-backend-to-linalg-on-tensors-backend-pipeline --debug ./shark_tmp/_lambda.mlir
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementTypeInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemRefLayoutAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ElementsAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TypedAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpAsmOpInterface)
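
For reference, the ./shark_tmp/_lambda.mlir fed to torch-mlir-opt above is a torch-dialect module dumped from Python. A minimal sketch of producing such a file with torch-mlir's Python API (the toy module and the output path below are placeholders, not taken from this log):

# Hypothetical sketch, not from the gist: emit a torch-dialect module and save it
# so it can be lowered separately with torch-mlir-opt, as in the command above.
import torch
import torch_mlir

class Toy(torch.nn.Module):
    def forward(self, x):
        return x + x

# OutputType.TORCH stops at the torch dialect, which is the form the
# torch-backend-to-linalg-on-tensors-backend-pipeline takes as input.
module = torch_mlir.compile(Toy(), torch.ones(2, 3),
                            output_type=torch_mlir.OutputType.TORCH)
with open("module.mlir", "w") as f:
    f.write(str(module))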
@AmosLewis
AmosLewis / debug_print_bloom_op_log.txt
Last active September 14, 2022 17:22
debug print torch_mlir_lockstep_tensor
python tank/bloom_model.py
with torch_mlir_lockstep_tensor.py
......
......
Mismatched elements: 131059 / 131072 (100%)
Max absolute difference: 2.3001497
Max relative difference: 2833.5105
x: array([[[-0.013242, -0.013242, -0.013242, ..., -0.013242, -0.013242,
➜ SHARK git:(bloom) ✗ python tank/bloom_model.py
Some weights of BloomForSequenceClassification were not initialized from the model checkpoint at bigscience/bloom-560m and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
/home/chi/src/ubuntu20/shark/SHARK/shark.venv/lib/python3.10/site-packages/torch/jit/_check.py:181: UserWarning: The TorchScript type system doesn't support instance-level annotations on empty non-base types in `__init__`. Instead, either 1) use a type annotation in the class body, or 2) wrap the type in `torch.jit.Attribute`.
warnings.warn("The TorchScript type system doesn't support "
/home/chi/src/ubuntu20/shark/SHARK/shark.venv/lib/python3.10/site-packages/torch/jit/_trace.py:744: UserWarning: The input to trace is already a ScriptModule, tracing it is a no-op. Returning the object as is.
warnings.warn(
Target triple found:x86_64-linux-gnu
tensor([[ 7.2041, -17.0263]], grad_fn=<Inde
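
The mismatch report above has the shape of numpy.testing output from comparing eager PyTorch against the backend in lockstep. An illustrative, self-contained way to produce the same style of report (the arrays here are made up, not the gist's data):

# Illustrative only: compare a "reference" result with a "candidate" result the
# way a lockstep tensor check does, producing the report format shown above.
import numpy as np

reference = np.zeros((4, 4), dtype=np.float32)   # placeholder eager output
candidate = reference + 0.1                      # placeholder backend output

try:
    np.testing.assert_allclose(candidate, reference, rtol=1e-4, atol=1e-4)
except AssertionError as report:
    # Prints "Mismatched elements", "Max absolute difference",
    # "Max relative difference", and the x/y arrays, as in the log above.
    print(report)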
@AmosLewis
AmosLewis / ReduceSumDimInt_debug.txt
Last active September 14, 2022 23:18
debug ReduceSumDimInt
https://github.com/llvm/torch-mlir/blob/6c1dea1c0ff22efb7119f6453655b8b38b52e506/lib/Conversion/TorchToLinalg/Reduction.cpp#L409
changed to:
result = convertScalarToDtype(rewriter, loc, result, mlir::IntegerType::get(op->getContext(), 64));
➜ SHARK git:(bloom) ✗ python tank/bloom_model.py
Some weights of BloomForSequenceClassification were not initialized from the model checkpoint at bigscience/bloom-560m and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
/home/chi/src/ubuntu20/shark/SHARK/shark.venv/lib/python3.10/site-packages/torch/jit/_check.py:181: UserWarning: The TorchScript type system doesn't support instance-level annotations on empty non-base types in `__init__`. Instead, either 1) use a type annotation in the class body, or 2) wrap the type in `torch.jit.Attribute`.
warnings.warn("The TorchScript type system doesn't support "
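
For context, the conversion being patched appears to live in the aten.sum.dim_IntList lowering in Reduction.cpp. A tiny PyTorch reproducer of the pattern it handles (my own example, not from the gist): summing an integer tensor over a dim yields an int64 result, which is why the scalar is cast to a 64-bit IntegerType above.

# Illustrative only: a dim-wise sum on an integer tensor, the kind of op that
# goes through the ReduceSumDimInt path in the Reduction.cpp file linked above.
import torch

mask = torch.ones(1, 128, dtype=torch.int32)
lengths = torch.sum(mask, dim=-1)   # maps to aten.sum.dim_IntList
print(lengths.dtype)                # torch.int64 -- integer sums promote to i64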
@AmosLewis
AmosLewis / bloom_fp16.py
Last active September 26, 2022 20:03
bloom_fp16.py
import torch
from transformers import AutoModelForSequenceClassification


class HuggingFaceLanguage(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.model = AutoModelForSequenceClassification.from_pretrained(
            "bigscience/bloom-560m",  # The pretrained model.
            num_labels=2,  # The number of output labels--2 for binary classification.
            output_attentions=False,  # Whether the model returns attention weights.
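
The preview above is cut off inside the from_pretrained call. A hedged, standalone sketch of the fp16 step the filename implies (the trailing kwargs and the .half() cast are my assumptions, not visible in the gist):

# Hypothetical sketch: load the same checkpoint and cast it to float16,
# which is presumably the point of bloom_fp16.py. Not the gist's actual code.
import torch
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained(
    "bigscience/bloom-560m", num_labels=2
).half()
print(next(model.parameters()).dtype)   # torch.float16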
@AmosLewis
AmosLewis / tmp.mlir
Created September 22, 2022 01:54
tmp.mlir
func.func @torch.aten.Int.Tensor$zero_rank(%arg0: !torch.vtensor<[],ui8>) -> !torch.int {
  %0 = torch.aten.Int.Tensor %arg0 : !torch.vtensor<[],ui8> -> !torch.int
  return %0 : !torch.int
}
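
On the PyTorch side, the op exercised by this test corresponds to calling int() on a zero-rank tensor. A small illustration (my example, not part of the gist):

# Illustrative only: int() on a zero-rank tensor is what gets imported as
# torch.aten.Int.Tensor in the torch dialect.
import torch

scalar = torch.tensor(7, dtype=torch.uint8)   # zero-rank ui8 tensor, as in the test
value = int(scalar)                           # -> torch.aten.Int.Tensor
print(value)                                  # 7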
@AmosLewis
AmosLewis / gpt2tosa.py
Last active May 15, 2023 20:59
gpt2tosa.py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
def prepare_sentence_tokens(hf_model: str, sentence: str):
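
The preview stops at the helper's signature. A hedged completion (the body below is my guess; only the imports and signature come from this gist, though the gpt2linalg.py gist further down shows the same helper's first line):

# Hypothetical body for the truncated helper above.
def prepare_sentence_tokens(hf_model: str, sentence: str):
    tokenizer = AutoTokenizer.from_pretrained(hf_model)
    return torch.tensor([tokenizer.encode(sentence)])

# Example use:
tokens = prepare_sentence_tokens("gpt2", "The dog barked at the mailman")
print(tokens.shape)   # (1, number_of_tokens)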
@AmosLewis
AmosLewis / bloom_symbolic_graph.txt
Last active September 26, 2022 16:34
bloomsymbolic
# from torch.fx import symbolic_trace
# # Symbolic tracing frontend - captures the semantics of the module
# symbolic_traced : torch.fx.GraphModule = symbolic_trace(fx_g)
# # High-level intermediate representation (IR) - Graph representation
# print(symbolic_traced.graph)
graph():
    %arg0_1 : [#users=2] = placeholder[target=arg0_1]
    %view : [#users=1] = call_function[target=torch.ops.aten.view](args = (%arg0_1, [-1, 128]), kwargs = {})
    %_param_constant0 : [#users=1] = get_attr[target=_param_constant0]
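
The commented-out lines are the standard torch.fx recipe; below is a small self-contained version that prints a graph in the same format (the toy module stands in for the traced BLOOM graph fx_g and is not from the gist):

# Minimal runnable version of the commented-out recipe above.
import torch
from torch.fx import symbolic_trace

class Toy(torch.nn.Module):
    def forward(self, x):
        return torch.ops.aten.view(x, [-1, 128])

symbolic_traced: torch.fx.GraphModule = symbolic_trace(Toy())
print(symbolic_traced.graph)   # graph(): with placeholder / call_function nodes as above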
@AmosLewis
AmosLewis / gpt2linalg.py
Last active September 26, 2022 19:39
gpt2_linalg
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
def prepare_sentence_tokens(hf_model: str, sentence: str):
    tokenizer = AutoTokenizer.from_pretrained(hf_model)
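
With these imports, the usual next step is to wrap the model and ask torch-mlir for a linalg-on-tensors module. A hedged sketch of that step (the wrapper, sentence, and use_tracing flag are my assumptions, not code shown in the gist):

# Hypothetical sketch of the rest of the gpt2linalg flow.
class GPT2Wrapper(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.model = AutoModelForCausalLM.from_pretrained("gpt2", torchscript=True)
        self.model.eval()

    def forward(self, tokens):
        return self.model(tokens)[0]   # logits only

tokens = prepare_sentence_tokens("gpt2", "The dog barked at the mailman")
module = torch_mlir.compile(GPT2Wrapper(), tokens,
                            output_type=torch_mlir.OutputType.LINALG_ON_TENSORS,
                            use_tracing=True)
print(module)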
@AmosLewis
AmosLewis / gpt2tosa_tmp.mlir
Created September 26, 2022 23:23
gpttosatmp
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: !torch.vtensor<[1,5],si64> loc(unknown)) -> !torch.vtensor<[1,5,50257],f32> {
    %int5 = torch.constant.int 5 loc(#loc1)
    %int1 = torch.constant.int 1 loc(#loc2)
    %true = torch.constant.bool true loc(#loc3)
    %float0.000000e00 = torch.constant.float 0.000000e+00 loc(#loc4)
    %0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
    %1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
    %2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
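
The forward signature above takes a !torch.vtensor<[1,5],si64>, i.e. a batch of five token ids, and returns [1,5,50257] logits over the GPT-2 vocabulary. An illustrative way to build a matching input on the PyTorch side (not from the gist):

# Illustrative only: an input matching !torch.vtensor<[1,5],si64> above.
import torch

tokens = torch.randint(0, 50257, (1, 5), dtype=torch.int64)
print(tokens.shape, tokens.dtype)   # torch.Size([1, 5]) torch.int64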