func.func @torch.aten.gather(%arg0: !torch.vtensor<[12,128,512],f32>, %arg1: !torch.vtensor<[1,128,128],si64>) -> !torch.vtensor<[12,128,128],f32> {
%int-1 = torch.constant.int -1
%false = torch.constant.bool false
%0 = torch.aten.gather %arg0, %int-1, %arg1, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[1,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32>
return %0 : !torch.vtensor<[12,128,128],f32>
}
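
For orientation, torch.aten.gather has the semantics of torch.gather: when gathering along the last dim, out[i][j][k] = input[i][j][index[i][j][k]], and the result takes the index tensor's shape. A minimal runnable sketch (the small shapes here are illustrative, not the ones from the IR above):

import torch

# Gather along dim=-1: out[i][j][k] = src[i][j][idx[i][j][k]].
src = torch.arange(24, dtype=torch.float32).reshape(2, 3, 4)
idx = torch.tensor([[[0, 3], [1, 1], [2, 0]],
                    [[3, 3], [0, 2], [1, 0]]])  # shape (2, 3, 2)
out = torch.gather(src, dim=-1, index=idx)
assert out.shape == idx.shape  # result shape follows the index tensor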
This file has been truncated.
➜ SHARK git:(main) ✗ torch-mlir-opt -pass-pipeline='builtin.module(torch-backend-to-tosa-backend-pipeline)' /tmp/_lambda.mlir -mlir-print-ir-after-all -mlir-disable-threading --debug
Args: /home/chi/src/ubuntu20/shark/torch-mlir/build/bin/torch-mlir-opt -pass-pipeline=builtin.module(torch-backend-to-tosa-backend-pipeline) /tmp/_lambda.mlir -mlir-print-ir-after-all -mlir-disable-threading --debug
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementTypeInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemRefLayoutAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ElementsAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TypedAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolOpInterface)
func.func @forward(%arg0: !torch.vtensor<[1,128],si64>) -> !torch.vtensor<[1,128,1],f32> {
%int1 = torch.constant.int 1
%int32 = torch.constant.int 32
%int128 = torch.constant.int 128
%float1.000000e00 = torch.constant.float 1.000000e+00
%0 = torch.vtensor.literal(dense<0.000000e+00> : tensor<2xf32>) : !torch.vtensor<[2],f32>
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2x32xf32>) : !torch.vtensor<[2,32],f32>
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32xf32>) : !torch.vtensor<[32,32],f32>
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x37xf32>) : !torch.vtensor<[32,37],f32>
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<37x32xf32>) : !torch.vtensor<[37,32],f32>
AmosLewis / deberta_tosa.py (last active December 19, 2022): deberta
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
def prepare_sentence_tokens(hf_model: str, sentence: str):
    tokenizer = AutoTokenizer.from_pretrained(hf_model)
    # Tokenize the sentence and add a batch dimension.
    return torch.tensor([tokenizer.encode(sentence)])
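The gist truncates the script at the tokenizer line; the return statement above is the standard SHARK helper body, and what follows is an assumed continuation, not from the gist, using the torch_mlir.compile API as it existed in late 2022 and the checkpoint named in the log below. The make_fx and get_decompositions imports suggest the original traced the model first; this sketch skips that step:

# Hypothetical continuation (assumed; the original script is truncated here).
hf_model = "hf-internal-testing/tiny-random-deberta"
model = AutoModelForSequenceClassification.from_pretrained(hf_model)
model.eval()
test_input = prepare_sentence_tokens(hf_model, "this project is interesting")

# Lower the model to TOSA and print the resulting MLIR module.
module = torch_mlir.compile(model, test_input, output_type="tosa")
print(module)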
➜ SHARK git:(main) ✗ python tank/pytorch/deberta/deberta_tosa.py
Some weights of the model checkpoint at hf-internal-testing/tiny-random-deberta were not used when initializing DebertaForSequenceClassification: ['cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.decoder.weight', 'qa_outputs.bias', 'cls.predictions.transform.LayerNorm.weight', 'qa_outputs.weight', 'cls.predictions.decoder.bias']
- This IS expected if you are initializing DebertaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing DebertaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
template <typename AtenOpT>
class ConvertSelectiveAtenOpToTosaCustom : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    ValueRange operands = adaptor.getOperands();
    // Body elided in the gist; the pattern goes on to rewrite the matched
    // ATen op into a tosa.custom op built from these operands, e.g. via
    // rewriter.replaceOpWithNewOp<tosa::CustomOp>(...).
    return success();
  }
};
(mlir_venv) nod% python -m e2e_testing.main --config=tosa -v
[W TensorShape.cpp:3154] Warning: The use of `x.T` on tensors of dimension other than 2 to reverse their shape is deprecated and it will throw an error in a future release. Consider `x.mT` to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse the dimensions of a tensor. (function operator())
XFAIL - "AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule_basic"
XFAIL - "AdaptiveAvgPool2dNonUnitOutputSizeStaticModule_basic"
XFAIL - "AdaptiveAvgPool2dUnitOutputSizeDynamicModule_basic"
PASS - "AdaptiveAvgPool2dUnitOutputSizeStaticModule_basic"
PASS - "AddCDivModule_basic"
PASS - "AddCMulModule_basic"
XFAIL - "AddIntModule_basic"
XFAIL - "AddSizeIntModule_basic"
func.func @torch.aten.softmax.int$cst_dim(%t: !torch.vtensor<[2,3],f32>) -> !torch.vtensor<[2,3],f32> {
%none = torch.constant.none
%dim = torch.constant.int 1
%ret = torch.aten.softmax.int %t, %dim, %none : !torch.vtensor<[2,3],f32>, !torch.int, !torch.none -> !torch.vtensor<[2,3],f32>
return %ret : !torch.vtensor<[2,3],f32>
}
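
Before TOSA lowering, torch-mlir decomposes aten.softmax.int into max/sub/exp/sum/div steps; the IR after that decomposition appears further below (aten.max.dim, aten.sub.Tensor, aten.exp, ...). A minimal Python sketch of the same numerically stable computation:

import torch

def softmax_decomposed(t: torch.Tensor, dim: int) -> torch.Tensor:
    # Subtract the per-dim max first, mirroring aten.max.dim + aten.sub.Tensor
    # in the decomposed IR; this keeps exp() from overflowing.
    values, _ = torch.max(t, dim=dim, keepdim=True)
    unnorm = torch.exp(t - values)
    return unnorm / torch.sum(unnorm, dim=dim, keepdim=True)

t = torch.randn(2, 3)
assert torch.allclose(softmax_decomposed(t, 1), torch.softmax(t, dim=1))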
(mlir_venv) nod% cmake --build build --target tools/torch-mlir/all
[54/72] Building CXX object tools/torch-mlir/lib/Conversion/TorchToTosa/CMakeFiles/obj.TorchMLIRTorchToTosa.dir/TorchToTosa.cpp.o
FAILED: tools/torch-mlir/lib/Conversion/TorchToTosa/CMakeFiles/obj.TorchMLIRTorchToTosa.dir/TorchToTosa.cpp.o
/usr/lib/ccache/clang++ -DGTEST_HAS_RTTI=0 -DTORCH_MLIR_ENABLE_MHLO -D_DEBUG -D_GNU_SOURCE -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -I/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/lib/Conversion/TorchToTosa -I/home/chi/src/ubuntu20/shark/torch-mlir/lib/Conversion/TorchToTosa -I/home/chi/src/ubuntu20/shark/torch-mlir/build/include -I/home/chi/src/ubuntu20/shark/torch-mlir/externals/llvm-project/llvm/include -I/home/chi/src/ubuntu20/shark/torch-mlir/externals/mlir-hlo/include -I/home/chi/src/ubuntu20/shark/torch-mlir/build/tools/torch-mlir/mlir-hlo/include -I/home/chi/src/ubuntu20/shark/torch-mlir/externals/llvm-project/llvm/../mlir/include -I/home/chi/src/ubuntu
func.func @torch.aten.softmax.int$cst_dim(%arg0: !torch.vtensor<[2,3],f32>) -> !torch.vtensor<[2,3],f32> {
%none = torch.constant.none
%int1 = torch.constant.int 1
%true = torch.constant.bool true
%values, %indices = torch.aten.max.dim %arg0, %int1, %true : !torch.vtensor<[2,3],f32>, !torch.int, !torch.bool -> !torch.vtensor<[2,1],f32>, !torch.vtensor<[2,1],si64>
%float1.000000e00 = torch.constant.float 1.000000e+00
%0 = torch.aten.sub.Tensor %arg0, %values, %float1.000000e00 : !torch.vtensor<[2,3],f32>, !torch.vtensor<[2,1],f32>, !torch.float -> !torch.vtensor<[2,3],f32>
%1 = torch.aten.exp %0 : !torch.vtensor<[2,3],f32> -> !torch.vtensor<[2,3],f32>
%2 = torch.prim.ListConstruct %int1 : (!torch.int) -> !torch.list<int>
%true_0 = torch.constant.bool true