// Scratch: torch.aten.gather after conversion to builtin tensor types (fragment).
// Example values for %arg0 (shape 1x4x3):
//   torch.tensor([[[ 1,  2,  3],
//                  [ 4,  5,  6],
//                  [ 7,  8,  9],
//                  [10, 11, 12]]])
module {
  func.func @torch.aten.gather(%arg0: !torch.vtensor<[1,4,3],f32>, %arg1: !torch.vtensor<[1,4,2,3],i32>) -> !torch.vtensor<[1,4,2],f32> {
    %0 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,4,3],f32> -> tensor<1x4x3xf32>
    %1 = torch_c.to_builtin_tensor %arg1 : !torch.vtensor<[1,4,2,3],i32> -> tensor<1x4x2x3xi32>
// torch-dialect test cases for torch.aten.gather:
func.func @torch.aten.gather(%arg0: !torch.vtensor<[1,4,3],f32>, %arg1: !torch.vtensor<[1,4,2,3],si64>) -> !torch.vtensor<[1,4,2],f32> {
  %int-1 = torch.constant.int -1
  %false = torch.constant.bool false
  %0 = torch.aten.gather %arg0, %int-1, %arg1, %false : !torch.vtensor<[1,4,3],f32>, !torch.int, !torch.vtensor<[1,4,2,3],si64>, !torch.bool -> !torch.vtensor<[1,4,2],f32>
  return %0 : !torch.vtensor<[1,4,2],f32>
}

func.func @torch.aten.gather(%arg0: !torch.vtensor<[12,128,512],f32>, %arg1: !torch.vtensor<[1,128,128],si64>) -> !torch.vtensor<[12,128,128],f32> {
  %int-1 = torch.constant.int -1
  %false = torch.constant.bool false
  %0 = torch.aten.gather %arg0, %int-1, %arg1, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[1,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32>
  return %0 : !torch.vtensor<[12,128,128],f32>
}
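For reference, a minimal eager-mode PyTorch sketch of the semantics the first test exercises. The index values below are made up for illustration; note that in eager torch.gather the output shape equals the index shape ([1,4,2] here), while the [1,4,2,3] i32 index in the fragment above appears to be an expanded coordinate form (one i32 per input dimension) of the kind used when lowering toward tosa.gather.

import torch

# Input matches the 1x4x3 example values above; index values are made up.
x = torch.tensor([[[ 1.,  2.,  3.],
                   [ 4.,  5.,  6.],
                   [ 7.,  8.,  9.],
                   [10., 11., 12.]]])          # shape [1, 4, 3]
idx = torch.tensor([[[0, 2],
                     [1, 0],
                     [2, 2],
                     [0, 1]]])                 # shape [1, 4, 2]
# out[b, r, c] = x[b, r, idx[b, r, c]]: gather along the last dimension.
out = torch.gather(x, dim=-1, index=idx)
print(out)  # tensor([[[ 1.,  3.], [ 5.,  4.], [ 9.,  9.], [10., 11.]]])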
➜ SHARK git:(main) ✗ torch-mlir-opt -pass-pipeline='builtin.module(torch-backend-to-tosa-backend-pipeline)' /tmp/_lambda.mlir -mlir-print-ir-after-all -mlir-disable-threading --debug
Args: /home/chi/src/ubuntu20/shark/torch-mlir/build/bin/torch-mlir-opt -pass-pipeline=builtin.module(torch-backend-to-tosa-backend-pipeline) /tmp/_lambda.mlir -mlir-print-ir-after-all -mlir-disable-threading --debug
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementTypeInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemRefLayoutAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ElementsAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TypedAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolOpInterface)
func.func @forward(%arg0: !torch.vtensor<[1,128],si64>) -> !torch.vtensor<[1,128,1],f32> {
  %int1 = torch.constant.int 1
  %int32 = torch.constant.int 32
  %int128 = torch.constant.int 128
  %float1.000000e00 = torch.constant.float 1.000000e+00
  %0 = torch.vtensor.literal(dense<0.000000e+00> : tensor<2xf32>) : !torch.vtensor<[2],f32>
  %1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2x32xf32>) : !torch.vtensor<[2,32],f32>
  %2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32xf32>) : !torch.vtensor<[32,32],f32>
  %3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x37xf32>) : !torch.vtensor<[32,37],f32>
  %4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<37x32xf32>) : !torch.vtensor<[37,32],f32>
AmosLewis / deberta_tosa.py:
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch._decomp import get_decompositions
import tempfile
import torch_mlir
def prepare_sentence_tokens(hf_model: str, sentence: str):
    tokenizer = AutoTokenizer.from_pretrained(hf_model)
    # Presumed continuation (the gist is cut off here): encode to a [1, seq] tensor.
    return torch.tensor([tokenizer.encode(sentence)])
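The gist is truncated here. As a rough sketch of where such a script usually heads (not the gist's exact code; its make_fx and get_decompositions imports suggest the real script traces through FX first): wrap the HF model so tracing sees a plain tensor output, compile to the TOSA backend, and write out the /tmp/_lambda.mlir consumed by the torch-mlir-opt invocation above. The HfWrapper class and the sentence string are assumptions.

class HfWrapper(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, input_ids):
        # Return plain logits so TorchScript tracing sees a single tensor output.
        return self.model(input_ids)[0]

hf_model_name = "hf-internal-testing/tiny-random-deberta"
test_input = prepare_sentence_tokens(hf_model_name, "this project is interesting")
model = HfWrapper(AutoModelForSequenceClassification.from_pretrained(hf_model_name).eval())
module = torch_mlir.compile(model, test_input,
                            output_type=torch_mlir.OutputType.TOSA,
                            use_tracing=True)
with open("/tmp/_lambda.mlir", "w") as f:
    f.write(str(module))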
➜ SHARK git:(main) ✗ python tank/pytorch/deberta/deberta_tosa.py
Some weights of the model checkpoint at hf-internal-testing/tiny-random-deberta were not used when initializing DebertaForSequenceClassification: ['cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.decoder.weight', 'qa_outputs.bias', 'cls.predictions.transform.LayerNorm.weight', 'qa_outputs.weight', 'cls.predictions.decoder.bias']
- This IS expected if you are initializing DebertaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing DebertaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
// Conversion pattern that rewrites whichever ATen op is bound to the AtenOpT
// template parameter into a TOSA custom op rather than a standard TOSA op.
template <typename AtenOpT>
class ConvertSelectiveAtenOpToTosaCustom : public OpConversionPattern<AtenOpT> {
public:
  using OpConversionPattern<AtenOpT>::OpConversionPattern;
  using OpAdaptor = typename AtenOpT::Adaptor;
  LogicalResult
  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Adaptor operands have already been converted to builtin tensor types.
    ValueRange operands = adaptor.getOperands();
(mlir_venv) nod% python -m e2e_testing.main --config=tosa -v
[W TensorShape.cpp:3154] Warning: The use of `x.T` on tensors of dimension other than 2 to reverse their shape is deprecated and it will throw an error in a future release. Consider `x.mT` to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse the dimensions of a tensor. (function operator())
XFAIL - "AdaptiveAvgPool2dNonUnitOutputSizeDynamicModule_basic"
XFAIL - "AdaptiveAvgPool2dNonUnitOutputSizeStaticModule_basic"
XFAIL - "AdaptiveAvgPool2dUnitOutputSizeDynamicModule_basic"
PASS - "AdaptiveAvgPool2dUnitOutputSizeStaticModule_basic"
PASS - "AddCDivModule_basic"
PASS - "AddCMulModule_basic"
XFAIL - "AddIntModule_basic"
XFAIL - "AddSizeIntModule_basic"
func.func @torch.aten.softmax.int$cst_dim(%t: !torch.vtensor<[2,3],f32>) -> !torch.vtensor<[2,3],f32> {
  %none = torch.constant.none
  %dim = torch.constant.int 1
  %ret = torch.aten.softmax.int %t, %dim, %none : !torch.vtensor<[2,3],f32>, !torch.int, !torch.none -> !torch.vtensor<[2,3],f32>
  return %ret : !torch.vtensor<[2,3],f32>
}
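For reference, a quick eager-mode check of the computation this test exercises (softmax over dim 1, dtype left as None). The exp-over-row-sum form below is a plain numerics illustration, not necessarily the exact decomposition the TOSA path emits.

import torch

# Numeric illustration of the softmax test above (dim=1, dtype=None).
t = torch.randn(2, 3)
manual = torch.exp(t) / torch.exp(t).sum(dim=1, keepdim=True)
assert torch.allclose(manual, torch.nn.functional.softmax(t, dim=1), atol=1e-6)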