
#loc = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: !torch.vtensor<[1,128],si64> loc(unknown)) -> !torch.vtensor<[1,2],f32> {
    %float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc1)
    %int0 = torch.constant.int 0 loc(#loc2)
    %int1 = torch.constant.int 1 loc(#loc3)
    %int-1 = torch.constant.int -1 loc(#loc4)
    %true = torch.constant.bool true loc(#loc5)
    %none = torch.constant.none loc(#loc)
    %false = torch.constant.bool false loc(#loc)
fx_g.graph:
graph():
    %arg0_1 : [#users=1] = placeholder[target=arg0_1]
    %view : [#users=1] = call_function[target=torch.ops.aten.view.default](args = (%arg0_1, [-1, 128]), kwargs = {})
    %arange : [#users=1] = call_function[target=torch.ops.aten.arange.start](args = (0, 128), kwargs = {dtype: torch.int64, device: cpu, pin_memory: False})
    %unsqueeze : [#users=1] = call_function[target=torch.ops.aten.unsqueeze.default](args = (%arange, 0), kwargs = {})
    %view_1 : [#users=1] = call_function[target=torch.ops.aten.view.default](args = (%unsqueeze, [-1, 128]), kwargs = {})
    %_param_constant0 : [#users=1] = get_attr[target=_param_constant0]
    %embedding : [#users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%_param_constant0, %view), kwargs = {})
    %_param_constant1 : [#users=1] = get_attr[target=_param_constant1]
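A graph in this fx_g.graph form can be reproduced with make_fx. A minimal sketch covering only the view/arange/unsqueeze/view prefix of the trace above (the full _lambda model, with its embedding parameters, is not part of this gist):

import torch
from torch.fx.experimental.proxy_tensor import make_fx

# Sketch: traces the same view/arange/unsqueeze/view prefix that opens the
# dump above; the get_attr/embedding nodes need the real model's parameters.
def prefix(input_ids):
    ids = input_ids.view(-1, 128)
    pos = torch.arange(0, 128, dtype=torch.int64).unsqueeze(0).view(-1, 128)
    return ids, pos

fx_g = make_fx(prefix)(torch.zeros(1, 128, dtype=torch.int64))
print(fx_g.graph)  # prints placeholder/call_function nodes in the format above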
#loc = loc(unknown)
module attributes {torch.debug_module_name = "SliceNegativeIndexStaticModule"} {
  func.func @forward(%arg0: !torch.vtensor<[1,128,2],f32> loc(unknown)) -> !torch.vtensor<[1,0,2],f32> {
    %int1 = torch.constant.int 1 loc(#loc1)
    %int0 = torch.constant.int 0 loc(#loc2)
    %int-1 = torch.constant.int -1 loc(#loc3)
    %0 = torch.aten.slice.Tensor %arg0, %int1, %int-1, %int0, %int1 : !torch.vtensor<[1,128,2],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,0,2],f32> loc(#loc4)
    return %0 : !torch.vtensor<[1,0,2],f32> loc(#loc)
  } loc(#loc)
} loc(#loc)
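A module of this shape is easy to reproduce in PyTorch; a minimal sketch (the e2e-test source itself is assumed, but the slice parameters are taken straight from the IR):

import torch

# Slice dim 1 from start=-1 to end=0 with step 1: an empty range,
# which is why the result has static shape [1, 0, 2].
class SliceNegativeIndexStaticModule(torch.nn.Module):
    def forward(self, x):
        return x[:, -1:0, :]

m = SliceNegativeIndexStaticModule()
print(m(torch.rand(1, 128, 2)).shape)  # torch.Size([1, 0, 2])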
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: tensor<1x128xi64>) -> tensor<1x2xf32> {
    %0 = "tosa.const"() {value = dense_resource<__elided__> : tensor<2x768xf32>} : () -> tensor<2x768xf32>
    %1 = "tosa.const"() {value = dense_resource<__elided__> : tensor<3072x768xf32>} : () -> tensor<3072x768xf32>
    %2 = "tosa.const"() {value = dense_resource<__elided__> : tensor<768x3072xf32>} : () -> tensor<768x3072xf32>
    %3 = "tosa.const"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> tensor<3072xf32>
    %4 = "tosa.const"() {value = dense_resource<__elided__> : tensor<768x768xf32>} : () -> tensor<768x768xf32>
    %5 = "tosa.const"() {value = dense<-3.40282347E+38> : tensor<f32>} : () -> tensor<f32>
    %6 = "tosa.const"() {value = dense_resource<__elided__> : tensor<1x1x1024x1024xui8>} : () -> tensor<1x1x1024x1024xui8>
    %7 = "tosa.const"() {value = dense<8.000000e+00> : tensor<f32>} : () -> tensor<f32>
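Dumps like the torch-dialect and TOSA modules in this gist can be generated with torch_mlir.compile by picking the output type; a minimal sketch, assuming the torch_mlir Python API of this vintage (Toy is a hypothetical stand-in, since the real _lambda classifier is not included here):

import torch
import torch_mlir

# Hypothetical stand-in for the 1x128 -> 1x2 classifier behind these dumps.
class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.emb = torch.nn.Embedding(1024, 2)
    def forward(self, x):
        return self.emb(x).sum(dim=1)

example = torch.zeros(1, 128, dtype=torch.int64)
# output_type selects the dialect of the printed module: "torch", "tosa", ...
print(torch_mlir.compile(Toy(), example, output_type="tosa"))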
func.func @torch.aten.view(%arg0: !torch.vtensor<[1,128],si64>) -> !torch.vtensor<[1,128],si64> {
  %int-1 = torch.constant.int -1
  %int128 = torch.constant.int 128
  %0 = torch.prim.ListConstruct %int-1, %int128 : (!torch.int, !torch.int) -> !torch.list<int>
  %1 = torch.aten.view %arg0, %0 : !torch.vtensor<[1,128],si64>, !torch.list<int> -> !torch.vtensor<[1,128],si64>
  return %1 : !torch.vtensor<[1,128],si64>
}
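In eager PyTorch this is a shape-preserving view, since -1 resolves to 1 for a [1,128] input:

import torch

x = torch.zeros(1, 128, dtype=torch.int64)
y = x.view(-1, 128)  # -1 infers 1*128/128 = 1, so the shape stays [1, 128]
print(y.shape)       # torch.Size([1, 128])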
module attributes {torch.debug_module_name = "IndexTensorMultiInputContiguousCenter"} {
  func.func @forward(%arg0: !torch.vtensor<[?,?,?,?],f32>, %arg1: !torch.vtensor<[2,2],si64>, %arg2: !torch.vtensor<[2],si64>) -> !torch.vtensor<[?,2,2,?],f32> {
    %none = torch.constant.none
    %0 = torch.prim.ListConstruct %none, %arg1, %arg2, %none : (!torch.none, !torch.vtensor<[2,2],si64>, !torch.vtensor<[2],si64>, !torch.none) -> !torch.list<optional<vtensor>>
    %1 = torch.aten.index.Tensor %arg0, %0 : !torch.vtensor<[?,?,?,?],f32>, !torch.list<optional<vtensor>> -> !torch.vtensor<[?,2,2,?],f32>
    return %1 : !torch.vtensor<[?,2,2,?],f32>
  }
}
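The eager equivalent is advanced indexing with the two index tensors in the contiguous center (the none entries are the untouched outer dims), so the broadcast [2,2] index shape lands in place. A sketch with assumed concrete sizes for the dynamic dims:

import torch

x = torch.rand(3, 4, 5, 6)           # stands in for the [?,?,?,?] input
i1 = torch.tensor([[0, 1], [2, 3]])  # [2,2] index into dim 1
i2 = torch.tensor([0, 4])            # [2] index into dim 2, broadcasts to [2,2]
y = x[:, i1, i2, :]                  # contiguous advanced indices stay in place
print(y.shape)                       # torch.Size([3, 2, 2, 6])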
func.func @torch.prim.NumToTensor.Scalar() -> !torch.vtensor<[],f64> {
  %float8.000000e00 = torch.constant.float 8.000000e+00
  %0 = "torch.prim.NumToTensor.Scalar"(%float8.000000e00) : (!torch.float) -> !torch.vtensor<[],f64>
  return %0 : !torch.vtensor<[],f64>
}
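In eager mode, prim.NumToTensor.Scalar just wraps a Python number in a rank-0 tensor; a Python float is a double, hence the f64 result type:

import torch

# Eager equivalent of the IR above: a Python float becomes a 0-d f64 tensor.
t = torch.tensor(8.0, dtype=torch.float64)
print(t.shape, t.dtype)  # torch.Size([]) torch.float64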
cmake -GNinja -Bbuild \
  -DCMAKE_BUILD_TYPE=Debug \
  -DCMAKE_C_COMPILER=clang \
  -DCMAKE_CXX_COMPILER=clang++ \
  -DPython3_FIND_VIRTUALENV=ONLY \
  -DLLVM_ENABLE_PROJECTS=mlir \
  -DLLVM_EXTERNAL_PROJECTS="torch-mlir;torch-mlir-dialects" \
  -DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR=`pwd` \
  -DLLVM_EXTERNAL_TORCH_MLIR_DIALECTS_SOURCE_DIR=`pwd`/externals/llvm-external-projects/torch-mlir-dialects \
  -DMLIR_ENABLE_BINDINGS_PYTHON=ON \
  externals/llvm-project/llvm
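The trailing positional argument is the in-tree LLVM source directory, per torch-mlir's in-tree build instructions; once configured, the build itself runs with cmake --build build.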
import torch

t = torch.tensor([
    [1, 2, 3, 4, 5],
    [6, 7, 8, 9, 10],
    [11, 12, 13, 14, 15],
    [16, 17, 18, 19, 20],
])  # shape 4x5
i = torch.tensor([
    [1, 2, 3],
    [3, 2, 1],  # second row assumed; the original snippet breaks off here
])
print(t[i].shape)  # rows of t gathered by i -> torch.Size([2, 3, 5])
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: tensor<1x128xi64>) -> tensor<1x2xf32> {
    %0 = "tosa.const"() {value = dense_resource<__elided__> : tensor<2x768xf32>} : () -> tensor<2x768xf32>
    %1 = "tosa.const"() {value = dense_resource<__elided__> : tensor<768xf32>} : () -> tensor<768xf32>
    %2 = "tosa.const"() {value = dense_resource<__elided__> : tensor<3072x768xf32>} : () -> tensor<3072x768xf32>
    %3 = "tosa.const"() {value = dense_resource<__elided__> : tensor<768x3072xf32>} : () -> tensor<768x3072xf32>
    %4 = "tosa.const"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> tensor<3072xf32>
    %5 = "tosa.const"() {value = dense_resource<__elided__> : tensor<768x768xf32>} : () -> tensor<768x768xf32>
    %6 = "tosa.const"() {value = dense_resource<__elided__> : tensor<768x2304xf32>} : () -> tensor<768x2304xf32>
    %7 = "tosa.const"() {value = dense_resource<__elided__> : tensor<2304xf32>} : () -> tensor<2304xf32>