Created
October 30, 2024 11:19
-
-
Save pashu123/de5999098193867650c1d99fe3a776df to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// NOTE(review): this is a compiler IR dump (auto-generated), scraped from a web page.
// The trailing " | |" on each line and the line break inside the hex literal below
// are scrape artifacts of the page table, not part of the IR.
// -----// IR Dump After ConvertTorchOnnxToTorch (convert-torch-onnx-to-torch) //----- // | |
// Lowered ONNX graph: input [1,64,88,88] f32 -> output [?,256,88,88] f32.
// Pipeline visible below: 1x1 conv (64->256) -> mean over dims {2,3} -> reshape [1,1,-1]
// -> 1-D conv (kernel 5, padding 2) -> sigmoid -> reshape [1,-1,1,1] -> broadcast to conv shape.
// NOTE(review): this matches the shape pattern of ECA-style channel attention, but the
// final elementwise multiply with the conv output is absent here — confirm against the model.
func.func @torch_jit(%arg0: !torch.vtensor<[1,64,88,88],f32>) -> !torch.vtensor<[?,256,88,88],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 21 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} { | |
// %0: 1-D conv weight, shape [1,1,5] (5 explicit f32 values).
%0 = torch.vtensor.literal(dense<[[[-0.162511453, 0.196854442, -0.89627254, 0.699266493, 0.930536746]]]> : tensor<1x1x5xf32>) : !torch.vtensor<[1,1,5],f32> | |
// %1: 2-D conv weight, shape [256,64,1,1], all zeros (splat).
%1 = torch.vtensor.literal(dense<0.000000e+00> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32> | |
// %2: 2-D conv bias, 256 f32 values as a raw little-endian hex blob.
// NOTE(review): the literal is split across two lines by the scrape; in valid MLIR it is
// one contiguous string — rejoin before feeding this dump to a parser.
%2 = torch.vtensor.literal(dense<"0x4684C23E7C2E86BF686622BF042F8D3F2FF0803E7CC5683F8810C9BFB750363FCE7E0DC07BCD63C0878B86BF633820BF449DBA3F88D3B0BE83C9FA3FF5A6ECBF078D44401E80D2BE524F17C01BB8823DE069453DA60F85404569F3BD599F823F0DFAA1C0CD896EBF3C98434084EF7C408067A63E4088613EAE5AA93E22BF5D3F50D6083E75EE51BF00E69BBF478287403E5C6E3FB6B11340D392C4BE4C439B3E3A22B13FCC3B4F3F34F554BD6F73203F622385BFF4BBC23E58A4983DC93B0E40D059D8BFA27736BF30514D3E80A12CBFD71399BFE09D9A3F8C7B29C0D86488C01CD2EE3FD404C1BF949A5540408530C034367CBE3086253F40EF6EBE202BA23E56F8443F7AE95F3F9CEF723F3A468D3FB6217FC02C1A48C0C728A9BFCBFE4D3E333F03404D5D71BF644238403AEC31BF22CD85BEB0879AC07018D6BF8CEC1B406EE23EBFB5EDEDBF689774BE874BDBBFE8A942401C5227407F8CB13E8A85BA3F82E0D13FB41B943EA641AE3E805744BCF0C4CC3E488A36BFC027033F001654BF643085C0B18ECDBFC6231BC09C0B0A40FA7171BFEC112440D200A8BEC4106C409F0DBC3FC5341440112F80BF8623CABF94CC6DC03386AABF04C270407CF21640288E3E3F10F5D23FB0FE7C3E842E50BE733BBE3F4283D73F4A3E53BF54B84740448AF53FC3452B4088333040D2D6B33FD95698BF20EC9E3FC1313DBD269CA2BFC0D9FA3CD01A08BEC92BB9BEF404273F3EDDB43F6311DDBF225DA23F66FFBEBF010A9DC0AC149B3DB8DA3B3F60121540149E283F6F9542409E9ED73E7C765DBF6C8B0BBF666E1DBF64F775C0D3F9423F05F1EA3E5856DA3FBBE84EC0BE8DD93F34411840C51F02BF516DFF3F687FC93FBC4131400B051540D7830D4044B08E40BE4090BF02B4E0BDB68EB9BE81BF6440A097213E54E8BE3FEC62EC3EA04BA3BC7D2F833F8E3AE7BFA1DB4FC0268D46C09C3B293FE16E913FC665C6BE741ECB3D43AE3A3FD021823F70A2FE3FFF91E63FC9628F3F97B4D93FEB53A03EE5D8CABF994723C00861493DA29CC03E86F9A83EB84ED83FAF5018BF75B3094078317140B1689D3F1F0D11BF3EAD90BF6D4201BE20B3A23F20922EBE6A6FD83FC86CE4BFC88BFEBC1099593DB610F2BE4DDF0A40B4B77E3FBE73713F18291BBFD65C713F6B94943E71611D3F009A7E3CC8B8FCBFE24EB13F14C3224010B8DC3F3331A63F7A02D03F1B948F3F7890C9BFC80750BFCC347FBF0B3699BFC485484020B1883FAB7ED33F009E13C0B861FABE0354EEBF698EEBBFDA712CC0AD5761C0E88BB0C0CC440DC07DAE933FF02EE1BEE81EC7BE5D3CA53E3E52C63E5309E53F20D6704096DD313F731B2340BAF00E3FB6ACB6BFA7908FBFA0EF
DDBEDEBF7FBF2FC39940B87FC03EAC203FBDF554813FC80C95BE29DDC4BF3E5A603FD0770E3D804DFA3C"> : tensor<256xf32>) : !torch.vtensor<[256],f32> | |
// %3: target shape [1, 1, -1] for the first Reshape; %4: target shape [1, -1, 1, 1] for the second.
%3 = torch.vtensor.literal(dense<[1, 1, -1]> : tensor<3xsi64>) : !torch.vtensor<[3],si64> | |
%4 = torch.vtensor.literal(dense<[1, -1, 1, 1]> : tensor<4xsi64>) : !torch.vtensor<[4],si64> | |
// --- 2-D convolution: 1x1 kernel, stride 1, padding 0, dilation 1, groups 1 ---
// 64 -> 256 channels; spatial dims unchanged (88x88).
%int0 = torch.constant.int 0 | |
%int0_0 = torch.constant.int 0 | |
%5 = torch.prim.ListConstruct %int0, %int0_0 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1 = torch.constant.int 1 | |
%int1_1 = torch.constant.int 1 | |
%int1_2 = torch.constant.int 1 | |
%int1_3 = torch.constant.int 1 | |
%int0_4 = torch.constant.int 0 | |
%6 = torch.prim.ListConstruct %int1, %int1_1 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7 = torch.prim.ListConstruct %int1_2, %int1_3 : (!torch.int, !torch.int) -> !torch.list<int> | |
%8 = torch.prim.ListConstruct %int0_4, %int0_4 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false = torch.constant.bool false | |
%int1_5 = torch.constant.int 1 | |
// args: input, weight %1, bias %2, stride %7, padding %5, dilation %6, transposed=false, output_padding %8, groups=1.
%9 = torch.aten.convolution %arg0, %1, %2, %7, %5, %6, %false, %8, %int1_5 : !torch.vtensor<[1,64,88,88],f32>, !torch.vtensor<[256,64,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,88,88],f32> | |
// --- Mean over dims {2,3}, keepdim=false -> [1,256] (global average pool over H,W) ---
%10 = torch.vtensor.literal(dense<[2, 3]> : tensor<2xsi64>) : !torch.vtensor<[2],si64> | |
%int0_6 = torch.constant.int 0 | |
%int0_7 = torch.constant.int 0 | |
%11 = torch.aten.select.int %10, %int0_6, %int0_7 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> | |
%12 = torch.aten.item %11 : !torch.vtensor<[1],si64> -> !torch.int | |
%int1_8 = torch.constant.int 1 | |
%13 = torch.aten.select.int %10, %int0_6, %int1_8 : !torch.vtensor<[2],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> | |
%14 = torch.aten.item %13 : !torch.vtensor<[1],si64> -> !torch.int | |
%15 = torch.prim.ListConstruct %12, %14 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_9 = torch.constant.bool false | |
%none = torch.constant.none | |
%16 = torch.aten.mean.dim %9, %15, %false_9, %none : !torch.vtensor<[1,256,88,88],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,256],f32> | |
// --- ONNX Reshape to [1,1,-1], lowered elementwise ---
// For each target-shape entry: if the entry == 0, substitute the input's size along that
// dim (aten.size.int) via aten.where.self; otherwise keep the entry (-1 passes through for
// aten.reshape to infer). Result type is dynamic [?,?,?] because sizes are runtime values.
%int0_10 = torch.constant.int 0 | |
%int0_11 = torch.constant.int 0 | |
%17 = torch.aten.select.int %3, %int0_10, %int0_11 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> | |
%18 = torch.aten.item %17 : !torch.vtensor<[1],si64> -> !torch.int | |
%19 = torch.aten.eq.int %18, %int0_10 : !torch.int, !torch.int -> !torch.bool | |
%20 = torch.aten.Int.bool %19 : !torch.bool -> !torch.int | |
%int0_12 = torch.constant.int 0 | |
%21 = torch.aten.size.int %16, %int0_12 : !torch.vtensor<[1,256],f32>, !torch.int -> !torch.int | |
%22 = torch.prim.NumToTensor.Scalar %20 : !torch.int -> !torch.vtensor<[],i1> | |
%23 = torch.prim.NumToTensor.Scalar %21 : !torch.int -> !torch.vtensor<[],si64> | |
%24 = torch.prim.NumToTensor.Scalar %18 : !torch.int -> !torch.vtensor<[],si64> | |
%25 = torch.aten.where.self %22, %23, %24 : !torch.vtensor<[],i1>, !torch.vtensor<[],si64>, !torch.vtensor<[],si64> -> !torch.vtensor<[],si64> | |
%26 = torch.aten.item %25 : !torch.vtensor<[],si64> -> !torch.int | |
%int1_13 = torch.constant.int 1 | |
%27 = torch.aten.select.int %3, %int0_10, %int1_13 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> | |
%28 = torch.aten.item %27 : !torch.vtensor<[1],si64> -> !torch.int | |
%29 = torch.aten.eq.int %28, %int0_10 : !torch.int, !torch.int -> !torch.bool | |
%30 = torch.aten.Int.bool %29 : !torch.bool -> !torch.int | |
%int1_14 = torch.constant.int 1 | |
%31 = torch.aten.size.int %16, %int1_14 : !torch.vtensor<[1,256],f32>, !torch.int -> !torch.int | |
%32 = torch.prim.NumToTensor.Scalar %30 : !torch.int -> !torch.vtensor<[],i1> | |
%33 = torch.prim.NumToTensor.Scalar %31 : !torch.int -> !torch.vtensor<[],si64> | |
%34 = torch.prim.NumToTensor.Scalar %28 : !torch.int -> !torch.vtensor<[],si64> | |
%35 = torch.aten.where.self %32, %33, %34 : !torch.vtensor<[],i1>, !torch.vtensor<[],si64>, !torch.vtensor<[],si64> -> !torch.vtensor<[],si64> | |
%36 = torch.aten.item %35 : !torch.vtensor<[],si64> -> !torch.int | |
%int2 = torch.constant.int 2 | |
%37 = torch.aten.select.int %3, %int0_10, %int2 : !torch.vtensor<[3],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> | |
%38 = torch.aten.item %37 : !torch.vtensor<[1],si64> -> !torch.int | |
%39 = torch.aten.eq.int %38, %int0_10 : !torch.int, !torch.int -> !torch.bool | |
%40 = torch.aten.Int.bool %39 : !torch.bool -> !torch.int | |
%41 = torch.prim.ListConstruct %26, %36, %38 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%42 = torch.aten.reshape %16, %41 : !torch.vtensor<[1,256],f32>, !torch.list<int> -> !torch.vtensor<[?,?,?],f32> | |
// --- 1-D convolution: kernel 5, stride 1, padding 2, dilation 1, groups 1, no bias ---
// padding 2 with kernel 5 preserves the sequence length; followed by sigmoid.
%int2_15 = torch.constant.int 2 | |
%43 = torch.prim.ListConstruct %int2_15 : (!torch.int) -> !torch.list<int> | |
%int1_16 = torch.constant.int 1 | |
%int1_17 = torch.constant.int 1 | |
%int0_18 = torch.constant.int 0 | |
%44 = torch.prim.ListConstruct %int1_16 : (!torch.int) -> !torch.list<int> | |
%45 = torch.prim.ListConstruct %int1_17 : (!torch.int) -> !torch.list<int> | |
%46 = torch.prim.ListConstruct %int0_18, %int0_18 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_19 = torch.constant.bool false | |
%none_20 = torch.constant.none | |
%int1_21 = torch.constant.int 1 | |
%47 = torch.aten.convolution %42, %0, %none_20, %45, %43, %44, %false_19, %46, %int1_21 : !torch.vtensor<[?,?,?],f32>, !torch.vtensor<[1,1,5],f32>, !torch.none, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[?,1,?],f32> | |
%48 = torch.aten.sigmoid %47 : !torch.vtensor<[?,1,?],f32> -> !torch.vtensor<[?,1,?],f32> | |
// --- ONNX Reshape to [1,-1,1,1]: same elementwise 0-substitution lowering as above,
// applied to the 4 entries of %4 against the sigmoid output %48 ---
%int0_22 = torch.constant.int 0 | |
%int0_23 = torch.constant.int 0 | |
%49 = torch.aten.select.int %4, %int0_22, %int0_23 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> | |
%50 = torch.aten.item %49 : !torch.vtensor<[1],si64> -> !torch.int | |
%51 = torch.aten.eq.int %50, %int0_22 : !torch.int, !torch.int -> !torch.bool | |
%52 = torch.aten.Int.bool %51 : !torch.bool -> !torch.int | |
%int0_24 = torch.constant.int 0 | |
%53 = torch.aten.size.int %48, %int0_24 : !torch.vtensor<[?,1,?],f32>, !torch.int -> !torch.int | |
%54 = torch.prim.NumToTensor.Scalar %52 : !torch.int -> !torch.vtensor<[],i1> | |
%55 = torch.prim.NumToTensor.Scalar %53 : !torch.int -> !torch.vtensor<[],si64> | |
%56 = torch.prim.NumToTensor.Scalar %50 : !torch.int -> !torch.vtensor<[],si64> | |
%57 = torch.aten.where.self %54, %55, %56 : !torch.vtensor<[],i1>, !torch.vtensor<[],si64>, !torch.vtensor<[],si64> -> !torch.vtensor<[],si64> | |
%58 = torch.aten.item %57 : !torch.vtensor<[],si64> -> !torch.int | |
%int1_25 = torch.constant.int 1 | |
%59 = torch.aten.select.int %4, %int0_22, %int1_25 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> | |
%60 = torch.aten.item %59 : !torch.vtensor<[1],si64> -> !torch.int | |
%61 = torch.aten.eq.int %60, %int0_22 : !torch.int, !torch.int -> !torch.bool | |
%62 = torch.aten.Int.bool %61 : !torch.bool -> !torch.int | |
%int1_26 = torch.constant.int 1 | |
%63 = torch.aten.size.int %48, %int1_26 : !torch.vtensor<[?,1,?],f32>, !torch.int -> !torch.int | |
%64 = torch.prim.NumToTensor.Scalar %62 : !torch.int -> !torch.vtensor<[],i1> | |
%65 = torch.prim.NumToTensor.Scalar %63 : !torch.int -> !torch.vtensor<[],si64> | |
%66 = torch.prim.NumToTensor.Scalar %60 : !torch.int -> !torch.vtensor<[],si64> | |
%67 = torch.aten.where.self %64, %65, %66 : !torch.vtensor<[],i1>, !torch.vtensor<[],si64>, !torch.vtensor<[],si64> -> !torch.vtensor<[],si64> | |
%68 = torch.aten.item %67 : !torch.vtensor<[],si64> -> !torch.int | |
%int2_27 = torch.constant.int 2 | |
%69 = torch.aten.select.int %4, %int0_22, %int2_27 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> | |
%70 = torch.aten.item %69 : !torch.vtensor<[1],si64> -> !torch.int | |
%71 = torch.aten.eq.int %70, %int0_22 : !torch.int, !torch.int -> !torch.bool | |
%72 = torch.aten.Int.bool %71 : !torch.bool -> !torch.int | |
%int2_28 = torch.constant.int 2 | |
%73 = torch.aten.size.int %48, %int2_28 : !torch.vtensor<[?,1,?],f32>, !torch.int -> !torch.int | |
%74 = torch.prim.NumToTensor.Scalar %72 : !torch.int -> !torch.vtensor<[],i1> | |
%75 = torch.prim.NumToTensor.Scalar %73 : !torch.int -> !torch.vtensor<[],si64> | |
%76 = torch.prim.NumToTensor.Scalar %70 : !torch.int -> !torch.vtensor<[],si64> | |
%77 = torch.aten.where.self %74, %75, %76 : !torch.vtensor<[],i1>, !torch.vtensor<[],si64>, !torch.vtensor<[],si64> -> !torch.vtensor<[],si64> | |
%78 = torch.aten.item %77 : !torch.vtensor<[],si64> -> !torch.int | |
%int3 = torch.constant.int 3 | |
%79 = torch.aten.select.int %4, %int0_22, %int3 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> | |
%80 = torch.aten.item %79 : !torch.vtensor<[1],si64> -> !torch.int | |
%81 = torch.aten.eq.int %80, %int0_22 : !torch.int, !torch.int -> !torch.bool | |
%82 = torch.aten.Int.bool %81 : !torch.bool -> !torch.int | |
%83 = torch.prim.ListConstruct %58, %68, %78, %80 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%84 = torch.aten.reshape %48, %83 : !torch.vtensor<[?,1,?],f32>, !torch.list<int> -> !torch.vtensor<[?,?,?,?],f32> | |
// --- ONNX Expand lowering ---
// Target shape = shape of the conv output %9 ([1,256,88,88]). For each of the 4 dims,
// assert input_dim >= target_dim (Expand precondition when the input dim is not
// statically 1), then broadcast with -1 in every position (keep/expand to target).
%85 = torch.aten._shape_as_tensor %9 : !torch.vtensor<[1,256,88,88],f32> -> !torch.vtensor<[4],si64> | |
%int0_29 = torch.constant.int 0 | |
%int0_30 = torch.constant.int 0 | |
%86 = torch.aten.select.int %85, %int0_29, %int0_30 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[],si64> | |
%87 = torch.aten.item %86 : !torch.vtensor<[],si64> -> !torch.int | |
%int-1 = torch.constant.int -1 | |
%int0_31 = torch.constant.int 0 | |
%88 = torch.aten.size.int %84, %int0_31 : !torch.vtensor<[?,?,?,?],f32>, !torch.int -> !torch.int | |
%89 = torch.aten.ge.int %88, %87 : !torch.int, !torch.int -> !torch.bool | |
torch.runtime.assert %89, "onnx.Expand input has a dim that is not statically 1; expected this dim >= dim provided shape." | |
%int1_32 = torch.constant.int 1 | |
%90 = torch.aten.select.int %85, %int0_29, %int1_32 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[],si64> | |
%91 = torch.aten.item %90 : !torch.vtensor<[],si64> -> !torch.int | |
%int-1_33 = torch.constant.int -1 | |
%int1_34 = torch.constant.int 1 | |
%92 = torch.aten.size.int %84, %int1_34 : !torch.vtensor<[?,?,?,?],f32>, !torch.int -> !torch.int | |
%93 = torch.aten.ge.int %92, %91 : !torch.int, !torch.int -> !torch.bool | |
torch.runtime.assert %93, "onnx.Expand input has a dim that is not statically 1; expected this dim >= dim provided shape." | |
%int2_35 = torch.constant.int 2 | |
%94 = torch.aten.select.int %85, %int0_29, %int2_35 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[],si64> | |
%95 = torch.aten.item %94 : !torch.vtensor<[],si64> -> !torch.int | |
%int-1_36 = torch.constant.int -1 | |
%int2_37 = torch.constant.int 2 | |
%96 = torch.aten.size.int %84, %int2_37 : !torch.vtensor<[?,?,?,?],f32>, !torch.int -> !torch.int | |
%97 = torch.aten.ge.int %96, %95 : !torch.int, !torch.int -> !torch.bool | |
torch.runtime.assert %97, "onnx.Expand input has a dim that is not statically 1; expected this dim >= dim provided shape." | |
%int3_38 = torch.constant.int 3 | |
%98 = torch.aten.select.int %85, %int0_29, %int3_38 : !torch.vtensor<[4],si64>, !torch.int, !torch.int -> !torch.vtensor<[],si64> | |
%99 = torch.aten.item %98 : !torch.vtensor<[],si64> -> !torch.int | |
%int-1_39 = torch.constant.int -1 | |
%int3_40 = torch.constant.int 3 | |
%100 = torch.aten.size.int %84, %int3_40 : !torch.vtensor<[?,?,?,?],f32>, !torch.int -> !torch.int | |
%101 = torch.aten.ge.int %100, %99 : !torch.int, !torch.int -> !torch.bool | |
torch.runtime.assert %101, "onnx.Expand input has a dim that is not statically 1; expected this dim >= dim provided shape." | |
// NOTE(review): the assert comparisons above run ge(input_dim, target_dim); confirm
// against the torch-mlir Expand lowering that this operand order is the intended check.
%102 = torch.prim.ListConstruct %int-1, %int-1_33, %int-1_36, %int-1_39 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%103 = torch.aten.broadcast_to %84, %102 : !torch.vtensor<[?,?,?,?],f32>, !torch.list<int> -> !torch.vtensor<[?,256,88,88],f32> | |
// Leading dim stays dynamic (?) because it flows through runtime reshape sizes.
return %103 : !torch.vtensor<[?,256,88,88],f32> | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment