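// Torch-MLIR (torch dialect) IR dumped from the forward pass of a T5-style
// encoder-decoder LM (torch.debug_module_name = "_lambda"); the weight shapes
// (d_model 512, d_ff 2048, 8 heads x 64, vocab 32128) are consistent with t5-small.
// Inputs: %arg0 [1,15] token ids and %arg1 [1,4] (used as labels below);
// returns [1,4,32128] logits.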
#loc = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
func.func @forward(%arg0: !torch.vtensor<[1,15],si64> loc(unknown), %arg1: !torch.vtensor<[1,4],si64> loc(unknown)) -> !torch.vtensor<[1,4,32128],f32> {
%int512 = torch.constant.int 512 loc(#loc1)
%int0 = torch.constant.int 0 loc(#loc2)
%int1 = torch.constant.int 1 loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc4)
%true = torch.constant.bool true loc(#loc5)
%int4 = torch.constant.int 4 loc(#loc6)
%false = torch.constant.bool false loc(#loc7)
%none = torch.constant.none loc(#loc)
%int15 = torch.constant.int 15 loc(#loc8)
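// Model weights follow as elided dense resources; the shapes follow the T5 layout:
// [512,512] attention projections, [512,2048]/[2048,512] feed-forward weights,
// [512] RMSNorm scales, [32,8] relative-position bias tables, [32128,512] shared embedding.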
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x8xf32>) : !torch.vtensor<[32,8],f32> loc(#loc)
%76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048xf32>) : !torch.vtensor<[512,2048],f32> loc(#loc)
%122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512xf32>) : !torch.vtensor<[2048,512],f32> loc(#loc)
%123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x8xf32>) : !torch.vtensor<[32,8],f32> loc(#loc)
%126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc)
%129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc)
%130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32128x512xf32>) : !torch.vtensor<[32128,512],f32> loc(#loc)
%131 = torch.vtensor.literal(dense<0> : tensor<si64>) : !torch.vtensor<[],si64> loc(#loc)
%int-100 = torch.constant.int -100 loc(#loc9)
%float-3.402820e38 = torch.constant.float -3.4028234663852886E+38 loc(#loc10)
%int6 = torch.constant.int 6 loc(#loc11)
%int9223372036854775807 = torch.constant.int 9223372036854775807 loc(#loc12)
%int2 = torch.constant.int 2 loc(#loc13)
%int3 = torch.constant.int 3 loc(#loc14)
%float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc15)
%float9.999990e-07 = torch.constant.float 9.9999999999999995E-7 loc(#loc16)
%int8 = torch.constant.int 8 loc(#loc17)
%int64 = torch.constant.int 64 loc(#loc18)
%int16 = torch.constant.int 16 loc(#loc19)
%float2.772590e00 = torch.constant.float 2.7725887222397811 loc(#loc20)
%int2048 = torch.constant.int 2048 loc(#loc21)
%float2.079440e00 = torch.constant.float 2.0794415416798357 loc(#loc22)
%int31 = torch.constant.int 31 loc(#loc23)
%float4.419420e-02 = torch.constant.float 0.044194173824159223 loc(#loc24)
%int32128 = torch.constant.int 32128 loc(#loc25)
%cpu = torch.constant.device "cpu" loc(#loc)
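// Build decoder_input_ids from %arg1 by shifting right: start from zeros, copy
// %arg1[:, :-1] into positions 1.., write the decoder start token (0) at position 0,
// and map any -100 sentinel to pad id 0 -- apparently the traced form of HF T5's _shift_right.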
%132 = torch.prim.ListConstruct %int1, %int4 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc1268)
%133 = torch.aten.zeros %132, %int4, %int0, %cpu, %false : !torch.list<int>, !torch.int, !torch.int, !torch.Device, !torch.bool -> !torch.vtensor<[1,4],si64> loc(#loc29)
%134 = torch.aten.slice.Tensor %arg1, %int1, %int0, %int-1, %int1 : !torch.vtensor<[1,4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,3],si64> loc(#loc30)
%135 = torch.aten.clone %134, %none : !torch.vtensor<[1,3],si64>, !torch.none -> !torch.vtensor<[1,3],si64> loc(#loc31)
%136 = torch.aten.slice.Tensor %133, %int1, %int1, %int9223372036854775807, %int1 : !torch.vtensor<[1,4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,3],si64> loc(#loc32)
%137 = torch.aten.arange.start_step %int1, %int4, %int1, %none, %none, %none, %none : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[3],si64> loc(#loc28)
%138 = torch.prim.ListConstruct %137 : (!torch.vtensor<[3],si64>) -> !torch.list<optional<vtensor>> loc(#loc28)
%139 = torch.aten._index_put_impl %133, %138, %135, %false, %false : !torch.vtensor<[1,4],si64>, !torch.list<optional<vtensor>>, !torch.vtensor<[1,3],si64>, !torch.bool, !torch.bool -> !torch.vtensor<[1,4],si64> loc(#loc28)
%140 = torch.aten.clone %131, %none : !torch.vtensor<[],si64>, !torch.none -> !torch.vtensor<[],si64> loc(#loc33)
%141 = torch.aten.slice.Tensor %139, %int1, %int0, %int1, %int1 : !torch.vtensor<[1,4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1],si64> loc(#loc34)
%142 = torch.aten.squeeze.dim %141, %int1 : !torch.vtensor<[1,1],si64>, !torch.int -> !torch.vtensor<[1],si64> loc(#loc34)
%143 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc34)
%144 = torch.prim.ListConstruct %143 : (!torch.vtensor<[],si64>) -> !torch.list<optional<vtensor>> loc(#loc35)
%145 = torch.aten._index_put_impl %139, %144, %140, %false, %false : !torch.vtensor<[1,4],si64>, !torch.list<optional<vtensor>>, !torch.vtensor<[],si64>, !torch.bool, !torch.bool -> !torch.vtensor<[1,4],si64> loc(#loc35)
%146 = torch.aten.eq.Scalar %145, %int-100 : !torch.vtensor<[1,4],si64>, !torch.int -> !torch.vtensor<[1,4],i1> loc(#loc36)
%147 = torch.prim.ListConstruct : () -> !torch.list<int> loc(#loc37)
%148 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc37)
%149 = torch.aten.broadcast_to %148, %147 : !torch.vtensor<[],si64>, !torch.list<int> -> !torch.vtensor<[],si64> loc(#loc37)
%150 = torch.aten.where.self %146, %149, %145 : !torch.vtensor<[1,4],i1>, !torch.vtensor<[],si64>, !torch.vtensor<[1,4],si64> -> !torch.vtensor<[1,4],si64> loc(#loc37)
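// Encoder path: flatten the input ids and embed them with the shared [32128,512] table.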
%151 = torch.prim.ListConstruct %int-1, %int15 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%152 = torch.aten.view %arg0, %151 : !torch.vtensor<[1,15],si64>, !torch.list<int> -> !torch.vtensor<[1,15],si64> loc(#loc38)
%153 = torch.aten.embedding %130, %152, %int-1, %false, %false : !torch.vtensor<[32128,512],f32>, !torch.vtensor<[1,15],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[1,15,512],f32> loc(#loc39)
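// Extended attention mask: an all-ones [1,15] mask is broadcast to [1,1,1,15] and made
// additive (1 -> 0.0, 0 -> -3.4e38) so masked positions vanish after softmax.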
%154 = torch.prim.ListConstruct %int1, %int15 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc1269)
%155 = torch.aten.ones %154, %none, %none, %cpu, %false : !torch.list<int>, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[1,15],f32> loc(#loc43)
%156 = torch.aten.unsqueeze %155, %int1 : !torch.vtensor<[1,15],f32>, !torch.int -> !torch.vtensor<[1,1,15],f32> loc(#loc44)
%157 = torch.aten.unsqueeze %156, %int2 : !torch.vtensor<[1,1,15],f32>, !torch.int -> !torch.vtensor<[1,1,1,15],f32> loc(#loc45)
%158 = torch.aten.rsub.Scalar %157, %float1.000000e00, %int1 : !torch.vtensor<[1,1,1,15],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,1,1,15],f32> loc(#loc46)
%159 = torch.aten.mul.Scalar %158, %float-3.402820e38 : !torch.vtensor<[1,1,1,15],f32>, !torch.float -> !torch.vtensor<[1,1,1,15],f32> loc(#loc47)
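// T5 LayerNorm (RMSNorm): x * rsqrt(mean(x^2, dim=-1) + 1e-6) scaled by a learned
// weight; no mean subtraction and no bias.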
%160 = torch.aten.pow.Tensor_Scalar %153, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc48)
%161 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%162 = torch.aten.sum.dim_IntList %160, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc49)
%163 = torch.aten.div.Scalar %162, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc49)
%164 = torch.aten.add.Scalar %163, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc50)
%165 = torch.aten.rsqrt %164 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc51)
%166 = torch.aten.mul.Tensor %153, %165 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc52)
%167 = torch.aten.mul.Tensor %129, %166 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc53)
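// Encoder block 0 self-attention: bias-free Q/K/V projections, each reshaped to
// 8 heads of 64 dims ([1,15,512] -> [1,8,15,64]).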
%168 = torch.aten.transpose.int %128, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc54)
%169 = torch.prim.ListConstruct %int15, %int512 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc1270)
%170 = torch.aten.view %167, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc57)
%171 = torch.aten.mm %170, %168 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc58)
%172 = torch.prim.ListConstruct %int1, %int15, %int512 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1271)
%173 = torch.aten.view %171, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc61)
%174 = torch.prim.ListConstruct %int1, %int-1, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%175 = torch.aten.view %173, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc62)
%176 = torch.aten.transpose.int %175, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc63)
%177 = torch.aten.transpose.int %127, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc64)
%178 = torch.aten.view %167, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc65)
%179 = torch.aten.mm %178, %177 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc66)
%180 = torch.aten.view %179, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc67)
%181 = torch.aten.view %180, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc68)
%182 = torch.aten.transpose.int %181, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc69)
%183 = torch.aten.transpose.int %126, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc70)
%184 = torch.aten.view %167, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc71)
%185 = torch.aten.mm %184, %183 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc72)
%186 = torch.aten.view %185, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc73)
%187 = torch.aten.view %186, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc74)
%188 = torch.aten.transpose.int %187, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc75)
%189 = torch.aten.transpose.int %182, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc76)
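// Raw attention scores Q @ K^T as a batched matmul -> [1,8,15,15]; note T5 applies
// no 1/sqrt(d_k) scaling here.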
%190 = torch.prim.ListConstruct %int1, %int8, %int15, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1272)
%191 = torch.aten.broadcast_to %176, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc79)
%192 = torch.prim.ListConstruct %int8, %int15, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1273)
%193 = torch.aten.view %191, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc80)
%194 = torch.prim.ListConstruct %int1, %int8, %int64, %int15 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1274)
%195 = torch.aten.broadcast_to %189, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc81)
%196 = torch.prim.ListConstruct %int8, %int64, %int15 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1275)
%197 = torch.aten.view %195, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc82)
%198 = torch.aten.bmm %193, %197 : !torch.vtensor<[8,15,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,15,15],f32> loc(#loc83)
%199 = torch.prim.ListConstruct %int1, %int8, %int15, %int15 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1276)
%200 = torch.aten.view %198, %199 : !torch.vtensor<[8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc87)
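// Relative position bias: distances j - i are bucketed (16 buckets per direction;
// offsets < 8 exact, larger ones log-spaced up to 128, hence the ln(128/8) = 2.7726
// constant), looked up in the [32,8] table, combined with the additive mask into
// %229, and added to the scores; later blocks reuse %229.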
%201 = torch.aten.arange.start_step %int0, %int15, %int1, %int4, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[15],si64> loc(#loc88)
%202 = torch.aten.unsqueeze %201, %int1 : !torch.vtensor<[15],si64>, !torch.int -> !torch.vtensor<[15,1],si64> loc(#loc89)
%203 = torch.aten.arange.start_step %int0, %int15, %int1, %int4, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[15],si64> loc(#loc90)
%204 = torch.aten.unsqueeze %203, %int0 : !torch.vtensor<[15],si64>, !torch.int -> !torch.vtensor<[1,15],si64> loc(#loc91)
%205 = torch.aten.sub.Tensor %204, %202, %int1 : !torch.vtensor<[1,15],si64>, !torch.vtensor<[15,1],si64>, !torch.int -> !torch.vtensor<[15,15],si64> loc(#loc92)
%206 = torch.aten.gt.Scalar %205, %int0 : !torch.vtensor<[15,15],si64>, !torch.int -> !torch.vtensor<[15,15],i1> loc(#loc93)
%207 = torch.aten.to.dtype %206, %int4, %false, %false, %none : !torch.vtensor<[15,15],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[15,15],si64> loc(#loc94)
%208 = torch.aten.mul.Scalar %207, %int16 : !torch.vtensor<[15,15],si64>, !torch.int -> !torch.vtensor<[15,15],si64> loc(#loc95)
%209 = torch.aten.add.Scalar %208, %int0, %int1 : !torch.vtensor<[15,15],si64>, !torch.int, !torch.int -> !torch.vtensor<[15,15],si64> loc(#loc96)
%210 = torch.aten.abs %205 : !torch.vtensor<[15,15],si64> -> !torch.vtensor<[15,15],si64> loc(#loc97)
%211 = torch.aten.lt.Scalar %210, %int8 : !torch.vtensor<[15,15],si64>, !torch.int -> !torch.vtensor<[15,15],i1> loc(#loc98)
%212 = torch.aten.to.dtype %210, %int6, %false, %false, %none : !torch.vtensor<[15,15],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[15,15],f32> loc(#loc99)
%213 = torch.aten.div.Scalar %212, %int8 : !torch.vtensor<[15,15],f32>, !torch.int -> !torch.vtensor<[15,15],f32> loc(#loc100)
%214 = torch.aten.log %213 : !torch.vtensor<[15,15],f32> -> !torch.vtensor<[15,15],f32> loc(#loc101)
%215 = torch.aten.div.Scalar %214, %float2.772590e00 : !torch.vtensor<[15,15],f32>, !torch.float -> !torch.vtensor<[15,15],f32> loc(#loc102)
%216 = torch.aten.mul.Scalar %215, %int8 : !torch.vtensor<[15,15],f32>, !torch.int -> !torch.vtensor<[15,15],f32> loc(#loc103)
%217 = torch.aten.to.dtype %216, %int4, %false, %false, %none : !torch.vtensor<[15,15],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[15,15],si64> loc(#loc104)
%218 = torch.aten.add.Scalar %217, %int8, %int1 : !torch.vtensor<[15,15],si64>, !torch.int, !torch.int -> !torch.vtensor<[15,15],si64> loc(#loc105)
%219 = torch.prim.NumToTensor.Scalar %int15 : !torch.int -> !torch.vtensor<[],si64> loc(#loc106)
%220 = torch.prim.ListConstruct %int15, %int15 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc106)
%221 = torch.aten.broadcast_to %219, %220 : !torch.vtensor<[],si64>, !torch.list<int> -> !torch.vtensor<[15,15],si64> loc(#loc106)
%222 = torch.aten.minimum %218, %221 : !torch.vtensor<[15,15],si64>, !torch.vtensor<[15,15],si64> -> !torch.vtensor<[15,15],si64> loc(#loc107)
%223 = torch.aten.where.self %211, %210, %222 : !torch.vtensor<[15,15],i1>, !torch.vtensor<[15,15],si64>, !torch.vtensor<[15,15],si64> -> !torch.vtensor<[15,15],si64> loc(#loc108)
%224 = torch.aten.add.Tensor %209, %223, %int1 : !torch.vtensor<[15,15],si64>, !torch.vtensor<[15,15],si64>, !torch.int -> !torch.vtensor<[15,15],si64> loc(#loc109)
%225 = torch.aten.embedding %125, %224, %int-1, %false, %false : !torch.vtensor<[32,8],f32>, !torch.vtensor<[15,15],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[15,15,8],f32> loc(#loc110)
%226 = torch.prim.ListConstruct %int2, %int0, %int1 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%227 = torch.aten.permute %225, %226 : !torch.vtensor<[15,15,8],f32>, !torch.list<int> -> !torch.vtensor<[8,15,15],f32> loc(#loc111)
%228 = torch.aten.unsqueeze %227, %int0 : !torch.vtensor<[8,15,15],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc86)
%229 = torch.aten.add.Tensor %228, %159, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,1,1,15],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc112)
%230 = torch.aten.add.Tensor %200, %229, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,15],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc113)
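// Numerically stable softmax over the last dim: subtract the row max, exp, normalize.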
%values, %indices = torch.aten.max.dim %230, %int-1, %true : !torch.vtensor<[1,8,15,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,15,1],f32>, !torch.vtensor<[1,8,15,1],si64> loc(#loc114)
%231 = torch.aten.sub.Tensor %230, %values, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc115)
%232 = torch.aten.exp %231 : !torch.vtensor<[1,8,15,15],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc116)
%233 = torch.aten.sum.dim_IntList %232, %161, %true, %none : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,15,1],f32> loc(#loc117)
%234 = torch.aten.div.Tensor %232, %233 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc118)
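// Attention output: probs @ V, then heads are merged back to [1,15,512].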
%235 = torch.aten.broadcast_to %234, %199 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc119)
%236 = torch.prim.ListConstruct %int8, %int15, %int15 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1277)
%237 = torch.aten.view %235, %236 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[8,15,15],f32> loc(#loc120)
%238 = torch.aten.broadcast_to %188, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc121)
%239 = torch.aten.view %238, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc122)
%240 = torch.aten.bmm %237, %239 : !torch.vtensor<[8,15,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,15,64],f32> loc(#loc123)
%241 = torch.aten.view %240, %190 : !torch.vtensor<[8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc124)
%242 = torch.aten.transpose.int %241, %int1, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc125)
%243 = torch.aten.clone %242, %int0 : !torch.vtensor<[1,15,8,64],f32>, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc126)
%244 = torch.prim.ListConstruct %int1, %int-1, %int512 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%245 = torch.aten.view %243, %244 : !torch.vtensor<[1,15,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc60)
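// Output projection (o) followed by the residual connection.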
%246 = torch.aten.transpose.int %124, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc127)
%247 = torch.aten.view %245, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc128)
%248 = torch.aten.mm %247, %246 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc129)
%249 = torch.aten.view %248, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc130)
%250 = torch.aten.add.Tensor %153, %249, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc131)
%251 = torch.aten.pow.Tensor_Scalar %250, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc132)
%252 = torch.aten.sum.dim_IntList %251, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc133)
%253 = torch.aten.div.Scalar %252, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc133)
%254 = torch.aten.add.Scalar %253, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc134)
%255 = torch.aten.rsqrt %254 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc135)
%256 = torch.aten.mul.Tensor %250, %255 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc136)
%257 = torch.aten.mul.Tensor %123, %256 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc137)
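// Feed-forward sub-layer: wi (512 -> 2048), ReLU, wo (2048 -> 512), residual add.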
%258 = torch.aten.transpose.int %122, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc138)
%259 = torch.aten.view %257, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc139)
%260 = torch.aten.mm %259, %258 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[15,2048],f32> loc(#loc140)
%261 = torch.prim.ListConstruct %int1, %int15, %int2048 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1278)
%262 = torch.aten.view %260, %261 : !torch.vtensor<[15,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,15,2048],f32> loc(#loc143)
%263 = torch.aten.relu %262 : !torch.vtensor<[1,15,2048],f32> -> !torch.vtensor<[1,15,2048],f32> loc(#loc142)
%264 = torch.aten.transpose.int %121, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc144)
%265 = torch.prim.ListConstruct %int15, %int2048 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc1279)
%266 = torch.aten.view %263, %265 : !torch.vtensor<[1,15,2048],f32>, !torch.list<int> -> !torch.vtensor<[15,2048],f32> loc(#loc145)
%267 = torch.aten.mm %266, %264 : !torch.vtensor<[15,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc146)
%268 = torch.aten.view %267, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc147)
%269 = torch.aten.add.Tensor %250, %268, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc148)
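// The remaining encoder blocks repeat the same pattern below: RMSNorm ->
// self-attention (reusing position bias %229) -> RMSNorm -> feed-forward.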
%270 = torch.aten.pow.Tensor_Scalar %269, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc149) | |
%271 = torch.aten.sum.dim_IntList %270, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc150) | |
%272 = torch.aten.div.Scalar %271, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc150) | |
%273 = torch.aten.add.Scalar %272, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc151) | |
%274 = torch.aten.rsqrt %273 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc152) | |
%275 = torch.aten.mul.Tensor %269, %274 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc153) | |
%276 = torch.aten.mul.Tensor %120, %275 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc154) | |
%277 = torch.aten.transpose.int %119, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc155) | |
%278 = torch.aten.view %276, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc156) | |
%279 = torch.aten.mm %278, %277 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc157) | |
%280 = torch.aten.view %279, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc158) | |
%281 = torch.aten.view %280, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc159) | |
%282 = torch.aten.transpose.int %281, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc160) | |
%283 = torch.aten.transpose.int %118, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc161) | |
%284 = torch.aten.view %276, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc162) | |
%285 = torch.aten.mm %284, %283 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc163) | |
%286 = torch.aten.view %285, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc164) | |
%287 = torch.aten.view %286, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc165) | |
%288 = torch.aten.transpose.int %287, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc166) | |
%289 = torch.aten.transpose.int %117, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc167) | |
%290 = torch.aten.view %276, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc168) | |
%291 = torch.aten.mm %290, %289 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc169) | |
%292 = torch.aten.view %291, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc170) | |
%293 = torch.aten.view %292, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc171) | |
%294 = torch.aten.transpose.int %293, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc172) | |
%295 = torch.aten.transpose.int %288, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc173) | |
%296 = torch.aten.broadcast_to %282, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc174) | |
%297 = torch.aten.view %296, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc175) | |
%298 = torch.aten.broadcast_to %295, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc176) | |
%299 = torch.aten.view %298, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc177) | |
%300 = torch.aten.bmm %297, %299 : !torch.vtensor<[8,15,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,15,15],f32> loc(#loc178) | |
%301 = torch.aten.view %300, %199 : !torch.vtensor<[8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc179) | |
%302 = torch.aten.add.Tensor %301, %229, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,15],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc180) | |
%values_0, %indices_1 = torch.aten.max.dim %302, %int-1, %true : !torch.vtensor<[1,8,15,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,15,1],f32>, !torch.vtensor<[1,8,15,1],si64> loc(#loc181) | |
%303 = torch.aten.sub.Tensor %302, %values_0, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc182) | |
%304 = torch.aten.exp %303 : !torch.vtensor<[1,8,15,15],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc183) | |
%305 = torch.aten.sum.dim_IntList %304, %161, %true, %none : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,15,1],f32> loc(#loc184) | |
%306 = torch.aten.div.Tensor %304, %305 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc185) | |
%307 = torch.aten.broadcast_to %306, %199 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc186) | |
%308 = torch.aten.view %307, %236 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[8,15,15],f32> loc(#loc187) | |
%309 = torch.aten.broadcast_to %294, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc188) | |
%310 = torch.aten.view %309, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc189) | |
%311 = torch.aten.bmm %308, %310 : !torch.vtensor<[8,15,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,15,64],f32> loc(#loc190) | |
%312 = torch.aten.view %311, %190 : !torch.vtensor<[8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc191) | |
%313 = torch.aten.transpose.int %312, %int1, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc192) | |
%314 = torch.aten.clone %313, %int0 : !torch.vtensor<[1,15,8,64],f32>, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc193) | |
%315 = torch.aten.view %314, %244 : !torch.vtensor<[1,15,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc194) | |
%316 = torch.aten.transpose.int %116, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc195) | |
%317 = torch.aten.view %315, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc196) | |
%318 = torch.aten.mm %317, %316 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc197) | |
%319 = torch.aten.view %318, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc198) | |
%320 = torch.aten.add.Tensor %269, %319, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc199) | |
%321 = torch.aten.pow.Tensor_Scalar %320, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc200) | |
%322 = torch.aten.sum.dim_IntList %321, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc201) | |
%323 = torch.aten.div.Scalar %322, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc201) | |
%324 = torch.aten.add.Scalar %323, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc202) | |
%325 = torch.aten.rsqrt %324 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc203) | |
%326 = torch.aten.mul.Tensor %320, %325 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc204) | |
%327 = torch.aten.mul.Tensor %115, %326 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc205) | |
%328 = torch.aten.transpose.int %114, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc206) | |
%329 = torch.aten.view %327, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc207) | |
%330 = torch.aten.mm %329, %328 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[15,2048],f32> loc(#loc208) | |
%331 = torch.aten.view %330, %261 : !torch.vtensor<[15,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,15,2048],f32> loc(#loc209) | |
%332 = torch.aten.relu %331 : !torch.vtensor<[1,15,2048],f32> -> !torch.vtensor<[1,15,2048],f32> loc(#loc210) | |
%333 = torch.aten.transpose.int %113, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc211) | |
%334 = torch.aten.view %332, %265 : !torch.vtensor<[1,15,2048],f32>, !torch.list<int> -> !torch.vtensor<[15,2048],f32> loc(#loc212) | |
%335 = torch.aten.mm %334, %333 : !torch.vtensor<[15,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc213) | |
%336 = torch.aten.view %335, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc214) | |
%337 = torch.aten.add.Tensor %320, %336, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc215) | |
%338 = torch.aten.pow.Tensor_Scalar %337, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc216) | |
%339 = torch.aten.sum.dim_IntList %338, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc217) | |
%340 = torch.aten.div.Scalar %339, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc217) | |
%341 = torch.aten.add.Scalar %340, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc218) | |
%342 = torch.aten.rsqrt %341 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc219) | |
%343 = torch.aten.mul.Tensor %337, %342 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc220) | |
%344 = torch.aten.mul.Tensor %112, %343 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc221) | |
%345 = torch.aten.transpose.int %111, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc222) | |
%346 = torch.aten.view %344, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc223) | |
%347 = torch.aten.mm %346, %345 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc224) | |
%348 = torch.aten.view %347, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc225) | |
%349 = torch.aten.view %348, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc226) | |
%350 = torch.aten.transpose.int %349, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc227) | |
%351 = torch.aten.transpose.int %110, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc228) | |
%352 = torch.aten.view %344, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc229) | |
%353 = torch.aten.mm %352, %351 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc230) | |
%354 = torch.aten.view %353, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc231) | |
%355 = torch.aten.view %354, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc232) | |
%356 = torch.aten.transpose.int %355, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc233) | |
%357 = torch.aten.transpose.int %109, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc234) | |
%358 = torch.aten.view %344, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc235) | |
%359 = torch.aten.mm %358, %357 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc236) | |
%360 = torch.aten.view %359, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc237) | |
%361 = torch.aten.view %360, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc238) | |
%362 = torch.aten.transpose.int %361, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc239) | |
%363 = torch.aten.transpose.int %356, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc240) | |
%364 = torch.aten.broadcast_to %350, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc241) | |
%365 = torch.aten.view %364, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc242) | |
%366 = torch.aten.broadcast_to %363, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc243) | |
%367 = torch.aten.view %366, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc244) | |
%368 = torch.aten.bmm %365, %367 : !torch.vtensor<[8,15,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,15,15],f32> loc(#loc245) | |
%369 = torch.aten.view %368, %199 : !torch.vtensor<[8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc246) | |
%370 = torch.aten.add.Tensor %369, %229, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,15],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc247) | |
%values_2, %indices_3 = torch.aten.max.dim %370, %int-1, %true : !torch.vtensor<[1,8,15,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,15,1],f32>, !torch.vtensor<[1,8,15,1],si64> loc(#loc248) | |
%371 = torch.aten.sub.Tensor %370, %values_2, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc249) | |
%372 = torch.aten.exp %371 : !torch.vtensor<[1,8,15,15],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc250) | |
%373 = torch.aten.sum.dim_IntList %372, %161, %true, %none : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,15,1],f32> loc(#loc251) | |
%374 = torch.aten.div.Tensor %372, %373 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc252) | |
%375 = torch.aten.broadcast_to %374, %199 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc253) | |
%376 = torch.aten.view %375, %236 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[8,15,15],f32> loc(#loc254) | |
%377 = torch.aten.broadcast_to %362, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc255) | |
%378 = torch.aten.view %377, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc256) | |
%379 = torch.aten.bmm %376, %378 : !torch.vtensor<[8,15,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,15,64],f32> loc(#loc257) | |
%380 = torch.aten.view %379, %190 : !torch.vtensor<[8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc258) | |
%381 = torch.aten.transpose.int %380, %int1, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc259) | |
%382 = torch.aten.clone %381, %int0 : !torch.vtensor<[1,15,8,64],f32>, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc260) | |
%383 = torch.aten.view %382, %244 : !torch.vtensor<[1,15,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc261) | |
%384 = torch.aten.transpose.int %108, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc262) | |
%385 = torch.aten.view %383, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc263) | |
%386 = torch.aten.mm %385, %384 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc264) | |
%387 = torch.aten.view %386, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc265) | |
%388 = torch.aten.add.Tensor %337, %387, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc266) | |
%389 = torch.aten.pow.Tensor_Scalar %388, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc267)
%390 = torch.aten.sum.dim_IntList %389, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc268)
%391 = torch.aten.div.Scalar %390, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc268)
%392 = torch.aten.add.Scalar %391, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc269)
%393 = torch.aten.rsqrt %392 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc270)
%394 = torch.aten.mul.Tensor %388, %393 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc271)
%395 = torch.aten.mul.Tensor %107, %394 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc272)
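// note (annotation): feed-forward block — %106 (likely T5's wi) projects
// 512 -> 2048, ReLU, then %105 (likely wo) projects 2048 -> 512 before the
// residual add into %405.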
%396 = torch.aten.transpose.int %106, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc273)
%397 = torch.aten.view %395, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc274)
%398 = torch.aten.mm %397, %396 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[15,2048],f32> loc(#loc275)
%399 = torch.aten.view %398, %261 : !torch.vtensor<[15,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,15,2048],f32> loc(#loc276)
%400 = torch.aten.relu %399 : !torch.vtensor<[1,15,2048],f32> -> !torch.vtensor<[1,15,2048],f32> loc(#loc277)
%401 = torch.aten.transpose.int %105, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc278)
%402 = torch.aten.view %400, %265 : !torch.vtensor<[1,15,2048],f32>, !torch.list<int> -> !torch.vtensor<[15,2048],f32> loc(#loc279)
%403 = torch.aten.mm %402, %401 : !torch.vtensor<[15,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc280)
%404 = torch.aten.view %403, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc281)
%405 = torch.aten.add.Tensor %388, %404, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc282)
%406 = torch.aten.pow.Tensor_Scalar %405, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc283)
%407 = torch.aten.sum.dim_IntList %406, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc284)
%408 = torch.aten.div.Scalar %407, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc284)
%409 = torch.aten.add.Scalar %408, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc285)
%410 = torch.aten.rsqrt %409 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc286)
%411 = torch.aten.mul.Tensor %405, %410 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc287)
%412 = torch.aten.mul.Tensor %104, %411 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc288)
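// note (annotation): a new encoder layer begins here; the three mm ops against
// the transposed [512,512] weights %103, %102 and %101 below are most likely the
// Q, K and V projections of the normalized hidden states %412.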
%413 = torch.aten.transpose.int %103, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc289)
%414 = torch.aten.view %412, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc290)
%415 = torch.aten.mm %414, %413 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc291)
%416 = torch.aten.view %415, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc292)
%417 = torch.aten.view %416, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc293)
%418 = torch.aten.transpose.int %417, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc294)
%419 = torch.aten.transpose.int %102, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc295)
%420 = torch.aten.view %412, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc296)
%421 = torch.aten.mm %420, %419 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc297)
%422 = torch.aten.view %421, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc298)
%423 = torch.aten.view %422, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc299)
%424 = torch.aten.transpose.int %423, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc300)
%425 = torch.aten.transpose.int %101, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc301)
%426 = torch.aten.view %412, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc302)
%427 = torch.aten.mm %426, %425 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc303)
%428 = torch.aten.view %427, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc304)
%429 = torch.aten.view %428, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc305)
%430 = torch.aten.transpose.int %429, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc306)
%431 = torch.aten.transpose.int %424, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc307)
%432 = torch.aten.broadcast_to %418, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc308)
%433 = torch.aten.view %432, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc309)
%434 = torch.aten.broadcast_to %431, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc310)
%435 = torch.aten.view %434, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc311)
%436 = torch.aten.bmm %433, %435 : !torch.vtensor<[8,15,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,15,15],f32> loc(#loc312)
%437 = torch.aten.view %436, %199 : !torch.vtensor<[8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc313)
%438 = torch.aten.add.Tensor %437, %229, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,15],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc314)
%values_4, %indices_5 = torch.aten.max.dim %438, %int-1, %true : !torch.vtensor<[1,8,15,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,15,1],f32>, !torch.vtensor<[1,8,15,1],si64> loc(#loc315)
%439 = torch.aten.sub.Tensor %438, %values_4, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc316)
%440 = torch.aten.exp %439 : !torch.vtensor<[1,8,15,15],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc317)
%441 = torch.aten.sum.dim_IntList %440, %161, %true, %none : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,15,1],f32> loc(#loc318)
%442 = torch.aten.div.Tensor %440, %441 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc319)
%443 = torch.aten.broadcast_to %442, %199 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc320)
%444 = torch.aten.view %443, %236 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[8,15,15],f32> loc(#loc321)
%445 = torch.aten.broadcast_to %430, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc322)
%446 = torch.aten.view %445, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc323)
%447 = torch.aten.bmm %444, %446 : !torch.vtensor<[8,15,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,15,64],f32> loc(#loc324)
%448 = torch.aten.view %447, %190 : !torch.vtensor<[8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc325)
%449 = torch.aten.transpose.int %448, %int1, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc326)
%450 = torch.aten.clone %449, %int0 : !torch.vtensor<[1,15,8,64],f32>, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc327)
%451 = torch.aten.view %450, %244 : !torch.vtensor<[1,15,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc328)
%452 = torch.aten.transpose.int %100, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc329)
%453 = torch.aten.view %451, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc330)
%454 = torch.aten.mm %453, %452 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc331)
%455 = torch.aten.view %454, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc332)
%456 = torch.aten.add.Tensor %405, %455, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc333)
%457 = torch.aten.pow.Tensor_Scalar %456, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc334)
%458 = torch.aten.sum.dim_IntList %457, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc335)
%459 = torch.aten.div.Scalar %458, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc335)
%460 = torch.aten.add.Scalar %459, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc336)
%461 = torch.aten.rsqrt %460 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc337)
%462 = torch.aten.mul.Tensor %456, %461 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc338)
%463 = torch.aten.mul.Tensor %99, %462 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc339)
%464 = torch.aten.transpose.int %98, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc340)
%465 = torch.aten.view %463, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc341)
%466 = torch.aten.mm %465, %464 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[15,2048],f32> loc(#loc342)
%467 = torch.aten.view %466, %261 : !torch.vtensor<[15,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,15,2048],f32> loc(#loc343)
%468 = torch.aten.relu %467 : !torch.vtensor<[1,15,2048],f32> -> !torch.vtensor<[1,15,2048],f32> loc(#loc344)
%469 = torch.aten.transpose.int %97, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc345)
%470 = torch.aten.view %468, %265 : !torch.vtensor<[1,15,2048],f32>, !torch.list<int> -> !torch.vtensor<[15,2048],f32> loc(#loc346)
%471 = torch.aten.mm %470, %469 : !torch.vtensor<[15,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc347)
%472 = torch.aten.view %471, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc348)
%473 = torch.aten.add.Tensor %456, %472, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc349)
%474 = torch.aten.pow.Tensor_Scalar %473, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc350)
%475 = torch.aten.sum.dim_IntList %474, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc351)
%476 = torch.aten.div.Scalar %475, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc351)
%477 = torch.aten.add.Scalar %476, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc352)
%478 = torch.aten.rsqrt %477 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc353)
%479 = torch.aten.mul.Tensor %473, %478 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc354)
%480 = torch.aten.mul.Tensor %96, %479 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc355)
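// note (annotation): the remaining encoder layers below repeat the same
// pattern — self-attention over the normalized states (%480 here), RMSNorm,
// feed-forward, RMSNorm — with successive weight constants.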
%481 = torch.aten.transpose.int %95, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc356)
%482 = torch.aten.view %480, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc357)
%483 = torch.aten.mm %482, %481 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc358)
%484 = torch.aten.view %483, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc359)
%485 = torch.aten.view %484, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc360)
%486 = torch.aten.transpose.int %485, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc361)
%487 = torch.aten.transpose.int %94, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc362)
%488 = torch.aten.view %480, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc363)
%489 = torch.aten.mm %488, %487 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc364)
%490 = torch.aten.view %489, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc365)
%491 = torch.aten.view %490, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc366)
%492 = torch.aten.transpose.int %491, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc367)
%493 = torch.aten.transpose.int %93, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc368)
%494 = torch.aten.view %480, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc369)
%495 = torch.aten.mm %494, %493 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc370)
%496 = torch.aten.view %495, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc371)
%497 = torch.aten.view %496, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc372)
%498 = torch.aten.transpose.int %497, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc373)
%499 = torch.aten.transpose.int %492, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc374)
%500 = torch.aten.broadcast_to %486, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc375)
%501 = torch.aten.view %500, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc376)
%502 = torch.aten.broadcast_to %499, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc377)
%503 = torch.aten.view %502, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc378)
%504 = torch.aten.bmm %501, %503 : !torch.vtensor<[8,15,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,15,15],f32> loc(#loc379)
%505 = torch.aten.view %504, %199 : !torch.vtensor<[8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc380)
%506 = torch.aten.add.Tensor %505, %229, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,15],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc381)
%values_6, %indices_7 = torch.aten.max.dim %506, %int-1, %true : !torch.vtensor<[1,8,15,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,15,1],f32>, !torch.vtensor<[1,8,15,1],si64> loc(#loc382)
%507 = torch.aten.sub.Tensor %506, %values_6, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc383)
%508 = torch.aten.exp %507 : !torch.vtensor<[1,8,15,15],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc384)
%509 = torch.aten.sum.dim_IntList %508, %161, %true, %none : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,15,1],f32> loc(#loc385)
%510 = torch.aten.div.Tensor %508, %509 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc386)
%511 = torch.aten.broadcast_to %510, %199 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc387)
%512 = torch.aten.view %511, %236 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[8,15,15],f32> loc(#loc388)
%513 = torch.aten.broadcast_to %498, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc389)
%514 = torch.aten.view %513, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc390)
%515 = torch.aten.bmm %512, %514 : !torch.vtensor<[8,15,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,15,64],f32> loc(#loc391)
%516 = torch.aten.view %515, %190 : !torch.vtensor<[8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc392)
%517 = torch.aten.transpose.int %516, %int1, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc393)
%518 = torch.aten.clone %517, %int0 : !torch.vtensor<[1,15,8,64],f32>, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc394)
%519 = torch.aten.view %518, %244 : !torch.vtensor<[1,15,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc395)
%520 = torch.aten.transpose.int %92, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc396)
%521 = torch.aten.view %519, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc397)
%522 = torch.aten.mm %521, %520 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc398)
%523 = torch.aten.view %522, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc399)
%524 = torch.aten.add.Tensor %473, %523, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc400)
%525 = torch.aten.pow.Tensor_Scalar %524, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc401)
%526 = torch.aten.sum.dim_IntList %525, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc402)
%527 = torch.aten.div.Scalar %526, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc402)
%528 = torch.aten.add.Scalar %527, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc403)
%529 = torch.aten.rsqrt %528 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc404)
%530 = torch.aten.mul.Tensor %524, %529 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc405)
%531 = torch.aten.mul.Tensor %91, %530 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc406)
%532 = torch.aten.transpose.int %90, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc407)
%533 = torch.aten.view %531, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc408)
%534 = torch.aten.mm %533, %532 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[15,2048],f32> loc(#loc409)
%535 = torch.aten.view %534, %261 : !torch.vtensor<[15,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,15,2048],f32> loc(#loc410)
%536 = torch.aten.relu %535 : !torch.vtensor<[1,15,2048],f32> -> !torch.vtensor<[1,15,2048],f32> loc(#loc411)
%537 = torch.aten.transpose.int %89, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc412)
%538 = torch.aten.view %536, %265 : !torch.vtensor<[1,15,2048],f32>, !torch.list<int> -> !torch.vtensor<[15,2048],f32> loc(#loc413)
%539 = torch.aten.mm %538, %537 : !torch.vtensor<[15,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc414)
%540 = torch.aten.view %539, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc415)
%541 = torch.aten.add.Tensor %524, %540, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc416)
%542 = torch.aten.pow.Tensor_Scalar %541, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc417)
%543 = torch.aten.sum.dim_IntList %542, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc418)
%544 = torch.aten.div.Scalar %543, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc418)
%545 = torch.aten.add.Scalar %544, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc419)
%546 = torch.aten.rsqrt %545 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc420)
%547 = torch.aten.mul.Tensor %541, %546 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc421)
%548 = torch.aten.mul.Tensor %88, %547 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc422)
%549 = torch.aten.transpose.int %87, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc423)
%550 = torch.aten.view %548, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc424)
%551 = torch.aten.mm %550, %549 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc425)
%552 = torch.aten.view %551, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc426)
%553 = torch.aten.view %552, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc427)
%554 = torch.aten.transpose.int %553, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc428)
%555 = torch.aten.transpose.int %86, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc429)
%556 = torch.aten.view %548, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc430)
%557 = torch.aten.mm %556, %555 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc431)
%558 = torch.aten.view %557, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc432)
%559 = torch.aten.view %558, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc433)
%560 = torch.aten.transpose.int %559, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc434)
%561 = torch.aten.transpose.int %85, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc435)
%562 = torch.aten.view %548, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc436)
%563 = torch.aten.mm %562, %561 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc437)
%564 = torch.aten.view %563, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc438)
%565 = torch.aten.view %564, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc439)
%566 = torch.aten.transpose.int %565, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc440)
%567 = torch.aten.transpose.int %560, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc441)
%568 = torch.aten.broadcast_to %554, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc442)
%569 = torch.aten.view %568, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc443)
%570 = torch.aten.broadcast_to %567, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc444)
%571 = torch.aten.view %570, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc445)
%572 = torch.aten.bmm %569, %571 : !torch.vtensor<[8,15,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,15,15],f32> loc(#loc446)
%573 = torch.aten.view %572, %199 : !torch.vtensor<[8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc447)
%574 = torch.aten.add.Tensor %573, %229, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,15],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc448)
%values_8, %indices_9 = torch.aten.max.dim %574, %int-1, %true : !torch.vtensor<[1,8,15,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,15,1],f32>, !torch.vtensor<[1,8,15,1],si64> loc(#loc449)
%575 = torch.aten.sub.Tensor %574, %values_8, %int1 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32>, !torch.int -> !torch.vtensor<[1,8,15,15],f32> loc(#loc450)
%576 = torch.aten.exp %575 : !torch.vtensor<[1,8,15,15],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc451)
%577 = torch.aten.sum.dim_IntList %576, %161, %true, %none : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,15,1],f32> loc(#loc452)
%578 = torch.aten.div.Tensor %576, %577 : !torch.vtensor<[1,8,15,15],f32>, !torch.vtensor<[1,8,15,1],f32> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc453)
%579 = torch.aten.broadcast_to %578, %199 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,15],f32> loc(#loc454)
%580 = torch.aten.view %579, %236 : !torch.vtensor<[1,8,15,15],f32>, !torch.list<int> -> !torch.vtensor<[8,15,15],f32> loc(#loc455)
%581 = torch.aten.broadcast_to %566, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc456)
%582 = torch.aten.view %581, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc457)
%583 = torch.aten.bmm %580, %582 : !torch.vtensor<[8,15,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,15,64],f32> loc(#loc458)
%584 = torch.aten.view %583, %190 : !torch.vtensor<[8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc459)
%585 = torch.aten.transpose.int %584, %int1, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc460)
%586 = torch.aten.clone %585, %int0 : !torch.vtensor<[1,15,8,64],f32>, !torch.int -> !torch.vtensor<[1,15,8,64],f32> loc(#loc461)
%587 = torch.aten.view %586, %244 : !torch.vtensor<[1,15,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc462)
%588 = torch.aten.transpose.int %84, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc463)
%589 = torch.aten.view %587, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc464)
%590 = torch.aten.mm %589, %588 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc465)
%591 = torch.aten.view %590, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc466)
%592 = torch.aten.add.Tensor %541, %591, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc467)
%593 = torch.aten.pow.Tensor_Scalar %592, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc468)
%594 = torch.aten.sum.dim_IntList %593, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc469)
%595 = torch.aten.div.Scalar %594, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc469)
%596 = torch.aten.add.Scalar %595, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc470)
%597 = torch.aten.rsqrt %596 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc471)
%598 = torch.aten.mul.Tensor %592, %597 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc472)
%599 = torch.aten.mul.Tensor %83, %598 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc473)
%600 = torch.aten.transpose.int %82, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc474)
%601 = torch.aten.view %599, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc475)
%602 = torch.aten.mm %601, %600 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[15,2048],f32> loc(#loc476)
%603 = torch.aten.view %602, %261 : !torch.vtensor<[15,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,15,2048],f32> loc(#loc477)
%604 = torch.aten.relu %603 : !torch.vtensor<[1,15,2048],f32> -> !torch.vtensor<[1,15,2048],f32> loc(#loc478)
%605 = torch.aten.transpose.int %81, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc479)
%606 = torch.aten.view %604, %265 : !torch.vtensor<[1,15,2048],f32>, !torch.list<int> -> !torch.vtensor<[15,2048],f32> loc(#loc480)
%607 = torch.aten.mm %606, %605 : !torch.vtensor<[15,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc481)
%608 = torch.aten.view %607, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc482)
%609 = torch.aten.add.Tensor %592, %608, %int1 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc483)
%610 = torch.aten.pow.Tensor_Scalar %609, %int2 : !torch.vtensor<[1,15,512],f32>, !torch.int -> !torch.vtensor<[1,15,512],f32> loc(#loc484)
%611 = torch.aten.sum.dim_IntList %610, %161, %true, %none : !torch.vtensor<[1,15,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,15,1],f32> loc(#loc485)
%612 = torch.aten.div.Scalar %611, %int512 : !torch.vtensor<[1,15,1],f32>, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc485)
%613 = torch.aten.add.Scalar %612, %float9.999990e-07, %int1 : !torch.vtensor<[1,15,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,15,1],f32> loc(#loc486)
%614 = torch.aten.rsqrt %613 : !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,1],f32> loc(#loc487)
%615 = torch.aten.mul.Tensor %609, %614 : !torch.vtensor<[1,15,512],f32>, !torch.vtensor<[1,15,1],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc488)
%616 = torch.aten.mul.Tensor %80, %615 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,15,512],f32> -> !torch.vtensor<[1,15,512],f32> loc(#loc489)
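// note (annotation): the encoder stack ends here; %616 holds the final
// [1,15,512] encoder hidden states. Below, the decoder begins: %618 reshapes
// the [1,4] decoder token ids (%150) and %619 embeds them with the shared
// [32128,512] embedding table %130.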
%617 = torch.prim.ListConstruct %int-1, %int4 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%618 = torch.aten.view %150, %617 : !torch.vtensor<[1,4],si64>, !torch.list<int> -> !torch.vtensor<[1,4],si64> loc(#loc490)
%619 = torch.aten.embedding %130, %618, %int-1, %false, %false : !torch.vtensor<[32128,512],f32>, !torch.vtensor<[1,4],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[1,4,512],f32> loc(#loc491)
%620 = torch.aten.ones %132, %none, %none, %cpu, %false : !torch.list<int>, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[1,4],f32> loc(#loc492)
%621 = torch.aten.ones %154, %int4, %none, %cpu, %false : !torch.list<int>, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[1,15],si64> loc(#loc493)
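// note (annotation): %620 and %621 are all-ones attention masks for the
// decoder (length 4) and encoder (length 15) sequences. The ops below build
// the decoder's causal self-attention mask by comparing broadcast position
// indices (key position <= query position gives a lower-triangular pattern).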
%622 = torch.aten.arange.start_step %int0, %int4, %int1, %none, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[4],si64> loc(#loc494)
%623 = torch.aten.unsqueeze %622, %int0 : !torch.vtensor<[4],si64>, !torch.int -> !torch.vtensor<[1,4],si64> loc(#loc495)
%624 = torch.aten.unsqueeze %623, %int1 : !torch.vtensor<[1,4],si64>, !torch.int -> !torch.vtensor<[1,1,4],si64> loc(#loc496)
%625 = torch.prim.ListConstruct %int1, %int1, %int1, %int1, %int1, %int4 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc497)
%626 = torch.prim.ListConstruct %int1, %int1, %int4, %int1, %int1, %int4 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc497)
%627 = torch.prim.ListConstruct %int1, %int4, %int4 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc497)
%628 = torch.aten.view %624, %625 : !torch.vtensor<[1,1,4],si64>, !torch.list<int> -> !torch.vtensor<[1,1,1,1,1,4],si64> loc(#loc497)
%629 = torch.aten.broadcast_to %628, %626 : !torch.vtensor<[1,1,1,1,1,4],si64>, !torch.list<int> -> !torch.vtensor<[1,1,4,1,1,4],si64> loc(#loc497)
%630 = torch.aten.view %629, %627 : !torch.vtensor<[1,1,4,1,1,4],si64>, !torch.list<int> -> !torch.vtensor<[1,4,4],si64> loc(#loc497)
%631 = torch.aten.unsqueeze %622, %int0 : !torch.vtensor<[4],si64>, !torch.int -> !torch.vtensor<[1,4],si64> loc(#loc498)
%632 = torch.aten.unsqueeze %631, %int2 : !torch.vtensor<[1,4],si64>, !torch.int -> !torch.vtensor<[1,4,1],si64> loc(#loc499)
%633 = torch.aten.le.Tensor %630, %632 : !torch.vtensor<[1,4,4],si64>, !torch.vtensor<[1,4,1],si64> -> !torch.vtensor<[1,4,4],i1> loc(#loc500)
%634 = torch.aten.to.dtype %633, %int6, %false, %false, %none : !torch.vtensor<[1,4,4],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,4,4],f32> loc(#loc501)
%635 = torch.aten.unsqueeze %634, %int1 : !torch.vtensor<[1,4,4],f32>, !torch.int -> !torch.vtensor<[1,1,4,4],f32> loc(#loc502)
%636 = torch.aten.unsqueeze %620, %int1 : !torch.vtensor<[1,4],f32>, !torch.int -> !torch.vtensor<[1,1,4],f32> loc(#loc503)
%637 = torch.aten.unsqueeze %636, %int2 : !torch.vtensor<[1,1,4],f32>, !torch.int -> !torch.vtensor<[1,1,1,4],f32> loc(#loc504)
%638 = torch.aten.mul.Tensor %635, %637 : !torch.vtensor<[1,1,4,4],f32>, !torch.vtensor<[1,1,1,4],f32> -> !torch.vtensor<[1,1,4,4],f32> loc(#loc505)
%639 = torch.aten.rsub.Scalar %638, %float1.000000e00, %int1 : !torch.vtensor<[1,1,4,4],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,1,4,4],f32> loc(#loc506)
%640 = torch.aten.mul.Scalar %639, %float-3.402820e38 : !torch.vtensor<[1,1,4,4],f32>, !torch.float -> !torch.vtensor<[1,1,4,4],f32> loc(#loc507)
%641 = torch.aten.unsqueeze %621, %int1 : !torch.vtensor<[1,15],si64>, !torch.int -> !torch.vtensor<[1,1,15],si64> loc(#loc508)
%642 = torch.aten.unsqueeze %641, %int2 : !torch.vtensor<[1,1,15],si64>, !torch.int -> !torch.vtensor<[1,1,1,15],si64> loc(#loc509)
%643 = torch.aten.to.dtype %642, %int6, %false, %false, %none : !torch.vtensor<[1,1,1,15],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,1,1,15],f32> loc(#loc510)
%644 = torch.aten.rsub.Scalar %643, %float1.000000e00, %int1 : !torch.vtensor<[1,1,1,15],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,1,1,15],f32> loc(#loc511)
%645 = torch.aten.mul.Scalar %644, %float-3.402820e38 : !torch.vtensor<[1,1,1,15],f32>, !torch.float -> !torch.vtensor<[1,1,1,15],f32> loc(#loc512)
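// note (annotation): %640 is the additive causal self-attention mask
// ([1,1,4,4]) and %645 the additive cross-attention mask ([1,1,1,15]); both
// follow the (1 - mask) * -3.402820e38 pattern, so allowed positions
// contribute 0 and masked positions a very large negative score.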
%646 = torch.aten.pow.Tensor_Scalar %619, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc513)
%647 = torch.aten.sum.dim_IntList %646, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc514)
%648 = torch.aten.div.Scalar %647, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc514)
%649 = torch.aten.add.Scalar %648, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc515)
%650 = torch.aten.rsqrt %649 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc516)
%651 = torch.aten.mul.Tensor %619, %650 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc517)
%652 = torch.aten.mul.Tensor %79, %651 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc518)
%653 = torch.aten.transpose.int %78, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc519)
%654 = torch.prim.ListConstruct %int4, %int512 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc1280)
%655 = torch.aten.view %652, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc520)
%656 = torch.aten.mm %655, %653 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc521)
%657 = torch.prim.ListConstruct %int1, %int4, %int512 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1281)
%658 = torch.aten.view %656, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc523)
%659 = torch.aten.view %658, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc524)
%660 = torch.aten.transpose.int %659, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc525)
%661 = torch.aten.transpose.int %77, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc526)
%662 = torch.aten.view %652, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc527)
%663 = torch.aten.mm %662, %661 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc528)
%664 = torch.aten.view %663, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc529)
%665 = torch.aten.view %664, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc530)
%666 = torch.aten.transpose.int %665, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc531)
%667 = torch.aten.transpose.int %76, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc532)
%668 = torch.aten.view %652, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc533)
%669 = torch.aten.mm %668, %667 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc534)
%670 = torch.aten.view %669, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc535)
%671 = torch.aten.view %670, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc536)
%672 = torch.aten.transpose.int %671, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc537)
%673 = torch.aten.transpose.int %666, %int3, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,4],f32> loc(#loc538)
%674 = torch.prim.ListConstruct %int1, %int8, %int4, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1282)
%675 = torch.aten.broadcast_to %660, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc539)
%676 = torch.prim.ListConstruct %int8, %int4, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1283)
%677 = torch.aten.view %675, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc540)
%678 = torch.prim.ListConstruct %int1, %int8, %int64, %int4 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1284)
%679 = torch.aten.broadcast_to %673, %678 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,4],f32> loc(#loc541)
%680 = torch.prim.ListConstruct %int8, %int64, %int4 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1285)
%681 = torch.aten.view %679, %680 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[8,64,4],f32> loc(#loc542)
%682 = torch.aten.bmm %677, %681 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,4],f32> -> !torch.vtensor<[8,4,4],f32> loc(#loc543)
%683 = torch.prim.ListConstruct %int1, %int8, %int4, %int4 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1286)
%684 = torch.aten.view %682, %683 : !torch.vtensor<[8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc545)
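// note (annotation): the block below appears to be T5's relative-position
// bucketing for the decoder self-attention bias: relative distances
// (key pos - query pos) are clamped to <= 0 and negated, kept exact below 16,
// mapped logarithmically above that (log(n/16) / log(128/16) * 16 + 16,
// clamped to bucket 31), then used to index the [32,8] bias embedding %75;
// the result is added to the scores together with the causal mask %640.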
%685 = torch.aten.arange.start_step %int0, %int4, %int1, %int4, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[4],si64> loc(#loc546)
%686 = torch.aten.unsqueeze %685, %int1 : !torch.vtensor<[4],si64>, !torch.int -> !torch.vtensor<[4,1],si64> loc(#loc547)
%687 = torch.aten.arange.start_step %int0, %int4, %int1, %int4, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[4],si64> loc(#loc548)
%688 = torch.aten.unsqueeze %687, %int0 : !torch.vtensor<[4],si64>, !torch.int -> !torch.vtensor<[1,4],si64> loc(#loc549)
%689 = torch.aten.sub.Tensor %688, %686, %int1 : !torch.vtensor<[1,4],si64>, !torch.vtensor<[4,1],si64>, !torch.int -> !torch.vtensor<[4,4],si64> loc(#loc550)
%690 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc551)
%691 = torch.prim.ListConstruct %int4, %int4 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc551)
%692 = torch.aten.broadcast_to %690, %691 : !torch.vtensor<[],si64>, !torch.list<int> -> !torch.vtensor<[4,4],si64> loc(#loc551)
%693 = torch.aten.minimum %689, %692 : !torch.vtensor<[4,4],si64>, !torch.vtensor<[4,4],si64> -> !torch.vtensor<[4,4],si64> loc(#loc552)
%694 = torch.aten.neg %693 : !torch.vtensor<[4,4],si64> -> !torch.vtensor<[4,4],si64> loc(#loc553)
%695 = torch.aten.lt.Scalar %694, %int16 : !torch.vtensor<[4,4],si64>, !torch.int -> !torch.vtensor<[4,4],i1> loc(#loc554)
%696 = torch.aten.to.dtype %694, %int6, %false, %false, %none : !torch.vtensor<[4,4],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[4,4],f32> loc(#loc555)
%697 = torch.aten.div.Scalar %696, %int16 : !torch.vtensor<[4,4],f32>, !torch.int -> !torch.vtensor<[4,4],f32> loc(#loc556)
%698 = torch.aten.log %697 : !torch.vtensor<[4,4],f32> -> !torch.vtensor<[4,4],f32> loc(#loc557)
%699 = torch.aten.div.Scalar %698, %float2.079440e00 : !torch.vtensor<[4,4],f32>, !torch.float -> !torch.vtensor<[4,4],f32> loc(#loc558)
%700 = torch.aten.mul.Scalar %699, %int16 : !torch.vtensor<[4,4],f32>, !torch.int -> !torch.vtensor<[4,4],f32> loc(#loc559)
%701 = torch.aten.to.dtype %700, %int4, %false, %false, %none : !torch.vtensor<[4,4],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[4,4],si64> loc(#loc560)
%702 = torch.aten.add.Scalar %701, %int16, %int1 : !torch.vtensor<[4,4],si64>, !torch.int, !torch.int -> !torch.vtensor<[4,4],si64> loc(#loc561)
%703 = torch.prim.NumToTensor.Scalar %int31 : !torch.int -> !torch.vtensor<[],si64> loc(#loc562)
%704 = torch.aten.broadcast_to %703, %691 : !torch.vtensor<[],si64>, !torch.list<int> -> !torch.vtensor<[4,4],si64> loc(#loc562)
%705 = torch.aten.minimum %702, %704 : !torch.vtensor<[4,4],si64>, !torch.vtensor<[4,4],si64> -> !torch.vtensor<[4,4],si64> loc(#loc563)
%706 = torch.aten.where.self %695, %694, %705 : !torch.vtensor<[4,4],i1>, !torch.vtensor<[4,4],si64>, !torch.vtensor<[4,4],si64> -> !torch.vtensor<[4,4],si64> loc(#loc564)
%707 = torch.aten.add.Scalar %706, %int0, %int1 : !torch.vtensor<[4,4],si64>, !torch.int, !torch.int -> !torch.vtensor<[4,4],si64> loc(#loc565)
%708 = torch.aten.embedding %75, %707, %int-1, %false, %false : !torch.vtensor<[32,8],f32>, !torch.vtensor<[4,4],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[4,4,8],f32> loc(#loc566)
%709 = torch.aten.permute %708, %226 : !torch.vtensor<[4,4,8],f32>, !torch.list<int> -> !torch.vtensor<[8,4,4],f32> loc(#loc567)
%710 = torch.aten.unsqueeze %709, %int0 : !torch.vtensor<[8,4,4],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc544)
%711 = torch.aten.add.Tensor %710, %640, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,1,4,4],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc568)
%712 = torch.aten.add.Tensor %684, %711, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,4],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc569)
%values_10, %indices_11 = torch.aten.max.dim %712, %int-1, %true : !torch.vtensor<[1,8,4,4],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc570)
%713 = torch.aten.sub.Tensor %712, %values_10, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc571)
%714 = torch.aten.exp %713 : !torch.vtensor<[1,8,4,4],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc572)
%715 = torch.aten.sum.dim_IntList %714, %161, %true, %none : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc573)
%716 = torch.aten.div.Tensor %714, %715 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc574)
%717 = torch.aten.broadcast_to %716, %683 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc575)
%718 = torch.prim.ListConstruct %int8, %int4, %int4 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1287)
%719 = torch.aten.view %717, %718 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[8,4,4],f32> loc(#loc576)
%720 = torch.aten.broadcast_to %672, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc577)
%721 = torch.aten.view %720, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc578)
%722 = torch.aten.bmm %719, %721 : !torch.vtensor<[8,4,4],f32>, !torch.vtensor<[8,4,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc579)
%723 = torch.aten.view %722, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc580)
%724 = torch.aten.transpose.int %723, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc581)
%725 = torch.aten.clone %724, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc582)
%726 = torch.aten.view %725, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc522)
%727 = torch.aten.transpose.int %74, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc583)
%728 = torch.aten.view %726, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc584)
%729 = torch.aten.mm %728, %727 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc585)
%730 = torch.aten.view %729, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc586)
%731 = torch.aten.add.Tensor %619, %730, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc587)
%732 = torch.aten.pow.Tensor_Scalar %731, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc588)
%733 = torch.aten.sum.dim_IntList %732, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc589)
%734 = torch.aten.div.Scalar %733, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc589)
%735 = torch.aten.add.Scalar %734, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc590)
%736 = torch.aten.rsqrt %735 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc591)
%737 = torch.aten.mul.Tensor %731, %736 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc592)
%738 = torch.aten.mul.Tensor %73, %737 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc593)
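// note (annotation): cross-attention of this decoder layer — the query
// projection below reads the normalized decoder states %738, while the key
// and value projections (%71, %70) read the encoder output %616; the scores
// therefore have shape [1,8,4,15] and use the cross-attention mask %645.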
%739 = torch.aten.transpose.int %72, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc594) | |
%740 = torch.aten.view %738, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc595) | |
%741 = torch.aten.mm %740, %739 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc596) | |
%742 = torch.aten.view %741, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc597) | |
%743 = torch.aten.view %742, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc598) | |
%744 = torch.aten.transpose.int %743, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc599) | |
%745 = torch.aten.transpose.int %71, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc600) | |
%746 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc601) | |
%747 = torch.aten.mm %746, %745 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc602) | |
%748 = torch.aten.view %747, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc603) | |
%749 = torch.aten.view %748, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc604) | |
%750 = torch.aten.transpose.int %749, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc605) | |
%751 = torch.aten.transpose.int %70, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc606) | |
%752 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc607) | |
%753 = torch.aten.mm %752, %751 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc608) | |
%754 = torch.aten.view %753, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc609) | |
%755 = torch.aten.view %754, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc610) | |
%756 = torch.aten.transpose.int %755, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc611) | |
%757 = torch.aten.transpose.int %750, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc612) | |
%758 = torch.aten.broadcast_to %744, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc613) | |
%759 = torch.aten.view %758, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc614) | |
%760 = torch.aten.broadcast_to %757, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc615) | |
%761 = torch.aten.view %760, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc616) | |
%762 = torch.aten.bmm %759, %761 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,4,15],f32> loc(#loc617) | |
%763 = torch.prim.ListConstruct %int1, %int8, %int4, %int15 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1288) | |
%764 = torch.aten.view %762, %763 : !torch.vtensor<[8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc621) | |
%765 = torch.aten.zeros %763, %int6, %none, %cpu, %false : !torch.list<int>, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[1,8,4,15],f32> loc(#loc622) | |
%766 = torch.aten.add.Tensor %765, %645, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,1,1,15],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc620) | |
%767 = torch.aten.add.Tensor %764, %766, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,15],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc623) | |
%values_12, %indices_13 = torch.aten.max.dim %767, %int-1, %true : !torch.vtensor<[1,8,4,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc624) | |
%768 = torch.aten.sub.Tensor %767, %values_12, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc625) | |
%769 = torch.aten.exp %768 : !torch.vtensor<[1,8,4,15],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc626) | |
%770 = torch.aten.sum.dim_IntList %769, %161, %true, %none : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc627) | |
%771 = torch.aten.div.Tensor %769, %770 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc628) | |
%772 = torch.aten.broadcast_to %771, %763 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc629) | |
%773 = torch.prim.ListConstruct %int8, %int4, %int15 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1289) | |
%774 = torch.aten.view %772, %773 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[8,4,15],f32> loc(#loc630) | |
%775 = torch.aten.broadcast_to %756, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc631) | |
%776 = torch.aten.view %775, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc632) | |
%777 = torch.aten.bmm %774, %776 : !torch.vtensor<[8,4,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc633) | |
%778 = torch.aten.view %777, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc634) | |
%779 = torch.aten.transpose.int %778, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc635) | |
%780 = torch.aten.clone %779, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc636) | |
%781 = torch.aten.view %780, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc637) | |
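// Output projection: %69 applied as x @ W^T via the transpose+mm pair (a bias-free
// linear layer), then the residual add with %731, the stream carried in from the
// previous sublayer.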
%782 = torch.aten.transpose.int %69, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc638) | |
%783 = torch.aten.view %781, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc639) | |
%784 = torch.aten.mm %783, %782 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc640) | |
%785 = torch.aten.view %784, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc641) | |
%786 = torch.aten.add.Tensor %731, %785, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc642) | |
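// T5-style RMSNorm on the residual stream:
//   y = w * x * rsqrt(mean(x^2, dim=-1) + eps)
// with the mean realized as sum/512, eps = %float9.999990e-07 (~1e-6), and %68 the
// scale weight. No mean subtraction and no bias, unlike standard LayerNorm.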
%787 = torch.aten.pow.Tensor_Scalar %786, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc643) | |
%788 = torch.aten.sum.dim_IntList %787, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc644) | |
%789 = torch.aten.div.Scalar %788, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc644) | |
%790 = torch.aten.add.Scalar %789, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc645) | |
%791 = torch.aten.rsqrt %790 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc646) | |
%792 = torch.aten.mul.Tensor %786, %791 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc647) | |
%793 = torch.aten.mul.Tensor %68, %792 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc648) | |
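// Feed-forward sublayer: wi (%67: 512 -> 2048), ReLU, wo (%66: 2048 -> 512), then the
// residual add with %786. A bias-free dense-ReLU-dense MLP, matching T5's feed-forward
// (DenseReluDense in the Hugging Face implementation), again followed by RMSNorm.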
%794 = torch.aten.transpose.int %67, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc649) | |
%795 = torch.aten.view %793, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc650) | |
%796 = torch.aten.mm %795, %794 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[4,2048],f32> loc(#loc651) | |
%797 = torch.prim.ListConstruct %int1, %int4, %int2048 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1290) | |
%798 = torch.aten.view %796, %797 : !torch.vtensor<[4,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,4,2048],f32> loc(#loc653) | |
%799 = torch.aten.relu %798 : !torch.vtensor<[1,4,2048],f32> -> !torch.vtensor<[1,4,2048],f32> loc(#loc652) | |
%800 = torch.aten.transpose.int %66, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc654) | |
%801 = torch.prim.ListConstruct %int4, %int2048 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc1291) | |
%802 = torch.aten.view %799, %801 : !torch.vtensor<[1,4,2048],f32>, !torch.list<int> -> !torch.vtensor<[4,2048],f32> loc(#loc655) | |
%803 = torch.aten.mm %802, %800 : !torch.vtensor<[4,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc656) | |
%804 = torch.aten.view %803, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc657) | |
%805 = torch.aten.add.Tensor %786, %804, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc658) | |
%806 = torch.aten.pow.Tensor_Scalar %805, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc659) | |
%807 = torch.aten.sum.dim_IntList %806, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc660) | |
%808 = torch.aten.div.Scalar %807, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc660) | |
%809 = torch.aten.add.Scalar %808, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc661) | |
%810 = torch.aten.rsqrt %809 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc662) | |
%811 = torch.aten.mul.Tensor %805, %810 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc663) | |
%812 = torch.aten.mul.Tensor %65, %811 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc664) | |
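// --- Next decoder block: self-attention over the 4 decoder positions ---
// q/k/v are projections (%64/%63/%62) of the normed stream %812; the raw [1,8,4,4]
// scores get the self-attention position bias %711, computed once earlier (above this
// excerpt) and shared by every block (%838, %957, %1076), then the same
// softmax / context / output-projection (%61) / residual sequence as above.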
%813 = torch.aten.transpose.int %64, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc665) | |
%814 = torch.aten.view %812, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc666) | |
%815 = torch.aten.mm %814, %813 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc667) | |
%816 = torch.aten.view %815, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc668) | |
%817 = torch.aten.view %816, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc669) | |
%818 = torch.aten.transpose.int %817, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc670) | |
%819 = torch.aten.transpose.int %63, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc671) | |
%820 = torch.aten.view %812, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc672) | |
%821 = torch.aten.mm %820, %819 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc673) | |
%822 = torch.aten.view %821, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc674) | |
%823 = torch.aten.view %822, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc675) | |
%824 = torch.aten.transpose.int %823, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc676) | |
%825 = torch.aten.transpose.int %62, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc677) | |
%826 = torch.aten.view %812, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc678) | |
%827 = torch.aten.mm %826, %825 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc679) | |
%828 = torch.aten.view %827, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc680) | |
%829 = torch.aten.view %828, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc681) | |
%830 = torch.aten.transpose.int %829, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc682) | |
%831 = torch.aten.transpose.int %824, %int3, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,4],f32> loc(#loc683) | |
%832 = torch.aten.broadcast_to %818, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc684) | |
%833 = torch.aten.view %832, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc685) | |
%834 = torch.aten.broadcast_to %831, %678 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,4],f32> loc(#loc686) | |
%835 = torch.aten.view %834, %680 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[8,64,4],f32> loc(#loc687) | |
%836 = torch.aten.bmm %833, %835 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,4],f32> -> !torch.vtensor<[8,4,4],f32> loc(#loc688) | |
%837 = torch.aten.view %836, %683 : !torch.vtensor<[8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc689) | |
%838 = torch.aten.add.Tensor %837, %711, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,4],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc690) | |
%values_14, %indices_15 = torch.aten.max.dim %838, %int-1, %true : !torch.vtensor<[1,8,4,4],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc691) | |
%839 = torch.aten.sub.Tensor %838, %values_14, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc692) | |
%840 = torch.aten.exp %839 : !torch.vtensor<[1,8,4,4],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc693) | |
%841 = torch.aten.sum.dim_IntList %840, %161, %true, %none : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc694) | |
%842 = torch.aten.div.Tensor %840, %841 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc695) | |
%843 = torch.aten.broadcast_to %842, %683 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc696) | |
%844 = torch.aten.view %843, %718 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[8,4,4],f32> loc(#loc697) | |
%845 = torch.aten.broadcast_to %830, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc698) | |
%846 = torch.aten.view %845, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc699) | |
%847 = torch.aten.bmm %844, %846 : !torch.vtensor<[8,4,4],f32>, !torch.vtensor<[8,4,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc700) | |
%848 = torch.aten.view %847, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc701) | |
%849 = torch.aten.transpose.int %848, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc702) | |
%850 = torch.aten.clone %849, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc703) | |
%851 = torch.aten.view %850, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc704) | |
%852 = torch.aten.transpose.int %61, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc705) | |
%853 = torch.aten.view %851, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc706) | |
%854 = torch.aten.mm %853, %852 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc707) | |
%855 = torch.aten.view %854, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc708) | |
%856 = torch.aten.add.Tensor %805, %855, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc709) | |
%857 = torch.aten.pow.Tensor_Scalar %856, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc710) | |
%858 = torch.aten.sum.dim_IntList %857, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc711) | |
%859 = torch.aten.div.Scalar %858, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc711) | |
%860 = torch.aten.add.Scalar %859, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc712) | |
%861 = torch.aten.rsqrt %860 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc713) | |
%862 = torch.aten.mul.Tensor %856, %861 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc714) | |
%863 = torch.aten.mul.Tensor %60, %862 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc715) | |
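// Cross-attention for this block: the query comes from the decoder stream (%863 via
// %59); keys and values are fresh per-block projections (%58/%57) of the same encoder
// output %616, and the cached bias %766 is added before the usual softmax, context,
// output projection (%56), and residual.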
%864 = torch.aten.transpose.int %59, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc716) | |
%865 = torch.aten.view %863, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc717) | |
%866 = torch.aten.mm %865, %864 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc718) | |
%867 = torch.aten.view %866, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc719) | |
%868 = torch.aten.view %867, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc720) | |
%869 = torch.aten.transpose.int %868, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc721) | |
%870 = torch.aten.transpose.int %58, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc722) | |
%871 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc723) | |
%872 = torch.aten.mm %871, %870 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc724) | |
%873 = torch.aten.view %872, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc725) | |
%874 = torch.aten.view %873, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc726) | |
%875 = torch.aten.transpose.int %874, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc727) | |
%876 = torch.aten.transpose.int %57, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc728) | |
%877 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc729) | |
%878 = torch.aten.mm %877, %876 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc730) | |
%879 = torch.aten.view %878, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc731) | |
%880 = torch.aten.view %879, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc732) | |
%881 = torch.aten.transpose.int %880, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc733) | |
%882 = torch.aten.transpose.int %875, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc734) | |
%883 = torch.aten.broadcast_to %869, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc735) | |
%884 = torch.aten.view %883, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc736) | |
%885 = torch.aten.broadcast_to %882, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc737) | |
%886 = torch.aten.view %885, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc738) | |
%887 = torch.aten.bmm %884, %886 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,4,15],f32> loc(#loc739) | |
%888 = torch.aten.view %887, %763 : !torch.vtensor<[8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc740) | |
%889 = torch.aten.add.Tensor %888, %766, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,15],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc741) | |
%values_16, %indices_17 = torch.aten.max.dim %889, %int-1, %true : !torch.vtensor<[1,8,4,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc742) | |
%890 = torch.aten.sub.Tensor %889, %values_16, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc743) | |
%891 = torch.aten.exp %890 : !torch.vtensor<[1,8,4,15],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc744) | |
%892 = torch.aten.sum.dim_IntList %891, %161, %true, %none : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc745) | |
%893 = torch.aten.div.Tensor %891, %892 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc746) | |
%894 = torch.aten.broadcast_to %893, %763 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc747) | |
%895 = torch.aten.view %894, %773 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[8,4,15],f32> loc(#loc748) | |
%896 = torch.aten.broadcast_to %881, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc749) | |
%897 = torch.aten.view %896, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc750) | |
%898 = torch.aten.bmm %895, %897 : !torch.vtensor<[8,4,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc751) | |
%899 = torch.aten.view %898, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc752) | |
%900 = torch.aten.transpose.int %899, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc753) | |
%901 = torch.aten.clone %900, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc754) | |
%902 = torch.aten.view %901, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc755) | |
%903 = torch.aten.transpose.int %56, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc756) | |
%904 = torch.aten.view %902, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc757) | |
%905 = torch.aten.mm %904, %903 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc758) | |
%906 = torch.aten.view %905, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc759) | |
%907 = torch.aten.add.Tensor %856, %906, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc760) | |
%908 = torch.aten.pow.Tensor_Scalar %907, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc761) | |
%909 = torch.aten.sum.dim_IntList %908, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc762) | |
%910 = torch.aten.div.Scalar %909, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc762) | |
%911 = torch.aten.add.Scalar %910, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc763) | |
%912 = torch.aten.rsqrt %911 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc764) | |
%913 = torch.aten.mul.Tensor %907, %912 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc765) | |
%914 = torch.aten.mul.Tensor %55, %913 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc766) | |
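// Feed-forward for this block (wi %54, wo %53), structurally identical to the first
// FFN above.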
%915 = torch.aten.transpose.int %54, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc767) | |
%916 = torch.aten.view %914, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc768) | |
%917 = torch.aten.mm %916, %915 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[4,2048],f32> loc(#loc769) | |
%918 = torch.aten.view %917, %797 : !torch.vtensor<[4,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,4,2048],f32> loc(#loc770) | |
%919 = torch.aten.relu %918 : !torch.vtensor<[1,4,2048],f32> -> !torch.vtensor<[1,4,2048],f32> loc(#loc771) | |
%920 = torch.aten.transpose.int %53, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc772) | |
%921 = torch.aten.view %919, %801 : !torch.vtensor<[1,4,2048],f32>, !torch.list<int> -> !torch.vtensor<[4,2048],f32> loc(#loc773) | |
%922 = torch.aten.mm %921, %920 : !torch.vtensor<[4,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc774) | |
%923 = torch.aten.view %922, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc775) | |
%924 = torch.aten.add.Tensor %907, %923, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc776) | |
%925 = torch.aten.pow.Tensor_Scalar %924, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc777) | |
%926 = torch.aten.sum.dim_IntList %925, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc778) | |
%927 = torch.aten.div.Scalar %926, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc778) | |
%928 = torch.aten.add.Scalar %927, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc779) | |
%929 = torch.aten.rsqrt %928 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc780) | |
%930 = torch.aten.mul.Tensor %924, %929 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc781) | |
%931 = torch.aten.mul.Tensor %52, %930 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc782) | |
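// Block boundary: %931 is the normed input to the next decoder block, which repeats
// the same self-attention / cross-attention / feed-forward pattern with weights %51
// down to %40.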
%932 = torch.aten.transpose.int %51, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc783) | |
%933 = torch.aten.view %931, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc784) | |
%934 = torch.aten.mm %933, %932 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc785) | |
%935 = torch.aten.view %934, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc786) | |
%936 = torch.aten.view %935, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc787) | |
%937 = torch.aten.transpose.int %936, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc788) | |
%938 = torch.aten.transpose.int %50, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc789) | |
%939 = torch.aten.view %931, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc790) | |
%940 = torch.aten.mm %939, %938 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc791) | |
%941 = torch.aten.view %940, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc792) | |
%942 = torch.aten.view %941, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc793) | |
%943 = torch.aten.transpose.int %942, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc794) | |
%944 = torch.aten.transpose.int %49, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc795) | |
%945 = torch.aten.view %931, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc796) | |
%946 = torch.aten.mm %945, %944 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc797) | |
%947 = torch.aten.view %946, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc798) | |
%948 = torch.aten.view %947, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc799) | |
%949 = torch.aten.transpose.int %948, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc800) | |
%950 = torch.aten.transpose.int %943, %int3, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,4],f32> loc(#loc801) | |
%951 = torch.aten.broadcast_to %937, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc802) | |
%952 = torch.aten.view %951, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc803) | |
%953 = torch.aten.broadcast_to %950, %678 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,4],f32> loc(#loc804) | |
%954 = torch.aten.view %953, %680 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[8,64,4],f32> loc(#loc805) | |
%955 = torch.aten.bmm %952, %954 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,4],f32> -> !torch.vtensor<[8,4,4],f32> loc(#loc806) | |
%956 = torch.aten.view %955, %683 : !torch.vtensor<[8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc807) | |
%957 = torch.aten.add.Tensor %956, %711, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,4],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc808) | |
%values_18, %indices_19 = torch.aten.max.dim %957, %int-1, %true : !torch.vtensor<[1,8,4,4],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc809) | |
%958 = torch.aten.sub.Tensor %957, %values_18, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc810) | |
%959 = torch.aten.exp %958 : !torch.vtensor<[1,8,4,4],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc811) | |
%960 = torch.aten.sum.dim_IntList %959, %161, %true, %none : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc812) | |
%961 = torch.aten.div.Tensor %959, %960 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc813) | |
%962 = torch.aten.broadcast_to %961, %683 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc814) | |
%963 = torch.aten.view %962, %718 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[8,4,4],f32> loc(#loc815) | |
%964 = torch.aten.broadcast_to %949, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc816) | |
%965 = torch.aten.view %964, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc817) | |
%966 = torch.aten.bmm %963, %965 : !torch.vtensor<[8,4,4],f32>, !torch.vtensor<[8,4,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc818) | |
%967 = torch.aten.view %966, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc819) | |
%968 = torch.aten.transpose.int %967, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc820) | |
%969 = torch.aten.clone %968, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc821) | |
%970 = torch.aten.view %969, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc822) | |
%971 = torch.aten.transpose.int %48, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc823) | |
%972 = torch.aten.view %970, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc824) | |
%973 = torch.aten.mm %972, %971 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc825) | |
%974 = torch.aten.view %973, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc826) | |
%975 = torch.aten.add.Tensor %924, %974, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc827) | |
%976 = torch.aten.pow.Tensor_Scalar %975, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc828) | |
%977 = torch.aten.sum.dim_IntList %976, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc829) | |
%978 = torch.aten.div.Scalar %977, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc829) | |
%979 = torch.aten.add.Scalar %978, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc830) | |
%980 = torch.aten.rsqrt %979 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc831) | |
%981 = torch.aten.mul.Tensor %975, %980 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc832) | |
%982 = torch.aten.mul.Tensor %47, %981 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc833) | |
%983 = torch.aten.transpose.int %46, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc834) | |
%984 = torch.aten.view %982, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc835) | |
%985 = torch.aten.mm %984, %983 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc836) | |
%986 = torch.aten.view %985, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc837) | |
%987 = torch.aten.view %986, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc838) | |
%988 = torch.aten.transpose.int %987, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc839) | |
%989 = torch.aten.transpose.int %45, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc840) | |
%990 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc841) | |
%991 = torch.aten.mm %990, %989 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc842) | |
%992 = torch.aten.view %991, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc843) | |
%993 = torch.aten.view %992, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc844) | |
%994 = torch.aten.transpose.int %993, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc845) | |
%995 = torch.aten.transpose.int %44, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc846) | |
%996 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc847) | |
%997 = torch.aten.mm %996, %995 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc848) | |
%998 = torch.aten.view %997, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc849) | |
%999 = torch.aten.view %998, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc850) | |
%1000 = torch.aten.transpose.int %999, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc851) | |
%1001 = torch.aten.transpose.int %994, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc852) | |
%1002 = torch.aten.broadcast_to %988, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc853) | |
%1003 = torch.aten.view %1002, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc854) | |
%1004 = torch.aten.broadcast_to %1001, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc855) | |
%1005 = torch.aten.view %1004, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc856) | |
%1006 = torch.aten.bmm %1003, %1005 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,4,15],f32> loc(#loc857) | |
%1007 = torch.aten.view %1006, %763 : !torch.vtensor<[8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc858) | |
%1008 = torch.aten.add.Tensor %1007, %766, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,15],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc859) | |
%values_20, %indices_21 = torch.aten.max.dim %1008, %int-1, %true : !torch.vtensor<[1,8,4,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc860) | |
%1009 = torch.aten.sub.Tensor %1008, %values_20, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc861) | |
%1010 = torch.aten.exp %1009 : !torch.vtensor<[1,8,4,15],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc862) | |
%1011 = torch.aten.sum.dim_IntList %1010, %161, %true, %none : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc863) | |
%1012 = torch.aten.div.Tensor %1010, %1011 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc864) | |
%1013 = torch.aten.broadcast_to %1012, %763 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc865) | |
%1014 = torch.aten.view %1013, %773 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[8,4,15],f32> loc(#loc866) | |
%1015 = torch.aten.broadcast_to %1000, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc867) | |
%1016 = torch.aten.view %1015, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc868) | |
%1017 = torch.aten.bmm %1014, %1016 : !torch.vtensor<[8,4,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc869) | |
%1018 = torch.aten.view %1017, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc870) | |
%1019 = torch.aten.transpose.int %1018, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc871) | |
%1020 = torch.aten.clone %1019, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc872) | |
%1021 = torch.aten.view %1020, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc873) | |
%1022 = torch.aten.transpose.int %43, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc874) | |
%1023 = torch.aten.view %1021, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc875) | |
%1024 = torch.aten.mm %1023, %1022 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc876) | |
%1025 = torch.aten.view %1024, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc877) | |
%1026 = torch.aten.add.Tensor %975, %1025, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc878) | |
%1027 = torch.aten.pow.Tensor_Scalar %1026, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc879) | |
%1028 = torch.aten.sum.dim_IntList %1027, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc880) | |
%1029 = torch.aten.div.Scalar %1028, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc880) | |
%1030 = torch.aten.add.Scalar %1029, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc881) | |
%1031 = torch.aten.rsqrt %1030 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc882) | |
%1032 = torch.aten.mul.Tensor %1026, %1031 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc883) | |
%1033 = torch.aten.mul.Tensor %42, %1032 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc884) | |
%1034 = torch.aten.transpose.int %41, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc885) | |
%1035 = torch.aten.view %1033, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc886) | |
%1036 = torch.aten.mm %1035, %1034 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[4,2048],f32> loc(#loc887) | |
%1037 = torch.aten.view %1036, %797 : !torch.vtensor<[4,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,4,2048],f32> loc(#loc888) | |
%1038 = torch.aten.relu %1037 : !torch.vtensor<[1,4,2048],f32> -> !torch.vtensor<[1,4,2048],f32> loc(#loc889) | |
%1039 = torch.aten.transpose.int %40, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc890) | |
%1040 = torch.aten.view %1038, %801 : !torch.vtensor<[1,4,2048],f32>, !torch.list<int> -> !torch.vtensor<[4,2048],f32> loc(#loc891) | |
%1041 = torch.aten.mm %1040, %1039 : !torch.vtensor<[4,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc892) | |
%1042 = torch.aten.view %1041, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc893) | |
%1043 = torch.aten.add.Tensor %1026, %1042, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc894) | |
%1044 = torch.aten.pow.Tensor_Scalar %1043, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc895) | |
%1045 = torch.aten.sum.dim_IntList %1044, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc896) | |
%1046 = torch.aten.div.Scalar %1045, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc896) | |
%1047 = torch.aten.add.Scalar %1046, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc897) | |
%1048 = torch.aten.rsqrt %1047 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc898) | |
%1049 = torch.aten.mul.Tensor %1043, %1048 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc899) | |
%1050 = torch.aten.mul.Tensor %39, %1049 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc900) | |
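// Block boundary: the next decoder block starts here and repeats the same pattern
// with weights %38 down to %27.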
%1051 = torch.aten.transpose.int %38, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc901) | |
%1052 = torch.aten.view %1050, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc902) | |
%1053 = torch.aten.mm %1052, %1051 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc903) | |
%1054 = torch.aten.view %1053, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc904) | |
%1055 = torch.aten.view %1054, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc905) | |
%1056 = torch.aten.transpose.int %1055, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc906) | |
%1057 = torch.aten.transpose.int %37, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc907) | |
%1058 = torch.aten.view %1050, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc908) | |
%1059 = torch.aten.mm %1058, %1057 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc909) | |
%1060 = torch.aten.view %1059, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc910) | |
%1061 = torch.aten.view %1060, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc911) | |
%1062 = torch.aten.transpose.int %1061, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc912) | |
%1063 = torch.aten.transpose.int %36, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc913) | |
%1064 = torch.aten.view %1050, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc914) | |
%1065 = torch.aten.mm %1064, %1063 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc915) | |
%1066 = torch.aten.view %1065, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc916) | |
%1067 = torch.aten.view %1066, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc917) | |
%1068 = torch.aten.transpose.int %1067, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc918) | |
%1069 = torch.aten.transpose.int %1062, %int3, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,4],f32> loc(#loc919) | |
%1070 = torch.aten.broadcast_to %1056, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc920) | |
%1071 = torch.aten.view %1070, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc921) | |
%1072 = torch.aten.broadcast_to %1069, %678 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,4],f32> loc(#loc922) | |
%1073 = torch.aten.view %1072, %680 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[8,64,4],f32> loc(#loc923) | |
%1074 = torch.aten.bmm %1071, %1073 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,4],f32> -> !torch.vtensor<[8,4,4],f32> loc(#loc924) | |
%1075 = torch.aten.view %1074, %683 : !torch.vtensor<[8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc925) | |
%1076 = torch.aten.add.Tensor %1075, %711, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,4],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc926) | |
%values_22, %indices_23 = torch.aten.max.dim %1076, %int-1, %true : !torch.vtensor<[1,8,4,4],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc927) | |
%1077 = torch.aten.sub.Tensor %1076, %values_22, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc928) | |
%1078 = torch.aten.exp %1077 : !torch.vtensor<[1,8,4,4],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc929) | |
%1079 = torch.aten.sum.dim_IntList %1078, %161, %true, %none : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc930) | |
%1080 = torch.aten.div.Tensor %1078, %1079 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc931) | |
%1081 = torch.aten.broadcast_to %1080, %683 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc932) | |
%1082 = torch.aten.view %1081, %718 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[8,4,4],f32> loc(#loc933) | |
%1083 = torch.aten.broadcast_to %1068, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc934) | |
%1084 = torch.aten.view %1083, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc935) | |
%1085 = torch.aten.bmm %1082, %1084 : !torch.vtensor<[8,4,4],f32>, !torch.vtensor<[8,4,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc936) | |
%1086 = torch.aten.view %1085, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc937) | |
%1087 = torch.aten.transpose.int %1086, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc938) | |
%1088 = torch.aten.clone %1087, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc939) | |
%1089 = torch.aten.view %1088, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc940) | |
%1090 = torch.aten.transpose.int %35, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc941) | |
%1091 = torch.aten.view %1089, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc942) | |
%1092 = torch.aten.mm %1091, %1090 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc943) | |
%1093 = torch.aten.view %1092, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc944) | |
%1094 = torch.aten.add.Tensor %1043, %1093, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc945) | |
%1095 = torch.aten.pow.Tensor_Scalar %1094, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc946) | |
%1096 = torch.aten.sum.dim_IntList %1095, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc947) | |
%1097 = torch.aten.div.Scalar %1096, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc947) | |
%1098 = torch.aten.add.Scalar %1097, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc948) | |
%1099 = torch.aten.rsqrt %1098 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc949) | |
%1100 = torch.aten.mul.Tensor %1094, %1099 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc950) | |
%1101 = torch.aten.mul.Tensor %34, %1100 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc951) | |
%1102 = torch.aten.transpose.int %33, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc952) | |
%1103 = torch.aten.view %1101, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc953) | |
%1104 = torch.aten.mm %1103, %1102 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc954) | |
%1105 = torch.aten.view %1104, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc955) | |
%1106 = torch.aten.view %1105, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc956) | |
%1107 = torch.aten.transpose.int %1106, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc957) | |
%1108 = torch.aten.transpose.int %32, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc958) | |
%1109 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc959) | |
%1110 = torch.aten.mm %1109, %1108 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc960) | |
%1111 = torch.aten.view %1110, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc961) | |
%1112 = torch.aten.view %1111, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc962) | |
%1113 = torch.aten.transpose.int %1112, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc963) | |
%1114 = torch.aten.transpose.int %31, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc964) | |
%1115 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc965) | |
%1116 = torch.aten.mm %1115, %1114 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc966) | |
%1117 = torch.aten.view %1116, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc967) | |
%1118 = torch.aten.view %1117, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc968) | |
%1119 = torch.aten.transpose.int %1118, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc969) | |
%1120 = torch.aten.transpose.int %1113, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc970) | |
%1121 = torch.aten.broadcast_to %1107, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc971) | |
%1122 = torch.aten.view %1121, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc972) | |
%1123 = torch.aten.broadcast_to %1120, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc973) | |
%1124 = torch.aten.view %1123, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc974) | |
%1125 = torch.aten.bmm %1122, %1124 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,4,15],f32> loc(#loc975) | |
%1126 = torch.aten.view %1125, %763 : !torch.vtensor<[8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc976) | |
%1127 = torch.aten.add.Tensor %1126, %766, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,15],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc977) | |
%values_24, %indices_25 = torch.aten.max.dim %1127, %int-1, %true : !torch.vtensor<[1,8,4,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc978) | |
%1128 = torch.aten.sub.Tensor %1127, %values_24, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc979) | |
%1129 = torch.aten.exp %1128 : !torch.vtensor<[1,8,4,15],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc980) | |
%1130 = torch.aten.sum.dim_IntList %1129, %161, %true, %none : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc981) | |
%1131 = torch.aten.div.Tensor %1129, %1130 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc982) | |
%1132 = torch.aten.broadcast_to %1131, %763 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc983) | |
%1133 = torch.aten.view %1132, %773 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[8,4,15],f32> loc(#loc984) | |
%1134 = torch.aten.broadcast_to %1119, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc985) | |
%1135 = torch.aten.view %1134, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc986) | |
%1136 = torch.aten.bmm %1133, %1135 : !torch.vtensor<[8,4,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc987) | |
%1137 = torch.aten.view %1136, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc988) | |
%1138 = torch.aten.transpose.int %1137, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc989) | |
%1139 = torch.aten.clone %1138, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc990) | |
%1140 = torch.aten.view %1139, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc991) | |
%1141 = torch.aten.transpose.int %30, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc992) | |
%1142 = torch.aten.view %1140, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc993) | |
%1143 = torch.aten.mm %1142, %1141 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc994) | |
%1144 = torch.aten.view %1143, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc995) | |
%1145 = torch.aten.add.Tensor %1094, %1144, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc996) | |
%1146 = torch.aten.pow.Tensor_Scalar %1145, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc997) | |
%1147 = torch.aten.sum.dim_IntList %1146, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc998) | |
%1148 = torch.aten.div.Scalar %1147, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc998) | |
%1149 = torch.aten.add.Scalar %1148, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc999) | |
%1150 = torch.aten.rsqrt %1149 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc1000) | |
%1151 = torch.aten.mul.Tensor %1145, %1150 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1001) | |
%1152 = torch.aten.mul.Tensor %29, %1151 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1002) | |
%1153 = torch.aten.transpose.int %28, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc1003) | |
%1154 = torch.aten.view %1152, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1004) | |
%1155 = torch.aten.mm %1154, %1153 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[4,2048],f32> loc(#loc1005) | |
%1156 = torch.aten.view %1155, %797 : !torch.vtensor<[4,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,4,2048],f32> loc(#loc1006) | |
%1157 = torch.aten.relu %1156 : !torch.vtensor<[1,4,2048],f32> -> !torch.vtensor<[1,4,2048],f32> loc(#loc1007) | |
%1158 = torch.aten.transpose.int %27, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc1008) | |
%1159 = torch.aten.view %1157, %801 : !torch.vtensor<[1,4,2048],f32>, !torch.list<int> -> !torch.vtensor<[4,2048],f32> loc(#loc1009) | |
%1160 = torch.aten.mm %1159, %1158 : !torch.vtensor<[4,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1010) | |
%1161 = torch.aten.view %1160, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1011) | |
%1162 = torch.aten.add.Tensor %1145, %1161, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1012) | |
%1163 = torch.aten.pow.Tensor_Scalar %1162, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1013) | |
%1164 = torch.aten.sum.dim_IntList %1163, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc1014) | |
%1165 = torch.aten.div.Scalar %1164, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1014) | |
%1166 = torch.aten.add.Scalar %1165, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1015) | |
%1167 = torch.aten.rsqrt %1166 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc1016) | |
%1168 = torch.aten.mul.Tensor %1162, %1167 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1017) | |
%1169 = torch.aten.mul.Tensor %26, %1168 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1018) | |
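// Annotation (added): a new decoder layer appears to start here with self-attention: Q/K/V projections (%25, %24, %23) of the normed state %1169, each reshaped from [1,4,512] to 8 heads of 64 dims ([1,8,4,64]).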
%1170 = torch.aten.transpose.int %25, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1019) | |
%1171 = torch.aten.view %1169, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1020) | |
%1172 = torch.aten.mm %1171, %1170 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1021) | |
%1173 = torch.aten.view %1172, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1022) | |
%1174 = torch.aten.view %1173, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1023) | |
%1175 = torch.aten.transpose.int %1174, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1024) | |
%1176 = torch.aten.transpose.int %24, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1025) | |
%1177 = torch.aten.view %1169, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1026) | |
%1178 = torch.aten.mm %1177, %1176 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1027) | |
%1179 = torch.aten.view %1178, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1028) | |
%1180 = torch.aten.view %1179, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1029) | |
%1181 = torch.aten.transpose.int %1180, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1030) | |
%1182 = torch.aten.transpose.int %23, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1031) | |
%1183 = torch.aten.view %1169, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1032) | |
%1184 = torch.aten.mm %1183, %1182 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1033) | |
%1185 = torch.aten.view %1184, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1034) | |
%1186 = torch.aten.view %1185, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1035) | |
%1187 = torch.aten.transpose.int %1186, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1036) | |
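// Annotation (added): self-attention scores Q @ K^T -> [1,8,4,4]; %1195 then adds %711, apparently the relative position bias plus causal mask built in the first self-attention layer and reused here. Note there is no 1/sqrt(d_head) scaling of the scores, which matches T5.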
%1188 = torch.aten.transpose.int %1181, %int3, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,4],f32> loc(#loc1037) | |
%1189 = torch.aten.broadcast_to %1175, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1038) | |
%1190 = torch.aten.view %1189, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc1039) | |
%1191 = torch.aten.broadcast_to %1188, %678 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,4],f32> loc(#loc1040) | |
%1192 = torch.aten.view %1191, %680 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[8,64,4],f32> loc(#loc1041) | |
%1193 = torch.aten.bmm %1190, %1192 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,4],f32> -> !torch.vtensor<[8,4,4],f32> loc(#loc1042) | |
%1194 = torch.aten.view %1193, %683 : !torch.vtensor<[8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1043) | |
%1195 = torch.aten.add.Tensor %1194, %711, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,4],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1044) | |
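// Annotation (added): numerically stable softmax over the last dim: subtract the row max, exponentiate, normalize by the sum. As hypothetical PyTorch pseudocode:
//   m = s.max(-1, keepdim=True).values; p = (s - m).exp(); p = p / p.sum(-1, keepdim=True)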
%values_26, %indices_27 = torch.aten.max.dim %1195, %int-1, %true : !torch.vtensor<[1,8,4,4],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc1045) | |
%1196 = torch.aten.sub.Tensor %1195, %values_26, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1046) | |
%1197 = torch.aten.exp %1196 : !torch.vtensor<[1,8,4,4],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1047) | |
%1198 = torch.aten.sum.dim_IntList %1197, %161, %true, %none : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc1048) | |
%1199 = torch.aten.div.Tensor %1197, %1198 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1049) | |
%1200 = torch.aten.broadcast_to %1199, %683 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1050) | |
%1201 = torch.aten.view %1200, %718 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[8,4,4],f32> loc(#loc1051) | |
%1202 = torch.aten.broadcast_to %1187, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1052) | |
%1203 = torch.aten.view %1202, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc1053) | |
%1204 = torch.aten.bmm %1201, %1203 : !torch.vtensor<[8,4,4],f32>, !torch.vtensor<[8,4,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc1054) | |
%1205 = torch.aten.view %1204, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1055) | |
%1206 = torch.aten.transpose.int %1205, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1056) | |
%1207 = torch.aten.clone %1206, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1057) | |
%1208 = torch.aten.view %1207, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1058) | |
%1209 = torch.aten.transpose.int %22, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1059) | |
%1210 = torch.aten.view %1208, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1060) | |
%1211 = torch.aten.mm %1210, %1209 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1061) | |
%1212 = torch.aten.view %1211, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1062) | |
%1213 = torch.aten.add.Tensor %1162, %1212, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1063) | |
%1214 = torch.aten.pow.Tensor_Scalar %1213, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1064) | |
%1215 = torch.aten.sum.dim_IntList %1214, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc1065) | |
%1216 = torch.aten.div.Scalar %1215, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1065) | |
%1217 = torch.aten.add.Scalar %1216, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1066) | |
%1218 = torch.aten.rsqrt %1217 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc1067) | |
%1219 = torch.aten.mul.Tensor %1213, %1218 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1068) | |
%1220 = torch.aten.mul.Tensor %21, %1219 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1069) | |
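// Annotation (added): cross-attention: Q (%20) is projected from the decoder state, while K (%19) and V (%18) are projected from the encoder output %616 ([1,15,512], source length 15).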
%1221 = torch.aten.transpose.int %20, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1070) | |
%1222 = torch.aten.view %1220, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1071) | |
%1223 = torch.aten.mm %1222, %1221 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1072) | |
%1224 = torch.aten.view %1223, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1073) | |
%1225 = torch.aten.view %1224, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1074) | |
%1226 = torch.aten.transpose.int %1225, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1075) | |
%1227 = torch.aten.transpose.int %19, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1076) | |
%1228 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc1077) | |
%1229 = torch.aten.mm %1228, %1227 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc1078) | |
%1230 = torch.aten.view %1229, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc1079) | |
%1231 = torch.aten.view %1230, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc1080) | |
%1232 = torch.aten.transpose.int %1231, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc1081) | |
%1233 = torch.aten.transpose.int %18, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1082) | |
%1234 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc1083) | |
%1235 = torch.aten.mm %1234, %1233 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc1084) | |
%1236 = torch.aten.view %1235, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc1085) | |
%1237 = torch.aten.view %1236, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc1086) | |
%1238 = torch.aten.transpose.int %1237, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc1087) | |
%1239 = torch.aten.transpose.int %1232, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc1088) | |
%1240 = torch.aten.broadcast_to %1226, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1089) | |
%1241 = torch.aten.view %1240, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc1090) | |
%1242 = torch.aten.broadcast_to %1239, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc1091) | |
%1243 = torch.aten.view %1242, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc1092) | |
%1244 = torch.aten.bmm %1241, %1243 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,4,15],f32> loc(#loc1093) | |
%1245 = torch.aten.view %1244, %763 : !torch.vtensor<[8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1094) | |
%1246 = torch.aten.add.Tensor %1245, %766, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,15],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1095) | |
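// Annotation (added): cross-attention scores [1,8,4,15] plus %766, apparently the encoder attention mask/bias, followed by the same stable-softmax pattern as above.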
%values_28, %indices_29 = torch.aten.max.dim %1246, %int-1, %true : !torch.vtensor<[1,8,4,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc1096) | |
%1247 = torch.aten.sub.Tensor %1246, %values_28, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1097) | |
%1248 = torch.aten.exp %1247 : !torch.vtensor<[1,8,4,15],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1098) | |
%1249 = torch.aten.sum.dim_IntList %1248, %161, %true, %none : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc1099) | |
%1250 = torch.aten.div.Tensor %1248, %1249 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1100) | |
%1251 = torch.aten.broadcast_to %1250, %763 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1101) | |
%1252 = torch.aten.view %1251, %773 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[8,4,15],f32> loc(#loc1102) | |
%1253 = torch.aten.broadcast_to %1238, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc1103) | |
%1254 = torch.aten.view %1253, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc1104) | |
%1255 = torch.aten.bmm %1252, %1254 : !torch.vtensor<[8,4,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc1105) | |
%1256 = torch.aten.view %1255, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1106) | |
%1257 = torch.aten.transpose.int %1256, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1107) | |
%1258 = torch.aten.clone %1257, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1108) | |
%1259 = torch.aten.view %1258, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1109) | |
%1260 = torch.aten.transpose.int %17, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1110) | |
%1261 = torch.aten.view %1259, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1111) | |
%1262 = torch.aten.mm %1261, %1260 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1112) | |
%1263 = torch.aten.view %1262, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1113) | |
%1264 = torch.aten.add.Tensor %1213, %1263, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1114) | |
%1265 = torch.aten.pow.Tensor_Scalar %1264, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1115) | |
%1266 = torch.aten.sum.dim_IntList %1265, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc1116) | |
%1267 = torch.aten.div.Scalar %1266, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1116) | |
%1268 = torch.aten.add.Scalar %1267, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1117) | |
%1269 = torch.aten.rsqrt %1268 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc1118) | |
%1270 = torch.aten.mul.Tensor %1264, %1269 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1119) | |
%1271 = torch.aten.mul.Tensor %16, %1270 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1120) | |
%1272 = torch.aten.transpose.int %15, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc1121) | |
%1273 = torch.aten.view %1271, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1122) | |
%1274 = torch.aten.mm %1273, %1272 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[4,2048],f32> loc(#loc1123) | |
%1275 = torch.aten.view %1274, %797 : !torch.vtensor<[4,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,4,2048],f32> loc(#loc1124) | |
%1276 = torch.aten.relu %1275 : !torch.vtensor<[1,4,2048],f32> -> !torch.vtensor<[1,4,2048],f32> loc(#loc1125) | |
%1277 = torch.aten.transpose.int %14, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc1126) | |
%1278 = torch.aten.view %1276, %801 : !torch.vtensor<[1,4,2048],f32>, !torch.list<int> -> !torch.vtensor<[4,2048],f32> loc(#loc1127) | |
%1279 = torch.aten.mm %1278, %1277 : !torch.vtensor<[4,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1128) | |
%1280 = torch.aten.view %1279, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1129) | |
%1281 = torch.aten.add.Tensor %1264, %1280, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1130) | |
%1282 = torch.aten.pow.Tensor_Scalar %1281, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1131) | |
%1283 = torch.aten.sum.dim_IntList %1282, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc1132) | |
%1284 = torch.aten.div.Scalar %1283, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1132) | |
%1285 = torch.aten.add.Scalar %1284, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1133) | |
%1286 = torch.aten.rsqrt %1285 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc1134) | |
%1287 = torch.aten.mul.Tensor %1281, %1286 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1135) | |
%1288 = torch.aten.mul.Tensor %13, %1287 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1136) | |
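// Annotation (added): the final decoder layer repeats the same pattern below with weights %12..%1: self-attention (%12, %11, %10, output %9), cross-attention over %616 (%7, %6, %5, output %4), and the feed-forward block (%2, %1), each preceded by RMSNorm and followed by a residual add.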
%1289 = torch.aten.transpose.int %12, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1137) | |
%1290 = torch.aten.view %1288, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1138) | |
%1291 = torch.aten.mm %1290, %1289 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1139) | |
%1292 = torch.aten.view %1291, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1140) | |
%1293 = torch.aten.view %1292, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1141) | |
%1294 = torch.aten.transpose.int %1293, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1142) | |
%1295 = torch.aten.transpose.int %11, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1143) | |
%1296 = torch.aten.view %1288, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1144) | |
%1297 = torch.aten.mm %1296, %1295 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1145) | |
%1298 = torch.aten.view %1297, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1146) | |
%1299 = torch.aten.view %1298, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1147) | |
%1300 = torch.aten.transpose.int %1299, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1148) | |
%1301 = torch.aten.transpose.int %10, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1149) | |
%1302 = torch.aten.view %1288, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1150) | |
%1303 = torch.aten.mm %1302, %1301 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1151) | |
%1304 = torch.aten.view %1303, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1152) | |
%1305 = torch.aten.view %1304, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1153) | |
%1306 = torch.aten.transpose.int %1305, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1154) | |
%1307 = torch.aten.transpose.int %1300, %int3, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,4],f32> loc(#loc1155) | |
%1308 = torch.aten.broadcast_to %1294, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1156) | |
%1309 = torch.aten.view %1308, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc1157) | |
%1310 = torch.aten.broadcast_to %1307, %678 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,4],f32> loc(#loc1158) | |
%1311 = torch.aten.view %1310, %680 : !torch.vtensor<[1,8,64,4],f32>, !torch.list<int> -> !torch.vtensor<[8,64,4],f32> loc(#loc1159) | |
%1312 = torch.aten.bmm %1309, %1311 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,4],f32> -> !torch.vtensor<[8,4,4],f32> loc(#loc1160) | |
%1313 = torch.aten.view %1312, %683 : !torch.vtensor<[8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1161) | |
%1314 = torch.aten.add.Tensor %1313, %711, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,4],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1162) | |
%values_30, %indices_31 = torch.aten.max.dim %1314, %int-1, %true : !torch.vtensor<[1,8,4,4],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc1163) | |
%1315 = torch.aten.sub.Tensor %1314, %values_30, %int1 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1164) | |
%1316 = torch.aten.exp %1315 : !torch.vtensor<[1,8,4,4],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1165) | |
%1317 = torch.aten.sum.dim_IntList %1316, %161, %true, %none : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc1166) | |
%1318 = torch.aten.div.Tensor %1316, %1317 : !torch.vtensor<[1,8,4,4],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1167) | |
%1319 = torch.aten.broadcast_to %1318, %683 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,4],f32> loc(#loc1168) | |
%1320 = torch.aten.view %1319, %718 : !torch.vtensor<[1,8,4,4],f32>, !torch.list<int> -> !torch.vtensor<[8,4,4],f32> loc(#loc1169) | |
%1321 = torch.aten.broadcast_to %1306, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1170) | |
%1322 = torch.aten.view %1321, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc1171) | |
%1323 = torch.aten.bmm %1320, %1322 : !torch.vtensor<[8,4,4],f32>, !torch.vtensor<[8,4,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc1172) | |
%1324 = torch.aten.view %1323, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1173) | |
%1325 = torch.aten.transpose.int %1324, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1174) | |
%1326 = torch.aten.clone %1325, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1175) | |
%1327 = torch.aten.view %1326, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1176) | |
%1328 = torch.aten.transpose.int %9, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1177) | |
%1329 = torch.aten.view %1327, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1178) | |
%1330 = torch.aten.mm %1329, %1328 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1179) | |
%1331 = torch.aten.view %1330, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1180) | |
%1332 = torch.aten.add.Tensor %1281, %1331, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1181) | |
%1333 = torch.aten.pow.Tensor_Scalar %1332, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1182) | |
%1334 = torch.aten.sum.dim_IntList %1333, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc1183) | |
%1335 = torch.aten.div.Scalar %1334, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1183) | |
%1336 = torch.aten.add.Scalar %1335, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1184) | |
%1337 = torch.aten.rsqrt %1336 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc1185) | |
%1338 = torch.aten.mul.Tensor %1332, %1337 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1186) | |
%1339 = torch.aten.mul.Tensor %8, %1338 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1187) | |
%1340 = torch.aten.transpose.int %7, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1188) | |
%1341 = torch.aten.view %1339, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1189) | |
%1342 = torch.aten.mm %1341, %1340 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1190) | |
%1343 = torch.aten.view %1342, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1191) | |
%1344 = torch.aten.view %1343, %174 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1192) | |
%1345 = torch.aten.transpose.int %1344, %int1, %int2 : !torch.vtensor<[1,4,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1193) | |
%1346 = torch.aten.transpose.int %6, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1194) | |
%1347 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc1195) | |
%1348 = torch.aten.mm %1347, %1346 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc1196) | |
%1349 = torch.aten.view %1348, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc1197) | |
%1350 = torch.aten.view %1349, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc1198) | |
%1351 = torch.aten.transpose.int %1350, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc1199) | |
%1352 = torch.aten.transpose.int %5, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1200) | |
%1353 = torch.aten.view %616, %169 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[15,512],f32> loc(#loc1201) | |
%1354 = torch.aten.mm %1353, %1352 : !torch.vtensor<[15,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[15,512],f32> loc(#loc1202) | |
%1355 = torch.aten.view %1354, %172 : !torch.vtensor<[15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,512],f32> loc(#loc1203) | |
%1356 = torch.aten.view %1355, %174 : !torch.vtensor<[1,15,512],f32>, !torch.list<int> -> !torch.vtensor<[1,15,8,64],f32> loc(#loc1204) | |
%1357 = torch.aten.transpose.int %1356, %int1, %int2 : !torch.vtensor<[1,15,8,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,15,64],f32> loc(#loc1205) | |
%1358 = torch.aten.transpose.int %1351, %int3, %int2 : !torch.vtensor<[1,8,15,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,8,64,15],f32> loc(#loc1206) | |
%1359 = torch.aten.broadcast_to %1345, %674 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1207) | |
%1360 = torch.aten.view %1359, %676 : !torch.vtensor<[1,8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[8,4,64],f32> loc(#loc1208) | |
%1361 = torch.aten.broadcast_to %1358, %194 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,64,15],f32> loc(#loc1209) | |
%1362 = torch.aten.view %1361, %196 : !torch.vtensor<[1,8,64,15],f32>, !torch.list<int> -> !torch.vtensor<[8,64,15],f32> loc(#loc1210) | |
%1363 = torch.aten.bmm %1360, %1362 : !torch.vtensor<[8,4,64],f32>, !torch.vtensor<[8,64,15],f32> -> !torch.vtensor<[8,4,15],f32> loc(#loc1211) | |
%1364 = torch.aten.view %1363, %763 : !torch.vtensor<[8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1212) | |
%1365 = torch.aten.add.Tensor %1364, %766, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,15],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1213) | |
%values_32, %indices_33 = torch.aten.max.dim %1365, %int-1, %true : !torch.vtensor<[1,8,4,15],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,8,4,1],f32>, !torch.vtensor<[1,8,4,1],si64> loc(#loc1214) | |
%1366 = torch.aten.sub.Tensor %1365, %values_32, %int1 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32>, !torch.int -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1215) | |
%1367 = torch.aten.exp %1366 : !torch.vtensor<[1,8,4,15],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1216) | |
%1368 = torch.aten.sum.dim_IntList %1367, %161, %true, %none : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,8,4,1],f32> loc(#loc1217) | |
%1369 = torch.aten.div.Tensor %1367, %1368 : !torch.vtensor<[1,8,4,15],f32>, !torch.vtensor<[1,8,4,1],f32> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1218) | |
%1370 = torch.aten.broadcast_to %1369, %763 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,15],f32> loc(#loc1219) | |
%1371 = torch.aten.view %1370, %773 : !torch.vtensor<[1,8,4,15],f32>, !torch.list<int> -> !torch.vtensor<[8,4,15],f32> loc(#loc1220) | |
%1372 = torch.aten.broadcast_to %1357, %190 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,15,64],f32> loc(#loc1221) | |
%1373 = torch.aten.view %1372, %192 : !torch.vtensor<[1,8,15,64],f32>, !torch.list<int> -> !torch.vtensor<[8,15,64],f32> loc(#loc1222) | |
%1374 = torch.aten.bmm %1371, %1373 : !torch.vtensor<[8,4,15],f32>, !torch.vtensor<[8,15,64],f32> -> !torch.vtensor<[8,4,64],f32> loc(#loc1223) | |
%1375 = torch.aten.view %1374, %674 : !torch.vtensor<[8,4,64],f32>, !torch.list<int> -> !torch.vtensor<[1,8,4,64],f32> loc(#loc1224) | |
%1376 = torch.aten.transpose.int %1375, %int1, %int2 : !torch.vtensor<[1,8,4,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1225) | |
%1377 = torch.aten.clone %1376, %int0 : !torch.vtensor<[1,4,8,64],f32>, !torch.int -> !torch.vtensor<[1,4,8,64],f32> loc(#loc1226) | |
%1378 = torch.aten.view %1377, %244 : !torch.vtensor<[1,4,8,64],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1227) | |
%1379 = torch.aten.transpose.int %4, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc1228) | |
%1380 = torch.aten.view %1378, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1229) | |
%1381 = torch.aten.mm %1380, %1379 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1230) | |
%1382 = torch.aten.view %1381, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1231) | |
%1383 = torch.aten.add.Tensor %1332, %1382, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1232) | |
%1384 = torch.aten.pow.Tensor_Scalar %1383, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1233) | |
%1385 = torch.aten.sum.dim_IntList %1384, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc1234) | |
%1386 = torch.aten.div.Scalar %1385, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1234) | |
%1387 = torch.aten.add.Scalar %1386, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1235) | |
%1388 = torch.aten.rsqrt %1387 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc1236) | |
%1389 = torch.aten.mul.Tensor %1383, %1388 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1237) | |
%1390 = torch.aten.mul.Tensor %3, %1389 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1238) | |
%1391 = torch.aten.transpose.int %2, %int0, %int1 : !torch.vtensor<[2048,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,2048],f32> loc(#loc1239) | |
%1392 = torch.aten.view %1390, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1240) | |
%1393 = torch.aten.mm %1392, %1391 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,2048],f32> -> !torch.vtensor<[4,2048],f32> loc(#loc1241) | |
%1394 = torch.aten.view %1393, %797 : !torch.vtensor<[4,2048],f32>, !torch.list<int> -> !torch.vtensor<[1,4,2048],f32> loc(#loc1242) | |
%1395 = torch.aten.relu %1394 : !torch.vtensor<[1,4,2048],f32> -> !torch.vtensor<[1,4,2048],f32> loc(#loc1243) | |
%1396 = torch.aten.transpose.int %1, %int0, %int1 : !torch.vtensor<[512,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,512],f32> loc(#loc1244) | |
%1397 = torch.aten.view %1395, %801 : !torch.vtensor<[1,4,2048],f32>, !torch.list<int> -> !torch.vtensor<[4,2048],f32> loc(#loc1245) | |
%1398 = torch.aten.mm %1397, %1396 : !torch.vtensor<[4,2048],f32>, !torch.vtensor<[2048,512],f32> -> !torch.vtensor<[4,512],f32> loc(#loc1246) | |
%1399 = torch.aten.view %1398, %657 : !torch.vtensor<[4,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4,512],f32> loc(#loc1247) | |
%1400 = torch.aten.add.Tensor %1383, %1399, %int1 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1248) | |
%1401 = torch.aten.pow.Tensor_Scalar %1400, %int2 : !torch.vtensor<[1,4,512],f32>, !torch.int -> !torch.vtensor<[1,4,512],f32> loc(#loc1249) | |
%1402 = torch.aten.sum.dim_IntList %1401, %161, %true, %none : !torch.vtensor<[1,4,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,4,1],f32> loc(#loc1250) | |
%1403 = torch.aten.div.Scalar %1402, %int512 : !torch.vtensor<[1,4,1],f32>, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1250) | |
%1404 = torch.aten.add.Scalar %1403, %float9.999990e-07, %int1 : !torch.vtensor<[1,4,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,4,1],f32> loc(#loc1251) | |
%1405 = torch.aten.rsqrt %1404 : !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,1],f32> loc(#loc1252) | |
%1406 = torch.aten.mul.Tensor %1400, %1405 : !torch.vtensor<[1,4,512],f32>, !torch.vtensor<[1,4,1],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1253) | |
%1407 = torch.aten.mul.Tensor %0, %1406 : !torch.vtensor<[512],f32>, !torch.vtensor<[1,4,512],f32> -> !torch.vtensor<[1,4,512],f32> loc(#loc1254) | |
%1408 = torch.aten.mul.Scalar %1407, %float4.419420e-02 : !torch.vtensor<[1,4,512],f32>, !torch.float -> !torch.vtensor<[1,4,512],f32> loc(#loc1255) | |
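// Annotation (added): 4.419420e-02 ~= 512^-0.5; T5 rescales the final hidden states by d_model**-0.5 before the lm_head when the output projection is tied to the input embedding.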
%1409 = torch.aten.transpose.int %130, %int0, %int1 : !torch.vtensor<[32128,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,32128],f32> loc(#loc1256) | |
%1410 = torch.aten.view %1408, %654 : !torch.vtensor<[1,4,512],f32>, !torch.list<int> -> !torch.vtensor<[4,512],f32> loc(#loc1257) | |
%1411 = torch.aten.mm %1410, %1409 : !torch.vtensor<[4,512],f32>, !torch.vtensor<[512,32128],f32> -> !torch.vtensor<[4,32128],f32> loc(#loc1258) | |
%1412 = torch.prim.ListConstruct %int1, %int4, %int32128 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1292) | |
%1413 = torch.aten.view %1411, %1412 : !torch.vtensor<[4,32128],f32>, !torch.list<int> -> !torch.vtensor<[1,4,32128],f32> loc(#loc1259) | |
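// Annotation (added): %1413 holds the returned logits [1,4,32128]: the rescaled hidden states multiplied by the transposed shared embedding %130 ([32128,512]) acting as the tied lm_head.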
return %1413 : !torch.vtensor<[1,4,32128],f32> loc(#loc) | |
} loc(#loc) | |
} loc(#loc) | |
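// Annotation (added): the #locN table below maps each op back to its source position, mostly line:col offsets in "<eval_with_key>.2", the Python module generated by torch.fx tracing; entries whose file is "-" have no resolved file name.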
#loc1 = loc("<eval_with_key>.2":36:45) | |
#loc2 = loc("<eval_with_key>.2":6:46) | |
#loc3 = loc("<eval_with_key>.2":5:50) | |
#loc4 = loc("<eval_with_key>.2":6:49) | |
#loc5 = loc("<eval_with_key>.2":27:44) | |
#loc6 = loc("<eval_with_key>.2":5:53) | |
#loc7 = loc("<eval_with_key>.2":5:144) | |
#loc8 = loc("<eval_with_key>.2":16:44) | |
#loc9 = loc("<eval_with_key>.2":14:38) | |
#loc10 = loc("<eval_with_key>.2":25:35) | |
#loc11 = loc("<eval_with_key>.2":75:73) | |
#loc12 = loc("<eval_with_key>.2":8:52) | |
#loc13 = loc("<eval_with_key>.2":22:54) | |
#loc14 = loc("<eval_with_key>.2":23:48) | |
#loc15 = loc("<eval_with_key>.2":24:40) | |
#loc16 = loc("<eval_with_key>.2":28:35) | |
#loc17 = loc("<eval_with_key>.2":39:55) | |
#loc18 = loc("<eval_with_key>.2":39:58) | |
#loc19 = loc("<eval_with_key>.2":71:53) | |
#loc20 = loc("<eval_with_key>.2":78:36) | |
#loc21 = loc("<eval_with_key>.2":125:63) | |
#loc22 = loc("<eval_with_key>.2":620:38) | |
#loc23 = loc("<eval_with_key>.2":624:51) | |
#loc24 = loc("<eval_with_key>.2":1429:40) | |
#loc25 = loc("<eval_with_key>.2":1434:65) | |
#loc26 = loc("-":9:12) | |
#loc27 = loc("-":6832:10) | |
#loc28 = loc("<eval_with_key>.2":9:12) | |
#loc29 = loc("<eval_with_key>.2":5:16) | |
#loc30 = loc("<eval_with_key>.2":6:14) | |
#loc31 = loc("<eval_with_key>.2":7:12) | |
#loc32 = loc("<eval_with_key>.2":8:14) | |
#loc33 = loc("<eval_with_key>.2":11:22) | |
#loc34 = loc("<eval_with_key>.2":12:13) | |
#loc35 = loc("<eval_with_key>.2":13:12) | |
#loc36 = loc("<eval_with_key>.2":14:9) | |
#loc37 = loc("<eval_with_key>.2":15:19) | |
#loc38 = loc("<eval_with_key>.2":16:11) | |
#loc39 = loc("<eval_with_key>.2":18:16) | |
#loc40 = loc("-":557:11) | |
#loc41 = loc("-":7133:10) | |
#loc42 = loc("<eval_with_key>.2":20:14) | |
#loc43 = loc("<eval_with_key>.2":19:11) | |
#loc44 = loc("<eval_with_key>.2":21:16) | |
#loc45 = loc("<eval_with_key>.2":22:18) | |
#loc46 = loc("<eval_with_key>.2":24:11) | |
#loc47 = loc("<eval_with_key>.2":25:10) | |
#loc48 = loc("<eval_with_key>.2":26:12) | |
#loc49 = loc("<eval_with_key>.2":27:11) | |
#loc50 = loc("<eval_with_key>.2":28:10) | |
#loc51 = loc("<eval_with_key>.2":29:12) | |
#loc52 = loc("<eval_with_key>.2":31:12) | |
#loc53 = loc("<eval_with_key>.2":33:12) | |
#loc54 = loc("<eval_with_key>.2":35:8) | |
#loc55 = loc("-":4783:13) | |
#loc56 = loc("-":6558:10) | |
#loc57 = loc("<eval_with_key>.2":36:13) | |
#loc58 = loc("<eval_with_key>.2":37:9) | |
#loc59 = loc("-":4790:13) | |
#loc60 = loc("<eval_with_key>.2":106:14) | |
#loc61 = loc("<eval_with_key>.2":38:19) | |
#loc62 = loc("<eval_with_key>.2":39:13) | |
#loc63 = loc("<eval_with_key>.2":40:16) | |
#loc64 = loc("<eval_with_key>.2":42:10) | |
#loc65 = loc("<eval_with_key>.2":43:13) | |
#loc66 = loc("<eval_with_key>.2":44:11) | |
#loc67 = loc("<eval_with_key>.2":45:21) | |
#loc68 = loc("<eval_with_key>.2":46:13) | |
#loc69 = loc("<eval_with_key>.2":47:18) | |
#loc70 = loc("<eval_with_key>.2":49:10) | |
#loc71 = loc("<eval_with_key>.2":50:13) | |
#loc72 = loc("<eval_with_key>.2":51:11) | |
#loc73 = loc("<eval_with_key>.2":52:21) | |
#loc74 = loc("<eval_with_key>.2":53:13) | |
#loc75 = loc("<eval_with_key>.2":54:18) | |
#loc76 = loc("<eval_with_key>.2":55:18) | |
#loc77 = loc("-":4951:15) | |
#loc78 = loc("-":6546:10) | |
#loc79 = loc("<eval_with_key>.2":56:13) | |
#loc80 = loc("<eval_with_key>.2":57:13) | |
#loc81 = loc("<eval_with_key>.2":58:15) | |
#loc82 = loc("<eval_with_key>.2":59:13) | |
#loc83 = loc("<eval_with_key>.2":60:10) | |
#loc84 = loc("-":427:5) | |
#loc85 = loc("-":6995:10) | |
#loc86 = loc("<eval_with_key>.2":89:18) | |
#loc87 = loc("<eval_with_key>.2":61:21) | |
#loc88 = loc("<eval_with_key>.2":62:13) | |
#loc89 = loc("<eval_with_key>.2":64:18) | |
#loc90 = loc("<eval_with_key>.2":65:15) | |
#loc91 = loc("<eval_with_key>.2":66:18) | |
#loc92 = loc("<eval_with_key>.2":68:10) | |
#loc93 = loc("<eval_with_key>.2":69:9) | |
#loc94 = loc("<eval_with_key>.2":70:27) | |
#loc95 = loc("<eval_with_key>.2":71:12) | |
#loc96 = loc("<eval_with_key>.2":72:12) | |
#loc97 = loc("<eval_with_key>.2":73:12) | |
#loc98 = loc("<eval_with_key>.2":74:9) | |
#loc99 = loc("<eval_with_key>.2":75:29) | |
#loc100 = loc("<eval_with_key>.2":76:10) | |
#loc101 = loc("<eval_with_key>.2":77:10) | |
#loc102 = loc("<eval_with_key>.2":78:12) | |
#loc103 = loc("<eval_with_key>.2":79:12) | |
#loc104 = loc("<eval_with_key>.2":80:29) | |
#loc105 = loc("<eval_with_key>.2":81:12) | |
#loc106 = loc("<eval_with_key>.2":82:16) | |
#loc107 = loc("<eval_with_key>.2":83:14) | |
#loc108 = loc("<eval_with_key>.2":84:12) | |
#loc109 = loc("<eval_with_key>.2":85:11) | |
#loc110 = loc("<eval_with_key>.2":87:18) | |
#loc111 = loc("<eval_with_key>.2":88:14) | |
#loc112 = loc("<eval_with_key>.2":90:12) | |
#loc113 = loc("<eval_with_key>.2":91:13) | |
#loc114 = loc("<eval_with_key>.2":92:11) | |
#loc115 = loc("<eval_with_key>.2":93:12) | |
#loc116 = loc("<eval_with_key>.2":94:10) | |
#loc117 = loc("<eval_with_key>.2":95:12) | |
#loc118 = loc("<eval_with_key>.2":96:12) | |
#loc119 = loc("<eval_with_key>.2":98:15) | |
#loc120 = loc("<eval_with_key>.2":99:13) | |
#loc121 = loc("<eval_with_key>.2":100:15) | |
#loc122 = loc("<eval_with_key>.2":101:14) | |
#loc123 = loc("<eval_with_key>.2":102:12) | |
#loc124 = loc("<eval_with_key>.2":103:21) | |
#loc125 = loc("<eval_with_key>.2":104:18) | |
#loc126 = loc("<eval_with_key>.2":105:14) | |
#loc127 = loc("<eval_with_key>.2":108:10) | |
#loc128 = loc("<eval_with_key>.2":109:14) | |
#loc129 = loc("<eval_with_key>.2":110:11) | |
#loc130 = loc("<eval_with_key>.2":111:21) | |
#loc131 = loc("<eval_with_key>.2":112:12) | |
#loc132 = loc("<eval_with_key>.2":113:12) | |
#loc133 = loc("<eval_with_key>.2":114:13) | |
#loc134 = loc("<eval_with_key>.2":115:12) | |
#loc135 = loc("<eval_with_key>.2":116:14) | |
#loc136 = loc("<eval_with_key>.2":118:12) | |
#loc137 = loc("<eval_with_key>.2":120:12) | |
#loc138 = loc("<eval_with_key>.2":122:10) | |
#loc139 = loc("<eval_with_key>.2":123:14) | |
#loc140 = loc("<eval_with_key>.2":124:11) | |
#loc141 = loc("-":6150:10) | |
#loc142 = loc("<eval_with_key>.2":126:11) | |
#loc143 = loc("<eval_with_key>.2":125:21) | |
#loc144 = loc("<eval_with_key>.2":129:10) | |
#loc145 = loc("<eval_with_key>.2":130:14) | |
#loc146 = loc("<eval_with_key>.2":131:11) | |
#loc147 = loc("<eval_with_key>.2":132:21) | |
#loc148 = loc("<eval_with_key>.2":133:12) | |
#loc149 = loc("<eval_with_key>.2":134:12) | |
#loc150 = loc("<eval_with_key>.2":135:13) | |
#loc151 = loc("<eval_with_key>.2":136:12) | |
#loc152 = loc("<eval_with_key>.2":137:14) | |
#loc153 = loc("<eval_with_key>.2":139:12) | |
#loc154 = loc("<eval_with_key>.2":141:12) | |
#loc155 = loc("<eval_with_key>.2":143:10) | |
#loc156 = loc("<eval_with_key>.2":144:14) | |
#loc157 = loc("<eval_with_key>.2":145:11) | |
#loc158 = loc("<eval_with_key>.2":146:21) | |
#loc159 = loc("<eval_with_key>.2":147:14) | |
#loc160 = loc("<eval_with_key>.2":148:18) | |
#loc161 = loc("<eval_with_key>.2":150:10) | |
#loc162 = loc("<eval_with_key>.2":151:14) | |
#loc163 = loc("<eval_with_key>.2":152:11) | |
#loc164 = loc("<eval_with_key>.2":153:21) | |
#loc165 = loc("<eval_with_key>.2":154:14) | |
#loc166 = loc("<eval_with_key>.2":155:18) | |
#loc167 = loc("<eval_with_key>.2":157:10) | |
#loc168 = loc("<eval_with_key>.2":158:14) | |
#loc169 = loc("<eval_with_key>.2":159:11) | |
#loc170 = loc("<eval_with_key>.2":160:22) | |
#loc171 = loc("<eval_with_key>.2":161:14) | |
#loc172 = loc("<eval_with_key>.2":162:18) | |
#loc173 = loc("<eval_with_key>.2":163:18) | |
#loc174 = loc("<eval_with_key>.2":164:15) | |
#loc175 = loc("<eval_with_key>.2":165:14) | |
#loc176 = loc("<eval_with_key>.2":166:15) | |
#loc177 = loc("<eval_with_key>.2":167:14) | |
#loc178 = loc("<eval_with_key>.2":168:12) | |
#loc179 = loc("<eval_with_key>.2":169:22) | |
#loc180 = loc("<eval_with_key>.2":170:13) | |
#loc181 = loc("<eval_with_key>.2":171:13) | |
#loc182 = loc("<eval_with_key>.2":172:12) | |
#loc183 = loc("<eval_with_key>.2":173:12) | |
#loc184 = loc("<eval_with_key>.2":174:12) | |
#loc185 = loc("<eval_with_key>.2":175:12) | |
#loc186 = loc("<eval_with_key>.2":177:15) | |
#loc187 = loc("<eval_with_key>.2":178:14) | |
#loc188 = loc("<eval_with_key>.2":179:15) | |
#loc189 = loc("<eval_with_key>.2":180:14) | |
#loc190 = loc("<eval_with_key>.2":181:12) | |
#loc191 = loc("<eval_with_key>.2":182:22) | |
#loc192 = loc("<eval_with_key>.2":183:18) | |
#loc193 = loc("<eval_with_key>.2":184:14) | |
#loc194 = loc("<eval_with_key>.2":185:14) | |
#loc195 = loc("<eval_with_key>.2":187:10) | |
#loc196 = loc("<eval_with_key>.2":188:14) | |
#loc197 = loc("<eval_with_key>.2":189:11) | |
#loc198 = loc("<eval_with_key>.2":190:22) | |
#loc199 = loc("<eval_with_key>.2":191:12) | |
#loc200 = loc("<eval_with_key>.2":192:12) | |
#loc201 = loc("<eval_with_key>.2":193:13) | |
#loc202 = loc("<eval_with_key>.2":194:12) | |
#loc203 = loc("<eval_with_key>.2":195:14) | |
#loc204 = loc("<eval_with_key>.2":197:12) | |
#loc205 = loc("<eval_with_key>.2":199:13) | |
#loc206 = loc("<eval_with_key>.2":201:11) | |
#loc207 = loc("<eval_with_key>.2":202:14) | |
#loc208 = loc("<eval_with_key>.2":203:12) | |
#loc209 = loc("<eval_with_key>.2":204:22) | |
#loc210 = loc("<eval_with_key>.2":205:13) | |
#loc211 = loc("<eval_with_key>.2":208:11) | |
#loc212 = loc("<eval_with_key>.2":209:14) | |
#loc213 = loc("<eval_with_key>.2":210:12) | |
#loc214 = loc("<eval_with_key>.2":211:22) | |
#loc215 = loc("<eval_with_key>.2":212:13) | |
#loc216 = loc("<eval_with_key>.2":213:12) | |
#loc217 = loc("<eval_with_key>.2":214:13) | |
#loc218 = loc("<eval_with_key>.2":215:13) | |
#loc219 = loc("<eval_with_key>.2":216:14) | |
#loc220 = loc("<eval_with_key>.2":218:13) | |
#loc221 = loc("<eval_with_key>.2":220:13) | |
#loc222 = loc("<eval_with_key>.2":222:11) | |
#loc223 = loc("<eval_with_key>.2":223:14) | |
#loc224 = loc("<eval_with_key>.2":224:12) | |
#loc225 = loc("<eval_with_key>.2":225:22) | |
#loc226 = loc("<eval_with_key>.2":226:14) | |
#loc227 = loc("<eval_with_key>.2":227:19) | |
#loc228 = loc("<eval_with_key>.2":229:11) | |
#loc229 = loc("<eval_with_key>.2":230:14) | |
#loc230 = loc("<eval_with_key>.2":231:12) | |
#loc231 = loc("<eval_with_key>.2":232:22) | |
#loc232 = loc("<eval_with_key>.2":233:14) | |
#loc233 = loc("<eval_with_key>.2":234:19) | |
#loc234 = loc("<eval_with_key>.2":236:11) | |
#loc235 = loc("<eval_with_key>.2":237:14) | |
#loc236 = loc("<eval_with_key>.2":238:12) | |
#loc237 = loc("<eval_with_key>.2":239:22) | |
#loc238 = loc("<eval_with_key>.2":240:14) | |
#loc239 = loc("<eval_with_key>.2":241:19) | |
#loc240 = loc("<eval_with_key>.2":242:19) | |
#loc241 = loc("<eval_with_key>.2":243:15) | |
#loc242 = loc("<eval_with_key>.2":244:14) | |
#loc243 = loc("<eval_with_key>.2":245:15) | |
#loc244 = loc("<eval_with_key>.2":246:14) | |
#loc245 = loc("<eval_with_key>.2":247:12) | |
#loc246 = loc("<eval_with_key>.2":248:22) | |
#loc247 = loc("<eval_with_key>.2":249:13) | |
#loc248 = loc("<eval_with_key>.2":250:13) | |
#loc249 = loc("<eval_with_key>.2":251:12) | |
#loc250 = loc("<eval_with_key>.2":252:12) | |
#loc251 = loc("<eval_with_key>.2":253:12) | |
#loc252 = loc("<eval_with_key>.2":254:12) | |
#loc253 = loc("<eval_with_key>.2":256:16) | |
#loc254 = loc("<eval_with_key>.2":257:14) | |
#loc255 = loc("<eval_with_key>.2":258:16) | |
#loc256 = loc("<eval_with_key>.2":259:14) | |
#loc257 = loc("<eval_with_key>.2":260:12) | |
#loc258 = loc("<eval_with_key>.2":261:22) | |
#loc259 = loc("<eval_with_key>.2":262:19) | |
#loc260 = loc("<eval_with_key>.2":263:14) | |
#loc261 = loc("<eval_with_key>.2":264:14) | |
#loc262 = loc("<eval_with_key>.2":266:11) | |
#loc263 = loc("<eval_with_key>.2":267:14) | |
#loc264 = loc("<eval_with_key>.2":268:12) | |
#loc265 = loc("<eval_with_key>.2":269:22) | |
#loc266 = loc("<eval_with_key>.2":270:13) | |
#loc267 = loc("<eval_with_key>.2":271:12) | |
#loc268 = loc("<eval_with_key>.2":272:13) | |
#loc269 = loc("<eval_with_key>.2":273:13) | |
#loc270 = loc("<eval_with_key>.2":274:14) | |
#loc271 = loc("<eval_with_key>.2":276:13) | |
#loc272 = loc("<eval_with_key>.2":278:13) | |
#loc273 = loc("<eval_with_key>.2":280:11) | |
#loc274 = loc("<eval_with_key>.2":281:14) | |
#loc275 = loc("<eval_with_key>.2":282:12) | |
#loc276 = loc("<eval_with_key>.2":283:22) | |
#loc277 = loc("<eval_with_key>.2":284:13) | |
#loc278 = loc("<eval_with_key>.2":287:11) | |
#loc279 = loc("<eval_with_key>.2":288:14) | |
#loc280 = loc("<eval_with_key>.2":289:12) | |
#loc281 = loc("<eval_with_key>.2":290:22) | |
#loc282 = loc("<eval_with_key>.2":291:13) | |
#loc283 = loc("<eval_with_key>.2":292:12) | |
#loc284 = loc("<eval_with_key>.2":293:13) | |
#loc285 = loc("<eval_with_key>.2":294:13) | |
#loc286 = loc("<eval_with_key>.2":295:14) | |
#loc287 = loc("<eval_with_key>.2":297:13) | |
#loc288 = loc("<eval_with_key>.2":299:13) | |
#loc289 = loc("<eval_with_key>.2":301:11) | |
#loc290 = loc("<eval_with_key>.2":302:14) | |
#loc291 = loc("<eval_with_key>.2":303:12) | |
#loc292 = loc("<eval_with_key>.2":304:22) | |
#loc293 = loc("<eval_with_key>.2":305:14) | |
#loc294 = loc("<eval_with_key>.2":306:19) | |
#loc295 = loc("<eval_with_key>.2":308:11) | |
#loc296 = loc("<eval_with_key>.2":309:14) | |
#loc297 = loc("<eval_with_key>.2":310:12) | |
#loc298 = loc("<eval_with_key>.2":311:22) | |
#loc299 = loc("<eval_with_key>.2":312:14) | |
#loc300 = loc("<eval_with_key>.2":313:19) | |
#loc301 = loc("<eval_with_key>.2":315:11) | |
#loc302 = loc("<eval_with_key>.2":316:14) | |
#loc303 = loc("<eval_with_key>.2":317:12) | |
#loc304 = loc("<eval_with_key>.2":318:22) | |
#loc305 = loc("<eval_with_key>.2":319:14) | |
#loc306 = loc("<eval_with_key>.2":320:19) | |
#loc307 = loc("<eval_with_key>.2":321:19) | |
#loc308 = loc("<eval_with_key>.2":322:16) | |
#loc309 = loc("<eval_with_key>.2":323:14) | |
#loc310 = loc("<eval_with_key>.2":324:16) | |
#loc311 = loc("<eval_with_key>.2":325:14) | |
#loc312 = loc("<eval_with_key>.2":326:12) | |
#loc313 = loc("<eval_with_key>.2":327:22) | |
#loc314 = loc("<eval_with_key>.2":328:13) | |
#loc315 = loc("<eval_with_key>.2":329:13) | |
#loc316 = loc("<eval_with_key>.2":330:12) | |
#loc317 = loc("<eval_with_key>.2":331:12) | |
#loc318 = loc("<eval_with_key>.2":332:12) | |
#loc319 = loc("<eval_with_key>.2":333:12) | |
#loc320 = loc("<eval_with_key>.2":335:16) | |
#loc321 = loc("<eval_with_key>.2":336:14) | |
#loc322 = loc("<eval_with_key>.2":337:16) | |
#loc323 = loc("<eval_with_key>.2":338:14) | |
#loc324 = loc("<eval_with_key>.2":339:12) | |
#loc325 = loc("<eval_with_key>.2":340:22) | |
#loc326 = loc("<eval_with_key>.2":341:19) | |
#loc327 = loc("<eval_with_key>.2":342:14) | |
#loc328 = loc("<eval_with_key>.2":343:14) | |
#loc329 = loc("<eval_with_key>.2":345:11) | |
#loc330 = loc("<eval_with_key>.2":346:14) | |
#loc331 = loc("<eval_with_key>.2":347:12) | |
#loc332 = loc("<eval_with_key>.2":348:22) | |
#loc333 = loc("<eval_with_key>.2":349:13) | |
#loc334 = loc("<eval_with_key>.2":350:12) | |
#loc335 = loc("<eval_with_key>.2":351:13) | |
#loc336 = loc("<eval_with_key>.2":352:13) | |
#loc337 = loc("<eval_with_key>.2":353:14) | |
#loc338 = loc("<eval_with_key>.2":355:13) | |
#loc339 = loc("<eval_with_key>.2":357:13) | |
#loc340 = loc("<eval_with_key>.2":359:11) | |
#loc341 = loc("<eval_with_key>.2":360:14) | |
#loc342 = loc("<eval_with_key>.2":361:12) | |
#loc343 = loc("<eval_with_key>.2":362:22) | |
#loc344 = loc("<eval_with_key>.2":363:13) | |
#loc345 = loc("<eval_with_key>.2":366:11) | |
#loc346 = loc("<eval_with_key>.2":367:14) | |
#loc347 = loc("<eval_with_key>.2":368:12) | |
#loc348 = loc("<eval_with_key>.2":369:22) | |
#loc349 = loc("<eval_with_key>.2":370:13) | |
#loc350 = loc("<eval_with_key>.2":371:12) | |
#loc351 = loc("<eval_with_key>.2":372:13) | |
#loc352 = loc("<eval_with_key>.2":373:13) | |
#loc353 = loc("<eval_with_key>.2":374:14) | |
#loc354 = loc("<eval_with_key>.2":376:13) | |
#loc355 = loc("<eval_with_key>.2":378:13) | |
#loc356 = loc("<eval_with_key>.2":380:11) | |
#loc357 = loc("<eval_with_key>.2":381:14) | |
#loc358 = loc("<eval_with_key>.2":382:12) | |
#loc359 = loc("<eval_with_key>.2":383:22) | |
#loc360 = loc("<eval_with_key>.2":384:14) | |
#loc361 = loc("<eval_with_key>.2":385:19) | |
#loc362 = loc("<eval_with_key>.2":387:11) | |
#loc363 = loc("<eval_with_key>.2":388:14) | |
#loc364 = loc("<eval_with_key>.2":389:12) | |
#loc365 = loc("<eval_with_key>.2":390:22) | |
#loc366 = loc("<eval_with_key>.2":391:14) | |
#loc367 = loc("<eval_with_key>.2":392:19) | |
#loc368 = loc("<eval_with_key>.2":394:11) | |
#loc369 = loc("<eval_with_key>.2":395:14) | |
#loc370 = loc("<eval_with_key>.2":396:12) | |
#loc371 = loc("<eval_with_key>.2":397:22) | |
#loc372 = loc("<eval_with_key>.2":398:14) | |
#loc373 = loc("<eval_with_key>.2":399:19) | |
#loc374 = loc("<eval_with_key>.2":400:19) | |
#loc375 = loc("<eval_with_key>.2":401:16) | |
#loc376 = loc("<eval_with_key>.2":402:14) | |
#loc377 = loc("<eval_with_key>.2":403:16) | |
#loc378 = loc("<eval_with_key>.2":404:14) | |
#loc379 = loc("<eval_with_key>.2":405:12) | |
#loc380 = loc("<eval_with_key>.2":406:22) | |
#loc381 = loc("<eval_with_key>.2":407:13) | |
#loc382 = loc("<eval_with_key>.2":408:13) | |
#loc383 = loc("<eval_with_key>.2":409:12) | |
#loc384 = loc("<eval_with_key>.2":410:12) | |
#loc385 = loc("<eval_with_key>.2":411:12) | |
#loc386 = loc("<eval_with_key>.2":412:12) | |
#loc387 = loc("<eval_with_key>.2":414:16) | |
#loc388 = loc("<eval_with_key>.2":415:14) | |
#loc389 = loc("<eval_with_key>.2":416:16) | |
#loc390 = loc("<eval_with_key>.2":417:14) | |
#loc391 = loc("<eval_with_key>.2":418:12) | |
#loc392 = loc("<eval_with_key>.2":419:22) | |
#loc393 = loc("<eval_with_key>.2":420:19) | |
#loc394 = loc("<eval_with_key>.2":421:14) | |
#loc395 = loc("<eval_with_key>.2":422:14) | |
#loc396 = loc("<eval_with_key>.2":424:11) | |
#loc397 = loc("<eval_with_key>.2":425:14) | |
#loc398 = loc("<eval_with_key>.2":426:12) | |
#loc399 = loc("<eval_with_key>.2":427:22) | |
#loc400 = loc("<eval_with_key>.2":428:13) | |
#loc401 = loc("<eval_with_key>.2":429:13) | |
#loc402 = loc("<eval_with_key>.2":430:13) | |
#loc403 = loc("<eval_with_key>.2":431:13) | |
#loc404 = loc("<eval_with_key>.2":432:14) | |
#loc405 = loc("<eval_with_key>.2":434:13) | |
#loc406 = loc("<eval_with_key>.2":436:13) | |
#loc407 = loc("<eval_with_key>.2":438:11) | |
#loc408 = loc("<eval_with_key>.2":439:14) | |
#loc409 = loc("<eval_with_key>.2":440:12) | |
#loc410 = loc("<eval_with_key>.2":441:22) | |
#loc411 = loc("<eval_with_key>.2":442:13) | |
#loc412 = loc("<eval_with_key>.2":445:11) | |
#loc413 = loc("<eval_with_key>.2":446:14) | |
#loc414 = loc("<eval_with_key>.2":447:12) | |
#loc415 = loc("<eval_with_key>.2":448:22) | |
#loc416 = loc("<eval_with_key>.2":449:13) | |
#loc417 = loc("<eval_with_key>.2":450:13) | |
#loc418 = loc("<eval_with_key>.2":451:14) | |
#loc419 = loc("<eval_with_key>.2":452:13) | |
#loc420 = loc("<eval_with_key>.2":453:15) | |
#loc421 = loc("<eval_with_key>.2":455:13) | |
#loc422 = loc("<eval_with_key>.2":457:13) | |
#loc423 = loc("<eval_with_key>.2":459:11) | |
#loc424 = loc("<eval_with_key>.2":460:14) | |
#loc425 = loc("<eval_with_key>.2":461:12) | |
#loc426 = loc("<eval_with_key>.2":462:22) | |
#loc427 = loc("<eval_with_key>.2":463:14) | |
#loc428 = loc("<eval_with_key>.2":464:19) | |
#loc429 = loc("<eval_with_key>.2":466:11) | |
#loc430 = loc("<eval_with_key>.2":467:14) | |
#loc431 = loc("<eval_with_key>.2":468:12) | |
#loc432 = loc("<eval_with_key>.2":469:22) | |
#loc433 = loc("<eval_with_key>.2":470:14) | |
#loc434 = loc("<eval_with_key>.2":471:19) | |
#loc435 = loc("<eval_with_key>.2":473:11) | |
#loc436 = loc("<eval_with_key>.2":474:14) | |
#loc437 = loc("<eval_with_key>.2":475:12) | |
#loc438 = loc("<eval_with_key>.2":476:22) | |
#loc439 = loc("<eval_with_key>.2":477:14) | |
#loc440 = loc("<eval_with_key>.2":478:19) | |
#loc441 = loc("<eval_with_key>.2":479:19) | |
#loc442 = loc("<eval_with_key>.2":480:16) | |
#loc443 = loc("<eval_with_key>.2":481:14) | |
#loc444 = loc("<eval_with_key>.2":482:16) | |
#loc445 = loc("<eval_with_key>.2":483:14) | |
#loc446 = loc("<eval_with_key>.2":484:13) | |
#loc447 = loc("<eval_with_key>.2":485:22) | |
#loc448 = loc("<eval_with_key>.2":486:13) | |
#loc449 = loc("<eval_with_key>.2":487:13) | |
#loc450 = loc("<eval_with_key>.2":488:12) | |
#loc451 = loc("<eval_with_key>.2":489:12) | |
#loc452 = loc("<eval_with_key>.2":490:12) | |
#loc453 = loc("<eval_with_key>.2":491:12) | |
#loc454 = loc("<eval_with_key>.2":493:16) | |
#loc455 = loc("<eval_with_key>.2":494:14) | |
#loc456 = loc("<eval_with_key>.2":495:16) | |
#loc457 = loc("<eval_with_key>.2":496:14) | |
#loc458 = loc("<eval_with_key>.2":497:13) | |
#loc459 = loc("<eval_with_key>.2":498:22) | |
#loc460 = loc("<eval_with_key>.2":499:19) | |
#loc461 = loc("<eval_with_key>.2":500:14) | |
#loc462 = loc("<eval_with_key>.2":501:14) | |
#loc463 = loc("<eval_with_key>.2":503:11) | |
#loc464 = loc("<eval_with_key>.2":504:14) | |
#loc465 = loc("<eval_with_key>.2":505:12) | |
#loc466 = loc("<eval_with_key>.2":506:22) | |
#loc467 = loc("<eval_with_key>.2":507:13) | |
#loc468 = loc("<eval_with_key>.2":508:13) | |
#loc469 = loc("<eval_with_key>.2":509:14) | |
#loc470 = loc("<eval_with_key>.2":510:13) | |
#loc471 = loc("<eval_with_key>.2":511:15) | |
#loc472 = loc("<eval_with_key>.2":513:13) | |
#loc473 = loc("<eval_with_key>.2":515:13) | |
#loc474 = loc("<eval_with_key>.2":517:11) | |
#loc475 = loc("<eval_with_key>.2":518:14) | |
#loc476 = loc("<eval_with_key>.2":519:12) | |
#loc477 = loc("<eval_with_key>.2":520:22) | |
#loc478 = loc("<eval_with_key>.2":521:13) | |
#loc479 = loc("<eval_with_key>.2":524:11) | |
#loc480 = loc("<eval_with_key>.2":525:14) | |
#loc481 = loc("<eval_with_key>.2":526:12) | |
#loc482 = loc("<eval_with_key>.2":527:22) | |
#loc483 = loc("<eval_with_key>.2":528:13) | |
#loc484 = loc("<eval_with_key>.2":529:13) | |
#loc485 = loc("<eval_with_key>.2":530:14) | |
#loc486 = loc("<eval_with_key>.2":531:13) | |
#loc487 = loc("<eval_with_key>.2":532:15) | |
#loc488 = loc("<eval_with_key>.2":534:13) | |
#loc489 = loc("<eval_with_key>.2":536:13) | |
#loc490 = loc("<eval_with_key>.2":537:14) | |
#loc491 = loc("<eval_with_key>.2":539:18) | |
#loc492 = loc("<eval_with_key>.2":540:13) | |
#loc493 = loc("<eval_with_key>.2":541:13) | |
#loc494 = loc("<eval_with_key>.2":542:15) | |
#loc495 = loc("<eval_with_key>.2":543:18) | |
#loc496 = loc("<eval_with_key>.2":544:18) | |
#loc497 = loc("<eval_with_key>.2":546:13) | |
#loc498 = loc("<eval_with_key>.2":547:18) | |
#loc499 = loc("<eval_with_key>.2":549:18) | |
#loc500 = loc("<eval_with_key>.2":550:9) | |
#loc501 = loc("<eval_with_key>.2":551:29) | |
#loc502 = loc("<eval_with_key>.2":553:18) | |
#loc503 = loc("<eval_with_key>.2":557:19) | |
#loc504 = loc("<eval_with_key>.2":558:19) | |
#loc505 = loc("<eval_with_key>.2":560:13) | |
#loc506 = loc("<eval_with_key>.2":561:13) | |
#loc507 = loc("<eval_with_key>.2":562:13) | |
#loc508 = loc("<eval_with_key>.2":564:19) | |
#loc509 = loc("<eval_with_key>.2":565:19) | |
#loc510 = loc("<eval_with_key>.2":567:29) | |
#loc511 = loc("<eval_with_key>.2":568:13) | |
#loc512 = loc("<eval_with_key>.2":569:13) | |
#loc513 = loc("<eval_with_key>.2":570:13) | |
#loc514 = loc("<eval_with_key>.2":571:14) | |
#loc515 = loc("<eval_with_key>.2":572:13) | |
#loc516 = loc("<eval_with_key>.2":573:15) | |
#loc517 = loc("<eval_with_key>.2":575:13) | |
#loc518 = loc("<eval_with_key>.2":577:13) | |
#loc519 = loc("<eval_with_key>.2":579:11) | |
#loc520 = loc("<eval_with_key>.2":580:14) | |
#loc521 = loc("<eval_with_key>.2":581:12) | |
#loc522 = loc("<eval_with_key>.2":648:14) | |
#loc523 = loc("<eval_with_key>.2":582:22) | |
#loc524 = loc("<eval_with_key>.2":583:14) | |
#loc525 = loc("<eval_with_key>.2":584:19) | |
#loc526 = loc("<eval_with_key>.2":586:11) | |
#loc527 = loc("<eval_with_key>.2":587:14) | |
#loc528 = loc("<eval_with_key>.2":588:12) | |
#loc529 = loc("<eval_with_key>.2":589:22) | |
#loc530 = loc("<eval_with_key>.2":590:14) | |
#loc531 = loc("<eval_with_key>.2":591:19) | |
#loc532 = loc("<eval_with_key>.2":593:11) | |
#loc533 = loc("<eval_with_key>.2":594:14) | |
#loc534 = loc("<eval_with_key>.2":595:12) | |
#loc535 = loc("<eval_with_key>.2":596:22) | |
#loc536 = loc("<eval_with_key>.2":597:14) | |
#loc537 = loc("<eval_with_key>.2":598:19) | |
#loc538 = loc("<eval_with_key>.2":599:19) | |
#loc539 = loc("<eval_with_key>.2":600:16) | |
#loc540 = loc("<eval_with_key>.2":601:14) | |
#loc541 = loc("<eval_with_key>.2":602:16) | |
#loc542 = loc("<eval_with_key>.2":603:14) | |
#loc543 = loc("<eval_with_key>.2":604:13) | |
#loc544 = loc("<eval_with_key>.2":631:19) | |
#loc545 = loc("<eval_with_key>.2":605:22) | |
#loc546 = loc("<eval_with_key>.2":606:15) | |
#loc547 = loc("<eval_with_key>.2":608:19) | |
#loc548 = loc("<eval_with_key>.2":609:15) | |
#loc549 = loc("<eval_with_key>.2":610:19) | |
#loc550 = loc("<eval_with_key>.2":612:12) | |
#loc551 = loc("<eval_with_key>.2":613:17) | |
#loc552 = loc("<eval_with_key>.2":614:16) | |
#loc553 = loc("<eval_with_key>.2":615:10) | |
#loc554 = loc("<eval_with_key>.2":616:11) | |
#loc555 = loc("<eval_with_key>.2":617:29) | |
#loc556 = loc("<eval_with_key>.2":618:12) | |
#loc557 = loc("<eval_with_key>.2":619:12) | |
#loc558 = loc("<eval_with_key>.2":620:12) | |
#loc559 = loc("<eval_with_key>.2":621:13) | |
#loc560 = loc("<eval_with_key>.2":622:29) | |
#loc561 = loc("<eval_with_key>.2":623:13) | |
#loc562 = loc("<eval_with_key>.2":624:18) | |
#loc563 = loc("<eval_with_key>.2":625:16) | |
#loc564 = loc("<eval_with_key>.2":626:14) | |
#loc565 = loc("<eval_with_key>.2":627:13) | |
#loc566 = loc("<eval_with_key>.2":629:18) | |
#loc567 = loc("<eval_with_key>.2":630:16) | |
#loc568 = loc("<eval_with_key>.2":632:13) | |
#loc569 = loc("<eval_with_key>.2":633:13) | |
#loc570 = loc("<eval_with_key>.2":634:13) | |
#loc571 = loc("<eval_with_key>.2":635:12) | |
#loc572 = loc("<eval_with_key>.2":636:12) | |
#loc573 = loc("<eval_with_key>.2":637:12) | |
#loc574 = loc("<eval_with_key>.2":638:13) | |
#loc575 = loc("<eval_with_key>.2":640:16) | |
#loc576 = loc("<eval_with_key>.2":641:14) | |
#loc577 = loc("<eval_with_key>.2":642:16) | |
#loc578 = loc("<eval_with_key>.2":643:14) | |
#loc579 = loc("<eval_with_key>.2":644:13) | |
#loc580 = loc("<eval_with_key>.2":645:22) | |
#loc581 = loc("<eval_with_key>.2":646:19) | |
#loc582 = loc("<eval_with_key>.2":647:14) | |
#loc583 = loc("<eval_with_key>.2":650:11) | |
#loc584 = loc("<eval_with_key>.2":651:14) | |
#loc585 = loc("<eval_with_key>.2":652:12) | |
#loc586 = loc("<eval_with_key>.2":653:22) | |
#loc587 = loc("<eval_with_key>.2":654:13) | |
#loc588 = loc("<eval_with_key>.2":655:13) | |
#loc589 = loc("<eval_with_key>.2":656:14) | |
#loc590 = loc("<eval_with_key>.2":657:13) | |
#loc591 = loc("<eval_with_key>.2":658:15) | |
#loc592 = loc("<eval_with_key>.2":660:13) | |
#loc593 = loc("<eval_with_key>.2":662:13) | |
#loc594 = loc("<eval_with_key>.2":664:11) | |
#loc595 = loc("<eval_with_key>.2":665:14) | |
#loc596 = loc("<eval_with_key>.2":666:12) | |
#loc597 = loc("<eval_with_key>.2":667:22) | |
#loc598 = loc("<eval_with_key>.2":668:14) | |
#loc599 = loc("<eval_with_key>.2":669:19) | |
#loc600 = loc("<eval_with_key>.2":671:11) | |
#loc601 = loc("<eval_with_key>.2":672:15) | |
#loc602 = loc("<eval_with_key>.2":673:12) | |
#loc603 = loc("<eval_with_key>.2":674:22) | |
#loc604 = loc("<eval_with_key>.2":675:15) | |
#loc605 = loc("<eval_with_key>.2":676:19) | |
#loc606 = loc("<eval_with_key>.2":678:11) | |
#loc607 = loc("<eval_with_key>.2":679:15) | |
#loc608 = loc("<eval_with_key>.2":680:12) | |
#loc609 = loc("<eval_with_key>.2":681:22) | |
#loc610 = loc("<eval_with_key>.2":682:15) | |
#loc611 = loc("<eval_with_key>.2":683:19) | |
#loc612 = loc("<eval_with_key>.2":684:19) | |
#loc613 = loc("<eval_with_key>.2":685:16) | |
#loc614 = loc("<eval_with_key>.2":686:15) | |
#loc615 = loc("<eval_with_key>.2":687:16) | |
#loc616 = loc("<eval_with_key>.2":688:15) | |
#loc617 = loc("<eval_with_key>.2":689:13) | |
#loc618 = loc("-":1322:13) | |
#loc619 = loc("-":6887:10) | |
#loc620 = loc("<eval_with_key>.2":692:13) | |
#loc621 = loc("<eval_with_key>.2":690:22) | |
#loc622 = loc("<eval_with_key>.2":691:12) | |
#loc623 = loc("<eval_with_key>.2":693:13) | |
#loc624 = loc("<eval_with_key>.2":694:13) | |
#loc625 = loc("<eval_with_key>.2":695:12) | |
#loc626 = loc("<eval_with_key>.2":696:12) | |
#loc627 = loc("<eval_with_key>.2":697:12) | |
#loc628 = loc("<eval_with_key>.2":698:13) | |
#loc629 = loc("<eval_with_key>.2":700:16) | |
#loc630 = loc("<eval_with_key>.2":701:15) | |
#loc631 = loc("<eval_with_key>.2":702:16) | |
#loc632 = loc("<eval_with_key>.2":703:15) | |
#loc633 = loc("<eval_with_key>.2":704:13) | |
#loc634 = loc("<eval_with_key>.2":705:22) | |
#loc635 = loc("<eval_with_key>.2":706:19) | |
#loc636 = loc("<eval_with_key>.2":707:14) | |
#loc637 = loc("<eval_with_key>.2":708:15) | |
#loc638 = loc("<eval_with_key>.2":710:11) | |
#loc639 = loc("<eval_with_key>.2":711:15) | |
#loc640 = loc("<eval_with_key>.2":712:12) | |
#loc641 = loc("<eval_with_key>.2":713:22) | |
#loc642 = loc("<eval_with_key>.2":714:13) | |
#loc643 = loc("<eval_with_key>.2":715:13) | |
#loc644 = loc("<eval_with_key>.2":716:14) | |
#loc645 = loc("<eval_with_key>.2":717:13) | |
#loc646 = loc("<eval_with_key>.2":718:15) | |
#loc647 = loc("<eval_with_key>.2":720:13) | |
#loc648 = loc("<eval_with_key>.2":722:13) | |
#loc649 = loc("<eval_with_key>.2":724:11) | |
#loc650 = loc("<eval_with_key>.2":725:15) | |
#loc651 = loc("<eval_with_key>.2":726:12) | |
#loc652 = loc("<eval_with_key>.2":728:13) | |
#loc653 = loc("<eval_with_key>.2":727:22) | |
#loc654 = loc("<eval_with_key>.2":731:11) | |
#loc655 = loc("<eval_with_key>.2":732:15) | |
#loc656 = loc("<eval_with_key>.2":733:12) | |
#loc657 = loc("<eval_with_key>.2":734:22) | |
#loc658 = loc("<eval_with_key>.2":735:13) | |
#loc659 = loc("<eval_with_key>.2":736:13) | |
#loc660 = loc("<eval_with_key>.2":737:14) | |
#loc661 = loc("<eval_with_key>.2":738:13) | |
#loc662 = loc("<eval_with_key>.2":739:15) | |
#loc663 = loc("<eval_with_key>.2":741:13) | |
#loc664 = loc("<eval_with_key>.2":743:13) | |
#loc665 = loc("<eval_with_key>.2":745:11) | |
#loc666 = loc("<eval_with_key>.2":746:15) | |
#loc667 = loc("<eval_with_key>.2":747:12) | |
#loc668 = loc("<eval_with_key>.2":748:22) | |
#loc669 = loc("<eval_with_key>.2":749:15) | |
#loc670 = loc("<eval_with_key>.2":750:19) | |
#loc671 = loc("<eval_with_key>.2":752:11) | |
#loc672 = loc("<eval_with_key>.2":753:15) | |
#loc673 = loc("<eval_with_key>.2":754:12) | |
#loc674 = loc("<eval_with_key>.2":755:22) | |
#loc675 = loc("<eval_with_key>.2":756:15) | |
#loc676 = loc("<eval_with_key>.2":757:19) | |
#loc677 = loc("<eval_with_key>.2":759:11) | |
#loc678 = loc("<eval_with_key>.2":760:15) | |
#loc679 = loc("<eval_with_key>.2":761:12) | |
#loc680 = loc("<eval_with_key>.2":762:22) | |
#loc681 = loc("<eval_with_key>.2":763:15) | |
#loc682 = loc("<eval_with_key>.2":764:19) | |
#loc683 = loc("<eval_with_key>.2":765:19) | |
#loc684 = loc("<eval_with_key>.2":766:16) | |
#loc685 = loc("<eval_with_key>.2":767:15) | |
#loc686 = loc("<eval_with_key>.2":768:16) | |
#loc687 = loc("<eval_with_key>.2":769:15) | |
#loc688 = loc("<eval_with_key>.2":770:13) | |
#loc689 = loc("<eval_with_key>.2":771:22) | |
#loc690 = loc("<eval_with_key>.2":772:13) | |
#loc691 = loc("<eval_with_key>.2":773:13) | |
#loc692 = loc("<eval_with_key>.2":774:13) | |
#loc693 = loc("<eval_with_key>.2":775:12) | |
#loc694 = loc("<eval_with_key>.2":776:12) | |
#loc695 = loc("<eval_with_key>.2":777:13) | |
#loc696 = loc("<eval_with_key>.2":779:16) | |
#loc697 = loc("<eval_with_key>.2":780:15) | |
#loc698 = loc("<eval_with_key>.2":781:16) | |
#loc699 = loc("<eval_with_key>.2":782:15) | |
#loc700 = loc("<eval_with_key>.2":783:13) | |
#loc701 = loc("<eval_with_key>.2":784:22) | |
#loc702 = loc("<eval_with_key>.2":785:19) | |
#loc703 = loc("<eval_with_key>.2":786:14) | |
#loc704 = loc("<eval_with_key>.2":787:15) | |
#loc705 = loc("<eval_with_key>.2":789:11) | |
#loc706 = loc("<eval_with_key>.2":790:15) | |
#loc707 = loc("<eval_with_key>.2":791:12) | |
#loc708 = loc("<eval_with_key>.2":792:22) | |
#loc709 = loc("<eval_with_key>.2":793:13) | |
#loc710 = loc("<eval_with_key>.2":794:13) | |
#loc711 = loc("<eval_with_key>.2":795:14) | |
#loc712 = loc("<eval_with_key>.2":796:13) | |
#loc713 = loc("<eval_with_key>.2":797:15) | |
#loc714 = loc("<eval_with_key>.2":799:13) | |
#loc715 = loc("<eval_with_key>.2":801:13) | |
#loc716 = loc("<eval_with_key>.2":803:11) | |
#loc717 = loc("<eval_with_key>.2":804:15) | |
#loc718 = loc("<eval_with_key>.2":805:12) | |
#loc719 = loc("<eval_with_key>.2":806:22) | |
#loc720 = loc("<eval_with_key>.2":807:15) | |
#loc721 = loc("<eval_with_key>.2":808:19) | |
#loc722 = loc("<eval_with_key>.2":810:11) | |
#loc723 = loc("<eval_with_key>.2":811:15) | |
#loc724 = loc("<eval_with_key>.2":812:12) | |
#loc725 = loc("<eval_with_key>.2":813:22) | |
#loc726 = loc("<eval_with_key>.2":814:15) | |
#loc727 = loc("<eval_with_key>.2":815:19) | |
#loc728 = loc("<eval_with_key>.2":817:11) | |
#loc729 = loc("<eval_with_key>.2":818:15) | |
#loc730 = loc("<eval_with_key>.2":819:12) | |
#loc731 = loc("<eval_with_key>.2":820:22) | |
#loc732 = loc("<eval_with_key>.2":821:15) | |
#loc733 = loc("<eval_with_key>.2":822:19) | |
#loc734 = loc("<eval_with_key>.2":823:19) | |
#loc735 = loc("<eval_with_key>.2":824:16) | |
#loc736 = loc("<eval_with_key>.2":825:15) | |
#loc737 = loc("<eval_with_key>.2":826:16) | |
#loc738 = loc("<eval_with_key>.2":827:15) | |
#loc739 = loc("<eval_with_key>.2":828:13) | |
#loc740 = loc("<eval_with_key>.2":829:22) | |
#loc741 = loc("<eval_with_key>.2":830:14) | |
#loc742 = loc("<eval_with_key>.2":831:13) | |
#loc743 = loc("<eval_with_key>.2":832:13) | |
#loc744 = loc("<eval_with_key>.2":833:12) | |
#loc745 = loc("<eval_with_key>.2":834:13) | |
#loc746 = loc("<eval_with_key>.2":835:13) | |
#loc747 = loc("<eval_with_key>.2":837:16) | |
#loc748 = loc("<eval_with_key>.2":838:15) | |
#loc749 = loc("<eval_with_key>.2":839:16) | |
#loc750 = loc("<eval_with_key>.2":840:15) | |
#loc751 = loc("<eval_with_key>.2":841:13) | |
#loc752 = loc("<eval_with_key>.2":842:22) | |
#loc753 = loc("<eval_with_key>.2":843:19) | |
#loc754 = loc("<eval_with_key>.2":844:15) | |
#loc755 = loc("<eval_with_key>.2":845:15) | |
#loc756 = loc("<eval_with_key>.2":847:11) | |
#loc757 = loc("<eval_with_key>.2":848:15) | |
#loc758 = loc("<eval_with_key>.2":849:12) | |
#loc759 = loc("<eval_with_key>.2":850:22) | |
#loc760 = loc("<eval_with_key>.2":851:13) | |
#loc761 = loc("<eval_with_key>.2":852:13) | |
#loc762 = loc("<eval_with_key>.2":853:14) | |
#loc763 = loc("<eval_with_key>.2":854:13) | |
#loc764 = loc("<eval_with_key>.2":855:15) | |
#loc765 = loc("<eval_with_key>.2":857:13) | |
#loc766 = loc("<eval_with_key>.2":859:13) | |
#loc767 = loc("<eval_with_key>.2":861:11) | |
#loc768 = loc("<eval_with_key>.2":862:15) | |
#loc769 = loc("<eval_with_key>.2":863:12) | |
#loc770 = loc("<eval_with_key>.2":864:22) | |
#loc771 = loc("<eval_with_key>.2":865:13) | |
#loc772 = loc("<eval_with_key>.2":868:11) | |
#loc773 = loc("<eval_with_key>.2":869:15) | |
#loc774 = loc("<eval_with_key>.2":870:12) | |
#loc775 = loc("<eval_with_key>.2":871:22) | |
#loc776 = loc("<eval_with_key>.2":872:13) | |
#loc777 = loc("<eval_with_key>.2":873:13) | |
#loc778 = loc("<eval_with_key>.2":874:14) | |
#loc779 = loc("<eval_with_key>.2":875:13) | |
#loc780 = loc("<eval_with_key>.2":876:15) | |
#loc781 = loc("<eval_with_key>.2":878:13) | |
#loc782 = loc("<eval_with_key>.2":880:13) | |
#loc783 = loc("<eval_with_key>.2":882:11) | |
#loc784 = loc("<eval_with_key>.2":883:15) | |
#loc785 = loc("<eval_with_key>.2":884:12) | |
#loc786 = loc("<eval_with_key>.2":885:22) | |
#loc787 = loc("<eval_with_key>.2":886:15) | |
#loc788 = loc("<eval_with_key>.2":887:19) | |
#loc789 = loc("<eval_with_key>.2":889:11) | |
#loc790 = loc("<eval_with_key>.2":890:15) | |
#loc791 = loc("<eval_with_key>.2":891:12) | |
#loc792 = loc("<eval_with_key>.2":892:22) | |
#loc793 = loc("<eval_with_key>.2":893:15) | |
#loc794 = loc("<eval_with_key>.2":894:19) | |
#loc795 = loc("<eval_with_key>.2":896:11) | |
#loc796 = loc("<eval_with_key>.2":897:15) | |
#loc797 = loc("<eval_with_key>.2":898:12) | |
#loc798 = loc("<eval_with_key>.2":899:22) | |
#loc799 = loc("<eval_with_key>.2":900:15) | |
#loc800 = loc("<eval_with_key>.2":901:19) | |
#loc801 = loc("<eval_with_key>.2":902:19) | |
#loc802 = loc("<eval_with_key>.2":903:16) | |
#loc803 = loc("<eval_with_key>.2":904:15) | |
#loc804 = loc("<eval_with_key>.2":905:16) | |
#loc805 = loc("<eval_with_key>.2":906:15) | |
#loc806 = loc("<eval_with_key>.2":907:13) | |
#loc807 = loc("<eval_with_key>.2":908:22) | |
#loc808 = loc("<eval_with_key>.2":909:14) | |
#loc809 = loc("<eval_with_key>.2":910:14) | |
#loc810 = loc("<eval_with_key>.2":911:13) | |
#loc811 = loc("<eval_with_key>.2":912:13) | |
#loc812 = loc("<eval_with_key>.2":913:13) | |
#loc813 = loc("<eval_with_key>.2":914:13) | |
#loc814 = loc("<eval_with_key>.2":916:16) | |
#loc815 = loc("<eval_with_key>.2":917:15) | |
#loc816 = loc("<eval_with_key>.2":918:16) | |
#loc817 = loc("<eval_with_key>.2":919:15) | |
#loc818 = loc("<eval_with_key>.2":920:13) | |
#loc819 = loc("<eval_with_key>.2":921:22) | |
#loc820 = loc("<eval_with_key>.2":922:19) | |
#loc821 = loc("<eval_with_key>.2":923:15) | |
#loc822 = loc("<eval_with_key>.2":924:15) | |
#loc823 = loc("<eval_with_key>.2":926:11) | |
#loc824 = loc("<eval_with_key>.2":927:15) | |
#loc825 = loc("<eval_with_key>.2":928:12) | |
#loc826 = loc("<eval_with_key>.2":929:22) | |
#loc827 = loc("<eval_with_key>.2":930:13) | |
#loc828 = loc("<eval_with_key>.2":931:13) | |
#loc829 = loc("<eval_with_key>.2":932:14) | |
#loc830 = loc("<eval_with_key>.2":933:13) | |
#loc831 = loc("<eval_with_key>.2":934:15) | |
#loc832 = loc("<eval_with_key>.2":936:13) | |
#loc833 = loc("<eval_with_key>.2":938:13) | |
#loc834 = loc("<eval_with_key>.2":940:11) | |
#loc835 = loc("<eval_with_key>.2":941:15) | |
#loc836 = loc("<eval_with_key>.2":942:12) | |
#loc837 = loc("<eval_with_key>.2":943:22) | |
#loc838 = loc("<eval_with_key>.2":944:15) | |
#loc839 = loc("<eval_with_key>.2":945:19) | |
#loc840 = loc("<eval_with_key>.2":947:11) | |
#loc841 = loc("<eval_with_key>.2":948:15) | |
#loc842 = loc("<eval_with_key>.2":949:12) | |
#loc843 = loc("<eval_with_key>.2":950:22) | |
#loc844 = loc("<eval_with_key>.2":951:15) | |
#loc845 = loc("<eval_with_key>.2":952:19) | |
#loc846 = loc("<eval_with_key>.2":954:11) | |
#loc847 = loc("<eval_with_key>.2":955:15) | |
#loc848 = loc("<eval_with_key>.2":956:12) | |
#loc849 = loc("<eval_with_key>.2":957:22) | |
#loc850 = loc("<eval_with_key>.2":958:15) | |
#loc851 = loc("<eval_with_key>.2":959:19) | |
#loc852 = loc("<eval_with_key>.2":960:19) | |
#loc853 = loc("<eval_with_key>.2":961:16) | |
#loc854 = loc("<eval_with_key>.2":962:15) | |
#loc855 = loc("<eval_with_key>.2":963:16) | |
#loc856 = loc("<eval_with_key>.2":964:15) | |
#loc857 = loc("<eval_with_key>.2":965:13) | |
#loc858 = loc("<eval_with_key>.2":966:22) | |
#loc859 = loc("<eval_with_key>.2":967:14) | |
#loc860 = loc("<eval_with_key>.2":968:14) | |
#loc861 = loc("<eval_with_key>.2":969:13) | |
#loc862 = loc("<eval_with_key>.2":970:13) | |
#loc863 = loc("<eval_with_key>.2":971:13) | |
#loc864 = loc("<eval_with_key>.2":972:13) | |
#loc865 = loc("<eval_with_key>.2":974:16) | |
#loc866 = loc("<eval_with_key>.2":975:15) | |
#loc867 = loc("<eval_with_key>.2":976:16) | |
#loc868 = loc("<eval_with_key>.2":977:15) | |
#loc869 = loc("<eval_with_key>.2":978:13) | |
#loc870 = loc("<eval_with_key>.2":979:22) | |
#loc871 = loc("<eval_with_key>.2":980:19) | |
#loc872 = loc("<eval_with_key>.2":981:15) | |
#loc873 = loc("<eval_with_key>.2":982:15) | |
#loc874 = loc("<eval_with_key>.2":984:11) | |
#loc875 = loc("<eval_with_key>.2":985:15) | |
#loc876 = loc("<eval_with_key>.2":986:12) | |
#loc877 = loc("<eval_with_key>.2":987:22) | |
#loc878 = loc("<eval_with_key>.2":988:13) | |
#loc879 = loc("<eval_with_key>.2":989:13) | |
#loc880 = loc("<eval_with_key>.2":990:14) | |
#loc881 = loc("<eval_with_key>.2":991:13) | |
#loc882 = loc("<eval_with_key>.2":992:15) | |
#loc883 = loc("<eval_with_key>.2":994:13) | |
#loc884 = loc("<eval_with_key>.2":996:13) | |
#loc885 = loc("<eval_with_key>.2":998:11) | |
#loc886 = loc("<eval_with_key>.2":999:15) | |
#loc887 = loc("<eval_with_key>.2":1000:12) | |
#loc888 = loc("<eval_with_key>.2":1001:22) | |
#loc889 = loc("<eval_with_key>.2":1002:13) | |
#loc890 = loc("<eval_with_key>.2":1005:11) | |
#loc891 = loc("<eval_with_key>.2":1006:15) | |
#loc892 = loc("<eval_with_key>.2":1007:12) | |
#loc893 = loc("<eval_with_key>.2":1008:22) | |
#loc894 = loc("<eval_with_key>.2":1009:13) | |
#loc895 = loc("<eval_with_key>.2":1010:13) | |
#loc896 = loc("<eval_with_key>.2":1011:14) | |
#loc897 = loc("<eval_with_key>.2":1012:13) | |
#loc898 = loc("<eval_with_key>.2":1013:15) | |
#loc899 = loc("<eval_with_key>.2":1015:13) | |
#loc900 = loc("<eval_with_key>.2":1017:13) | |
#loc901 = loc("<eval_with_key>.2":1019:11) | |
#loc902 = loc("<eval_with_key>.2":1020:15) | |
#loc903 = loc("<eval_with_key>.2":1021:12) | |
#loc904 = loc("<eval_with_key>.2":1022:22) | |
#loc905 = loc("<eval_with_key>.2":1023:15) | |
#loc906 = loc("<eval_with_key>.2":1024:19) | |
#loc907 = loc("<eval_with_key>.2":1026:11) | |
#loc908 = loc("<eval_with_key>.2":1027:15) | |
#loc909 = loc("<eval_with_key>.2":1028:12) | |
#loc910 = loc("<eval_with_key>.2":1029:22) | |
#loc911 = loc("<eval_with_key>.2":1030:15) | |
#loc912 = loc("<eval_with_key>.2":1031:19) | |
#loc913 = loc("<eval_with_key>.2":1033:11) | |
#loc914 = loc("<eval_with_key>.2":1034:15) | |
#loc915 = loc("<eval_with_key>.2":1035:12) | |
#loc916 = loc("<eval_with_key>.2":1036:22) | |
#loc917 = loc("<eval_with_key>.2":1037:15) | |
#loc918 = loc("<eval_with_key>.2":1038:19) | |
#loc919 = loc("<eval_with_key>.2":1039:19) | |
#loc920 = loc("<eval_with_key>.2":1040:16) | |
#loc921 = loc("<eval_with_key>.2":1041:15) | |
#loc922 = loc("<eval_with_key>.2":1042:16) | |
#loc923 = loc("<eval_with_key>.2":1043:15) | |
#loc924 = loc("<eval_with_key>.2":1044:13) | |
#loc925 = loc("<eval_with_key>.2":1045:22) | |
#loc926 = loc("<eval_with_key>.2":1046:14) | |
#loc927 = loc("<eval_with_key>.2":1047:14) | |
#loc928 = loc("<eval_with_key>.2":1048:13) | |
#loc929 = loc("<eval_with_key>.2":1049:13) | |
#loc930 = loc("<eval_with_key>.2":1050:13) | |
#loc931 = loc("<eval_with_key>.2":1051:13) | |
#loc932 = loc("<eval_with_key>.2":1053:16) | |
#loc933 = loc("<eval_with_key>.2":1054:15) | |
#loc934 = loc("<eval_with_key>.2":1055:16) | |
#loc935 = loc("<eval_with_key>.2":1056:15) | |
#loc936 = loc("<eval_with_key>.2":1057:13) | |
#loc937 = loc("<eval_with_key>.2":1058:22) | |
#loc938 = loc("<eval_with_key>.2":1059:19) | |
#loc939 = loc("<eval_with_key>.2":1060:15) | |
#loc940 = loc("<eval_with_key>.2":1061:15) | |
#loc941 = loc("<eval_with_key>.2":1063:11) | |
#loc942 = loc("<eval_with_key>.2":1064:15) | |
#loc943 = loc("<eval_with_key>.2":1065:12) | |
#loc944 = loc("<eval_with_key>.2":1066:22) | |
#loc945 = loc("<eval_with_key>.2":1067:13) | |
#loc946 = loc("<eval_with_key>.2":1068:13) | |
#loc947 = loc("<eval_with_key>.2":1069:14) | |
#loc948 = loc("<eval_with_key>.2":1070:13) | |
#loc949 = loc("<eval_with_key>.2":1071:15) | |
#loc950 = loc("<eval_with_key>.2":1073:13) | |
#loc951 = loc("<eval_with_key>.2":1075:13) | |
#loc952 = loc("<eval_with_key>.2":1077:11) | |
#loc953 = loc("<eval_with_key>.2":1078:15) | |
#loc954 = loc("<eval_with_key>.2":1079:12) | |
#loc955 = loc("<eval_with_key>.2":1080:22) | |
#loc956 = loc("<eval_with_key>.2":1081:15) | |
#loc957 = loc("<eval_with_key>.2":1082:19) | |
#loc958 = loc("<eval_with_key>.2":1084:11) | |
#loc959 = loc("<eval_with_key>.2":1085:15) | |
#loc960 = loc("<eval_with_key>.2":1086:12) | |
#loc961 = loc("<eval_with_key>.2":1087:22) | |
#loc962 = loc("<eval_with_key>.2":1088:15) | |
#loc963 = loc("<eval_with_key>.2":1089:19) | |
#loc964 = loc("<eval_with_key>.2":1091:11) | |
#loc965 = loc("<eval_with_key>.2":1092:15) | |
#loc966 = loc("<eval_with_key>.2":1093:12) | |
#loc967 = loc("<eval_with_key>.2":1094:22) | |
#loc968 = loc("<eval_with_key>.2":1095:15) | |
#loc969 = loc("<eval_with_key>.2":1096:19) | |
#loc970 = loc("<eval_with_key>.2":1097:19) | |
#loc971 = loc("<eval_with_key>.2":1098:16) | |
#loc972 = loc("<eval_with_key>.2":1099:15) | |
#loc973 = loc("<eval_with_key>.2":1100:16) | |
#loc974 = loc("<eval_with_key>.2":1101:15) | |
#loc975 = loc("<eval_with_key>.2":1102:13) | |
#loc976 = loc("<eval_with_key>.2":1103:22) | |
#loc977 = loc("<eval_with_key>.2":1104:14) | |
#loc978 = loc("<eval_with_key>.2":1105:14) | |
#loc979 = loc("<eval_with_key>.2":1106:13) | |
#loc980 = loc("<eval_with_key>.2":1107:13) | |
#loc981 = loc("<eval_with_key>.2":1108:13) | |
#loc982 = loc("<eval_with_key>.2":1109:13) | |
#loc983 = loc("<eval_with_key>.2":1111:16) | |
#loc984 = loc("<eval_with_key>.2":1112:15) | |
#loc985 = loc("<eval_with_key>.2":1113:16) | |
#loc986 = loc("<eval_with_key>.2":1114:15) | |
#loc987 = loc("<eval_with_key>.2":1115:13) | |
#loc988 = loc("<eval_with_key>.2":1116:23) | |
#loc989 = loc("<eval_with_key>.2":1117:19) | |
#loc990 = loc("<eval_with_key>.2":1118:15) | |
#loc991 = loc("<eval_with_key>.2":1119:15) | |
#loc992 = loc("<eval_with_key>.2":1121:11) | |
#loc993 = loc("<eval_with_key>.2":1122:15) | |
#loc994 = loc("<eval_with_key>.2":1123:12) | |
#loc995 = loc("<eval_with_key>.2":1124:23) | |
#loc996 = loc("<eval_with_key>.2":1125:13) | |
#loc997 = loc("<eval_with_key>.2":1126:13) | |
#loc998 = loc("<eval_with_key>.2":1127:14) | |
#loc999 = loc("<eval_with_key>.2":1128:13) | |
#loc1000 = loc("<eval_with_key>.2":1129:15) | |
#loc1001 = loc("<eval_with_key>.2":1131:13) | |
#loc1002 = loc("<eval_with_key>.2":1133:13) | |
#loc1003 = loc("<eval_with_key>.2":1135:11) | |
#loc1004 = loc("<eval_with_key>.2":1136:15) | |
#loc1005 = loc("<eval_with_key>.2":1137:12) | |
#loc1006 = loc("<eval_with_key>.2":1138:23) | |
#loc1007 = loc("<eval_with_key>.2":1139:13) | |
#loc1008 = loc("<eval_with_key>.2":1142:11) | |
#loc1009 = loc("<eval_with_key>.2":1143:15) | |
#loc1010 = loc("<eval_with_key>.2":1144:12) | |
#loc1011 = loc("<eval_with_key>.2":1145:23) | |
#loc1012 = loc("<eval_with_key>.2":1146:13) | |
#loc1013 = loc("<eval_with_key>.2":1147:13) | |
#loc1014 = loc("<eval_with_key>.2":1148:14) | |
#loc1015 = loc("<eval_with_key>.2":1149:13) | |
#loc1016 = loc("<eval_with_key>.2":1150:15) | |
#loc1017 = loc("<eval_with_key>.2":1152:13) | |
#loc1018 = loc("<eval_with_key>.2":1154:13) | |
#loc1019 = loc("<eval_with_key>.2":1156:11) | |
#loc1020 = loc("<eval_with_key>.2":1157:15) | |
#loc1021 = loc("<eval_with_key>.2":1158:12) | |
#loc1022 = loc("<eval_with_key>.2":1159:23) | |
#loc1023 = loc("<eval_with_key>.2":1160:15) | |
#loc1024 = loc("<eval_with_key>.2":1161:19) | |
#loc1025 = loc("<eval_with_key>.2":1163:11) | |
#loc1026 = loc("<eval_with_key>.2":1164:15) | |
#loc1027 = loc("<eval_with_key>.2":1165:12) | |
#loc1028 = loc("<eval_with_key>.2":1166:23) | |
#loc1029 = loc("<eval_with_key>.2":1167:15) | |
#loc1030 = loc("<eval_with_key>.2":1168:19) | |
#loc1031 = loc("<eval_with_key>.2":1170:11) | |
#loc1032 = loc("<eval_with_key>.2":1171:15) | |
#loc1033 = loc("<eval_with_key>.2":1172:12) | |
#loc1034 = loc("<eval_with_key>.2":1173:23) | |
#loc1035 = loc("<eval_with_key>.2":1174:15) | |
#loc1036 = loc("<eval_with_key>.2":1175:19) | |
#loc1037 = loc("<eval_with_key>.2":1176:19) | |
#loc1038 = loc("<eval_with_key>.2":1177:16) | |
#loc1039 = loc("<eval_with_key>.2":1178:15) | |
#loc1040 = loc("<eval_with_key>.2":1179:16) | |
#loc1041 = loc("<eval_with_key>.2":1180:15) | |
#loc1042 = loc("<eval_with_key>.2":1181:13) | |
#loc1043 = loc("<eval_with_key>.2":1182:23) | |
#loc1044 = loc("<eval_with_key>.2":1183:14) | |
#loc1045 = loc("<eval_with_key>.2":1184:14) | |
#loc1046 = loc("<eval_with_key>.2":1185:13) | |
#loc1047 = loc("<eval_with_key>.2":1186:13) | |
#loc1048 = loc("<eval_with_key>.2":1187:13) | |
#loc1049 = loc("<eval_with_key>.2":1188:13) | |
#loc1050 = loc("<eval_with_key>.2":1190:16) | |
#loc1051 = loc("<eval_with_key>.2":1191:15) | |
#loc1052 = loc("<eval_with_key>.2":1192:16) | |
#loc1053 = loc("<eval_with_key>.2":1193:15) | |
#loc1054 = loc("<eval_with_key>.2":1194:13) | |
#loc1055 = loc("<eval_with_key>.2":1195:23) | |
#loc1056 = loc("<eval_with_key>.2":1196:19) | |
#loc1057 = loc("<eval_with_key>.2":1197:15) | |
#loc1058 = loc("<eval_with_key>.2":1198:15) | |
#loc1059 = loc("<eval_with_key>.2":1200:11) | |
#loc1060 = loc("<eval_with_key>.2":1201:15) | |
#loc1061 = loc("<eval_with_key>.2":1202:12) | |
#loc1062 = loc("<eval_with_key>.2":1203:23) | |
#loc1063 = loc("<eval_with_key>.2":1204:13) | |
#loc1064 = loc("<eval_with_key>.2":1205:13) | |
#loc1065 = loc("<eval_with_key>.2":1206:14) | |
#loc1066 = loc("<eval_with_key>.2":1207:13) | |
#loc1067 = loc("<eval_with_key>.2":1208:15) | |
#loc1068 = loc("<eval_with_key>.2":1210:13) | |
#loc1069 = loc("<eval_with_key>.2":1212:13) | |
#loc1070 = loc("<eval_with_key>.2":1214:11) | |
#loc1071 = loc("<eval_with_key>.2":1215:15) | |
#loc1072 = loc("<eval_with_key>.2":1216:12) | |
#loc1073 = loc("<eval_with_key>.2":1217:23) | |
#loc1074 = loc("<eval_with_key>.2":1218:15) | |
#loc1075 = loc("<eval_with_key>.2":1219:19) | |
#loc1076 = loc("<eval_with_key>.2":1221:11) | |
#loc1077 = loc("<eval_with_key>.2":1222:15) | |
#loc1078 = loc("<eval_with_key>.2":1223:12) | |
#loc1079 = loc("<eval_with_key>.2":1224:23) | |
#loc1080 = loc("<eval_with_key>.2":1225:15) | |
#loc1081 = loc("<eval_with_key>.2":1226:19) | |
#loc1082 = loc("<eval_with_key>.2":1228:11) | |
#loc1083 = loc("<eval_with_key>.2":1229:15) | |
#loc1084 = loc("<eval_with_key>.2":1230:12) | |
#loc1085 = loc("<eval_with_key>.2":1231:23) | |
#loc1086 = loc("<eval_with_key>.2":1232:15) | |
#loc1087 = loc("<eval_with_key>.2":1233:19) | |
#loc1088 = loc("<eval_with_key>.2":1234:19) | |
#loc1089 = loc("<eval_with_key>.2":1235:16) | |
#loc1090 = loc("<eval_with_key>.2":1236:15) | |
#loc1091 = loc("<eval_with_key>.2":1237:16) | |
#loc1092 = loc("<eval_with_key>.2":1238:15) | |
#loc1093 = loc("<eval_with_key>.2":1239:13) | |
#loc1094 = loc("<eval_with_key>.2":1240:23) | |
#loc1095 = loc("<eval_with_key>.2":1241:14) | |
#loc1096 = loc("<eval_with_key>.2":1242:14) | |
#loc1097 = loc("<eval_with_key>.2":1243:13) | |
#loc1098 = loc("<eval_with_key>.2":1244:13) | |
#loc1099 = loc("<eval_with_key>.2":1245:13) | |
#loc1100 = loc("<eval_with_key>.2":1246:13) | |
#loc1101 = loc("<eval_with_key>.2":1248:16) | |
#loc1102 = loc("<eval_with_key>.2":1249:15) | |
#loc1103 = loc("<eval_with_key>.2":1250:16) | |
#loc1104 = loc("<eval_with_key>.2":1251:15) | |
#loc1105 = loc("<eval_with_key>.2":1252:13) | |
#loc1106 = loc("<eval_with_key>.2":1253:23) | |
#loc1107 = loc("<eval_with_key>.2":1254:19) | |
#loc1108 = loc("<eval_with_key>.2":1255:15) | |
#loc1109 = loc("<eval_with_key>.2":1256:15) | |
#loc1110 = loc("<eval_with_key>.2":1258:11) | |
#loc1111 = loc("<eval_with_key>.2":1259:15) | |
#loc1112 = loc("<eval_with_key>.2":1260:12) | |
#loc1113 = loc("<eval_with_key>.2":1261:23) | |
#loc1114 = loc("<eval_with_key>.2":1262:13) | |
#loc1115 = loc("<eval_with_key>.2":1263:13) | |
#loc1116 = loc("<eval_with_key>.2":1264:14) | |
#loc1117 = loc("<eval_with_key>.2":1265:13) | |
#loc1118 = loc("<eval_with_key>.2":1266:15) | |
#loc1119 = loc("<eval_with_key>.2":1268:13) | |
#loc1120 = loc("<eval_with_key>.2":1270:13) | |
#loc1121 = loc("<eval_with_key>.2":1272:11) | |
#loc1122 = loc("<eval_with_key>.2":1273:15) | |
#loc1123 = loc("<eval_with_key>.2":1274:12) | |
#loc1124 = loc("<eval_with_key>.2":1275:23) | |
#loc1125 = loc("<eval_with_key>.2":1276:14) | |
#loc1126 = loc("<eval_with_key>.2":1279:11) | |
#loc1127 = loc("<eval_with_key>.2":1280:15) | |
#loc1128 = loc("<eval_with_key>.2":1281:12) | |
#loc1129 = loc("<eval_with_key>.2":1282:23) | |
#loc1130 = loc("<eval_with_key>.2":1283:13) | |
#loc1131 = loc("<eval_with_key>.2":1284:13) | |
#loc1132 = loc("<eval_with_key>.2":1285:14) | |
#loc1133 = loc("<eval_with_key>.2":1286:13) | |
#loc1134 = loc("<eval_with_key>.2":1287:15) | |
#loc1135 = loc("<eval_with_key>.2":1289:13) | |
#loc1136 = loc("<eval_with_key>.2":1291:13) | |
#loc1137 = loc("<eval_with_key>.2":1293:11) | |
#loc1138 = loc("<eval_with_key>.2":1294:15) | |
#loc1139 = loc("<eval_with_key>.2":1295:12) | |
#loc1140 = loc("<eval_with_key>.2":1296:23) | |
#loc1141 = loc("<eval_with_key>.2":1297:15) | |
#loc1142 = loc("<eval_with_key>.2":1298:19) | |
#loc1143 = loc("<eval_with_key>.2":1300:11) | |
#loc1144 = loc("<eval_with_key>.2":1301:15) | |
#loc1145 = loc("<eval_with_key>.2":1302:12) | |
#loc1146 = loc("<eval_with_key>.2":1303:23) | |
#loc1147 = loc("<eval_with_key>.2":1304:15) | |
#loc1148 = loc("<eval_with_key>.2":1305:19) | |
#loc1149 = loc("<eval_with_key>.2":1307:11) | |
#loc1150 = loc("<eval_with_key>.2":1308:15) | |
#loc1151 = loc("<eval_with_key>.2":1309:12) | |
#loc1152 = loc("<eval_with_key>.2":1310:23) | |
#loc1153 = loc("<eval_with_key>.2":1311:15) | |
#loc1154 = loc("<eval_with_key>.2":1312:19) | |
#loc1155 = loc("<eval_with_key>.2":1313:19) | |
#loc1156 = loc("<eval_with_key>.2":1314:16) | |
#loc1157 = loc("<eval_with_key>.2":1315:15) | |
#loc1158 = loc("<eval_with_key>.2":1316:16) | |
#loc1159 = loc("<eval_with_key>.2":1317:15) | |
#loc1160 = loc("<eval_with_key>.2":1318:13) | |
#loc1161 = loc("<eval_with_key>.2":1319:23) | |
#loc1162 = loc("<eval_with_key>.2":1320:14) | |
#loc1163 = loc("<eval_with_key>.2":1321:14) | |
#loc1164 = loc("<eval_with_key>.2":1322:13) | |
#loc1165 = loc("<eval_with_key>.2":1323:13) | |
#loc1166 = loc("<eval_with_key>.2":1324:13) | |
#loc1167 = loc("<eval_with_key>.2":1325:13) | |
#loc1168 = loc("<eval_with_key>.2":1327:16) | |
#loc1169 = loc("<eval_with_key>.2":1328:15) | |
#loc1170 = loc("<eval_with_key>.2":1329:16) | |
#loc1171 = loc("<eval_with_key>.2":1330:15) | |
#loc1172 = loc("<eval_with_key>.2":1331:13) | |
#loc1173 = loc("<eval_with_key>.2":1332:23) | |
#loc1174 = loc("<eval_with_key>.2":1333:19) | |
#loc1175 = loc("<eval_with_key>.2":1334:15) | |
#loc1176 = loc("<eval_with_key>.2":1335:15) | |
#loc1177 = loc("<eval_with_key>.2":1337:11) | |
#loc1178 = loc("<eval_with_key>.2":1338:15) | |
#loc1179 = loc("<eval_with_key>.2":1339:12) | |
#loc1180 = loc("<eval_with_key>.2":1340:23) | |
#loc1181 = loc("<eval_with_key>.2":1341:13) | |
#loc1182 = loc("<eval_with_key>.2":1342:13) | |
#loc1183 = loc("<eval_with_key>.2":1343:14) | |
#loc1184 = loc("<eval_with_key>.2":1344:13) | |
#loc1185 = loc("<eval_with_key>.2":1345:15) | |
#loc1186 = loc("<eval_with_key>.2":1347:13) | |
#loc1187 = loc("<eval_with_key>.2":1349:13) | |
#loc1188 = loc("<eval_with_key>.2":1351:11) | |
#loc1189 = loc("<eval_with_key>.2":1352:15) | |
#loc1190 = loc("<eval_with_key>.2":1353:12) | |
#loc1191 = loc("<eval_with_key>.2":1354:23) | |
#loc1192 = loc("<eval_with_key>.2":1355:15) | |
#loc1193 = loc("<eval_with_key>.2":1356:19) | |
#loc1194 = loc("<eval_with_key>.2":1358:11) | |
#loc1195 = loc("<eval_with_key>.2":1359:15) | |
#loc1196 = loc("<eval_with_key>.2":1360:12) | |
#loc1197 = loc("<eval_with_key>.2":1361:23) | |
#loc1198 = loc("<eval_with_key>.2":1362:15) | |
#loc1199 = loc("<eval_with_key>.2":1363:19) | |
#loc1200 = loc("<eval_with_key>.2":1365:11) | |
#loc1201 = loc("<eval_with_key>.2":1366:15) | |
#loc1202 = loc("<eval_with_key>.2":1367:12) | |
#loc1203 = loc("<eval_with_key>.2":1368:23) | |
#loc1204 = loc("<eval_with_key>.2":1369:15) | |
#loc1205 = loc("<eval_with_key>.2":1370:19) | |
#loc1206 = loc("<eval_with_key>.2":1371:19) | |
#loc1207 = loc("<eval_with_key>.2":1372:16) | |
#loc1208 = loc("<eval_with_key>.2":1373:15) | |
#loc1209 = loc("<eval_with_key>.2":1374:16) | |
#loc1210 = loc("<eval_with_key>.2":1375:15) | |
#loc1211 = loc("<eval_with_key>.2":1376:13) | |
#loc1212 = loc("<eval_with_key>.2":1377:23) | |
#loc1213 = loc("<eval_with_key>.2":1378:14) | |
#loc1214 = loc("<eval_with_key>.2":1379:14) | |
#loc1215 = loc("<eval_with_key>.2":1380:13) | |
#loc1216 = loc("<eval_with_key>.2":1381:13) | |
#loc1217 = loc("<eval_with_key>.2":1382:13) | |
#loc1218 = loc("<eval_with_key>.2":1383:13) | |
#loc1219 = loc("<eval_with_key>.2":1385:16) | |
#loc1220 = loc("<eval_with_key>.2":1386:15) | |
#loc1221 = loc("<eval_with_key>.2":1387:16) | |
#loc1222 = loc("<eval_with_key>.2":1388:15) | |
#loc1223 = loc("<eval_with_key>.2":1389:13) | |
#loc1224 = loc("<eval_with_key>.2":1390:23) | |
#loc1225 = loc("<eval_with_key>.2":1391:19) | |
#loc1226 = loc("<eval_with_key>.2":1392:15) | |
#loc1227 = loc("<eval_with_key>.2":1393:15) | |
#loc1228 = loc("<eval_with_key>.2":1395:11) | |
#loc1229 = loc("<eval_with_key>.2":1396:15) | |
#loc1230 = loc("<eval_with_key>.2":1397:12) | |
#loc1231 = loc("<eval_with_key>.2":1398:23) | |
#loc1232 = loc("<eval_with_key>.2":1399:13) | |
#loc1233 = loc("<eval_with_key>.2":1400:13) | |
#loc1234 = loc("<eval_with_key>.2":1401:14) | |
#loc1235 = loc("<eval_with_key>.2":1402:13) | |
#loc1236 = loc("<eval_with_key>.2":1403:15) | |
#loc1237 = loc("<eval_with_key>.2":1405:13) | |
#loc1238 = loc("<eval_with_key>.2":1407:13) | |
#loc1239 = loc("<eval_with_key>.2":1409:11) | |
#loc1240 = loc("<eval_with_key>.2":1410:15) | |
#loc1241 = loc("<eval_with_key>.2":1411:12) | |
#loc1242 = loc("<eval_with_key>.2":1412:23) | |
#loc1243 = loc("<eval_with_key>.2":1413:14) | |
#loc1244 = loc("<eval_with_key>.2":1416:11) | |
#loc1245 = loc("<eval_with_key>.2":1417:15) | |
#loc1246 = loc("<eval_with_key>.2":1418:12) | |
#loc1247 = loc("<eval_with_key>.2":1419:23) | |
#loc1248 = loc("<eval_with_key>.2":1420:13) | |
#loc1249 = loc("<eval_with_key>.2":1421:13) | |
#loc1250 = loc("<eval_with_key>.2":1422:14) | |
#loc1251 = loc("<eval_with_key>.2":1423:13) | |
#loc1252 = loc("<eval_with_key>.2":1424:15) | |
#loc1253 = loc("<eval_with_key>.2":1426:13) | |
#loc1254 = loc("<eval_with_key>.2":1428:13) | |
#loc1255 = loc("<eval_with_key>.2":1429:13) | |
#loc1256 = loc("<eval_with_key>.2":1431:11) | |
#loc1257 = loc("<eval_with_key>.2":1432:15) | |
#loc1258 = loc("<eval_with_key>.2":1433:12) | |
#loc1259 = loc("<eval_with_key>.2":1434:23) | |
#loc1260 = loc(callsite(#loc26 at #loc27)) | |
#loc1261 = loc(callsite(#loc40 at #loc41)) | |
#loc1262 = loc(callsite(#loc55 at #loc56)) | |
#loc1263 = loc(callsite(#loc59 at #loc56)) | |
#loc1264 = loc(callsite(#loc77 at #loc78)) | |
#loc1265 = loc(callsite(#loc84 at #loc85)) | |
#loc1266 = loc(callsite(#loc26 at #loc141)) | |
#loc1267 = loc(callsite(#loc618 at #loc619)) | |
#loc1268 = loc(callsite(#loc1260 at #loc28)) | |
#loc1269 = loc(callsite(#loc1261 at #loc42)) | |
#loc1270 = loc(callsite(#loc1262 at #loc57)) | |
#loc1271 = loc(callsite(#loc1263 at #loc60)) | |
#loc1272 = loc(callsite(#loc1264 at #loc79)) | |
#loc1273 = loc(callsite(#loc1262 at #loc80)) | |
#loc1274 = loc(callsite(#loc1264 at #loc81)) | |
#loc1275 = loc(callsite(#loc1262 at #loc82)) | |
#loc1276 = loc(callsite(#loc1265 at #loc86)) | |
#loc1277 = loc(callsite(#loc1262 at #loc120)) | |
#loc1278 = loc(callsite(#loc1266 at #loc142)) | |
#loc1279 = loc(callsite(#loc1262 at #loc145)) | |
#loc1280 = loc(callsite(#loc1262 at #loc520)) | |
#loc1281 = loc(callsite(#loc1263 at #loc522)) | |
#loc1282 = loc(callsite(#loc1264 at #loc539)) | |
#loc1283 = loc(callsite(#loc1262 at #loc540)) | |
#loc1284 = loc(callsite(#loc1264 at #loc541)) | |
#loc1285 = loc(callsite(#loc1262 at #loc542)) | |
#loc1286 = loc(callsite(#loc1265 at #loc544)) | |
#loc1287 = loc(callsite(#loc1262 at #loc576)) | |
#loc1288 = loc(callsite(#loc1267 at #loc620)) | |
#loc1289 = loc(callsite(#loc1262 at #loc630)) | |
#loc1290 = loc(callsite(#loc1266 at #loc652)) | |
#loc1291 = loc(callsite(#loc1262 at #loc655)) | |
#loc1292 = loc(callsite(#loc1262 at #loc1259)) |
➜ t5small git:(main) ✗ torch-mlir-opt -convert-torch-to-tosa t5small_torchbackend_0327_transformers4.26.0_elide.mlir
module attributes {torch.debug_module_name = "_lambda"} {
func.func @forward(%arg0: !torch.vtensor<[1,15],si64>, %arg1: !torch.vtensor<[1,4],si64>) -> !torch.vtensor<[1,4,32128],f32> {
%0 = torch_c.to_builtin_tensor %arg1 : !torch.vtensor<[1,4],si64> -> tensor<1x4xi64>
%1 = torch_c.to_builtin_tensor %arg0 : !torch.vtensor<[1,15],si64> -> tensor<1x15xi64>
%int512 = torch.constant.int 512
%2 = torch_c.to_i64 %int512
%int0 = torch.constant.int 0
%3 = torch_c.to_i64 %int0
%int1 = torch.constant.int 1
%4 = torch_c.to_i64 %int1
%int-1 = torch.constant.int -1
%5 = torch_c.to_i64 %int-1
%true = torch.constant.bool true
%int4 = torch.constant.int 4
%6 = torch_c.to_i64 %int4
%false = torch.constant.bool false
%7 = torch_c.to_i1 %false
%none = torch.constant.none
%int15 = torch.constant.int 15
%8 = torch_c.to_i64 %int15
%9 = "tosa.const"() {value = dense_resource<elided> : tensor<512x2048xf32>} : () -> tensor<512x2048xf32>
%10 = "tosa.const"() {value = dense_resource<elided> : tensor<2048x512xf32>} : () -> tensor<2048x512xf32>
%11 = "tosa.const"() {value = dense_resource<elided> : tensor<32x8xf32>} : () -> tensor<32x8xf32>
%12 = "tosa.const"() {value = dense_resource<elided> : tensor<512x512xf32>} : () -> tensor<512x512xf32>
%13 = "tosa.const"() {value = dense_resource<elided> : tensor<512xf32>} : () -> tensor<512xf32>
%14 = "tosa.const"() {value = dense_resource<elided> : tensor<32128x512xf32>} : () -> tensor<32128x512xf32>
%15 = "tosa.const"() {value = dense<0> : tensor} : () -> tensor
%int-100 = torch.constant.int -100
%float-3.402820e38 = torch.constant.float -3.4028234663852886E+38
%int6 = torch.constant.int 6
%int9223372036854775807 = torch.constant.int 9223372036854775807
%int2 = torch.constant.int 2
%16 = torch_c.to_i64 %int2
%int3 = torch.constant.int 3
%float1.000000e00 = torch.constant.float 1.000000e+00
%float9.999990e-07 = torch.constant.float 9.9999999999999995E-7
%int8 = torch.constant.int 8
%17 = torch_c.to_i64 %int8
%int64 = torch.constant.int 64
%int16 = torch.constant.int 16
%float2.772590e00 = torch.constant.float 2.7725887222397811
%int2048 = torch.constant.int 2048
%float2.079440e00 = torch.constant.float 2.0794415416798357
%int31 = torch.constant.int 31
%float4.419420e-02 = torch.constant.float 0.044194173824159223
%int32128 = torch.constant.int 32128
%cpu = torch.constant.device "cpu"
%18 = torch.prim.ListConstruct %int1, %int4 : (!torch.int, !torch.int) -> !torch.list<int>
%19 = "tosa.const"() {value = dense<0> : tensor<1x4xi32>} : () -> tensor<1x4xi32>
%20 = "tosa.cast"(%19) : (tensor<1x4xi32>) -> tensor<1x4xi64>
%21 = torch_c.from_builtin_tensor %20 : tensor<1x4xi64> -> !torch.vtensor<[1,4],si64>
%22 = "tosa.slice"(%0) {size = array<i64: 1, 3>, start = array<i64: 0, 0>} : (tensor<1x4xi64>) -> tensor<1x3xi64>
%23 = "tosa.cast"(%22) : (tensor<1x3xi64>) -> tensor<1x3xi64>
%24 = torch_c.from_builtin_tensor %23 : tensor<1x3xi64> -> !torch.vtensor<[1,3],si64>
%25 = "tosa.slice"(%20) {size = array<i64: 1, 9223372036854775806>, start = array<i64: 0, 1>} : (tensor<1x4xi64>) -> tensor<1x3xi64>
%26 = "tosa.const"() {value = dense<[1, 2, 3]> : tensor<3xi64>} : () -> tensor<3xi64>
%27 = "tosa.cast"(%26) : (tensor<3xi64>) -> tensor<3xi64>
%28 = torch_c.from_builtin_tensor %27 : tensor<3xi64> -> !torch.vtensor<[3],si64>
%29 = torch.prim.ListConstruct %28 : (!torch.vtensor<[3],si64>) -> !torch.list<optional<vtensor>>
%30 = torch.aten._index_put_impl %21, %29, %24, %false, %false : !torch.vtensor<[1,4],si64>, !torch.list<optional<vtensor>>, !torch.vtensor<[1,3],si64>, !torch.bool, !torch.bool -> !torch.vtensor<[1,4],si64>
%31 = torch_c.to_builtin_tensor %30 : !torch.vtensor<[1,4],si64> -> tensor<1x4xi64>
%32 = "tosa.cast"(%15) : (tensor) -> tensor
%33 = torch_c.from_builtin_tensor %32 : tensor -> !torch.vtensor<[],si64>
%34 = "tosa.slice"(%31) {size = array<i64: 1, 1>, start = array<i64: 0, 0>} : (tensor<1x4xi64>) -> tensor<1x1xi64>
%35 = "tosa.reshape"(%34) {new_shape = array<i64: 1>} : (tensor<1x1xi64>) -> tensor<1xi64>
%cast = tensor.cast %35 : tensor<1xi64> to tensor<1xi64>
%36 = "tosa.const"() {value = dense<0> : tensor} : () -> tensor
%37 = torch_c.from_builtin_tensor %36 : tensor -> !torch.vtensor<[],si64>
%38 = torch.prim.ListConstruct %37 : (!torch.vtensor<[],si64>) -> !torch.list<optional>
%39 = torch.aten._index_put_impl %30, %38, %33, %false, %false : !torch.vtensor<[1,4],si64>, !torch.list<optional>, !torch.vtensor<[],si64>, !torch.bool, !torch.bool -> !torch.vtensor<[1,4],si64>
%40 = torch_c.to_builtin_tensor %39 : !torch.vtensor<[1,4],si64> -> tensor<1x4xi64>
%41 = "tosa.const"() {value = dense<-100> : tensor} : () -> tensor
%42 = "tosa.equal"(%40, %41) : (tensor<1x4xi64>, tensor) -> tensor<1x4xi1>
%43 = torch.prim.ListConstruct : () -> !torch.list
%44 = "tosa.const"() {value = dense<0> : tensor} : () -> tensor
%45 = "tosa.select"(%42, %44, %40) : (tensor<1x4xi1>, tensor, tensor<1x4xi64>) -> tensor<1x4xi64>
%46 = torch.prim.ListConstruct %int-1, %int15 : (!torch.int, !torch.int) -> !torch.list
%47 = "tosa.reshape"(%1) {new_shape = array<i64: 1, 15>} : (tensor<1x15xi64>) -> tensor<1x15xi64>
%48 = "tosa.reshape"(%14) {new_shape = array<i64: 1, 32128, 512>} : (tensor<32128x512xf32>) -> tensor<1x32128x512xf32>
%49 = "tosa.reshape"(%47) {new_shape = array<i64: 1, 15>} : (tensor<1x15xi64>) -> tensor<1x15xi64>
%50 = "tosa.cast"(%49) : (tensor<1x15xi64>) -> tensor<1x15xi32>
%51 = "tosa.gather"(%48, %50) : (tensor<1x32128x512xf32>, tensor<1x15xi32>) -> tensor<1x15x512xf32>
%52 = "tosa.reshape"(%51) {new_shape = array<i64: 1, 15, 512>} : (tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%53 = torch.prim.ListConstruct %int1, %int15 : (!torch.int, !torch.int) -> !torch.list<int>
%54 = "tosa.const"() {value = dense<1> : tensor<1x15xi32>} : () -> tensor<1x15xi32>
%55 = "tosa.cast"(%54) : (tensor<1x15xi32>) -> tensor<1x15xf32>
%56 = "tosa.reshape"(%55) {new_shape = array<i64: 1, 1, 15>} : (tensor<1x15xf32>) -> tensor<1x1x15xf32>
%57 = "tosa.reshape"(%56) {new_shape = array<i64: 1, 1, 1, 15>} : (tensor<1x1x15xf32>) -> tensor<1x1x1x15xf32>
%58 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%59 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%60 = "tosa.mul"(%57, %59) {shift = 0 : i32} : (tensor<1x1x1x15xf32>, tensor) -> tensor<1x1x1x15xf32>
%61 = "tosa.sub"(%58, %60) : (tensor, tensor<1x1x1x15xf32>) -> tensor<1x1x1x15xf32>
%62 = "tosa.const"() {value = dense<-3.40282347E+38> : tensor} : () -> tensor
%63 = "tosa.mul"(%61, %62) {shift = 0 : i32} : (tensor<1x1x1x15xf32>, tensor) -> tensor<1x1x1x15xf32>
%64 = "tosa.const"() {value = dense<2.000000e+00> : tensor} : () -> tensor
%65 = "tosa.pow"(%52, %64) : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%66 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list
%67 = "tosa.reduce_sum"(%65) {axis = 2 : i64} : (tensor<1x15x512xf32>) -> tensor<1x15x1xf32>
%68 = "tosa.const"() {value = dense<5.120000e+02> : tensor} : () -> tensor
%69 = "tosa.reciprocal"(%68) : (tensor) -> tensor
%70 = "tosa.mul"(%67, %69) {shift = 0 : i32} : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%71 = "tosa.const"() {value = dense<9.99999997E-7> : tensor} : () -> tensor
%72 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%73 = "tosa.mul"(%71, %72) {shift = 0 : i32} : (tensor, tensor) -> tensor
%74 = "tosa.add"(%70, %73) : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%75 = "tosa.rsqrt"(%74) : (tensor<1x15x1xf32>) -> tensor<1x15x1xf32>
%76 = "tosa.mul"(%52, %75) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor<1x15x1xf32>) -> tensor<1x15x512xf32>
%77 = "tosa.mul"(%13, %76) {shift = 0 : i32} : (tensor<512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%78 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%79 = "tosa.transpose"(%12, %78) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%80 = torch.prim.ListConstruct %int15, %int512 : (!torch.int, !torch.int) -> !torch.list<int>
%81 = "tosa.reshape"(%77) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%82 = "tosa.reshape"(%81) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%83 = "tosa.reshape"(%79) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%84 = "tosa.matmul"(%82, %83) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%85 = "tosa.reshape"(%84) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_0 = tensor.cast %85 : tensor<15x512xf32> to tensor<15x512xf32>
%86 = torch.prim.ListConstruct %int1, %int15, %int512 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%87 = "tosa.reshape"(%cast_0) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%88 = torch.prim.ListConstruct %int1, %int-1, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%89 = "tosa.reshape"(%87) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%90 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%91 = "tosa.transpose"(%89, %90) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%92 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%93 = "tosa.transpose"(%12, %92) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%94 = "tosa.reshape"(%77) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%95 = "tosa.reshape"(%94) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%96 = "tosa.reshape"(%93) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%97 = "tosa.matmul"(%95, %96) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%98 = "tosa.reshape"(%97) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_1 = tensor.cast %98 : tensor<15x512xf32> to tensor<15x512xf32>
%99 = "tosa.reshape"(%cast_1) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%100 = "tosa.reshape"(%99) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%101 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%102 = "tosa.transpose"(%100, %101) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%103 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%104 = "tosa.transpose"(%12, %103) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%105 = "tosa.reshape"(%77) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%106 = "tosa.reshape"(%105) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%107 = "tosa.reshape"(%104) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%108 = "tosa.matmul"(%106, %107) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%109 = "tosa.reshape"(%108) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_2 = tensor.cast %109 : tensor<15x512xf32> to tensor<15x512xf32>
%110 = "tosa.reshape"(%cast_2) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%111 = "tosa.reshape"(%110) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%112 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%113 = "tosa.transpose"(%111, %112) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%114 = "tosa.const"() {value = dense<[0, 1, 3, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
%115 = "tosa.transpose"(%102, %114) : (tensor<1x8x15x64xf32>, tensor<4xi32>) -> tensor<1x8x64x15xf32>
%116 = torch.prim.ListConstruct %int1, %int8, %int15, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%117 = torch.prim.ListConstruct %int8, %int15, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%118 = "tosa.reshape"(%91) {new_shape = array<i64: 8, 15, 64>} : (tensor<1x8x15x64xf32>) -> tensor<8x15x64xf32>
%119 = torch.prim.ListConstruct %int1, %int8, %int64, %int15 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%120 = torch.prim.ListConstruct %int8, %int64, %int15 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%121 = "tosa.reshape"(%115) {new_shape = array<i64: 8, 64, 15>} : (tensor<1x8x64x15xf32>) -> tensor<8x64x15xf32>
%122 = "tosa.matmul"(%118, %121) : (tensor<8x15x64xf32>, tensor<8x64x15xf32>) -> tensor<8x15x15xf32>
%cast_3 = tensor.cast %122 : tensor<8x15x15xf32> to tensor<8x15x15xf32>
%123 = torch.prim.ListConstruct %int1, %int8, %int15, %int15 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%124 = "tosa.reshape"(%cast_3) {new_shape = array<i64: 1, 8, 15, 15>} : (tensor<8x15x15xf32>) -> tensor<1x8x15x15xf32>
%125 = "tosa.const"() {value = dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]> : tensor<15xi64>} : () -> tensor<15xi64>
%126 = "tosa.cast"(%125) : (tensor<15xi64>) -> tensor<15xi64>
%127 = "tosa.reshape"(%126) {new_shape = array<i64: 15, 1>} : (tensor<15xi64>) -> tensor<15x1xi64>
%128 = "tosa.const"() {value = dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]> : tensor<15xi64>} : () -> tensor<15xi64>
%129 = "tosa.cast"(%128) : (tensor<15xi64>) -> tensor<15xi64>
%130 = "tosa.reshape"(%129) {new_shape = array<i64: 1, 15>} : (tensor<15xi64>) -> tensor<1x15xi64>
%131 = "tosa.cast"(%127) : (tensor<15x1xi64>) -> tensor<15x1xi32>
%132 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor
%133 = "tosa.mul"(%131, %132) {shift = 0 : i32} : (tensor<15x1xi32>, tensor) -> tensor<15x1xi32>
%134 = "tosa.cast"(%130) : (tensor<1x15xi64>) -> tensor<1x15xi32>
%135 = "tosa.sub"(%134, %133) : (tensor<1x15xi32>, tensor<15x1xi32>) -> tensor<15x15xi32>
%136 = "tosa.cast"(%135) : (tensor<15x15xi32>) -> tensor<15x15xi64>
%137 = torch_c.from_builtin_tensor %136 : tensor<15x15xi64> -> !torch.vtensor<[15,15],si64>
%138 = "tosa.const"() {value = dense<0> : tensor} : () -> tensor
%139 = "tosa.greater"(%136, %138) : (tensor<15x15xi64>, tensor) -> tensor<15x15xi1>
%140 = "tosa.cast"(%139) : (tensor<15x15xi1>) -> tensor<15x15xi64>
%141 = "tosa.const"() {value = dense<16> : tensor} : () -> tensor
%142 = "tosa.mul"(%140, %141) {shift = 0 : i32} : (tensor<15x15xi64>, tensor) -> tensor<15x15xi64>
%143 = "tosa.const"() {value = dense<0> : tensor} : () -> tensor
%144 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor
%145 = "tosa.mul"(%143, %144) {shift = 0 : i32} : (tensor, tensor) -> tensor
%146 = "tosa.cast"(%142) : (tensor<15x15xi64>) -> tensor<15x15xi32>
%147 = "tosa.add"(%146, %145) : (tensor<15x15xi32>, tensor) -> tensor<15x15xi32>
%148 = "tosa.cast"(%147) : (tensor<15x15xi32>) -> tensor<15x15xi64>
%149 = torch.aten.abs %137 : !torch.vtensor<[15,15],si64> -> !torch.vtensor<[15,15],si64>
%150 = torch_c.to_builtin_tensor %149 : !torch.vtensor<[15,15],si64> -> tensor<15x15xi64>
%151 = "tosa.const"() {value = dense<8> : tensor} : () -> tensor
%152 = "tosa.greater"(%151, %150) : (tensor, tensor<15x15xi64>) -> tensor<15x15xi1>
%153 = "tosa.cast"(%150) : (tensor<15x15xi64>) -> tensor<15x15xf32>
%154 = "tosa.const"() {value = dense<8.000000e+00> : tensor} : () -> tensor
%155 = "tosa.reciprocal"(%154) : (tensor) -> tensor
%156 = "tosa.mul"(%153, %155) {shift = 0 : i32} : (tensor<15x15xf32>, tensor) -> tensor<15x15xf32>
%157 = "tosa.log"(%156) : (tensor<15x15xf32>) -> tensor<15x15xf32>
%158 = "tosa.const"() {value = dense<2.77258873> : tensor} : () -> tensor
%159 = "tosa.reciprocal"(%158) : (tensor) -> tensor
%160 = "tosa.mul"(%157, %159) {shift = 0 : i32} : (tensor<15x15xf32>, tensor) -> tensor<15x15xf32>
%161 = "tosa.const"() {value = dense<8.000000e+00> : tensor} : () -> tensor
%162 = "tosa.mul"(%160, %161) {shift = 0 : i32} : (tensor<15x15xf32>, tensor) -> tensor<15x15xf32>
%163 = "tosa.cast"(%162) : (tensor<15x15xf32>) -> tensor<15x15xi64>
%164 = "tosa.const"() {value = dense<8> : tensor} : () -> tensor
%165 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor
%166 = "tosa.mul"(%164, %165) {shift = 0 : i32} : (tensor, tensor) -> tensor
%167 = "tosa.cast"(%163) : (tensor<15x15xi64>) -> tensor<15x15xi32>
%168 = "tosa.add"(%167, %166) : (tensor<15x15xi32>, tensor) -> tensor<15x15xi32>
%169 = "tosa.cast"(%168) : (tensor<15x15xi32>) -> tensor<15x15xi64>
%170 = "tosa.const"() {value = dense<15> : tensor} : () -> tensor
%171 = torch.prim.ListConstruct %int15, %int15 : (!torch.int, !torch.int) -> !torch.list
%172 = "tosa.const"() {value = dense<0> : tensor<15x15xi64>} : () -> tensor<15x15xi64>
%173 = "tosa.add"(%170, %172) : (tensor, tensor<15x15xi64>) -> tensor<15x15xi64>
%174 = "tosa.minimum"(%169, %173) : (tensor<15x15xi64>, tensor<15x15xi64>) -> tensor<15x15xi64>
%175 = "tosa.select"(%152, %150, %174) : (tensor<15x15xi1>, tensor<15x15xi64>, tensor<15x15xi64>) -> tensor<15x15xi64>
%176 = "tosa.cast"(%175) : (tensor<15x15xi64>) -> tensor<15x15xi32>
%177 = "tosa.const"() {value = dense<1> : tensor} : () -> tensor
%178 = "tosa.mul"(%176, %177) {shift = 0 : i32} : (tensor<15x15xi32>, tensor) -> tensor<15x15xi32>
%179 = "tosa.cast"(%148) : (tensor<15x15xi64>) -> tensor<15x15xi32>
%180 = "tosa.add"(%179, %178) : (tensor<15x15xi32>, tensor<15x15xi32>) -> tensor<15x15xi32>
%181 = "tosa.cast"(%180) : (tensor<15x15xi32>) -> tensor<15x15xi64>
%182 = "tosa.reshape"(%11) {new_shape = array<i64: 1, 32, 8>} : (tensor<32x8xf32>) -> tensor<1x32x8xf32>
%183 = "tosa.reshape"(%181) {new_shape = array<i64: 1, 225>} : (tensor<15x15xi64>) -> tensor<1x225xi64>
%184 = "tosa.cast"(%183) : (tensor<1x225xi64>) -> tensor<1x225xi32>
%185 = "tosa.gather"(%182, %184) : (tensor<1x32x8xf32>, tensor<1x225xi32>) -> tensor<1x225x8xf32>
%186 = "tosa.reshape"(%185) {new_shape = array<i64: 15, 15, 8>} : (tensor<1x225x8xf32>) -> tensor<15x15x8xf32>
%187 = torch.prim.ListConstruct %int2, %int0, %int1 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%188 = "tosa.const"() {value = dense<[2, 0, 1]> : tensor<3xi64>} : () -> tensor<3xi64>
%189 = "tosa.transpose"(%186, %188) : (tensor<15x15x8xf32>, tensor<3xi64>) -> tensor<8x15x15xf32>
%190 = "tosa.reshape"(%189) {new_shape = array<i64: 1, 8, 15, 15>} : (tensor<8x15x15xf32>) -> tensor<1x8x15x15xf32>
%191 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%192 = "tosa.mul"(%63, %191) {shift = 0 : i32} : (tensor<1x1x1x15xf32>, tensor) -> tensor<1x1x1x15xf32>
%193 = "tosa.add"(%190, %192) : (tensor<1x8x15x15xf32>, tensor<1x1x1x15xf32>) -> tensor<1x8x15x15xf32>
%194 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%195 = "tosa.mul"(%193, %194) {shift = 0 : i32} : (tensor<1x8x15x15xf32>, tensor) -> tensor<1x8x15x15xf32>
%196 = "tosa.add"(%124, %195) : (tensor<1x8x15x15xf32>, tensor<1x8x15x15xf32>) -> tensor<1x8x15x15xf32>
%197 = "tosa.reduce_max"(%196) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x1xf32>
%198 = "tosa.argmax"(%196) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15xi64>
%199 = "tosa.reshape"(%198) {new_shape = array<i64: 1, 8, 15, 1>} : (tensor<1x8x15xi64>) -> tensor<1x8x15x1xi64>
%200 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%201 = "tosa.mul"(%197, %200) {shift = 0 : i32} : (tensor<1x8x15x1xf32>, tensor) -> tensor<1x8x15x1xf32>
%202 = "tosa.sub"(%196, %201) : (tensor<1x8x15x15xf32>, tensor<1x8x15x1xf32>) -> tensor<1x8x15x15xf32>
%203 = "tosa.exp"(%202) : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x15xf32>
%204 = "tosa.reduce_sum"(%203) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x1xf32>
%205 = "tosa.reciprocal"(%204) : (tensor<1x8x15x1xf32>) -> tensor<1x8x15x1xf32>
%206 = "tosa.mul"(%203, %205) {shift = 0 : i32} : (tensor<1x8x15x15xf32>, tensor<1x8x15x1xf32>) -> tensor<1x8x15x15xf32>
%207 = torch.prim.ListConstruct %int8, %int15, %int15 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%208 = "tosa.reshape"(%206) {new_shape = array<i64: 8, 15, 15>} : (tensor<1x8x15x15xf32>) -> tensor<8x15x15xf32>
%209 = "tosa.reshape"(%113) {new_shape = array<i64: 8, 15, 64>} : (tensor<1x8x15x64xf32>) -> tensor<8x15x64xf32>
%210 = "tosa.matmul"(%208, %209) : (tensor<8x15x15xf32>, tensor<8x15x64xf32>) -> tensor<8x15x64xf32>
%cast_4 = tensor.cast %210 : tensor<8x15x64xf32> to tensor<8x15x64xf32>
%211 = "tosa.reshape"(%cast_4) {new_shape = array<i64: 1, 8, 15, 64>} : (tensor<8x15x64xf32>) -> tensor<1x8x15x64xf32>
%212 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%213 = "tosa.transpose"(%211, %212) : (tensor<1x8x15x64xf32>, tensor<4xi32>) -> tensor<1x15x8x64xf32>
%214 = "tosa.cast"(%213) : (tensor<1x15x8x64xf32>) -> tensor<1x15x8x64xf32>
%215 = torch.prim.ListConstruct %int1, %int-1, %int512 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%216 = "tosa.reshape"(%214) {new_shape = array<i64: 1, 15, 512>} : (tensor<1x15x8x64xf32>) -> tensor<1x15x512xf32>
%217 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%218 = "tosa.transpose"(%12, %217) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%219 = "tosa.reshape"(%216) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%220 = "tosa.reshape"(%219) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%221 = "tosa.reshape"(%218) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%222 = "tosa.matmul"(%220, %221) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%223 = "tosa.reshape"(%222) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_5 = tensor.cast %223 : tensor<15x512xf32> to tensor<15x512xf32>
%224 = "tosa.reshape"(%cast_5) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%225 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%226 = "tosa.mul"(%224, %225) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%227 = "tosa.add"(%52, %226) : (tensor<1x15x512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%228 = "tosa.const"() {value = dense<2.000000e+00> : tensor} : () -> tensor
%229 = "tosa.pow"(%227, %228) : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%230 = "tosa.reduce_sum"(%229) {axis = 2 : i64} : (tensor<1x15x512xf32>) -> tensor<1x15x1xf32>
%231 = "tosa.const"() {value = dense<5.120000e+02> : tensor} : () -> tensor
%232 = "tosa.reciprocal"(%231) : (tensor) -> tensor
%233 = "tosa.mul"(%230, %232) {shift = 0 : i32} : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%234 = "tosa.const"() {value = dense<9.99999997E-7> : tensor} : () -> tensor
%235 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%236 = "tosa.mul"(%234, %235) {shift = 0 : i32} : (tensor, tensor) -> tensor
%237 = "tosa.add"(%233, %236) : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%238 = "tosa.rsqrt"(%237) : (tensor<1x15x1xf32>) -> tensor<1x15x1xf32>
%239 = "tosa.mul"(%227, %238) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor<1x15x1xf32>) -> tensor<1x15x512xf32>
%240 = "tosa.mul"(%13, %239) {shift = 0 : i32} : (tensor<512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%241 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%242 = "tosa.transpose"(%10, %241) : (tensor<2048x512xf32>, tensor<2xi32>) -> tensor<512x2048xf32>
%243 = "tosa.reshape"(%240) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%244 = "tosa.reshape"(%243) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%245 = "tosa.reshape"(%242) {new_shape = array<i64: 1, 512, 2048>} : (tensor<512x2048xf32>) -> tensor<1x512x2048xf32>
%246 = "tosa.matmul"(%244, %245) : (tensor<1x15x512xf32>, tensor<1x512x2048xf32>) -> tensor<1x15x2048xf32>
%247 = "tosa.reshape"(%246) {new_shape = array<i64: 15, 2048>} : (tensor<1x15x2048xf32>) -> tensor<15x2048xf32>
%cast_6 = tensor.cast %247 : tensor<15x2048xf32> to tensor<15x2048xf32>
%248 = torch.prim.ListConstruct %int1, %int15, %int2048 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%249 = "tosa.reshape"(%cast_6) {new_shape = array<i64: 1, 15, 2048>} : (tensor<15x2048xf32>) -> tensor<1x15x2048xf32>
%250 = "tosa.clamp"(%249) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x15x2048xf32>) -> tensor<1x15x2048xf32>
%251 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%252 = "tosa.transpose"(%9, %251) : (tensor<512x2048xf32>, tensor<2xi32>) -> tensor<2048x512xf32>
%253 = torch.prim.ListConstruct %int15, %int2048 : (!torch.int, !torch.int) -> !torch.list<int>
%254 = "tosa.reshape"(%250) {new_shape = array<i64: 15, 2048>} : (tensor<1x15x2048xf32>) -> tensor<15x2048xf32>
%255 = "tosa.reshape"(%254) {new_shape = array<i64: 1, 15, 2048>} : (tensor<15x2048xf32>) -> tensor<1x15x2048xf32>
%256 = "tosa.reshape"(%252) {new_shape = array<i64: 1, 2048, 512>} : (tensor<2048x512xf32>) -> tensor<1x2048x512xf32>
%257 = "tosa.matmul"(%255, %256) : (tensor<1x15x2048xf32>, tensor<1x2048x512xf32>) -> tensor<1x15x512xf32>
%258 = "tosa.reshape"(%257) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_7 = tensor.cast %258 : tensor<15x512xf32> to tensor<15x512xf32>
%259 = "tosa.reshape"(%cast_7) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%260 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%261 = "tosa.mul"(%259, %260) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%262 = "tosa.add"(%227, %261) : (tensor<1x15x512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%263 = "tosa.const"() {value = dense<2.000000e+00> : tensor} : () -> tensor
%264 = "tosa.pow"(%262, %263) : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%265 = "tosa.reduce_sum"(%264) {axis = 2 : i64} : (tensor<1x15x512xf32>) -> tensor<1x15x1xf32>
%266 = "tosa.const"() {value = dense<5.120000e+02> : tensor} : () -> tensor
%267 = "tosa.reciprocal"(%266) : (tensor) -> tensor
%268 = "tosa.mul"(%265, %267) {shift = 0 : i32} : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%269 = "tosa.const"() {value = dense<9.99999997E-7> : tensor} : () -> tensor
%270 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%271 = "tosa.mul"(%269, %270) {shift = 0 : i32} : (tensor, tensor) -> tensor
%272 = "tosa.add"(%268, %271) : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%273 = "tosa.rsqrt"(%272) : (tensor<1x15x1xf32>) -> tensor<1x15x1xf32>
%274 = "tosa.mul"(%262, %273) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor<1x15x1xf32>) -> tensor<1x15x512xf32>
%275 = "tosa.mul"(%13, %274) {shift = 0 : i32} : (tensor<512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%276 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%277 = "tosa.transpose"(%12, %276) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%278 = "tosa.reshape"(%275) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%279 = "tosa.reshape"(%278) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%280 = "tosa.reshape"(%277) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%281 = "tosa.matmul"(%279, %280) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%282 = "tosa.reshape"(%281) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_8 = tensor.cast %282 : tensor<15x512xf32> to tensor<15x512xf32>
%283 = "tosa.reshape"(%cast_8) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%284 = "tosa.reshape"(%283) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%285 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%286 = "tosa.transpose"(%284, %285) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%287 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%288 = "tosa.transpose"(%12, %287) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%289 = "tosa.reshape"(%275) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%290 = "tosa.reshape"(%289) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%291 = "tosa.reshape"(%288) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%292 = "tosa.matmul"(%290, %291) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%293 = "tosa.reshape"(%292) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_9 = tensor.cast %293 : tensor<15x512xf32> to tensor<15x512xf32>
%294 = "tosa.reshape"(%cast_9) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%295 = "tosa.reshape"(%294) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%296 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%297 = "tosa.transpose"(%295, %296) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%298 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%299 = "tosa.transpose"(%12, %298) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%300 = "tosa.reshape"(%275) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%301 = "tosa.reshape"(%300) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%302 = "tosa.reshape"(%299) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%303 = "tosa.matmul"(%301, %302) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%304 = "tosa.reshape"(%303) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_10 = tensor.cast %304 : tensor<15x512xf32> to tensor<15x512xf32>
%305 = "tosa.reshape"(%cast_10) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%306 = "tosa.reshape"(%305) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%307 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%308 = "tosa.transpose"(%306, %307) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%309 = "tosa.const"() {value = dense<[0, 1, 3, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
%310 = "tosa.transpose"(%297, %309) : (tensor<1x8x15x64xf32>, tensor<4xi32>) -> tensor<1x8x64x15xf32>
%311 = "tosa.reshape"(%286) {new_shape = array<i64: 8, 15, 64>} : (tensor<1x8x15x64xf32>) -> tensor<8x15x64xf32>
%312 = "tosa.reshape"(%310) {new_shape = array<i64: 8, 64, 15>} : (tensor<1x8x64x15xf32>) -> tensor<8x64x15xf32>
%313 = "tosa.matmul"(%311, %312) : (tensor<8x15x64xf32>, tensor<8x64x15xf32>) -> tensor<8x15x15xf32>
%cast_11 = tensor.cast %313 : tensor<8x15x15xf32> to tensor<8x15x15xf32>
%314 = "tosa.reshape"(%cast_11) {new_shape = array<i64: 1, 8, 15, 15>} : (tensor<8x15x15xf32>) -> tensor<1x8x15x15xf32>
%315 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%316 = "tosa.mul"(%193, %315) {shift = 0 : i32} : (tensor<1x8x15x15xf32>, tensor) -> tensor<1x8x15x15xf32>
%317 = "tosa.add"(%314, %316) : (tensor<1x8x15x15xf32>, tensor<1x8x15x15xf32>) -> tensor<1x8x15x15xf32>
%318 = "tosa.reduce_max"(%317) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x1xf32>
%319 = "tosa.argmax"(%317) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15xi64>
%320 = "tosa.reshape"(%319) {new_shape = array<i64: 1, 8, 15, 1>} : (tensor<1x8x15xi64>) -> tensor<1x8x15x1xi64>
%321 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%322 = "tosa.mul"(%318, %321) {shift = 0 : i32} : (tensor<1x8x15x1xf32>, tensor) -> tensor<1x8x15x1xf32>
%323 = "tosa.sub"(%317, %322) : (tensor<1x8x15x15xf32>, tensor<1x8x15x1xf32>) -> tensor<1x8x15x15xf32>
%324 = "tosa.exp"(%323) : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x15xf32>
%325 = "tosa.reduce_sum"(%324) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x1xf32>
%326 = "tosa.reciprocal"(%325) : (tensor<1x8x15x1xf32>) -> tensor<1x8x15x1xf32>
%327 = "tosa.mul"(%324, %326) {shift = 0 : i32} : (tensor<1x8x15x15xf32>, tensor<1x8x15x1xf32>) -> tensor<1x8x15x15xf32>
%328 = "tosa.reshape"(%327) {new_shape = array<i64: 8, 15, 15>} : (tensor<1x8x15x15xf32>) -> tensor<8x15x15xf32>
%329 = "tosa.reshape"(%308) {new_shape = array<i64: 8, 15, 64>} : (tensor<1x8x15x64xf32>) -> tensor<8x15x64xf32>
%330 = "tosa.matmul"(%328, %329) : (tensor<8x15x15xf32>, tensor<8x15x64xf32>) -> tensor<8x15x64xf32>
%cast_12 = tensor.cast %330 : tensor<8x15x64xf32> to tensor<8x15x64xf32>
%331 = "tosa.reshape"(%cast_12) {new_shape = array<i64: 1, 8, 15, 64>} : (tensor<8x15x64xf32>) -> tensor<1x8x15x64xf32>
%332 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%333 = "tosa.transpose"(%331, %332) : (tensor<1x8x15x64xf32>, tensor<4xi32>) -> tensor<1x15x8x64xf32>
%334 = "tosa.cast"(%333) : (tensor<1x15x8x64xf32>) -> tensor<1x15x8x64xf32>
%335 = "tosa.reshape"(%334) {new_shape = array<i64: 1, 15, 512>} : (tensor<1x15x8x64xf32>) -> tensor<1x15x512xf32>
%336 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%337 = "tosa.transpose"(%12, %336) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%338 = "tosa.reshape"(%335) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%339 = "tosa.reshape"(%338) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%340 = "tosa.reshape"(%337) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%341 = "tosa.matmul"(%339, %340) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%342 = "tosa.reshape"(%341) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_13 = tensor.cast %342 : tensor<15x512xf32> to tensor<15x512xf32>
%343 = "tosa.reshape"(%cast_13) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%344 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%345 = "tosa.mul"(%343, %344) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%346 = "tosa.add"(%262, %345) : (tensor<1x15x512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%347 = "tosa.const"() {value = dense<2.000000e+00> : tensor} : () -> tensor
%348 = "tosa.pow"(%346, %347) : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%349 = "tosa.reduce_sum"(%348) {axis = 2 : i64} : (tensor<1x15x512xf32>) -> tensor<1x15x1xf32>
%350 = "tosa.const"() {value = dense<5.120000e+02> : tensor} : () -> tensor
%351 = "tosa.reciprocal"(%350) : (tensor) -> tensor
%352 = "tosa.mul"(%349, %351) {shift = 0 : i32} : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%353 = "tosa.const"() {value = dense<9.99999997E-7> : tensor} : () -> tensor
%354 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%355 = "tosa.mul"(%353, %354) {shift = 0 : i32} : (tensor, tensor) -> tensor
%356 = "tosa.add"(%352, %355) : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%357 = "tosa.rsqrt"(%356) : (tensor<1x15x1xf32>) -> tensor<1x15x1xf32>
%358 = "tosa.mul"(%346, %357) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor<1x15x1xf32>) -> tensor<1x15x512xf32>
%359 = "tosa.mul"(%13, %358) {shift = 0 : i32} : (tensor<512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%360 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%361 = "tosa.transpose"(%10, %360) : (tensor<2048x512xf32>, tensor<2xi32>) -> tensor<512x2048xf32>
%362 = "tosa.reshape"(%359) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%363 = "tosa.reshape"(%362) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%364 = "tosa.reshape"(%361) {new_shape = array<i64: 1, 512, 2048>} : (tensor<512x2048xf32>) -> tensor<1x512x2048xf32>
%365 = "tosa.matmul"(%363, %364) : (tensor<1x15x512xf32>, tensor<1x512x2048xf32>) -> tensor<1x15x2048xf32>
%366 = "tosa.reshape"(%365) {new_shape = array<i64: 15, 2048>} : (tensor<1x15x2048xf32>) -> tensor<15x2048xf32>
%cast_14 = tensor.cast %366 : tensor<15x2048xf32> to tensor<15x2048xf32>
%367 = "tosa.reshape"(%cast_14) {new_shape = array<i64: 1, 15, 2048>} : (tensor<15x2048xf32>) -> tensor<1x15x2048xf32>
%368 = "tosa.clamp"(%367) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x15x2048xf32>) -> tensor<1x15x2048xf32>
%369 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%370 = "tosa.transpose"(%9, %369) : (tensor<512x2048xf32>, tensor<2xi32>) -> tensor<2048x512xf32>
%371 = "tosa.reshape"(%368) {new_shape = array<i64: 15, 2048>} : (tensor<1x15x2048xf32>) -> tensor<15x2048xf32>
%372 = "tosa.reshape"(%371) {new_shape = array<i64: 1, 15, 2048>} : (tensor<15x2048xf32>) -> tensor<1x15x2048xf32>
%373 = "tosa.reshape"(%370) {new_shape = array<i64: 1, 2048, 512>} : (tensor<2048x512xf32>) -> tensor<1x2048x512xf32>
%374 = "tosa.matmul"(%372, %373) : (tensor<1x15x2048xf32>, tensor<1x2048x512xf32>) -> tensor<1x15x512xf32>
%375 = "tosa.reshape"(%374) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_15 = tensor.cast %375 : tensor<15x512xf32> to tensor<15x512xf32>
%376 = "tosa.reshape"(%cast_15) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%377 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%378 = "tosa.mul"(%376, %377) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%379 = "tosa.add"(%346, %378) : (tensor<1x15x512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%380 = "tosa.const"() {value = dense<2.000000e+00> : tensor} : () -> tensor
%381 = "tosa.pow"(%379, %380) : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%382 = "tosa.reduce_sum"(%381) {axis = 2 : i64} : (tensor<1x15x512xf32>) -> tensor<1x15x1xf32>
%383 = "tosa.const"() {value = dense<5.120000e+02> : tensor} : () -> tensor
%384 = "tosa.reciprocal"(%383) : (tensor) -> tensor
%385 = "tosa.mul"(%382, %384) {shift = 0 : i32} : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%386 = "tosa.const"() {value = dense<9.99999997E-7> : tensor} : () -> tensor
%387 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%388 = "tosa.mul"(%386, %387) {shift = 0 : i32} : (tensor, tensor) -> tensor
%389 = "tosa.add"(%385, %388) : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%390 = "tosa.rsqrt"(%389) : (tensor<1x15x1xf32>) -> tensor<1x15x1xf32>
%391 = "tosa.mul"(%379, %390) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor<1x15x1xf32>) -> tensor<1x15x512xf32>
%392 = "tosa.mul"(%13, %391) {shift = 0 : i32} : (tensor<512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%393 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%394 = "tosa.transpose"(%12, %393) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%395 = "tosa.reshape"(%392) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%396 = "tosa.reshape"(%395) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%397 = "tosa.reshape"(%394) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%398 = "tosa.matmul"(%396, %397) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%399 = "tosa.reshape"(%398) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_16 = tensor.cast %399 : tensor<15x512xf32> to tensor<15x512xf32>
%400 = "tosa.reshape"(%cast_16) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%401 = "tosa.reshape"(%400) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%402 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%403 = "tosa.transpose"(%401, %402) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%404 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%405 = "tosa.transpose"(%12, %404) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%406 = "tosa.reshape"(%392) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%407 = "tosa.reshape"(%406) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%408 = "tosa.reshape"(%405) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%409 = "tosa.matmul"(%407, %408) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%410 = "tosa.reshape"(%409) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_17 = tensor.cast %410 : tensor<15x512xf32> to tensor<15x512xf32>
%411 = "tosa.reshape"(%cast_17) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%412 = "tosa.reshape"(%411) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%413 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%414 = "tosa.transpose"(%412, %413) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%415 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%416 = "tosa.transpose"(%12, %415) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%417 = "tosa.reshape"(%392) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%418 = "tosa.reshape"(%417) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%419 = "tosa.reshape"(%416) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%420 = "tosa.matmul"(%418, %419) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%421 = "tosa.reshape"(%420) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_18 = tensor.cast %421 : tensor<15x512xf32> to tensor<15x512xf32>
%422 = "tosa.reshape"(%cast_18) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%423 = "tosa.reshape"(%422) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%424 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%425 = "tosa.transpose"(%423, %424) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%426 = "tosa.const"() {value = dense<[0, 1, 3, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
%427 = "tosa.transpose"(%414, %426) : (tensor<1x8x15x64xf32>, tensor<4xi32>) -> tensor<1x8x64x15xf32>
%428 = "tosa.reshape"(%403) {new_shape = array<i64: 8, 15, 64>} : (tensor<1x8x15x64xf32>) -> tensor<8x15x64xf32>
%429 = "tosa.reshape"(%427) {new_shape = array<i64: 8, 64, 15>} : (tensor<1x8x64x15xf32>) -> tensor<8x64x15xf32>
%430 = "tosa.matmul"(%428, %429) : (tensor<8x15x64xf32>, tensor<8x64x15xf32>) -> tensor<8x15x15xf32>
%cast_19 = tensor.cast %430 : tensor<8x15x15xf32> to tensor<8x15x15xf32>
%431 = "tosa.reshape"(%cast_19) {new_shape = array<i64: 1, 8, 15, 15>} : (tensor<8x15x15xf32>) -> tensor<1x8x15x15xf32>
%432 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%433 = "tosa.mul"(%193, %432) {shift = 0 : i32} : (tensor<1x8x15x15xf32>, tensor) -> tensor<1x8x15x15xf32>
%434 = "tosa.add"(%431, %433) : (tensor<1x8x15x15xf32>, tensor<1x8x15x15xf32>) -> tensor<1x8x15x15xf32>
%435 = "tosa.reduce_max"(%434) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x1xf32>
%436 = "tosa.argmax"(%434) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15xi64>
%437 = "tosa.reshape"(%436) {new_shape = array<i64: 1, 8, 15, 1>} : (tensor<1x8x15xi64>) -> tensor<1x8x15x1xi64>
%438 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%439 = "tosa.mul"(%435, %438) {shift = 0 : i32} : (tensor<1x8x15x1xf32>, tensor) -> tensor<1x8x15x1xf32>
%440 = "tosa.sub"(%434, %439) : (tensor<1x8x15x15xf32>, tensor<1x8x15x1xf32>) -> tensor<1x8x15x15xf32>
%441 = "tosa.exp"(%440) : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x15xf32>
%442 = "tosa.reduce_sum"(%441) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x1xf32>
%443 = "tosa.reciprocal"(%442) : (tensor<1x8x15x1xf32>) -> tensor<1x8x15x1xf32>
%444 = "tosa.mul"(%441, %443) {shift = 0 : i32} : (tensor<1x8x15x15xf32>, tensor<1x8x15x1xf32>) -> tensor<1x8x15x15xf32>
%445 = "tosa.reshape"(%444) {new_shape = array<i64: 8, 15, 15>} : (tensor<1x8x15x15xf32>) -> tensor<8x15x15xf32>
%446 = "tosa.reshape"(%425) {new_shape = array<i64: 8, 15, 64>} : (tensor<1x8x15x64xf32>) -> tensor<8x15x64xf32>
%447 = "tosa.matmul"(%445, %446) : (tensor<8x15x15xf32>, tensor<8x15x64xf32>) -> tensor<8x15x64xf32>
%cast_20 = tensor.cast %447 : tensor<8x15x64xf32> to tensor<8x15x64xf32>
%448 = "tosa.reshape"(%cast_20) {new_shape = array<i64: 1, 8, 15, 64>} : (tensor<8x15x64xf32>) -> tensor<1x8x15x64xf32>
%449 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%450 = "tosa.transpose"(%448, %449) : (tensor<1x8x15x64xf32>, tensor<4xi32>) -> tensor<1x15x8x64xf32>
%451 = "tosa.cast"(%450) : (tensor<1x15x8x64xf32>) -> tensor<1x15x8x64xf32>
%452 = "tosa.reshape"(%451) {new_shape = array<i64: 1, 15, 512>} : (tensor<1x15x8x64xf32>) -> tensor<1x15x512xf32>
%453 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%454 = "tosa.transpose"(%12, %453) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%455 = "tosa.reshape"(%452) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%456 = "tosa.reshape"(%455) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%457 = "tosa.reshape"(%454) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%458 = "tosa.matmul"(%456, %457) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%459 = "tosa.reshape"(%458) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_21 = tensor.cast %459 : tensor<15x512xf32> to tensor<15x512xf32>
%460 = "tosa.reshape"(%cast_21) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%461 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%462 = "tosa.mul"(%460, %461) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%463 = "tosa.add"(%379, %462) : (tensor<1x15x512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%464 = "tosa.const"() {value = dense<2.000000e+00> : tensor} : () -> tensor
%465 = "tosa.pow"(%463, %464) : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%466 = "tosa.reduce_sum"(%465) {axis = 2 : i64} : (tensor<1x15x512xf32>) -> tensor<1x15x1xf32>
%467 = "tosa.const"() {value = dense<5.120000e+02> : tensor} : () -> tensor
%468 = "tosa.reciprocal"(%467) : (tensor) -> tensor
%469 = "tosa.mul"(%466, %468) {shift = 0 : i32} : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%470 = "tosa.const"() {value = dense<9.99999997E-7> : tensor} : () -> tensor
%471 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%472 = "tosa.mul"(%470, %471) {shift = 0 : i32} : (tensor, tensor) -> tensor
%473 = "tosa.add"(%469, %472) : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%474 = "tosa.rsqrt"(%473) : (tensor<1x15x1xf32>) -> tensor<1x15x1xf32>
%475 = "tosa.mul"(%463, %474) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor<1x15x1xf32>) -> tensor<1x15x512xf32>
%476 = "tosa.mul"(%13, %475) {shift = 0 : i32} : (tensor<512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%477 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%478 = "tosa.transpose"(%10, %477) : (tensor<2048x512xf32>, tensor<2xi32>) -> tensor<512x2048xf32>
%479 = "tosa.reshape"(%476) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%480 = "tosa.reshape"(%479) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%481 = "tosa.reshape"(%478) {new_shape = array<i64: 1, 512, 2048>} : (tensor<512x2048xf32>) -> tensor<1x512x2048xf32>
%482 = "tosa.matmul"(%480, %481) : (tensor<1x15x512xf32>, tensor<1x512x2048xf32>) -> tensor<1x15x2048xf32>
%483 = "tosa.reshape"(%482) {new_shape = array<i64: 15, 2048>} : (tensor<1x15x2048xf32>) -> tensor<15x2048xf32>
%cast_22 = tensor.cast %483 : tensor<15x2048xf32> to tensor<15x2048xf32>
%484 = "tosa.reshape"(%cast_22) {new_shape = array<i64: 1, 15, 2048>} : (tensor<15x2048xf32>) -> tensor<1x15x2048xf32>
%485 = "tosa.clamp"(%484) {max_fp = 3.40282347E+38 : f32, max_int = 2147483647 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x15x2048xf32>) -> tensor<1x15x2048xf32>
%486 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%487 = "tosa.transpose"(%9, %486) : (tensor<512x2048xf32>, tensor<2xi32>) -> tensor<2048x512xf32>
%488 = "tosa.reshape"(%485) {new_shape = array<i64: 15, 2048>} : (tensor<1x15x2048xf32>) -> tensor<15x2048xf32>
%489 = "tosa.reshape"(%488) {new_shape = array<i64: 1, 15, 2048>} : (tensor<15x2048xf32>) -> tensor<1x15x2048xf32>
%490 = "tosa.reshape"(%487) {new_shape = array<i64: 1, 2048, 512>} : (tensor<2048x512xf32>) -> tensor<1x2048x512xf32>
%491 = "tosa.matmul"(%489, %490) : (tensor<1x15x2048xf32>, tensor<1x2048x512xf32>) -> tensor<1x15x512xf32>
%492 = "tosa.reshape"(%491) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_23 = tensor.cast %492 : tensor<15x512xf32> to tensor<15x512xf32>
%493 = "tosa.reshape"(%cast_23) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%494 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%495 = "tosa.mul"(%493, %494) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%496 = "tosa.add"(%463, %495) : (tensor<1x15x512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%497 = "tosa.const"() {value = dense<2.000000e+00> : tensor} : () -> tensor
%498 = "tosa.pow"(%496, %497) : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%499 = "tosa.reduce_sum"(%498) {axis = 2 : i64} : (tensor<1x15x512xf32>) -> tensor<1x15x1xf32>
%500 = "tosa.const"() {value = dense<5.120000e+02> : tensor} : () -> tensor
%501 = "tosa.reciprocal"(%500) : (tensor) -> tensor
%502 = "tosa.mul"(%499, %501) {shift = 0 : i32} : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%503 = "tosa.const"() {value = dense<9.99999997E-7> : tensor} : () -> tensor
%504 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%505 = "tosa.mul"(%503, %504) {shift = 0 : i32} : (tensor, tensor) -> tensor
%506 = "tosa.add"(%502, %505) : (tensor<1x15x1xf32>, tensor) -> tensor<1x15x1xf32>
%507 = "tosa.rsqrt"(%506) : (tensor<1x15x1xf32>) -> tensor<1x15x1xf32>
%508 = "tosa.mul"(%496, %507) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor<1x15x1xf32>) -> tensor<1x15x512xf32>
%509 = "tosa.mul"(%13, %508) {shift = 0 : i32} : (tensor<512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%510 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%511 = "tosa.transpose"(%12, %510) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%512 = "tosa.reshape"(%509) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%513 = "tosa.reshape"(%512) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%514 = "tosa.reshape"(%511) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%515 = "tosa.matmul"(%513, %514) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%516 = "tosa.reshape"(%515) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_24 = tensor.cast %516 : tensor<15x512xf32> to tensor<15x512xf32>
%517 = "tosa.reshape"(%cast_24) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%518 = "tosa.reshape"(%517) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%519 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%520 = "tosa.transpose"(%518, %519) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%521 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%522 = "tosa.transpose"(%12, %521) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%523 = "tosa.reshape"(%509) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%524 = "tosa.reshape"(%523) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%525 = "tosa.reshape"(%522) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%526 = "tosa.matmul"(%524, %525) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%527 = "tosa.reshape"(%526) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_25 = tensor.cast %527 : tensor<15x512xf32> to tensor<15x512xf32>
%528 = "tosa.reshape"(%cast_25) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%529 = "tosa.reshape"(%528) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%530 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%531 = "tosa.transpose"(%529, %530) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%532 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%533 = "tosa.transpose"(%12, %532) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%534 = "tosa.reshape"(%509) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%535 = "tosa.reshape"(%534) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%536 = "tosa.reshape"(%533) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%537 = "tosa.matmul"(%535, %536) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%538 = "tosa.reshape"(%537) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_26 = tensor.cast %538 : tensor<15x512xf32> to tensor<15x512xf32>
%539 = "tosa.reshape"(%cast_26) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%540 = "tosa.reshape"(%539) {new_shape = array<i64: 1, 15, 8, 64>} : (tensor<1x15x512xf32>) -> tensor<1x15x8x64xf32>
%541 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%542 = "tosa.transpose"(%540, %541) : (tensor<1x15x8x64xf32>, tensor<4xi32>) -> tensor<1x8x15x64xf32>
%543 = "tosa.const"() {value = dense<[0, 1, 3, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
%544 = "tosa.transpose"(%531, %543) : (tensor<1x8x15x64xf32>, tensor<4xi32>) -> tensor<1x8x64x15xf32>
%545 = "tosa.reshape"(%520) {new_shape = array<i64: 8, 15, 64>} : (tensor<1x8x15x64xf32>) -> tensor<8x15x64xf32>
%546 = "tosa.reshape"(%544) {new_shape = array<i64: 8, 64, 15>} : (tensor<1x8x64x15xf32>) -> tensor<8x64x15xf32>
%547 = "tosa.matmul"(%545, %546) : (tensor<8x15x64xf32>, tensor<8x64x15xf32>) -> tensor<8x15x15xf32>
%cast_27 = tensor.cast %547 : tensor<8x15x15xf32> to tensor<8x15x15xf32>
%548 = "tosa.reshape"(%cast_27) {new_shape = array<i64: 1, 8, 15, 15>} : (tensor<8x15x15xf32>) -> tensor<1x8x15x15xf32>
%549 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%550 = "tosa.mul"(%193, %549) {shift = 0 : i32} : (tensor<1x8x15x15xf32>, tensor) -> tensor<1x8x15x15xf32>
%551 = "tosa.add"(%548, %550) : (tensor<1x8x15x15xf32>, tensor<1x8x15x15xf32>) -> tensor<1x8x15x15xf32>
%552 = "tosa.reduce_max"(%551) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x1xf32>
%553 = "tosa.argmax"(%551) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15xi64>
%554 = "tosa.reshape"(%553) {new_shape = array<i64: 1, 8, 15, 1>} : (tensor<1x8x15xi64>) -> tensor<1x8x15x1xi64>
%555 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%556 = "tosa.mul"(%552, %555) {shift = 0 : i32} : (tensor<1x8x15x1xf32>, tensor) -> tensor<1x8x15x1xf32>
%557 = "tosa.sub"(%551, %556) : (tensor<1x8x15x15xf32>, tensor<1x8x15x1xf32>) -> tensor<1x8x15x15xf32>
%558 = "tosa.exp"(%557) : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x15xf32>
%559 = "tosa.reduce_sum"(%558) {axis = 3 : i64} : (tensor<1x8x15x15xf32>) -> tensor<1x8x15x1xf32>
%560 = "tosa.reciprocal"(%559) : (tensor<1x8x15x1xf32>) -> tensor<1x8x15x1xf32>
%561 = "tosa.mul"(%558, %560) {shift = 0 : i32} : (tensor<1x8x15x15xf32>, tensor<1x8x15x1xf32>) -> tensor<1x8x15x15xf32>
%562 = "tosa.reshape"(%561) {new_shape = array<i64: 8, 15, 15>} : (tensor<1x8x15x15xf32>) -> tensor<8x15x15xf32>
%563 = "tosa.reshape"(%542) {new_shape = array<i64: 8, 15, 64>} : (tensor<1x8x15x64xf32>) -> tensor<8x15x64xf32>
%564 = "tosa.matmul"(%562, %563) : (tensor<8x15x15xf32>, tensor<8x15x64xf32>) -> tensor<8x15x64xf32>
%cast_28 = tensor.cast %564 : tensor<8x15x64xf32> to tensor<8x15x64xf32>
%565 = "tosa.reshape"(%cast_28) {new_shape = array<i64: 1, 8, 15, 64>} : (tensor<8x15x64xf32>) -> tensor<1x8x15x64xf32>
%566 = "tosa.const"() {value = dense<[0, 2, 1, 3]> : tensor<4xi32>} : () -> tensor<4xi32>
%567 = "tosa.transpose"(%565, %566) : (tensor<1x8x15x64xf32>, tensor<4xi32>) -> tensor<1x15x8x64xf32>
%568 = "tosa.cast"(%567) : (tensor<1x15x8x64xf32>) -> tensor<1x15x8x64xf32>
%569 = "tosa.reshape"(%568) {new_shape = array<i64: 1, 15, 512>} : (tensor<1x15x8x64xf32>) -> tensor<1x15x512xf32>
%570 = "tosa.const"() {value = dense<[1, 0]> : tensor<2xi32>} : () -> tensor<2xi32>
%571 = "tosa.transpose"(%12, %570) : (tensor<512x512xf32>, tensor<2xi32>) -> tensor<512x512xf32>
%572 = "tosa.reshape"(%569) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%573 = "tosa.reshape"(%572) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%574 = "tosa.reshape"(%571) {new_shape = array<i64: 1, 512, 512>} : (tensor<512x512xf32>) -> tensor<1x512x512xf32>
%575 = "tosa.matmul"(%573, %574) : (tensor<1x15x512xf32>, tensor<1x512x512xf32>) -> tensor<1x15x512xf32>
%576 = "tosa.reshape"(%575) {new_shape = array<i64: 15, 512>} : (tensor<1x15x512xf32>) -> tensor<15x512xf32>
%cast_29 = tensor.cast %576 : tensor<15x512xf32> to tensor<15x512xf32>
%577 = "tosa.reshape"(%cast_29) {new_shape = array<i64: 1, 15, 512>} : (tensor<15x512xf32>) -> tensor<1x15x512xf32>
%578 = "tosa.const"() {value = dense<1.000000e+00> : tensor} : () -> tensor
%579 = "tosa.mul"(%577, %578) {shift = 0 : i32} : (tensor<1x15x512xf32>, tensor) -> tensor<1x15x512xf32>
%580 = "tosa.add"(%496, %579) : (tensor<1x15x512xf32>, tensor<1x15x512xf32>) -> tensor<1x15x512xf32>
%581 = "tosa.const"() {value = dense<2.000000e+00> : tensor} : () -> tensor