Skip to content

Instantly share code, notes, and snippets.

@AmosLewis
Created September 26, 2022 23:23
Show Gist options
  • Save AmosLewis/e71c0d1b621c34ce9e307c0dc86e2b81 to your computer and use it in GitHub Desktop.
gpttosatmp
#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
func.func @forward(%arg0: !torch.vtensor<[1,5],si64> loc(unknown)) -> !torch.vtensor<[1,5,50257],f32> {
%int5 = torch.constant.int 5 loc(#loc1)
%int1 = torch.constant.int 1 loc(#loc2)
%true = torch.constant.bool true loc(#loc3)
%float0.000000e00 = torch.constant.float 0.000000e+00 loc(#loc4)
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%131 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%132 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%133 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%134 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc0)
%135 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%136 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc0)
%137 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc0)
%138 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%139 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%140 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc0)
%141 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%142 = torch.vtensor.literal(dense<-3.40282347E+38> : tensor<f32>) : !torch.vtensor<[],f32> loc(#loc0)
%143 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1x1x1024x1024xui8>) : !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc0)
%144 = torch.vtensor.literal(dense<8.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> loc(#loc0)
%145 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x2304xf32>) : !torch.vtensor<[768,2304],f32> loc(#loc0)
%146 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2304xf32>) : !torch.vtensor<[2304],f32> loc(#loc0)
%147 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%148 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc0)
%149 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x768xf32>) : !torch.vtensor<[1024,768],f32> loc(#loc0)
%150 = torch.vtensor.literal(dense_resource<__elided__> : tensor<50257x768xf32>) : !torch.vtensor<[50257,768],f32> loc(#loc0)
%false = torch.constant.bool false loc(#loc5)
%none = torch.constant.none loc(#loc0)
%int-2 = torch.constant.int -2 loc(#loc6)
%int11 = torch.constant.int 11 loc(#loc7)
%int4 = torch.constant.int 4 loc(#loc8)
%int-1 = torch.constant.int -1 loc(#loc9)
%int0 = torch.constant.int 0 loc(#loc10)
%int768 = torch.constant.int 768 loc(#loc11)
%float1.000000e-05 = torch.constant.float 1.000000e-05 loc(#loc12)
%int2 = torch.constant.int 2 loc(#loc13)
%int2304 = torch.constant.int 2304 loc(#loc14)
%int1536 = torch.constant.int 1536 loc(#loc15)
%int12 = torch.constant.int 12 loc(#loc16)
%int64 = torch.constant.int 64 loc(#loc17)
%int3 = torch.constant.int 3 loc(#loc18)
%int9223372036854775807 = torch.constant.int 9223372036854775807 loc(#loc19)
%int3072 = torch.constant.int 3072 loc(#loc20)
%float5.000000e-01 = torch.constant.float 5.000000e-01 loc(#loc21)
%float3.000000e00 = torch.constant.float 3.000000e+00 loc(#loc22)
%float4.471500e-02 = torch.constant.float 4.471500e-02 loc(#loc23)
%float7.978850e-01 = torch.constant.float 0.79788456080286541 loc(#loc24)
%float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc25)
%int50257 = torch.constant.int 50257 loc(#loc26)
%cpu = torch.constant.device "cpu" loc(#loc0)
%151 = torch.prim.ListConstruct %int-1, %int5 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
%152 = torch.aten.view %arg0, %151 : !torch.vtensor<[1,5],si64>, !torch.list<int> -> !torch.vtensor<[1,5],si64> loc(#loc27)
%153 = torch.aten.arange.start_step %int0, %int5, %int1, %int4, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[5],si64> loc(#loc28)
%154 = torch.aten.unsqueeze %153, %int0 : !torch.vtensor<[5],si64>, !torch.int -> !torch.vtensor<[1,5],si64> loc(#loc29)
%155 = torch.aten.view %154, %151 : !torch.vtensor<[1,5],si64>, !torch.list<int> -> !torch.vtensor<[1,5],si64> loc(#loc30)
%156 = torch.aten.embedding %150, %152, %int-1, %false, %false : !torch.vtensor<[50257,768],f32>, !torch.vtensor<[1,5],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[1,5,768],f32> loc(#loc31)
%157 = torch.aten.embedding %149, %155, %int-1, %false, %false : !torch.vtensor<[1024,768],f32>, !torch.vtensor<[1,5],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[1,5,768],f32> loc(#loc32)
%158 = torch.aten.add.Tensor %156, %157, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc33)
%159 = torch.prim.ListConstruct %int768 : (!torch.int) -> !torch.list<int> loc(#loc0)
%result0, %result1, %result2 = torch.aten.native_layer_norm %158, %159, %148, %147, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc34)
%160 = torch.prim.ListConstruct %int-1, %int768 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
%161 = torch.aten.view %result0, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc35)
%162 = torch.aten.mm %161, %145 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc36)
%163 = torch.aten.mul.Scalar %146, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc36)
%164 = torch.aten.add.Tensor %163, %162, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc36)
%165 = torch.prim.ListConstruct %int1, %int5, %int2304 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc37)
%166 = torch.aten.view %164, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc38)
%167 = torch.aten.slice.Tensor %166, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc39)
%168 = torch.aten.slice.Tensor %166, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc40)
%169 = torch.aten.slice.Tensor %166, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc41)
%170 = torch.prim.ListConstruct %int1, %int5, %int12, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc42)
%171 = torch.aten.view %167, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc43)
%172 = torch.prim.ListConstruct %int0, %int2, %int1, %int3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
%173 = torch.aten.permute %171, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc44)
%174 = torch.aten.view %168, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc45)
%175 = torch.aten.permute %174, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc46)
%176 = torch.aten.view %169, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc47)
%177 = torch.aten.permute %176, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc48)
%178 = torch.aten.transpose.int %175, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc49)
%179 = torch.prim.ListConstruct %int1, %int12, %int5, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc50)
%180 = torch.aten.broadcast_to %173, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc51)
%181 = torch.prim.ListConstruct %int12, %int5, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc52)
%182 = torch.aten.view %180, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc53)
%183 = torch.prim.ListConstruct %int1, %int12, %int64, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc54)
%184 = torch.aten.broadcast_to %178, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc55)
%185 = torch.prim.ListConstruct %int12, %int64, %int5 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc56)
%186 = torch.aten.view %184, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc57)
%187 = torch.aten.bmm %182, %186 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc58)
%188 = torch.prim.ListConstruct %int1, %int12, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc59)
%189 = torch.aten.view %187, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc60)
%190 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc61)
%191 = torch.aten.div.Tensor %189, %190 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc62)
%192 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc63)
%193 = torch.aten.slice.Tensor %192, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc64)
%194 = torch.aten.slice.Tensor %193, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc65)
%195 = torch.aten.slice.Tensor %194, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc66)
%196 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc4)
%197 = torch.aten.to.dtype %196, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc4)
%198 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc4)
%199 = torch.aten.broadcast_to %197, %198 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc4)
%200 = torch.valsem.aten.copy %199, %195, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc4)
%201 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc67)
%202 = torch.aten.where.self %200, %191, %201 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc68)
%values, %indices = torch.aten.max.dim %202, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc3)
%203 = torch.aten.sub.Tensor %202, %values, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc3)
%204 = torch.aten.exp %203 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc3)
%205 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc3)
%206 = torch.aten.sum.dim_IntList %204, %205, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc3)
%207 = torch.aten.div.Tensor %204, %206 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc3)
%208 = torch.aten.broadcast_to %207, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc69)
%209 = torch.prim.ListConstruct %int12, %int5, %int5 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc70)
%210 = torch.aten.view %208, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc71)
%211 = torch.aten.broadcast_to %177, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc72)
%212 = torch.aten.view %211, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc73)
%213 = torch.aten.bmm %210, %212 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc74)
%214 = torch.aten.view %213, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc75)
%215 = torch.aten.permute %214, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc76)
%216 = torch.aten.clone %215, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc77)
%217 = torch.prim.ListConstruct %int1, %int5, %int768 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc78)
%218 = torch.aten.view %216, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc79)
%219 = torch.aten.view %218, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc80)
%220 = torch.aten.mm %219, %140 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc81)
%221 = torch.aten.mul.Scalar %141, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc81)
%222 = torch.aten.add.Tensor %221, %220, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc81)
%223 = torch.aten.view %222, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc82)
%224 = torch.aten.add.Tensor %223, %158, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc83)
%result0_0, %result1_1, %result2_2 = torch.aten.native_layer_norm %224, %159, %139, %138, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc84)
%225 = torch.aten.view %result0_0, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc85)
%226 = torch.aten.mm %225, %136 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc86)
%227 = torch.aten.mul.Scalar %137, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc86)
%228 = torch.aten.add.Tensor %227, %226, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc86)
%229 = torch.prim.ListConstruct %int1, %int5, %int3072 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc87)
%230 = torch.aten.view %228, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc88)
%231 = torch.aten.mul.Scalar %230, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc89)
%232 = torch.aten.pow.Tensor_Scalar %230, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc90)
%233 = torch.aten.mul.Scalar %232, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc91)
%234 = torch.aten.add.Tensor %230, %233, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc92)
%235 = torch.aten.mul.Scalar %234, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc93)
%236 = torch.aten.tanh %235 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc94)
%237 = torch.aten.add.Scalar %236, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc95)
%238 = torch.aten.mul.Tensor %231, %237 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc96)
%239 = torch.prim.ListConstruct %int-1, %int3072 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
%240 = torch.aten.view %238, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc97)
%241 = torch.aten.mm %240, %134 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc98)
%242 = torch.aten.mul.Scalar %135, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc98)
%243 = torch.aten.add.Tensor %242, %241, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc98)
%244 = torch.aten.view %243, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc99)
%245 = torch.aten.add.Tensor %224, %244, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc100)
%result0_3, %result1_4, %result2_5 = torch.aten.native_layer_norm %245, %159, %133, %132, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc101)
%246 = torch.aten.view %result0_3, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc102)
%247 = torch.aten.mm %246, %130 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc103)
%248 = torch.aten.mul.Scalar %131, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc103)
%249 = torch.aten.add.Tensor %248, %247, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc103)
%250 = torch.aten.view %249, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc104)
%251 = torch.aten.slice.Tensor %250, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc105)
%252 = torch.aten.slice.Tensor %250, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc106)
%253 = torch.aten.slice.Tensor %250, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc107)
%254 = torch.aten.view %251, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc108)
%255 = torch.aten.permute %254, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc109)
%256 = torch.aten.view %252, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc110)
%257 = torch.aten.permute %256, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc111)
%258 = torch.aten.view %253, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc112)
%259 = torch.aten.permute %258, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc113)
%260 = torch.aten.transpose.int %257, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc114)
%261 = torch.aten.broadcast_to %255, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc115)
%262 = torch.aten.view %261, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc116)
%263 = torch.aten.broadcast_to %260, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc117)
%264 = torch.aten.view %263, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc118)
%265 = torch.aten.bmm %262, %264 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc119)
%266 = torch.aten.view %265, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc120)
%267 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc121)
%268 = torch.aten.div.Tensor %266, %267 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc122)
%269 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc123)
%270 = torch.aten.slice.Tensor %269, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc124)
%271 = torch.aten.slice.Tensor %270, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc125)
%272 = torch.aten.slice.Tensor %271, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc126)
%273 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc127)
%274 = torch.aten.to.dtype %273, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc127)
%275 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc127)
%276 = torch.aten.broadcast_to %274, %275 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc127)
%277 = torch.valsem.aten.copy %276, %272, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc127)
%278 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc128)
%279 = torch.aten.where.self %277, %268, %278 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc129)
%values_6, %indices_7 = torch.aten.max.dim %279, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc130)
%280 = torch.aten.sub.Tensor %279, %values_6, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc130)
%281 = torch.aten.exp %280 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc130)
%282 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc130)
%283 = torch.aten.sum.dim_IntList %281, %282, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc130)
%284 = torch.aten.div.Tensor %281, %283 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc130)
%285 = torch.aten.broadcast_to %284, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc131)
%286 = torch.aten.view %285, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc132)
%287 = torch.aten.broadcast_to %259, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc133)
%288 = torch.aten.view %287, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc134)
%289 = torch.aten.bmm %286, %288 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc135)
%290 = torch.aten.view %289, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc136)
%291 = torch.aten.permute %290, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc137)
%292 = torch.aten.clone %291, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc138)
%293 = torch.aten.view %292, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc139)
%294 = torch.aten.view %293, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc140)
%295 = torch.aten.mm %294, %128 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc141)
%296 = torch.aten.mul.Scalar %129, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc141)
%297 = torch.aten.add.Tensor %296, %295, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc141)
%298 = torch.aten.view %297, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc142)
%299 = torch.aten.add.Tensor %298, %245, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc143)
%result0_8, %result1_9, %result2_10 = torch.aten.native_layer_norm %299, %159, %127, %126, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc144)
%300 = torch.aten.view %result0_8, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc145)
%301 = torch.aten.mm %300, %124 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc146)
%302 = torch.aten.mul.Scalar %125, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc146)
%303 = torch.aten.add.Tensor %302, %301, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc146)
%304 = torch.aten.view %303, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc147)
%305 = torch.aten.mul.Scalar %304, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc148)
%306 = torch.aten.pow.Tensor_Scalar %304, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc149)
%307 = torch.aten.mul.Scalar %306, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc150)
%308 = torch.aten.add.Tensor %304, %307, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc151)
%309 = torch.aten.mul.Scalar %308, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc152)
%310 = torch.aten.tanh %309 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc153)
%311 = torch.aten.add.Scalar %310, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc154)
%312 = torch.aten.mul.Tensor %305, %311 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc155)
%313 = torch.aten.view %312, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc156)
%314 = torch.aten.mm %313, %122 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc157)
%315 = torch.aten.mul.Scalar %123, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc157)
%316 = torch.aten.add.Tensor %315, %314, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc157)
%317 = torch.aten.view %316, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc158)
%318 = torch.aten.add.Tensor %299, %317, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc159)
%result0_11, %result1_12, %result2_13 = torch.aten.native_layer_norm %318, %159, %121, %120, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc160)
%319 = torch.aten.view %result0_11, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc161)
%320 = torch.aten.mm %319, %118 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc162)
%321 = torch.aten.mul.Scalar %119, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc162)
%322 = torch.aten.add.Tensor %321, %320, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc162)
%323 = torch.aten.view %322, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc163)
%324 = torch.aten.slice.Tensor %323, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc164)
%325 = torch.aten.slice.Tensor %323, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc165)
%326 = torch.aten.slice.Tensor %323, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc166)
%327 = torch.aten.view %324, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc167)
%328 = torch.aten.permute %327, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc168)
%329 = torch.aten.view %325, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc169)
%330 = torch.aten.permute %329, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc170)
%331 = torch.aten.view %326, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc171)
%332 = torch.aten.permute %331, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc172)
%333 = torch.aten.transpose.int %330, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc173)
%334 = torch.aten.broadcast_to %328, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc174)
%335 = torch.aten.view %334, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc175)
%336 = torch.aten.broadcast_to %333, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc176)
%337 = torch.aten.view %336, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc177)
%338 = torch.aten.bmm %335, %337 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc178)
%339 = torch.aten.view %338, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc179)
%340 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc180)
%341 = torch.aten.div.Tensor %339, %340 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc181)
%342 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc182)
%343 = torch.aten.slice.Tensor %342, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc183)
%344 = torch.aten.slice.Tensor %343, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc184)
%345 = torch.aten.slice.Tensor %344, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc185)
%346 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc186)
%347 = torch.aten.to.dtype %346, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc186)
%348 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc186)
%349 = torch.aten.broadcast_to %347, %348 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc186)
%350 = torch.valsem.aten.copy %349, %345, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc186)
%351 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc187)
%352 = torch.aten.where.self %350, %341, %351 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc188)
%values_14, %indices_15 = torch.aten.max.dim %352, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc189)
%353 = torch.aten.sub.Tensor %352, %values_14, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc189)
%354 = torch.aten.exp %353 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc189)
%355 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc189)
%356 = torch.aten.sum.dim_IntList %354, %355, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc189)
%357 = torch.aten.div.Tensor %354, %356 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc189)
%358 = torch.aten.broadcast_to %357, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc190)
%359 = torch.aten.view %358, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc191)
%360 = torch.aten.broadcast_to %332, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc192)
%361 = torch.aten.view %360, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc193)
%362 = torch.aten.bmm %359, %361 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc194)
%363 = torch.aten.view %362, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc195)
%364 = torch.aten.permute %363, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc196)
%365 = torch.aten.clone %364, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc197)
%366 = torch.aten.view %365, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc198)
%367 = torch.aten.view %366, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc199)
%368 = torch.aten.mm %367, %116 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc200)
%369 = torch.aten.mul.Scalar %117, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc200)
%370 = torch.aten.add.Tensor %369, %368, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc200)
%371 = torch.aten.view %370, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc201)
%372 = torch.aten.add.Tensor %371, %318, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc202)
%result0_16, %result1_17, %result2_18 = torch.aten.native_layer_norm %372, %159, %115, %114, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc203)
%373 = torch.aten.view %result0_16, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc204)
%374 = torch.aten.mm %373, %112 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc205)
%375 = torch.aten.mul.Scalar %113, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc205)
%376 = torch.aten.add.Tensor %375, %374, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc205)
%377 = torch.aten.view %376, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc206)
%378 = torch.aten.mul.Scalar %377, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc207)
%379 = torch.aten.pow.Tensor_Scalar %377, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc208)
%380 = torch.aten.mul.Scalar %379, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc209)
%381 = torch.aten.add.Tensor %377, %380, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc210)
%382 = torch.aten.mul.Scalar %381, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc211)
%383 = torch.aten.tanh %382 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc212)
%384 = torch.aten.add.Scalar %383, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc213)
%385 = torch.aten.mul.Tensor %378, %384 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc214)
%386 = torch.aten.view %385, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc215)
%387 = torch.aten.mm %386, %110 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc216)
%388 = torch.aten.mul.Scalar %111, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc216)
%389 = torch.aten.add.Tensor %388, %387, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc216)
%390 = torch.aten.view %389, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc217)
%391 = torch.aten.add.Tensor %372, %390, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc218)
%result0_19, %result1_20, %result2_21 = torch.aten.native_layer_norm %391, %159, %109, %108, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc219)
%392 = torch.aten.view %result0_19, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc220)
%393 = torch.aten.mm %392, %106 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc221)
%394 = torch.aten.mul.Scalar %107, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc221)
%395 = torch.aten.add.Tensor %394, %393, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc221)
%396 = torch.aten.view %395, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc222)
%397 = torch.aten.slice.Tensor %396, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc223)
%398 = torch.aten.slice.Tensor %396, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc224)
%399 = torch.aten.slice.Tensor %396, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc225)
%400 = torch.aten.view %397, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc226)
%401 = torch.aten.permute %400, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc227)
%402 = torch.aten.view %398, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc228)
%403 = torch.aten.permute %402, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc229)
%404 = torch.aten.view %399, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc230)
%405 = torch.aten.permute %404, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc231)
%406 = torch.aten.transpose.int %403, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc232)
%407 = torch.aten.broadcast_to %401, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc233)
%408 = torch.aten.view %407, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc234)
%409 = torch.aten.broadcast_to %406, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc235)
%410 = torch.aten.view %409, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc236)
%411 = torch.aten.bmm %408, %410 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc237)
%412 = torch.aten.view %411, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc238)
%413 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc239)
%414 = torch.aten.div.Tensor %412, %413 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc240)
%415 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc241)
%416 = torch.aten.slice.Tensor %415, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc242)
%417 = torch.aten.slice.Tensor %416, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc243)
%418 = torch.aten.slice.Tensor %417, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc244)
%419 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc245)
%420 = torch.aten.to.dtype %419, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc245)
%421 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc245)
%422 = torch.aten.broadcast_to %420, %421 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc245)
%423 = torch.valsem.aten.copy %422, %418, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc245)
%424 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc246)
%425 = torch.aten.where.self %423, %414, %424 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc247)
%values_22, %indices_23 = torch.aten.max.dim %425, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc248)
%426 = torch.aten.sub.Tensor %425, %values_22, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc248)
%427 = torch.aten.exp %426 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc248)
%428 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc248)
%429 = torch.aten.sum.dim_IntList %427, %428, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc248)
%430 = torch.aten.div.Tensor %427, %429 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc248)
%431 = torch.aten.broadcast_to %430, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc249)
%432 = torch.aten.view %431, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc250)
%433 = torch.aten.broadcast_to %405, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc251)
%434 = torch.aten.view %433, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc252)
%435 = torch.aten.bmm %432, %434 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc253)
%436 = torch.aten.view %435, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc254)
%437 = torch.aten.permute %436, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc255)
%438 = torch.aten.clone %437, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc256)
%439 = torch.aten.view %438, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc257)
%440 = torch.aten.view %439, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc258)
%441 = torch.aten.mm %440, %104 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc259)
%442 = torch.aten.mul.Scalar %105, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc259)
%443 = torch.aten.add.Tensor %442, %441, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc259)
%444 = torch.aten.view %443, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc260)
%445 = torch.aten.add.Tensor %444, %391, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc261)
%result0_24, %result1_25, %result2_26 = torch.aten.native_layer_norm %445, %159, %103, %102, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc262)
%446 = torch.aten.view %result0_24, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc263)
%447 = torch.aten.mm %446, %100 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc264)
%448 = torch.aten.mul.Scalar %101, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc264)
%449 = torch.aten.add.Tensor %448, %447, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc264)
%450 = torch.aten.view %449, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc265)
%451 = torch.aten.mul.Scalar %450, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc266)
%452 = torch.aten.pow.Tensor_Scalar %450, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc267)
%453 = torch.aten.mul.Scalar %452, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc268)
%454 = torch.aten.add.Tensor %450, %453, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc269)
%455 = torch.aten.mul.Scalar %454, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc270)
%456 = torch.aten.tanh %455 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc271)
%457 = torch.aten.add.Scalar %456, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc272)
%458 = torch.aten.mul.Tensor %451, %457 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc273)
%459 = torch.aten.view %458, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc274)
%460 = torch.aten.mm %459, %98 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc275)
%461 = torch.aten.mul.Scalar %99, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc275)
%462 = torch.aten.add.Tensor %461, %460, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc275)
%463 = torch.aten.view %462, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc276)
%464 = torch.aten.add.Tensor %445, %463, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc277)
%result0_27, %result1_28, %result2_29 = torch.aten.native_layer_norm %464, %159, %97, %96, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc278)
%465 = torch.aten.view %result0_27, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc279)
%466 = torch.aten.mm %465, %94 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc280)
%467 = torch.aten.mul.Scalar %95, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc280)
%468 = torch.aten.add.Tensor %467, %466, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc280)
%469 = torch.aten.view %468, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc281)
%470 = torch.aten.slice.Tensor %469, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc282)
%471 = torch.aten.slice.Tensor %469, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc283)
%472 = torch.aten.slice.Tensor %469, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc284)
%473 = torch.aten.view %470, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc285)
%474 = torch.aten.permute %473, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc286)
%475 = torch.aten.view %471, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc287)
%476 = torch.aten.permute %475, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc288)
%477 = torch.aten.view %472, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc289)
%478 = torch.aten.permute %477, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc290)
%479 = torch.aten.transpose.int %476, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc291)
%480 = torch.aten.broadcast_to %474, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc292)
%481 = torch.aten.view %480, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc293)
%482 = torch.aten.broadcast_to %479, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc294)
%483 = torch.aten.view %482, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc295)
%484 = torch.aten.bmm %481, %483 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc296)
%485 = torch.aten.view %484, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc297)
%486 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc298)
%487 = torch.aten.div.Tensor %485, %486 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc299)
%488 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc300)
%489 = torch.aten.slice.Tensor %488, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc301)
%490 = torch.aten.slice.Tensor %489, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc302)
%491 = torch.aten.slice.Tensor %490, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc303)
%492 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc304)
%493 = torch.aten.to.dtype %492, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc304)
%494 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc304)
%495 = torch.aten.broadcast_to %493, %494 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc304)
%496 = torch.valsem.aten.copy %495, %491, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc304)
%497 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc305)
%498 = torch.aten.where.self %496, %487, %497 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc306)
%values_30, %indices_31 = torch.aten.max.dim %498, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc307)
%499 = torch.aten.sub.Tensor %498, %values_30, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc307)
%500 = torch.aten.exp %499 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc307)
%501 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc307)
%502 = torch.aten.sum.dim_IntList %500, %501, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc307)
%503 = torch.aten.div.Tensor %500, %502 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc307)
%504 = torch.aten.broadcast_to %503, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc308)
%505 = torch.aten.view %504, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc309)
%506 = torch.aten.broadcast_to %478, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc310)
%507 = torch.aten.view %506, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc311)
%508 = torch.aten.bmm %505, %507 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc312)
%509 = torch.aten.view %508, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc313)
%510 = torch.aten.permute %509, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc314)
%511 = torch.aten.clone %510, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc315)
%512 = torch.aten.view %511, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc316)
%513 = torch.aten.view %512, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc317)
%514 = torch.aten.mm %513, %92 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc318)
%515 = torch.aten.mul.Scalar %93, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc318)
%516 = torch.aten.add.Tensor %515, %514, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc318)
%517 = torch.aten.view %516, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc319)
%518 = torch.aten.add.Tensor %517, %464, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc320)
%result0_32, %result1_33, %result2_34 = torch.aten.native_layer_norm %518, %159, %91, %90, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc321)
%519 = torch.aten.view %result0_32, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc322)
%520 = torch.aten.mm %519, %88 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc323)
%521 = torch.aten.mul.Scalar %89, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc323)
%522 = torch.aten.add.Tensor %521, %520, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc323)
%523 = torch.aten.view %522, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc324)
%524 = torch.aten.mul.Scalar %523, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc325)
%525 = torch.aten.pow.Tensor_Scalar %523, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc326)
%526 = torch.aten.mul.Scalar %525, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc327)
%527 = torch.aten.add.Tensor %523, %526, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc328)
%528 = torch.aten.mul.Scalar %527, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc329)
%529 = torch.aten.tanh %528 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc330)
%530 = torch.aten.add.Scalar %529, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc331)
%531 = torch.aten.mul.Tensor %524, %530 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc332)
%532 = torch.aten.view %531, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc333)
%533 = torch.aten.mm %532, %86 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc334)
%534 = torch.aten.mul.Scalar %87, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc334)
%535 = torch.aten.add.Tensor %534, %533, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc334)
%536 = torch.aten.view %535, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc335)
%537 = torch.aten.add.Tensor %518, %536, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc336)
%result0_35, %result1_36, %result2_37 = torch.aten.native_layer_norm %537, %159, %85, %84, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc337)
%538 = torch.aten.view %result0_35, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc338)
%539 = torch.aten.mm %538, %82 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc339)
%540 = torch.aten.mul.Scalar %83, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc339)
%541 = torch.aten.add.Tensor %540, %539, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc339)
%542 = torch.aten.view %541, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc340)
%543 = torch.aten.slice.Tensor %542, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc341)
%544 = torch.aten.slice.Tensor %542, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc342)
%545 = torch.aten.slice.Tensor %542, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc343)
%546 = torch.aten.view %543, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc344)
%547 = torch.aten.permute %546, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc345)
%548 = torch.aten.view %544, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc346)
%549 = torch.aten.permute %548, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc347)
%550 = torch.aten.view %545, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc348)
%551 = torch.aten.permute %550, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc349)
%552 = torch.aten.transpose.int %549, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc350)
%553 = torch.aten.broadcast_to %547, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc351)
%554 = torch.aten.view %553, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc352)
%555 = torch.aten.broadcast_to %552, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc353)
%556 = torch.aten.view %555, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc354)
%557 = torch.aten.bmm %554, %556 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc355)
%558 = torch.aten.view %557, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc356)
%559 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc357)
%560 = torch.aten.div.Tensor %558, %559 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc358)
%561 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc359)
%562 = torch.aten.slice.Tensor %561, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc360)
%563 = torch.aten.slice.Tensor %562, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc361)
%564 = torch.aten.slice.Tensor %563, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc362)
%565 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc363)
%566 = torch.aten.to.dtype %565, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc363)
%567 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc363)
%568 = torch.aten.broadcast_to %566, %567 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc363)
%569 = torch.valsem.aten.copy %568, %564, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc363)
%570 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc364)
%571 = torch.aten.where.self %569, %560, %570 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc365)
%values_38, %indices_39 = torch.aten.max.dim %571, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc366)
%572 = torch.aten.sub.Tensor %571, %values_38, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc366)
%573 = torch.aten.exp %572 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc366)
%574 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc366)
%575 = torch.aten.sum.dim_IntList %573, %574, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc366)
%576 = torch.aten.div.Tensor %573, %575 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc366)
%577 = torch.aten.broadcast_to %576, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc367)
%578 = torch.aten.view %577, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc368)
%579 = torch.aten.broadcast_to %551, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc369)
%580 = torch.aten.view %579, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc370)
%581 = torch.aten.bmm %578, %580 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc371)
%582 = torch.aten.view %581, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc372)
%583 = torch.aten.permute %582, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc373)
%584 = torch.aten.clone %583, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc374)
%585 = torch.aten.view %584, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc375)
%586 = torch.aten.view %585, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc376)
%587 = torch.aten.mm %586, %80 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc377)
%588 = torch.aten.mul.Scalar %81, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc377)
%589 = torch.aten.add.Tensor %588, %587, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc377)
%590 = torch.aten.view %589, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc378)
%591 = torch.aten.add.Tensor %590, %537, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc379)
%result0_40, %result1_41, %result2_42 = torch.aten.native_layer_norm %591, %159, %79, %78, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc380)
%592 = torch.aten.view %result0_40, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc381)
%593 = torch.aten.mm %592, %76 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc382)
%594 = torch.aten.mul.Scalar %77, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc382)
%595 = torch.aten.add.Tensor %594, %593, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc382)
%596 = torch.aten.view %595, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc383)
%597 = torch.aten.mul.Scalar %596, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc384)
%598 = torch.aten.pow.Tensor_Scalar %596, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc385)
%599 = torch.aten.mul.Scalar %598, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc386)
%600 = torch.aten.add.Tensor %596, %599, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc387)
%601 = torch.aten.mul.Scalar %600, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc388)
%602 = torch.aten.tanh %601 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc389)
%603 = torch.aten.add.Scalar %602, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc390)
%604 = torch.aten.mul.Tensor %597, %603 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc391)
%605 = torch.aten.view %604, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc392)
%606 = torch.aten.mm %605, %74 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc393)
%607 = torch.aten.mul.Scalar %75, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc393)
%608 = torch.aten.add.Tensor %607, %606, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc393)
%609 = torch.aten.view %608, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc394)
%610 = torch.aten.add.Tensor %591, %609, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc395)
%result0_43, %result1_44, %result2_45 = torch.aten.native_layer_norm %610, %159, %73, %72, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc396)
%611 = torch.aten.view %result0_43, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc397)
%612 = torch.aten.mm %611, %70 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc398)
%613 = torch.aten.mul.Scalar %71, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc398)
%614 = torch.aten.add.Tensor %613, %612, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc398)
%615 = torch.aten.view %614, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc399)
%616 = torch.aten.slice.Tensor %615, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc400)
%617 = torch.aten.slice.Tensor %615, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc401)
%618 = torch.aten.slice.Tensor %615, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc402)
%619 = torch.aten.view %616, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc403)
%620 = torch.aten.permute %619, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc404)
%621 = torch.aten.view %617, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc405)
%622 = torch.aten.permute %621, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc406)
%623 = torch.aten.view %618, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc407)
%624 = torch.aten.permute %623, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc408)
%625 = torch.aten.transpose.int %622, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc409)
%626 = torch.aten.broadcast_to %620, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc410)
%627 = torch.aten.view %626, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc411)
%628 = torch.aten.broadcast_to %625, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc412)
%629 = torch.aten.view %628, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc413)
%630 = torch.aten.bmm %627, %629 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc414)
%631 = torch.aten.view %630, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc415)
%632 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc416)
%633 = torch.aten.div.Tensor %631, %632 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc417)
%634 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc418)
%635 = torch.aten.slice.Tensor %634, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc419)
%636 = torch.aten.slice.Tensor %635, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc420)
%637 = torch.aten.slice.Tensor %636, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc421)
%638 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc422)
%639 = torch.aten.to.dtype %638, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc422)
%640 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc422)
%641 = torch.aten.broadcast_to %639, %640 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc422)
%642 = torch.valsem.aten.copy %641, %637, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc422)
%643 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc423)
%644 = torch.aten.where.self %642, %633, %643 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc424)
%values_46, %indices_47 = torch.aten.max.dim %644, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc425)
%645 = torch.aten.sub.Tensor %644, %values_46, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc425)
%646 = torch.aten.exp %645 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc425)
%647 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc425)
%648 = torch.aten.sum.dim_IntList %646, %647, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc425)
%649 = torch.aten.div.Tensor %646, %648 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc425)
%650 = torch.aten.broadcast_to %649, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc426)
%651 = torch.aten.view %650, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc427)
%652 = torch.aten.broadcast_to %624, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc428)
%653 = torch.aten.view %652, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc429)
%654 = torch.aten.bmm %651, %653 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc430)
%655 = torch.aten.view %654, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc431)
%656 = torch.aten.permute %655, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc432)
%657 = torch.aten.clone %656, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc433)
%658 = torch.aten.view %657, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc434)
%659 = torch.aten.view %658, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc435)
%660 = torch.aten.mm %659, %68 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc436)
%661 = torch.aten.mul.Scalar %69, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc436)
%662 = torch.aten.add.Tensor %661, %660, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc436)
%663 = torch.aten.view %662, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc437)
%664 = torch.aten.add.Tensor %663, %610, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc438)
%result0_48, %result1_49, %result2_50 = torch.aten.native_layer_norm %664, %159, %67, %66, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc439)
%665 = torch.aten.view %result0_48, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc440)
%666 = torch.aten.mm %665, %64 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc441)
%667 = torch.aten.mul.Scalar %65, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc441)
%668 = torch.aten.add.Tensor %667, %666, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc441)
%669 = torch.aten.view %668, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc442)
%670 = torch.aten.mul.Scalar %669, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc443)
%671 = torch.aten.pow.Tensor_Scalar %669, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc444)
%672 = torch.aten.mul.Scalar %671, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc445)
%673 = torch.aten.add.Tensor %669, %672, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc446)
%674 = torch.aten.mul.Scalar %673, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc447)
%675 = torch.aten.tanh %674 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc448)
%676 = torch.aten.add.Scalar %675, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc449)
%677 = torch.aten.mul.Tensor %670, %676 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc450)
%678 = torch.aten.view %677, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc451)
%679 = torch.aten.mm %678, %62 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc452)
%680 = torch.aten.mul.Scalar %63, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc452)
%681 = torch.aten.add.Tensor %680, %679, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc452)
%682 = torch.aten.view %681, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc453)
%683 = torch.aten.add.Tensor %664, %682, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc454)
%result0_51, %result1_52, %result2_53 = torch.aten.native_layer_norm %683, %159, %61, %60, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc455)
%684 = torch.aten.view %result0_51, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc456)
%685 = torch.aten.mm %684, %58 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc457)
%686 = torch.aten.mul.Scalar %59, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc457)
%687 = torch.aten.add.Tensor %686, %685, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc457)
%688 = torch.aten.view %687, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc458)
%689 = torch.aten.slice.Tensor %688, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc459)
%690 = torch.aten.slice.Tensor %688, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc460)
%691 = torch.aten.slice.Tensor %688, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc461)
%692 = torch.aten.view %689, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc462)
%693 = torch.aten.permute %692, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc463)
%694 = torch.aten.view %690, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc464)
%695 = torch.aten.permute %694, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc465)
%696 = torch.aten.view %691, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc466)
%697 = torch.aten.permute %696, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc467)
%698 = torch.aten.transpose.int %695, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc468)
%699 = torch.aten.broadcast_to %693, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc469)
%700 = torch.aten.view %699, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc470)
%701 = torch.aten.broadcast_to %698, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc471)
%702 = torch.aten.view %701, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc472)
%703 = torch.aten.bmm %700, %702 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc473)
%704 = torch.aten.view %703, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc474)
%705 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc475)
%706 = torch.aten.div.Tensor %704, %705 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc476)
%707 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc477)
%708 = torch.aten.slice.Tensor %707, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc478)
%709 = torch.aten.slice.Tensor %708, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc479)
%710 = torch.aten.slice.Tensor %709, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc480)
%711 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc481)
%712 = torch.aten.to.dtype %711, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc481)
%713 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc481)
%714 = torch.aten.broadcast_to %712, %713 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc481)
%715 = torch.valsem.aten.copy %714, %710, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc481)
%716 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc482)
%717 = torch.aten.where.self %715, %706, %716 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc483)
%values_54, %indices_55 = torch.aten.max.dim %717, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc484)
%718 = torch.aten.sub.Tensor %717, %values_54, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc484)
%719 = torch.aten.exp %718 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc484)
%720 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc484)
%721 = torch.aten.sum.dim_IntList %719, %720, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc484)
%722 = torch.aten.div.Tensor %719, %721 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc484)
%723 = torch.aten.broadcast_to %722, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc485)
%724 = torch.aten.view %723, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc486)
%725 = torch.aten.broadcast_to %697, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc487)
%726 = torch.aten.view %725, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc488)
%727 = torch.aten.bmm %724, %726 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc489)
%728 = torch.aten.view %727, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc490)
%729 = torch.aten.permute %728, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc491)
%730 = torch.aten.clone %729, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc492)
%731 = torch.aten.view %730, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc493)
%732 = torch.aten.view %731, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc494)
%733 = torch.aten.mm %732, %56 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc495)
%734 = torch.aten.mul.Scalar %57, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc495)
%735 = torch.aten.add.Tensor %734, %733, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc495)
%736 = torch.aten.view %735, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc496)
%737 = torch.aten.add.Tensor %736, %683, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc497)
%result0_56, %result1_57, %result2_58 = torch.aten.native_layer_norm %737, %159, %55, %54, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc498)
%738 = torch.aten.view %result0_56, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc499)
%739 = torch.aten.mm %738, %52 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc500)
%740 = torch.aten.mul.Scalar %53, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc500)
%741 = torch.aten.add.Tensor %740, %739, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc500)
%742 = torch.aten.view %741, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc501)
%743 = torch.aten.mul.Scalar %742, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc502)
%744 = torch.aten.pow.Tensor_Scalar %742, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc503)
%745 = torch.aten.mul.Scalar %744, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc504)
%746 = torch.aten.add.Tensor %742, %745, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc505)
%747 = torch.aten.mul.Scalar %746, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc506)
%748 = torch.aten.tanh %747 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc507)
%749 = torch.aten.add.Scalar %748, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc508)
%750 = torch.aten.mul.Tensor %743, %749 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc509)
%751 = torch.aten.view %750, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc510)
%752 = torch.aten.mm %751, %50 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc511)
%753 = torch.aten.mul.Scalar %51, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc511)
%754 = torch.aten.add.Tensor %753, %752, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc511)
%755 = torch.aten.view %754, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc512)
%756 = torch.aten.add.Tensor %737, %755, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc513)
%result0_59, %result1_60, %result2_61 = torch.aten.native_layer_norm %756, %159, %49, %48, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc514)
%757 = torch.aten.view %result0_59, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc515)
%758 = torch.aten.mm %757, %46 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc516)
%759 = torch.aten.mul.Scalar %47, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc516)
%760 = torch.aten.add.Tensor %759, %758, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc516)
%761 = torch.aten.view %760, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc517)
%762 = torch.aten.slice.Tensor %761, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc518)
%763 = torch.aten.slice.Tensor %761, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc519)
%764 = torch.aten.slice.Tensor %761, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc520)
%765 = torch.aten.view %762, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc521)
%766 = torch.aten.permute %765, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc522)
%767 = torch.aten.view %763, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc523)
%768 = torch.aten.permute %767, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc524)
%769 = torch.aten.view %764, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc525)
%770 = torch.aten.permute %769, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc526)
%771 = torch.aten.transpose.int %768, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc527)
%772 = torch.aten.broadcast_to %766, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc528)
%773 = torch.aten.view %772, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc529)
%774 = torch.aten.broadcast_to %771, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc530)
%775 = torch.aten.view %774, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc531)
%776 = torch.aten.bmm %773, %775 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc532)
%777 = torch.aten.view %776, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc533)
%778 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc534)
%779 = torch.aten.div.Tensor %777, %778 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc535)
%780 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc536)
%781 = torch.aten.slice.Tensor %780, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc537)
%782 = torch.aten.slice.Tensor %781, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc538)
%783 = torch.aten.slice.Tensor %782, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc539)
%784 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc540)
%785 = torch.aten.to.dtype %784, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc540)
%786 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc540)
%787 = torch.aten.broadcast_to %785, %786 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc540)
%788 = torch.valsem.aten.copy %787, %783, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc540)
%789 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc541)
%790 = torch.aten.where.self %788, %779, %789 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc542)
%values_62, %indices_63 = torch.aten.max.dim %790, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc543)
%791 = torch.aten.sub.Tensor %790, %values_62, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc543)
%792 = torch.aten.exp %791 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc543)
%793 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc543)
%794 = torch.aten.sum.dim_IntList %792, %793, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc543)
%795 = torch.aten.div.Tensor %792, %794 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc543)
%796 = torch.aten.broadcast_to %795, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc544)
%797 = torch.aten.view %796, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc545)
%798 = torch.aten.broadcast_to %770, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc546)
%799 = torch.aten.view %798, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc547)
%800 = torch.aten.bmm %797, %799 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc548)
%801 = torch.aten.view %800, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc549)
%802 = torch.aten.permute %801, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc550)
%803 = torch.aten.clone %802, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc551)
%804 = torch.aten.view %803, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc552)
%805 = torch.aten.view %804, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc553)
%806 = torch.aten.mm %805, %44 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc554)
%807 = torch.aten.mul.Scalar %45, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc554)
%808 = torch.aten.add.Tensor %807, %806, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc554)
%809 = torch.aten.view %808, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc555)
%810 = torch.aten.add.Tensor %809, %756, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc556)
%result0_64, %result1_65, %result2_66 = torch.aten.native_layer_norm %810, %159, %43, %42, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc557)
%811 = torch.aten.view %result0_64, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc558)
%812 = torch.aten.mm %811, %40 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc559)
%813 = torch.aten.mul.Scalar %41, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc559)
%814 = torch.aten.add.Tensor %813, %812, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc559)
%815 = torch.aten.view %814, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc560)
%816 = torch.aten.mul.Scalar %815, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc561)
%817 = torch.aten.pow.Tensor_Scalar %815, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc562)
%818 = torch.aten.mul.Scalar %817, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc563)
%819 = torch.aten.add.Tensor %815, %818, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc564)
%820 = torch.aten.mul.Scalar %819, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc565)
%821 = torch.aten.tanh %820 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc566)
%822 = torch.aten.add.Scalar %821, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc567)
%823 = torch.aten.mul.Tensor %816, %822 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc568)
%824 = torch.aten.view %823, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc569)
%825 = torch.aten.mm %824, %38 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc570)
%826 = torch.aten.mul.Scalar %39, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc570)
%827 = torch.aten.add.Tensor %826, %825, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc570)
%828 = torch.aten.view %827, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc571)
%829 = torch.aten.add.Tensor %810, %828, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc572)
%result0_67, %result1_68, %result2_69 = torch.aten.native_layer_norm %829, %159, %37, %36, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc573)
%830 = torch.aten.view %result0_67, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc574)
%831 = torch.aten.mm %830, %34 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc575)
%832 = torch.aten.mul.Scalar %35, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc575)
%833 = torch.aten.add.Tensor %832, %831, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc575)
%834 = torch.aten.view %833, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc576)
%835 = torch.aten.slice.Tensor %834, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc577)
%836 = torch.aten.slice.Tensor %834, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc578)
%837 = torch.aten.slice.Tensor %834, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc579)
%838 = torch.aten.view %835, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc580)
%839 = torch.aten.permute %838, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc581)
%840 = torch.aten.view %836, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc582)
%841 = torch.aten.permute %840, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc583)
%842 = torch.aten.view %837, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc584)
%843 = torch.aten.permute %842, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc585)
%844 = torch.aten.transpose.int %841, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc586)
%845 = torch.aten.broadcast_to %839, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc587)
%846 = torch.aten.view %845, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc588)
%847 = torch.aten.broadcast_to %844, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc589)
%848 = torch.aten.view %847, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc590)
%849 = torch.aten.bmm %846, %848 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc591)
%850 = torch.aten.view %849, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc592)
%851 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc593)
%852 = torch.aten.div.Tensor %850, %851 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc594)
%853 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc595)
%854 = torch.aten.slice.Tensor %853, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc596)
%855 = torch.aten.slice.Tensor %854, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc597)
%856 = torch.aten.slice.Tensor %855, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc598)
%857 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc599)
%858 = torch.aten.to.dtype %857, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc599)
%859 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc599)
%860 = torch.aten.broadcast_to %858, %859 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc599)
%861 = torch.valsem.aten.copy %860, %856, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc599)
%862 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc600)
%863 = torch.aten.where.self %861, %852, %862 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc601)
%values_70, %indices_71 = torch.aten.max.dim %863, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc602)
%864 = torch.aten.sub.Tensor %863, %values_70, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc602)
%865 = torch.aten.exp %864 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc602)
%866 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc602)
%867 = torch.aten.sum.dim_IntList %865, %866, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc602)
%868 = torch.aten.div.Tensor %865, %867 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc602)
%869 = torch.aten.broadcast_to %868, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc603)
%870 = torch.aten.view %869, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc604)
%871 = torch.aten.broadcast_to %843, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc605)
%872 = torch.aten.view %871, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc606)
%873 = torch.aten.bmm %870, %872 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc607)
%874 = torch.aten.view %873, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc608)
%875 = torch.aten.permute %874, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc609)
%876 = torch.aten.clone %875, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc610)
%877 = torch.aten.view %876, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc611)
%878 = torch.aten.view %877, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc612)
%879 = torch.aten.mm %878, %32 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc613)
%880 = torch.aten.mul.Scalar %33, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc613)
%881 = torch.aten.add.Tensor %880, %879, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc613)
%882 = torch.aten.view %881, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc614)
%883 = torch.aten.add.Tensor %882, %829, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc615)
%result0_72, %result1_73, %result2_74 = torch.aten.native_layer_norm %883, %159, %31, %30, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc616)
%884 = torch.aten.view %result0_72, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc617)
%885 = torch.aten.mm %884, %28 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc618)
%886 = torch.aten.mul.Scalar %29, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc618)
%887 = torch.aten.add.Tensor %886, %885, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc618)
%888 = torch.aten.view %887, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc619)
%889 = torch.aten.mul.Scalar %888, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc620)
%890 = torch.aten.pow.Tensor_Scalar %888, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc621)
%891 = torch.aten.mul.Scalar %890, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc622)
%892 = torch.aten.add.Tensor %888, %891, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc623)
%893 = torch.aten.mul.Scalar %892, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc624)
%894 = torch.aten.tanh %893 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc625)
%895 = torch.aten.add.Scalar %894, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc626)
%896 = torch.aten.mul.Tensor %889, %895 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc627)
%897 = torch.aten.view %896, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc628)
%898 = torch.aten.mm %897, %26 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc629)
%899 = torch.aten.mul.Scalar %27, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc629)
%900 = torch.aten.add.Tensor %899, %898, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc629)
%901 = torch.aten.view %900, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc630)
%902 = torch.aten.add.Tensor %883, %901, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc631)
%result0_75, %result1_76, %result2_77 = torch.aten.native_layer_norm %902, %159, %25, %24, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc632)
%903 = torch.aten.view %result0_75, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc633)
%904 = torch.aten.mm %903, %22 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc634)
%905 = torch.aten.mul.Scalar %23, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc634)
%906 = torch.aten.add.Tensor %905, %904, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc634)
%907 = torch.aten.view %906, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc635)
%908 = torch.aten.slice.Tensor %907, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc636)
%909 = torch.aten.slice.Tensor %907, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc637)
%910 = torch.aten.slice.Tensor %907, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc638)
%911 = torch.aten.view %908, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc639)
%912 = torch.aten.permute %911, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc640)
%913 = torch.aten.view %909, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc641)
%914 = torch.aten.permute %913, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc642)
%915 = torch.aten.view %910, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc643)
%916 = torch.aten.permute %915, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc644)
%917 = torch.aten.transpose.int %914, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc645)
%918 = torch.aten.broadcast_to %912, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc646)
%919 = torch.aten.view %918, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc647)
%920 = torch.aten.broadcast_to %917, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc648)
%921 = torch.aten.view %920, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc649)
%922 = torch.aten.bmm %919, %921 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc650)
%923 = torch.aten.view %922, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc651)
%924 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc652)
%925 = torch.aten.div.Tensor %923, %924 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc653)
%926 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc654)
%927 = torch.aten.slice.Tensor %926, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc655)
%928 = torch.aten.slice.Tensor %927, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc656)
%929 = torch.aten.slice.Tensor %928, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc657)
%930 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc658)
%931 = torch.aten.to.dtype %930, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc658)
%932 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc658)
%933 = torch.aten.broadcast_to %931, %932 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc658)
%934 = torch.valsem.aten.copy %933, %929, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc658)
%935 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc659)
%936 = torch.aten.where.self %934, %925, %935 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc660)
%values_78, %indices_79 = torch.aten.max.dim %936, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc661)
%937 = torch.aten.sub.Tensor %936, %values_78, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc661)
%938 = torch.aten.exp %937 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc661)
%939 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc661)
%940 = torch.aten.sum.dim_IntList %938, %939, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc661)
%941 = torch.aten.div.Tensor %938, %940 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc661)
%942 = torch.aten.broadcast_to %941, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc662)
%943 = torch.aten.view %942, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc663)
%944 = torch.aten.broadcast_to %916, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc664)
%945 = torch.aten.view %944, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc665)
%946 = torch.aten.bmm %943, %945 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc666)
%947 = torch.aten.view %946, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc667)
%948 = torch.aten.permute %947, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc668)
%949 = torch.aten.clone %948, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc669)
%950 = torch.aten.view %949, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc670)
%951 = torch.aten.view %950, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc671)
%952 = torch.aten.mm %951, %20 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc672)
%953 = torch.aten.mul.Scalar %21, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc672)
%954 = torch.aten.add.Tensor %953, %952, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc672)
%955 = torch.aten.view %954, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc673)
%956 = torch.aten.add.Tensor %955, %902, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc674)
%result0_80, %result1_81, %result2_82 = torch.aten.native_layer_norm %956, %159, %19, %18, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc675)
%957 = torch.aten.view %result0_80, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc676)
%958 = torch.aten.mm %957, %16 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc677)
%959 = torch.aten.mul.Scalar %17, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc677)
%960 = torch.aten.add.Tensor %959, %958, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc677)
%961 = torch.aten.view %960, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc678)
%962 = torch.aten.mul.Scalar %961, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc679)
%963 = torch.aten.pow.Tensor_Scalar %961, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc680)
%964 = torch.aten.mul.Scalar %963, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc681)
%965 = torch.aten.add.Tensor %961, %964, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc682)
%966 = torch.aten.mul.Scalar %965, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc683)
%967 = torch.aten.tanh %966 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc684)
%968 = torch.aten.add.Scalar %967, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc685)
%969 = torch.aten.mul.Tensor %962, %968 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc686)
%970 = torch.aten.view %969, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc687)
%971 = torch.aten.mm %970, %14 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc688)
%972 = torch.aten.mul.Scalar %15, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc688)
%973 = torch.aten.add.Tensor %972, %971, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc688)
%974 = torch.aten.view %973, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc689)
%975 = torch.aten.add.Tensor %956, %974, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc690)
%result0_83, %result1_84, %result2_85 = torch.aten.native_layer_norm %975, %159, %13, %12, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc691)
%976 = torch.aten.view %result0_83, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc692)
%977 = torch.aten.mm %976, %10 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,2304],f32> -> !torch.vtensor<[5,2304],f32> loc(#loc693)
%978 = torch.aten.mul.Scalar %11, %int1 : !torch.vtensor<[2304],f32>, !torch.int -> !torch.vtensor<[2304],f32> loc(#loc693)
%979 = torch.aten.add.Tensor %978, %977, %int1 : !torch.vtensor<[2304],f32>, !torch.vtensor<[5,2304],f32>, !torch.int -> !torch.vtensor<[5,2304],f32> loc(#loc693)
%980 = torch.aten.view %979, %165 : !torch.vtensor<[5,2304],f32>, !torch.list<int> -> !torch.vtensor<[1,5,2304],f32> loc(#loc694)
%981 = torch.aten.slice.Tensor %980, %int2, %int0, %int768, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc695)
%982 = torch.aten.slice.Tensor %980, %int2, %int768, %int1536, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc696)
%983 = torch.aten.slice.Tensor %980, %int2, %int1536, %int2304, %int1 : !torch.vtensor<[1,5,2304],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc697)
%984 = torch.aten.view %981, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc698)
%985 = torch.aten.permute %984, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc699)
%986 = torch.aten.view %982, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc700)
%987 = torch.aten.permute %986, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc701)
%988 = torch.aten.view %983, %170 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc702)
%989 = torch.aten.permute %988, %172 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc703)
%990 = torch.aten.transpose.int %987, %int-1, %int-2 : !torch.vtensor<[1,12,5,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,64,5],f32> loc(#loc704)
%991 = torch.aten.broadcast_to %985, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc705)
%992 = torch.aten.view %991, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc706)
%993 = torch.aten.broadcast_to %990, %183 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,64,5],f32> loc(#loc707)
%994 = torch.aten.view %993, %185 : !torch.vtensor<[1,12,64,5],f32>, !torch.list<int> -> !torch.vtensor<[12,64,5],f32> loc(#loc708)
%995 = torch.aten.bmm %992, %994 : !torch.vtensor<[12,5,64],f32>, !torch.vtensor<[12,64,5],f32> -> !torch.vtensor<[12,5,5],f32> loc(#loc709)
%996 = torch.aten.view %995, %188 : !torch.vtensor<[12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc710)
%997 = torch.aten.clone %144, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc711)
%998 = torch.aten.div.Tensor %996, %997 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc712)
%999 = torch.aten.slice.Tensor %143, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc713)
%1000 = torch.aten.slice.Tensor %999, %int1, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1024,1024],ui8> loc(#loc714)
%1001 = torch.aten.slice.Tensor %1000, %int2, %int0, %int5, %int1 : !torch.vtensor<[1,1,1024,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,1024],ui8> loc(#loc715)
%1002 = torch.aten.slice.Tensor %1001, %int3, %int0, %int5, %int1 : !torch.vtensor<[1,1,5,1024],ui8>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,5,5],ui8> loc(#loc716)
%1003 = torch.prim.NumToTensor.Scalar %float0.000000e00 : !torch.float -> !torch.vtensor<[],f64> loc(#loc717)
%1004 = torch.aten.to.dtype %1003, %int11, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc717)
%1005 = torch.prim.ListConstruct %int1, %int1, %int5, %int5 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc717)
%1006 = torch.aten.broadcast_to %1004, %1005 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,5,5],i1> loc(#loc717)
%1007 = torch.valsem.aten.copy %1006, %1002, %false : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,1,5,5],ui8>, !torch.bool -> !torch.vtensor<[1,1,5,5],i1> loc(#loc717)
%1008 = torch.aten.clone %142, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc718)
%1009 = torch.aten.where.self %1007, %998, %1008 : !torch.vtensor<[1,1,5,5],i1>, !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc719)
%values_86, %indices_87 = torch.aten.max.dim %1009, %int-1, %true : !torch.vtensor<[1,12,5,5],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,5,1],f32>, !torch.vtensor<[1,12,5,1],si64> loc(#loc720)
%1010 = torch.aten.sub.Tensor %1009, %values_86, %float1.000000e00 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32>, !torch.float -> !torch.vtensor<[1,12,5,5],f32> loc(#loc720)
%1011 = torch.aten.exp %1010 : !torch.vtensor<[1,12,5,5],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc720)
%1012 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc720)
%1013 = torch.aten.sum.dim_IntList %1011, %1012, %true, %none : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,5,1],f32> loc(#loc720)
%1014 = torch.aten.div.Tensor %1011, %1013 : !torch.vtensor<[1,12,5,5],f32>, !torch.vtensor<[1,12,5,1],f32> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc720)
%1015 = torch.aten.broadcast_to %1014, %188 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,5],f32> loc(#loc721)
%1016 = torch.aten.view %1015, %209 : !torch.vtensor<[1,12,5,5],f32>, !torch.list<int> -> !torch.vtensor<[12,5,5],f32> loc(#loc722)
%1017 = torch.aten.broadcast_to %989, %179 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc723)
%1018 = torch.aten.view %1017, %181 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[12,5,64],f32> loc(#loc724)
%1019 = torch.aten.bmm %1016, %1018 : !torch.vtensor<[12,5,5],f32>, !torch.vtensor<[12,5,64],f32> -> !torch.vtensor<[12,5,64],f32> loc(#loc725)
%1020 = torch.aten.view %1019, %179 : !torch.vtensor<[12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,5,64],f32> loc(#loc726)
%1021 = torch.aten.permute %1020, %172 : !torch.vtensor<[1,12,5,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,12,64],f32> loc(#loc727)
%1022 = torch.aten.clone %1021, %int0 : !torch.vtensor<[1,5,12,64],f32>, !torch.int -> !torch.vtensor<[1,5,12,64],f32> loc(#loc728)
%1023 = torch.aten.view %1022, %217 : !torch.vtensor<[1,5,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc729)
%1024 = torch.aten.view %1023, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc730)
%1025 = torch.aten.mm %1024, %8 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc731)
%1026 = torch.aten.mul.Scalar %9, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc731)
%1027 = torch.aten.add.Tensor %1026, %1025, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc731)
%1028 = torch.aten.view %1027, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc732)
%1029 = torch.aten.add.Tensor %1028, %975, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc733)
%result0_88, %result1_89, %result2_90 = torch.aten.native_layer_norm %1029, %159, %7, %6, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc734)
%1030 = torch.aten.view %result0_88, %160 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc735)
%1031 = torch.aten.mm %1030, %4 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[5,3072],f32> loc(#loc736)
%1032 = torch.aten.mul.Scalar %5, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc736)
%1033 = torch.aten.add.Tensor %1032, %1031, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[5,3072],f32>, !torch.int -> !torch.vtensor<[5,3072],f32> loc(#loc736)
%1034 = torch.aten.view %1033, %229 : !torch.vtensor<[5,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,5,3072],f32> loc(#loc737)
%1035 = torch.aten.mul.Scalar %1034, %float5.000000e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc738)
%1036 = torch.aten.pow.Tensor_Scalar %1034, %float3.000000e00 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc739)
%1037 = torch.aten.mul.Scalar %1036, %float4.471500e-02 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc740)
%1038 = torch.aten.add.Tensor %1034, %1037, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32>, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc741)
%1039 = torch.aten.mul.Scalar %1038, %float7.978850e-01 : !torch.vtensor<[1,5,3072],f32>, !torch.float -> !torch.vtensor<[1,5,3072],f32> loc(#loc742)
%1040 = torch.aten.tanh %1039 : !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc743)
%1041 = torch.aten.add.Scalar %1040, %float1.000000e00, %int1 : !torch.vtensor<[1,5,3072],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,5,3072],f32> loc(#loc744)
%1042 = torch.aten.mul.Tensor %1035, %1041 : !torch.vtensor<[1,5,3072],f32>, !torch.vtensor<[1,5,3072],f32> -> !torch.vtensor<[1,5,3072],f32> loc(#loc745)
%1043 = torch.aten.view %1042, %239 : !torch.vtensor<[1,5,3072],f32>, !torch.list<int> -> !torch.vtensor<[5,3072],f32> loc(#loc746)
%1044 = torch.aten.mm %1043, %2 : !torch.vtensor<[5,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[5,768],f32> loc(#loc747)
%1045 = torch.aten.mul.Scalar %3, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc747)
%1046 = torch.aten.add.Tensor %1045, %1044, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[5,768],f32>, !torch.int -> !torch.vtensor<[5,768],f32> loc(#loc747)
%1047 = torch.aten.view %1046, %217 : !torch.vtensor<[5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc748)
%1048 = torch.aten.add.Tensor %1029, %1047, %int1 : !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,768],f32>, !torch.int -> !torch.vtensor<[1,5,768],f32> loc(#loc749)
%result0_91, %result1_92, %result2_93 = torch.aten.native_layer_norm %1048, %159, %1, %0, %float1.000000e-05 : !torch.vtensor<[1,5,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,5,768],f32>, !torch.vtensor<[1,5,1],f32>, !torch.vtensor<[1,5,1],f32> loc(#loc750)
%1049 = torch.aten.view %result0_91, %217 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[1,5,768],f32> loc(#loc751)
%1050 = torch.aten.transpose.int %150, %int0, %int1 : !torch.vtensor<[50257,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,50257],f32> loc(#loc752)
%1051 = torch.prim.ListConstruct %int5, %int768 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc753)
%1052 = torch.aten.view %1049, %1051 : !torch.vtensor<[1,5,768],f32>, !torch.list<int> -> !torch.vtensor<[5,768],f32> loc(#loc754)
%1053 = torch.aten.mm %1052, %1050 : !torch.vtensor<[5,768],f32>, !torch.vtensor<[768,50257],f32> -> !torch.vtensor<[5,50257],f32> loc(#loc755)
%1054 = torch.prim.ListConstruct %int1, %int5, %int50257 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
%1055 = torch.aten.view %1053, %1054 : !torch.vtensor<[5,50257],f32>, !torch.list<int> -> !torch.vtensor<[1,5,50257],f32> loc(#loc756)
return %1055 : !torch.vtensor<[1,5,50257],f32> loc(#loc0)
} loc(#loc0)
} loc(#loc0)
#loc1 = loc("<eval_with_key>.2":5:44)
#loc2 = loc("<eval_with_key>.2":5:41)
#loc3 = loc("<eval_with_key>.2":53:15)
#loc4 = loc("<eval_with_key>.2":49:15)
#loc5 = loc("<eval_with_key>.2":6:104)
#loc6 = loc("<eval_with_key>.2":34:56)
#loc7 = loc("<eval_with_key>.2":49:56)
#loc8 = loc("<eval_with_key>.2":6:49)
#loc9 = loc("<eval_with_key>.2":5:40)
#loc10 = loc("<eval_with_key>.2":6:35)
#loc11 = loc("<eval_with_key>.2":16:63)
#loc12 = loc("<eval_with_key>.2":16:105)
#loc13 = loc("<eval_with_key>.2":19:34)
#loc14 = loc("<eval_with_key>.2":24:47)
#loc15 = loc("<eval_with_key>.2":26:51)
#loc16 = loc("<eval_with_key>.2":28:49)
#loc17 = loc("<eval_with_key>.2":28:53)
#loc18 = loc("<eval_with_key>.2":29:55)
#loc19 = loc("<eval_with_key>.2":45:60)
#loc20 = loc("<eval_with_key>.2":80:50)
#loc21 = loc("<eval_with_key>.2":81:38)
#loc22 = loc("<eval_with_key>.2":82:40)
#loc23 = loc("<eval_with_key>.2":83:38)
#loc24 = loc("<eval_with_key>.2":85:38)
#loc25 = loc("<eval_with_key>.2":88:37)
#loc26 = loc("<eval_with_key>.2":1009:61)
#loc27 = loc("<eval_with_key>.2":5:11)
#loc28 = loc("<eval_with_key>.2":6:13)
#loc29 = loc("<eval_with_key>.2":7:16)
#loc30 = loc("<eval_with_key>.2":8:13)
#loc31 = loc("<eval_with_key>.2":10:16)
#loc32 = loc("<eval_with_key>.2":12:18)
#loc33 = loc("<eval_with_key>.2":13:10)
#loc34 = loc("<eval_with_key>.2":16:24)
#loc35 = loc("<eval_with_key>.2":20:13)
#loc36 = loc("<eval_with_key>.2":23:12)
#loc37 = loc(callsite(callsite("-":5066:13 at "-":6842:10) at "<eval_with_key>.2":24:13))
#loc38 = loc("<eval_with_key>.2":24:13)
#loc39 = loc("<eval_with_key>.2":25:14)
#loc40 = loc("<eval_with_key>.2":26:14)
#loc41 = loc("<eval_with_key>.2":27:14)
#loc42 = loc(callsite(callsite("-":5066:13 at "-":6842:10) at "<eval_with_key>.2":28:13))
#loc43 = loc("<eval_with_key>.2":28:13)
#loc44 = loc("<eval_with_key>.2":29:14)
#loc45 = loc("<eval_with_key>.2":30:13)
#loc46 = loc("<eval_with_key>.2":31:16)
#loc47 = loc("<eval_with_key>.2":32:13)
#loc48 = loc("<eval_with_key>.2":33:16)
#loc49 = loc("<eval_with_key>.2":34:16)
#loc50 = loc(callsite(callsite("-":5234:15 at "-":6830:10) at "<eval_with_key>.2":35:13))
#loc51 = loc("<eval_with_key>.2":35:13)
#loc52 = loc(callsite(callsite("-":5066:13 at "-":6850:10) at "<eval_with_key>.2":36:21))
#loc53 = loc("<eval_with_key>.2":36:21)
#loc54 = loc(callsite(callsite("-":5234:15 at "-":6830:10) at "<eval_with_key>.2":37:15))
#loc55 = loc("<eval_with_key>.2":37:15)
#loc56 = loc(callsite(callsite("-":5066:13 at "-":6850:10) at "<eval_with_key>.2":38:23))
#loc57 = loc("<eval_with_key>.2":38:23)
#loc58 = loc("<eval_with_key>.2":39:10)
#loc59 = loc(callsite(callsite("-":2078:13 at "-":7159:10) at "<eval_with_key>.2":43:10))
#loc60 = loc("<eval_with_key>.2":40:19)
#loc61 = loc("<eval_with_key>.2":42:22)
#loc62 = loc("<eval_with_key>.2":43:10)
#loc63 = loc("<eval_with_key>.2":45:14)
#loc64 = loc("<eval_with_key>.2":46:14)
#loc65 = loc("<eval_with_key>.2":47:14)
#loc66 = loc("<eval_with_key>.2":48:14)
#loc67 = loc("<eval_with_key>.2":51:24)
#loc68 = loc("<eval_with_key>.2":52:12)
#loc69 = loc("<eval_with_key>.2":55:15)
#loc70 = loc(callsite(callsite("-":5066:13 at "-":6850:10) at "<eval_with_key>.2":56:23))
#loc71 = loc("<eval_with_key>.2":56:23)
#loc72 = loc("<eval_with_key>.2":57:15)
#loc73 = loc("<eval_with_key>.2":58:23)
#loc74 = loc("<eval_with_key>.2":59:12)
#loc75 = loc("<eval_with_key>.2":60:21)
#loc76 = loc("<eval_with_key>.2":61:16)
#loc77 = loc("<eval_with_key>.2":62:12)
#loc78 = loc(callsite(callsite("-":5066:13 at "-":6842:10) at "<eval_with_key>.2":63:13))
#loc79 = loc("<eval_with_key>.2":63:13)
#loc80 = loc("<eval_with_key>.2":64:13)
#loc81 = loc("<eval_with_key>.2":67:14)
#loc82 = loc("<eval_with_key>.2":68:13)
#loc83 = loc("<eval_with_key>.2":69:12)
#loc84 = loc("<eval_with_key>.2":72:26)
#loc85 = loc("<eval_with_key>.2":76:14)
#loc86 = loc("<eval_with_key>.2":79:14)
#loc87 = loc(callsite(callsite("-":5066:13 at "-":6842:10) at "<eval_with_key>.2":80:14))
#loc88 = loc("<eval_with_key>.2":80:14)
#loc89 = loc("<eval_with_key>.2":81:10)
#loc90 = loc("<eval_with_key>.2":82:12)
#loc91 = loc("<eval_with_key>.2":83:12)
#loc92 = loc("<eval_with_key>.2":84:12)
#loc93 = loc("<eval_with_key>.2":85:12)
#loc94 = loc("<eval_with_key>.2":86:11)
#loc95 = loc("<eval_with_key>.2":88:12)
#loc96 = loc("<eval_with_key>.2":89:12)
#loc97 = loc("<eval_with_key>.2":90:14)
#loc98 = loc("<eval_with_key>.2":93:14)
#loc99 = loc("<eval_with_key>.2":94:14)
#loc100 = loc("<eval_with_key>.2":95:12)
#loc101 = loc("<eval_with_key>.2":98:26)
#loc102 = loc("<eval_with_key>.2":102:14)
#loc103 = loc("<eval_with_key>.2":105:14)
#loc104 = loc("<eval_with_key>.2":106:14)
#loc105 = loc("<eval_with_key>.2":107:14)
#loc106 = loc("<eval_with_key>.2":108:14)
#loc107 = loc("<eval_with_key>.2":109:15)
#loc108 = loc("<eval_with_key>.2":110:14)
#loc109 = loc("<eval_with_key>.2":111:16)
#loc110 = loc("<eval_with_key>.2":112:14)
#loc111 = loc("<eval_with_key>.2":113:16)
#loc112 = loc("<eval_with_key>.2":114:14)
#loc113 = loc("<eval_with_key>.2":115:16)
#loc114 = loc("<eval_with_key>.2":116:18)
#loc115 = loc("<eval_with_key>.2":117:15)
#loc116 = loc("<eval_with_key>.2":118:23)
#loc117 = loc("<eval_with_key>.2":119:15)
#loc118 = loc("<eval_with_key>.2":120:23)
#loc119 = loc("<eval_with_key>.2":121:12)
#loc120 = loc("<eval_with_key>.2":122:21)
#loc121 = loc("<eval_with_key>.2":124:24)
#loc122 = loc("<eval_with_key>.2":125:12)
#loc123 = loc("<eval_with_key>.2":127:15)
#loc124 = loc("<eval_with_key>.2":128:15)
#loc125 = loc("<eval_with_key>.2":129:15)
#loc126 = loc("<eval_with_key>.2":130:15)
#loc127 = loc("<eval_with_key>.2":131:17)
#loc128 = loc("<eval_with_key>.2":133:24)
#loc129 = loc("<eval_with_key>.2":134:14)
#loc130 = loc("<eval_with_key>.2":135:17)
#loc131 = loc("<eval_with_key>.2":137:15)
#loc132 = loc("<eval_with_key>.2":138:23)
#loc133 = loc("<eval_with_key>.2":139:15)
#loc134 = loc("<eval_with_key>.2":140:23)
#loc135 = loc("<eval_with_key>.2":141:12)
#loc136 = loc("<eval_with_key>.2":142:21)
#loc137 = loc("<eval_with_key>.2":143:16)
#loc138 = loc("<eval_with_key>.2":144:14)
#loc139 = loc("<eval_with_key>.2":145:14)
#loc140 = loc("<eval_with_key>.2":146:14)
#loc141 = loc("<eval_with_key>.2":149:14)
#loc142 = loc("<eval_with_key>.2":150:14)
#loc143 = loc("<eval_with_key>.2":151:12)
#loc144 = loc("<eval_with_key>.2":154:26)
#loc145 = loc("<eval_with_key>.2":158:14)
#loc146 = loc("<eval_with_key>.2":161:14)
#loc147 = loc("<eval_with_key>.2":162:14)
#loc148 = loc("<eval_with_key>.2":163:12)
#loc149 = loc("<eval_with_key>.2":164:12)
#loc150 = loc("<eval_with_key>.2":165:12)
#loc151 = loc("<eval_with_key>.2":166:12)
#loc152 = loc("<eval_with_key>.2":167:12)
#loc153 = loc("<eval_with_key>.2":168:13)
#loc154 = loc("<eval_with_key>.2":170:12)
#loc155 = loc("<eval_with_key>.2":171:12)
#loc156 = loc("<eval_with_key>.2":172:14)
#loc157 = loc("<eval_with_key>.2":175:14)
#loc158 = loc("<eval_with_key>.2":176:14)
#loc159 = loc("<eval_with_key>.2":177:12)
#loc160 = loc("<eval_with_key>.2":180:26)
#loc161 = loc("<eval_with_key>.2":184:14)
#loc162 = loc("<eval_with_key>.2":187:14)
#loc163 = loc("<eval_with_key>.2":188:14)
#loc164 = loc("<eval_with_key>.2":189:15)
#loc165 = loc("<eval_with_key>.2":190:15)
#loc166 = loc("<eval_with_key>.2":191:15)
#loc167 = loc("<eval_with_key>.2":192:14)
#loc168 = loc("<eval_with_key>.2":193:16)
#loc169 = loc("<eval_with_key>.2":194:14)
#loc170 = loc("<eval_with_key>.2":195:16)
#loc171 = loc("<eval_with_key>.2":196:14)
#loc172 = loc("<eval_with_key>.2":197:17)
#loc173 = loc("<eval_with_key>.2":198:18)
#loc174 = loc("<eval_with_key>.2":199:15)
#loc175 = loc("<eval_with_key>.2":200:23)
#loc176 = loc("<eval_with_key>.2":201:15)
#loc177 = loc("<eval_with_key>.2":202:23)
#loc178 = loc("<eval_with_key>.2":203:12)
#loc179 = loc("<eval_with_key>.2":204:21)
#loc180 = loc("<eval_with_key>.2":206:24)
#loc181 = loc("<eval_with_key>.2":207:12)
#loc182 = loc("<eval_with_key>.2":209:15)
#loc183 = loc("<eval_with_key>.2":210:15)
#loc184 = loc("<eval_with_key>.2":211:15)
#loc185 = loc("<eval_with_key>.2":212:15)
#loc186 = loc("<eval_with_key>.2":213:17)
#loc187 = loc("<eval_with_key>.2":215:24)
#loc188 = loc("<eval_with_key>.2":216:14)
#loc189 = loc("<eval_with_key>.2":217:17)
#loc190 = loc("<eval_with_key>.2":219:16)
#loc191 = loc("<eval_with_key>.2":220:24)
#loc192 = loc("<eval_with_key>.2":221:16)
#loc193 = loc("<eval_with_key>.2":222:24)
#loc194 = loc("<eval_with_key>.2":223:12)
#loc195 = loc("<eval_with_key>.2":224:21)
#loc196 = loc("<eval_with_key>.2":225:17)
#loc197 = loc("<eval_with_key>.2":226:14)
#loc198 = loc("<eval_with_key>.2":227:14)
#loc199 = loc("<eval_with_key>.2":228:14)
#loc200 = loc("<eval_with_key>.2":231:14)
#loc201 = loc("<eval_with_key>.2":232:14)
#loc202 = loc("<eval_with_key>.2":233:12)
#loc203 = loc("<eval_with_key>.2":236:26)
#loc204 = loc("<eval_with_key>.2":240:14)
#loc205 = loc("<eval_with_key>.2":243:15)
#loc206 = loc("<eval_with_key>.2":244:14)
#loc207 = loc("<eval_with_key>.2":245:12)
#loc208 = loc("<eval_with_key>.2":246:12)
#loc209 = loc("<eval_with_key>.2":247:12)
#loc210 = loc("<eval_with_key>.2":248:13)
#loc211 = loc("<eval_with_key>.2":249:13)
#loc212 = loc("<eval_with_key>.2":250:13)
#loc213 = loc("<eval_with_key>.2":252:13)
#loc214 = loc("<eval_with_key>.2":253:13)
#loc215 = loc("<eval_with_key>.2":254:14)
#loc216 = loc("<eval_with_key>.2":257:15)
#loc217 = loc("<eval_with_key>.2":258:14)
#loc218 = loc("<eval_with_key>.2":259:13)
#loc219 = loc("<eval_with_key>.2":262:26)
#loc220 = loc("<eval_with_key>.2":266:14)
#loc221 = loc("<eval_with_key>.2":269:15)
#loc222 = loc("<eval_with_key>.2":270:14)
#loc223 = loc("<eval_with_key>.2":271:15)
#loc224 = loc("<eval_with_key>.2":272:15)
#loc225 = loc("<eval_with_key>.2":273:15)
#loc226 = loc("<eval_with_key>.2":274:14)
#loc227 = loc("<eval_with_key>.2":275:17)
#loc228 = loc("<eval_with_key>.2":276:14)
#loc229 = loc("<eval_with_key>.2":277:17)
#loc230 = loc("<eval_with_key>.2":278:14)
#loc231 = loc("<eval_with_key>.2":279:17)
#loc232 = loc("<eval_with_key>.2":280:18)
#loc233 = loc("<eval_with_key>.2":281:16)
#loc234 = loc("<eval_with_key>.2":282:24)
#loc235 = loc("<eval_with_key>.2":283:16)
#loc236 = loc("<eval_with_key>.2":284:24)
#loc237 = loc("<eval_with_key>.2":285:12)
#loc238 = loc("<eval_with_key>.2":286:21)
#loc239 = loc("<eval_with_key>.2":288:24)
#loc240 = loc("<eval_with_key>.2":289:12)
#loc241 = loc("<eval_with_key>.2":291:15)
#loc242 = loc("<eval_with_key>.2":292:15)
#loc243 = loc("<eval_with_key>.2":293:15)
#loc244 = loc("<eval_with_key>.2":294:15)
#loc245 = loc("<eval_with_key>.2":295:17)
#loc246 = loc("<eval_with_key>.2":297:24)
#loc247 = loc("<eval_with_key>.2":298:14)
#loc248 = loc("<eval_with_key>.2":299:17)
#loc249 = loc("<eval_with_key>.2":301:16)
#loc250 = loc("<eval_with_key>.2":302:24)
#loc251 = loc("<eval_with_key>.2":303:16)
#loc252 = loc("<eval_with_key>.2":304:24)
#loc253 = loc("<eval_with_key>.2":305:12)
#loc254 = loc("<eval_with_key>.2":306:21)
#loc255 = loc("<eval_with_key>.2":307:17)
#loc256 = loc("<eval_with_key>.2":308:14)
#loc257 = loc("<eval_with_key>.2":309:14)
#loc258 = loc("<eval_with_key>.2":310:14)
#loc259 = loc("<eval_with_key>.2":313:15)
#loc260 = loc("<eval_with_key>.2":314:14)
#loc261 = loc("<eval_with_key>.2":315:13)
#loc262 = loc("<eval_with_key>.2":318:26)
#loc263 = loc("<eval_with_key>.2":322:14)
#loc264 = loc("<eval_with_key>.2":325:15)
#loc265 = loc("<eval_with_key>.2":326:14)
#loc266 = loc("<eval_with_key>.2":327:13)
#loc267 = loc("<eval_with_key>.2":328:12)
#loc268 = loc("<eval_with_key>.2":329:13)
#loc269 = loc("<eval_with_key>.2":330:13)
#loc270 = loc("<eval_with_key>.2":331:13)
#loc271 = loc("<eval_with_key>.2":332:13)
#loc272 = loc("<eval_with_key>.2":334:13)
#loc273 = loc("<eval_with_key>.2":335:13)
#loc274 = loc("<eval_with_key>.2":336:14)
#loc275 = loc("<eval_with_key>.2":339:15)
#loc276 = loc("<eval_with_key>.2":340:14)
#loc277 = loc("<eval_with_key>.2":341:13)
#loc278 = loc("<eval_with_key>.2":344:26)
#loc279 = loc("<eval_with_key>.2":348:14)
#loc280 = loc("<eval_with_key>.2":351:15)
#loc281 = loc("<eval_with_key>.2":352:14)
#loc282 = loc("<eval_with_key>.2":353:15)
#loc283 = loc("<eval_with_key>.2":354:15)
#loc284 = loc("<eval_with_key>.2":355:15)
#loc285 = loc("<eval_with_key>.2":356:14)
#loc286 = loc("<eval_with_key>.2":357:17)
#loc287 = loc("<eval_with_key>.2":358:14)
#loc288 = loc("<eval_with_key>.2":359:17)
#loc289 = loc("<eval_with_key>.2":360:14)
#loc290 = loc("<eval_with_key>.2":361:17)
#loc291 = loc("<eval_with_key>.2":362:18)
#loc292 = loc("<eval_with_key>.2":363:16)
#loc293 = loc("<eval_with_key>.2":364:24)
#loc294 = loc("<eval_with_key>.2":365:16)
#loc295 = loc("<eval_with_key>.2":366:24)
#loc296 = loc("<eval_with_key>.2":367:12)
#loc297 = loc("<eval_with_key>.2":368:21)
#loc298 = loc("<eval_with_key>.2":370:24)
#loc299 = loc("<eval_with_key>.2":371:12)
#loc300 = loc("<eval_with_key>.2":373:15)
#loc301 = loc("<eval_with_key>.2":374:15)
#loc302 = loc("<eval_with_key>.2":375:15)
#loc303 = loc("<eval_with_key>.2":376:15)
#loc304 = loc("<eval_with_key>.2":377:17)
#loc305 = loc("<eval_with_key>.2":379:24)
#loc306 = loc("<eval_with_key>.2":380:14)
#loc307 = loc("<eval_with_key>.2":381:17)
#loc308 = loc("<eval_with_key>.2":383:16)
#loc309 = loc("<eval_with_key>.2":384:24)
#loc310 = loc("<eval_with_key>.2":385:16)
#loc311 = loc("<eval_with_key>.2":386:24)
#loc312 = loc("<eval_with_key>.2":387:12)
#loc313 = loc("<eval_with_key>.2":388:21)
#loc314 = loc("<eval_with_key>.2":389:17)
#loc315 = loc("<eval_with_key>.2":390:14)
#loc316 = loc("<eval_with_key>.2":391:14)
#loc317 = loc("<eval_with_key>.2":392:14)
#loc318 = loc("<eval_with_key>.2":395:15)
#loc319 = loc("<eval_with_key>.2":396:14)
#loc320 = loc("<eval_with_key>.2":397:13)
#loc321 = loc("<eval_with_key>.2":400:26)
#loc322 = loc("<eval_with_key>.2":404:14)
#loc323 = loc("<eval_with_key>.2":407:15)
#loc324 = loc("<eval_with_key>.2":408:14)
#loc325 = loc("<eval_with_key>.2":409:13)
#loc326 = loc("<eval_with_key>.2":410:12)
#loc327 = loc("<eval_with_key>.2":411:13)
#loc328 = loc("<eval_with_key>.2":412:13)
#loc329 = loc("<eval_with_key>.2":413:13)
#loc330 = loc("<eval_with_key>.2":414:13)
#loc331 = loc("<eval_with_key>.2":416:13)
#loc332 = loc("<eval_with_key>.2":417:13)
#loc333 = loc("<eval_with_key>.2":418:14)
#loc334 = loc("<eval_with_key>.2":421:15)
#loc335 = loc("<eval_with_key>.2":422:14)
#loc336 = loc("<eval_with_key>.2":423:13)
#loc337 = loc("<eval_with_key>.2":426:27)
#loc338 = loc("<eval_with_key>.2":430:14)
#loc339 = loc("<eval_with_key>.2":433:15)
#loc340 = loc("<eval_with_key>.2":434:14)
#loc341 = loc("<eval_with_key>.2":435:15)
#loc342 = loc("<eval_with_key>.2":436:15)
#loc343 = loc("<eval_with_key>.2":437:15)
#loc344 = loc("<eval_with_key>.2":438:14)
#loc345 = loc("<eval_with_key>.2":439:17)
#loc346 = loc("<eval_with_key>.2":440:14)
#loc347 = loc("<eval_with_key>.2":441:17)
#loc348 = loc("<eval_with_key>.2":442:14)
#loc349 = loc("<eval_with_key>.2":443:17)
#loc350 = loc("<eval_with_key>.2":444:18)
#loc351 = loc("<eval_with_key>.2":445:16)
#loc352 = loc("<eval_with_key>.2":446:24)
#loc353 = loc("<eval_with_key>.2":447:16)
#loc354 = loc("<eval_with_key>.2":448:24)
#loc355 = loc("<eval_with_key>.2":449:13)
#loc356 = loc("<eval_with_key>.2":450:22)
#loc357 = loc("<eval_with_key>.2":452:25)
#loc358 = loc("<eval_with_key>.2":453:12)
#loc359 = loc("<eval_with_key>.2":455:15)
#loc360 = loc("<eval_with_key>.2":456:15)
#loc361 = loc("<eval_with_key>.2":457:15)
#loc362 = loc("<eval_with_key>.2":458:15)
#loc363 = loc("<eval_with_key>.2":459:17)
#loc364 = loc("<eval_with_key>.2":461:25)
#loc365 = loc("<eval_with_key>.2":462:14)
#loc366 = loc("<eval_with_key>.2":463:17)
#loc367 = loc("<eval_with_key>.2":465:16)
#loc368 = loc("<eval_with_key>.2":466:24)
#loc369 = loc("<eval_with_key>.2":467:16)
#loc370 = loc("<eval_with_key>.2":468:24)
#loc371 = loc("<eval_with_key>.2":469:13)
#loc372 = loc("<eval_with_key>.2":470:22)
#loc373 = loc("<eval_with_key>.2":471:17)
#loc374 = loc("<eval_with_key>.2":472:14)
#loc375 = loc("<eval_with_key>.2":473:14)
#loc376 = loc("<eval_with_key>.2":474:14)
#loc377 = loc("<eval_with_key>.2":477:15)
#loc378 = loc("<eval_with_key>.2":478:14)
#loc379 = loc("<eval_with_key>.2":479:13)
#loc380 = loc("<eval_with_key>.2":482:27)
#loc381 = loc("<eval_with_key>.2":486:14)
#loc382 = loc("<eval_with_key>.2":489:15)
#loc383 = loc("<eval_with_key>.2":490:14)
#loc384 = loc("<eval_with_key>.2":491:13)
#loc385 = loc("<eval_with_key>.2":492:12)
#loc386 = loc("<eval_with_key>.2":493:13)
#loc387 = loc("<eval_with_key>.2":494:13)
#loc388 = loc("<eval_with_key>.2":495:13)
#loc389 = loc("<eval_with_key>.2":496:13)
#loc390 = loc("<eval_with_key>.2":498:13)
#loc391 = loc("<eval_with_key>.2":499:13)
#loc392 = loc("<eval_with_key>.2":500:14)
#loc393 = loc("<eval_with_key>.2":503:15)
#loc394 = loc("<eval_with_key>.2":504:14)
#loc395 = loc("<eval_with_key>.2":505:13)
#loc396 = loc("<eval_with_key>.2":508:27)
#loc397 = loc("<eval_with_key>.2":512:14)
#loc398 = loc("<eval_with_key>.2":515:15)
#loc399 = loc("<eval_with_key>.2":516:14)
#loc400 = loc("<eval_with_key>.2":517:15)
#loc401 = loc("<eval_with_key>.2":518:15)
#loc402 = loc("<eval_with_key>.2":519:15)
#loc403 = loc("<eval_with_key>.2":520:14)
#loc404 = loc("<eval_with_key>.2":521:17)
#loc405 = loc("<eval_with_key>.2":522:14)
#loc406 = loc("<eval_with_key>.2":523:17)
#loc407 = loc("<eval_with_key>.2":524:14)
#loc408 = loc("<eval_with_key>.2":525:17)
#loc409 = loc("<eval_with_key>.2":526:18)
#loc410 = loc("<eval_with_key>.2":527:16)
#loc411 = loc("<eval_with_key>.2":528:24)
#loc412 = loc("<eval_with_key>.2":529:16)
#loc413 = loc("<eval_with_key>.2":530:24)
#loc414 = loc("<eval_with_key>.2":531:13)
#loc415 = loc("<eval_with_key>.2":532:22)
#loc416 = loc("<eval_with_key>.2":534:25)
#loc417 = loc("<eval_with_key>.2":535:12)
#loc418 = loc("<eval_with_key>.2":537:15)
#loc419 = loc("<eval_with_key>.2":538:15)
#loc420 = loc("<eval_with_key>.2":539:15)
#loc421 = loc("<eval_with_key>.2":540:15)
#loc422 = loc("<eval_with_key>.2":541:17)
#loc423 = loc("<eval_with_key>.2":543:25)
#loc424 = loc("<eval_with_key>.2":544:14)
#loc425 = loc("<eval_with_key>.2":545:17)
#loc426 = loc("<eval_with_key>.2":547:16)
#loc427 = loc("<eval_with_key>.2":548:24)
#loc428 = loc("<eval_with_key>.2":549:16)
#loc429 = loc("<eval_with_key>.2":550:24)
#loc430 = loc("<eval_with_key>.2":551:13)
#loc431 = loc("<eval_with_key>.2":552:22)
#loc432 = loc("<eval_with_key>.2":553:17)
#loc433 = loc("<eval_with_key>.2":554:14)
#loc434 = loc("<eval_with_key>.2":555:14)
#loc435 = loc("<eval_with_key>.2":556:14)
#loc436 = loc("<eval_with_key>.2":559:15)
#loc437 = loc("<eval_with_key>.2":560:14)
#loc438 = loc("<eval_with_key>.2":561:13)
#loc439 = loc("<eval_with_key>.2":564:27)
#loc440 = loc("<eval_with_key>.2":568:14)
#loc441 = loc("<eval_with_key>.2":571:15)
#loc442 = loc("<eval_with_key>.2":572:14)
#loc443 = loc("<eval_with_key>.2":573:13)
#loc444 = loc("<eval_with_key>.2":574:12)
#loc445 = loc("<eval_with_key>.2":575:13)
#loc446 = loc("<eval_with_key>.2":576:13)
#loc447 = loc("<eval_with_key>.2":577:13)
#loc448 = loc("<eval_with_key>.2":578:13)
#loc449 = loc("<eval_with_key>.2":580:13)
#loc450 = loc("<eval_with_key>.2":581:13)
#loc451 = loc("<eval_with_key>.2":582:14)
#loc452 = loc("<eval_with_key>.2":585:15)
#loc453 = loc("<eval_with_key>.2":586:14)
#loc454 = loc("<eval_with_key>.2":587:13)
#loc455 = loc("<eval_with_key>.2":590:27)
#loc456 = loc("<eval_with_key>.2":594:14)
#loc457 = loc("<eval_with_key>.2":597:15)
#loc458 = loc("<eval_with_key>.2":598:14)
#loc459 = loc("<eval_with_key>.2":599:15)
#loc460 = loc("<eval_with_key>.2":600:15)
#loc461 = loc("<eval_with_key>.2":601:15)
#loc462 = loc("<eval_with_key>.2":602:14)
#loc463 = loc("<eval_with_key>.2":603:17)
#loc464 = loc("<eval_with_key>.2":604:14)
#loc465 = loc("<eval_with_key>.2":605:17)
#loc466 = loc("<eval_with_key>.2":606:14)
#loc467 = loc("<eval_with_key>.2":607:17)
#loc468 = loc("<eval_with_key>.2":608:18)
#loc469 = loc("<eval_with_key>.2":609:16)
#loc470 = loc("<eval_with_key>.2":610:24)
#loc471 = loc("<eval_with_key>.2":611:16)
#loc472 = loc("<eval_with_key>.2":612:24)
#loc473 = loc("<eval_with_key>.2":613:13)
#loc474 = loc("<eval_with_key>.2":614:22)
#loc475 = loc("<eval_with_key>.2":616:25)
#loc476 = loc("<eval_with_key>.2":617:12)
#loc477 = loc("<eval_with_key>.2":619:15)
#loc478 = loc("<eval_with_key>.2":620:15)
#loc479 = loc("<eval_with_key>.2":621:15)
#loc480 = loc("<eval_with_key>.2":622:15)
#loc481 = loc("<eval_with_key>.2":623:17)
#loc482 = loc("<eval_with_key>.2":625:25)
#loc483 = loc("<eval_with_key>.2":626:14)
#loc484 = loc("<eval_with_key>.2":627:17)
#loc485 = loc("<eval_with_key>.2":629:16)
#loc486 = loc("<eval_with_key>.2":630:24)
#loc487 = loc("<eval_with_key>.2":631:16)
#loc488 = loc("<eval_with_key>.2":632:24)
#loc489 = loc("<eval_with_key>.2":633:13)
#loc490 = loc("<eval_with_key>.2":634:22)
#loc491 = loc("<eval_with_key>.2":635:17)
#loc492 = loc("<eval_with_key>.2":636:14)
#loc493 = loc("<eval_with_key>.2":637:14)
#loc494 = loc("<eval_with_key>.2":638:14)
#loc495 = loc("<eval_with_key>.2":641:15)
#loc496 = loc("<eval_with_key>.2":642:14)
#loc497 = loc("<eval_with_key>.2":643:13)
#loc498 = loc("<eval_with_key>.2":646:27)
#loc499 = loc("<eval_with_key>.2":650:14)
#loc500 = loc("<eval_with_key>.2":653:15)
#loc501 = loc("<eval_with_key>.2":654:14)
#loc502 = loc("<eval_with_key>.2":655:13)
#loc503 = loc("<eval_with_key>.2":656:12)
#loc504 = loc("<eval_with_key>.2":657:13)
#loc505 = loc("<eval_with_key>.2":658:13)
#loc506 = loc("<eval_with_key>.2":659:13)
#loc507 = loc("<eval_with_key>.2":660:13)
#loc508 = loc("<eval_with_key>.2":662:13)
#loc509 = loc("<eval_with_key>.2":663:13)
#loc510 = loc("<eval_with_key>.2":664:14)
#loc511 = loc("<eval_with_key>.2":667:15)
#loc512 = loc("<eval_with_key>.2":668:14)
#loc513 = loc("<eval_with_key>.2":669:13)
#loc514 = loc("<eval_with_key>.2":672:27)
#loc515 = loc("<eval_with_key>.2":676:14)
#loc516 = loc("<eval_with_key>.2":679:15)
#loc517 = loc("<eval_with_key>.2":680:14)
#loc518 = loc("<eval_with_key>.2":681:15)
#loc519 = loc("<eval_with_key>.2":682:15)
#loc520 = loc("<eval_with_key>.2":683:15)
#loc521 = loc("<eval_with_key>.2":684:15)
#loc522 = loc("<eval_with_key>.2":685:17)
#loc523 = loc("<eval_with_key>.2":686:15)
#loc524 = loc("<eval_with_key>.2":687:17)
#loc525 = loc("<eval_with_key>.2":688:15)
#loc526 = loc("<eval_with_key>.2":689:17)
#loc527 = loc("<eval_with_key>.2":690:18)
#loc528 = loc("<eval_with_key>.2":691:16)
#loc529 = loc("<eval_with_key>.2":692:24)
#loc530 = loc("<eval_with_key>.2":693:16)
#loc531 = loc("<eval_with_key>.2":694:24)
#loc532 = loc("<eval_with_key>.2":695:13)
#loc533 = loc("<eval_with_key>.2":696:22)
#loc534 = loc("<eval_with_key>.2":698:25)
#loc535 = loc("<eval_with_key>.2":699:12)
#loc536 = loc("<eval_with_key>.2":701:15)
#loc537 = loc("<eval_with_key>.2":702:15)
#loc538 = loc("<eval_with_key>.2":703:15)
#loc539 = loc("<eval_with_key>.2":704:15)
#loc540 = loc("<eval_with_key>.2":705:17)
#loc541 = loc("<eval_with_key>.2":707:25)
#loc542 = loc("<eval_with_key>.2":708:14)
#loc543 = loc("<eval_with_key>.2":709:17)
#loc544 = loc("<eval_with_key>.2":711:16)
#loc545 = loc("<eval_with_key>.2":712:24)
#loc546 = loc("<eval_with_key>.2":713:16)
#loc547 = loc("<eval_with_key>.2":714:24)
#loc548 = loc("<eval_with_key>.2":715:13)
#loc549 = loc("<eval_with_key>.2":716:22)
#loc550 = loc("<eval_with_key>.2":717:17)
#loc551 = loc("<eval_with_key>.2":718:14)
#loc552 = loc("<eval_with_key>.2":719:15)
#loc553 = loc("<eval_with_key>.2":720:15)
#loc554 = loc("<eval_with_key>.2":723:15)
#loc555 = loc("<eval_with_key>.2":724:15)
#loc556 = loc("<eval_with_key>.2":725:13)
#loc557 = loc("<eval_with_key>.2":728:27)
#loc558 = loc("<eval_with_key>.2":732:15)
#loc559 = loc("<eval_with_key>.2":735:15)
#loc560 = loc("<eval_with_key>.2":736:15)
#loc561 = loc("<eval_with_key>.2":737:13)
#loc562 = loc("<eval_with_key>.2":738:12)
#loc563 = loc("<eval_with_key>.2":739:13)
#loc564 = loc("<eval_with_key>.2":740:13)
#loc565 = loc("<eval_with_key>.2":741:13)
#loc566 = loc("<eval_with_key>.2":742:13)
#loc567 = loc("<eval_with_key>.2":744:13)
#loc568 = loc("<eval_with_key>.2":745:13)
#loc569 = loc("<eval_with_key>.2":746:15)
#loc570 = loc("<eval_with_key>.2":749:15)
#loc571 = loc("<eval_with_key>.2":750:15)
#loc572 = loc("<eval_with_key>.2":751:13)
#loc573 = loc("<eval_with_key>.2":754:27)
#loc574 = loc("<eval_with_key>.2":758:15)
#loc575 = loc("<eval_with_key>.2":761:15)
#loc576 = loc("<eval_with_key>.2":762:15)
#loc577 = loc("<eval_with_key>.2":763:15)
#loc578 = loc("<eval_with_key>.2":764:15)
#loc579 = loc("<eval_with_key>.2":765:15)
#loc580 = loc("<eval_with_key>.2":766:15)
#loc581 = loc("<eval_with_key>.2":767:17)
#loc582 = loc("<eval_with_key>.2":768:15)
#loc583 = loc("<eval_with_key>.2":769:17)
#loc584 = loc("<eval_with_key>.2":770:15)
#loc585 = loc("<eval_with_key>.2":771:17)
#loc586 = loc("<eval_with_key>.2":772:18)
#loc587 = loc("<eval_with_key>.2":773:16)
#loc588 = loc("<eval_with_key>.2":774:24)
#loc589 = loc("<eval_with_key>.2":775:16)
#loc590 = loc("<eval_with_key>.2":776:24)
#loc591 = loc("<eval_with_key>.2":777:13)
#loc592 = loc("<eval_with_key>.2":778:22)
#loc593 = loc("<eval_with_key>.2":780:25)
#loc594 = loc("<eval_with_key>.2":781:12)
#loc595 = loc("<eval_with_key>.2":783:15)
#loc596 = loc("<eval_with_key>.2":784:15)
#loc597 = loc("<eval_with_key>.2":785:15)
#loc598 = loc("<eval_with_key>.2":786:15)
#loc599 = loc("<eval_with_key>.2":787:17)
#loc600 = loc("<eval_with_key>.2":789:25)
#loc601 = loc("<eval_with_key>.2":790:14)
#loc602 = loc("<eval_with_key>.2":791:17)
#loc603 = loc("<eval_with_key>.2":793:16)
#loc604 = loc("<eval_with_key>.2":794:24)
#loc605 = loc("<eval_with_key>.2":795:16)
#loc606 = loc("<eval_with_key>.2":796:24)
#loc607 = loc("<eval_with_key>.2":797:13)
#loc608 = loc("<eval_with_key>.2":798:22)
#loc609 = loc("<eval_with_key>.2":799:17)
#loc610 = loc("<eval_with_key>.2":800:14)
#loc611 = loc("<eval_with_key>.2":801:15)
#loc612 = loc("<eval_with_key>.2":802:15)
#loc613 = loc("<eval_with_key>.2":805:15)
#loc614 = loc("<eval_with_key>.2":806:15)
#loc615 = loc("<eval_with_key>.2":807:13)
#loc616 = loc("<eval_with_key>.2":810:27)
#loc617 = loc("<eval_with_key>.2":814:15)
#loc618 = loc("<eval_with_key>.2":817:15)
#loc619 = loc("<eval_with_key>.2":818:15)
#loc620 = loc("<eval_with_key>.2":819:13)
#loc621 = loc("<eval_with_key>.2":820:13)
#loc622 = loc("<eval_with_key>.2":821:13)
#loc623 = loc("<eval_with_key>.2":822:13)
#loc624 = loc("<eval_with_key>.2":823:13)
#loc625 = loc("<eval_with_key>.2":824:13)
#loc626 = loc("<eval_with_key>.2":826:13)
#loc627 = loc("<eval_with_key>.2":827:13)
#loc628 = loc("<eval_with_key>.2":828:15)
#loc629 = loc("<eval_with_key>.2":831:15)
#loc630 = loc("<eval_with_key>.2":832:15)
#loc631 = loc("<eval_with_key>.2":833:13)
#loc632 = loc("<eval_with_key>.2":836:27)
#loc633 = loc("<eval_with_key>.2":840:15)
#loc634 = loc("<eval_with_key>.2":843:15)
#loc635 = loc("<eval_with_key>.2":844:15)
#loc636 = loc("<eval_with_key>.2":845:15)
#loc637 = loc("<eval_with_key>.2":846:15)
#loc638 = loc("<eval_with_key>.2":847:15)
#loc639 = loc("<eval_with_key>.2":848:15)
#loc640 = loc("<eval_with_key>.2":849:17)
#loc641 = loc("<eval_with_key>.2":850:15)
#loc642 = loc("<eval_with_key>.2":851:17)
#loc643 = loc("<eval_with_key>.2":852:15)
#loc644 = loc("<eval_with_key>.2":853:17)
#loc645 = loc("<eval_with_key>.2":854:19)
#loc646 = loc("<eval_with_key>.2":855:16)
#loc647 = loc("<eval_with_key>.2":856:24)
#loc648 = loc("<eval_with_key>.2":857:16)
#loc649 = loc("<eval_with_key>.2":858:24)
#loc650 = loc("<eval_with_key>.2":859:13)
#loc651 = loc("<eval_with_key>.2":860:22)
#loc652 = loc("<eval_with_key>.2":862:25)
#loc653 = loc("<eval_with_key>.2":863:13)
#loc654 = loc("<eval_with_key>.2":865:15)
#loc655 = loc("<eval_with_key>.2":866:15)
#loc656 = loc("<eval_with_key>.2":867:15)
#loc657 = loc("<eval_with_key>.2":868:15)
#loc658 = loc("<eval_with_key>.2":869:18)
#loc659 = loc("<eval_with_key>.2":871:25)
#loc660 = loc("<eval_with_key>.2":872:15)
#loc661 = loc("<eval_with_key>.2":873:18)
#loc662 = loc("<eval_with_key>.2":875:16)
#loc663 = loc("<eval_with_key>.2":876:24)
#loc664 = loc("<eval_with_key>.2":877:16)
#loc665 = loc("<eval_with_key>.2":878:24)
#loc666 = loc("<eval_with_key>.2":879:13)
#loc667 = loc("<eval_with_key>.2":880:22)
#loc668 = loc("<eval_with_key>.2":881:17)
#loc669 = loc("<eval_with_key>.2":882:15)
#loc670 = loc("<eval_with_key>.2":883:15)
#loc671 = loc("<eval_with_key>.2":884:15)
#loc672 = loc("<eval_with_key>.2":887:15)
#loc673 = loc("<eval_with_key>.2":888:15)
#loc674 = loc("<eval_with_key>.2":889:13)
#loc675 = loc("<eval_with_key>.2":892:27)
#loc676 = loc("<eval_with_key>.2":896:15)
#loc677 = loc("<eval_with_key>.2":899:15)
#loc678 = loc("<eval_with_key>.2":900:15)
#loc679 = loc("<eval_with_key>.2":901:13)
#loc680 = loc("<eval_with_key>.2":902:13)
#loc681 = loc("<eval_with_key>.2":903:13)
#loc682 = loc("<eval_with_key>.2":904:13)
#loc683 = loc("<eval_with_key>.2":905:13)
#loc684 = loc("<eval_with_key>.2":906:14)
#loc685 = loc("<eval_with_key>.2":908:13)
#loc686 = loc("<eval_with_key>.2":909:13)
#loc687 = loc("<eval_with_key>.2":910:15)
#loc688 = loc("<eval_with_key>.2":913:15)
#loc689 = loc("<eval_with_key>.2":914:15)
#loc690 = loc("<eval_with_key>.2":915:13)
#loc691 = loc("<eval_with_key>.2":918:27)
#loc692 = loc("<eval_with_key>.2":922:15)
#loc693 = loc("<eval_with_key>.2":925:15)
#loc694 = loc("<eval_with_key>.2":926:15)
#loc695 = loc("<eval_with_key>.2":927:15)
#loc696 = loc("<eval_with_key>.2":928:15)
#loc697 = loc("<eval_with_key>.2":929:15)
#loc698 = loc("<eval_with_key>.2":930:15)
#loc699 = loc("<eval_with_key>.2":931:17)
#loc700 = loc("<eval_with_key>.2":932:15)
#loc701 = loc("<eval_with_key>.2":933:17)
#loc702 = loc("<eval_with_key>.2":934:15)
#loc703 = loc("<eval_with_key>.2":935:17)
#loc704 = loc("<eval_with_key>.2":936:19)
#loc705 = loc("<eval_with_key>.2":937:16)
#loc706 = loc("<eval_with_key>.2":938:24)
#loc707 = loc("<eval_with_key>.2":939:16)
#loc708 = loc("<eval_with_key>.2":940:24)
#loc709 = loc("<eval_with_key>.2":941:13)
#loc710 = loc("<eval_with_key>.2":942:22)
#loc711 = loc("<eval_with_key>.2":944:25)
#loc712 = loc("<eval_with_key>.2":945:13)
#loc713 = loc("<eval_with_key>.2":947:15)
#loc714 = loc("<eval_with_key>.2":948:15)
#loc715 = loc("<eval_with_key>.2":949:15)
#loc716 = loc("<eval_with_key>.2":950:15)
#loc717 = loc("<eval_with_key>.2":951:18)
#loc718 = loc("<eval_with_key>.2":953:25)
#loc719 = loc("<eval_with_key>.2":954:15)
#loc720 = loc("<eval_with_key>.2":955:18)
#loc721 = loc("<eval_with_key>.2":957:16)
#loc722 = loc("<eval_with_key>.2":958:24)
#loc723 = loc("<eval_with_key>.2":959:16)
#loc724 = loc("<eval_with_key>.2":960:24)
#loc725 = loc("<eval_with_key>.2":961:13)
#loc726 = loc("<eval_with_key>.2":962:22)
#loc727 = loc("<eval_with_key>.2":963:17)
#loc728 = loc("<eval_with_key>.2":964:15)
#loc729 = loc("<eval_with_key>.2":965:15)
#loc730 = loc("<eval_with_key>.2":966:15)
#loc731 = loc("<eval_with_key>.2":969:15)
#loc732 = loc("<eval_with_key>.2":970:15)
#loc733 = loc("<eval_with_key>.2":971:13)
#loc734 = loc("<eval_with_key>.2":974:27)
#loc735 = loc("<eval_with_key>.2":978:15)
#loc736 = loc("<eval_with_key>.2":981:15)
#loc737 = loc("<eval_with_key>.2":982:15)
#loc738 = loc("<eval_with_key>.2":983:13)
#loc739 = loc("<eval_with_key>.2":984:13)
#loc740 = loc("<eval_with_key>.2":985:13)
#loc741 = loc("<eval_with_key>.2":986:13)
#loc742 = loc("<eval_with_key>.2":987:13)
#loc743 = loc("<eval_with_key>.2":988:14)
#loc744 = loc("<eval_with_key>.2":990:13)
#loc745 = loc("<eval_with_key>.2":991:13)
#loc746 = loc("<eval_with_key>.2":992:15)
#loc747 = loc("<eval_with_key>.2":995:15)
#loc748 = loc("<eval_with_key>.2":996:15)
#loc749 = loc("<eval_with_key>.2":997:13)
#loc750 = loc("<eval_with_key>.2":1000:27)
#loc751 = loc("<eval_with_key>.2":1004:15)
#loc752 = loc("<eval_with_key>.2":1006:8)
#loc753 = loc(callsite(callsite("-":5066:13 at "-":6850:10) at "<eval_with_key>.2":1007:24))
#loc754 = loc("<eval_with_key>.2":1007:24)
#loc755 = loc("<eval_with_key>.2":1008:9)
#loc756 = loc("<eval_with_key>.2":1009:22)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment