Created
January 24, 2023 21:05
-
-
Save AmosLewis/3faccdf32c91d30f21daed12bf7197b4 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#loc = loc(unknown) | |
module attributes {torch.debug_module_name = "_lambda"} { | |
func.func @forward(%arg0: !torch.vtensor<[1,128],si64> loc(unknown)) -> !torch.vtensor<[1,2],f32> { | |
%float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc1) | |
%int0 = torch.constant.int 0 loc(#loc2) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc4) | |
%true = torch.constant.bool true loc(#loc5) | |
%none = torch.constant.none loc(#loc) | |
%false = torch.constant.bool false loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc6) | |
%int128 = torch.constant.int 128 loc(#loc7) | |
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2x768xf32>) : !torch.vtensor<[2,768],f32> loc(#loc) | |
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%131 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%132 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%133 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%134 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%135 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%136 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%137 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%138 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%139 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%140 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%141 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%142 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%143 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%144 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%145 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%146 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%147 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%148 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%149 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%150 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%151 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%152 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%153 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%154 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%155 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%156 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%157 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%158 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%159 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%160 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%161 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%162 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%163 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%164 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%165 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%166 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%167 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%168 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%169 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%170 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%171 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%172 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%173 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%174 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%175 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%176 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%177 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%178 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%179 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%180 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc) | |
%181 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc) | |
%182 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc) | |
%183 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%184 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%185 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%186 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%187 = torch.vtensor.literal(dense<-3.40282347E+38> : tensor<f32>) : !torch.vtensor<[],f32> loc(#loc) | |
%188 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%189 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%190 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%191 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%192 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%193 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc) | |
%194 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%195 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc) | |
%196 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2050x768xf32>) : !torch.vtensor<[2050,768],f32> loc(#loc) | |
%197 = torch.vtensor.literal(dense_resource<__elided__> : tensor<50272x768xf32>) : !torch.vtensor<[50272,768],f32> loc(#loc) | |
%int4 = torch.constant.int 4 loc(#loc8) | |
%float-3.402820e38 = torch.constant.float -3.4028234663852886E+38 loc(#loc9) | |
%int11 = torch.constant.int 11 loc(#loc10) | |
%int2 = torch.constant.int 2 loc(#loc11) | |
%int768 = torch.constant.int 768 loc(#loc12) | |
%float1.000000e-05 = torch.constant.float 1.000000e-05 loc(#loc13) | |
%float1.250000e-01 = torch.constant.float 1.250000e-01 loc(#loc14) | |
%int12 = torch.constant.int 12 loc(#loc15) | |
%int64 = torch.constant.int 64 loc(#loc16) | |
%cpu = torch.constant.device "cpu" loc(#loc) | |
%198 = torch.prim.ListConstruct %int-1, %int128 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%199 = torch.aten.view %arg0, %198 : !torch.vtensor<[1,128],si64>, !torch.list<int> -> !torch.vtensor<[1,128],si64> loc(#loc17) | |
%200 = torch.aten.embedding %197, %199, %int1, %false, %false : !torch.vtensor<[50272,768],f32>, !torch.vtensor<[1,128],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[1,128,768],f32> loc(#loc18) | |
%201 = torch.prim.ListConstruct %int1, %int128 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc752) | |
%202 = torch.aten.ones %201, %int11, %none, %cpu, %false : !torch.list<int>, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[1,128],i1> loc(#loc22) | |
%203 = torch.aten.to.dtype %202, %int4, %false, %false, %none : !torch.vtensor<[1,128],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,128],si64> loc(#loc21) | |
%204 = torch.aten.cumsum %203, %int1, %none : !torch.vtensor<[1,128],si64>, !torch.int, !torch.none -> !torch.vtensor<[1,128],si64> loc(#loc23) | |
%205 = torch.aten.mul.Tensor %204, %203 : !torch.vtensor<[1,128],si64>, !torch.vtensor<[1,128],si64> -> !torch.vtensor<[1,128],si64> loc(#loc24) | |
%206 = torch.aten.sub.Scalar %205, %int1, %int1 : !torch.vtensor<[1,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,128],si64> loc(#loc25) | |
%207 = torch.aten.add.Scalar %206, %int2, %int1 : !torch.vtensor<[1,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,128],si64> loc(#loc26) | |
%208 = torch.aten.embedding %196, %207, %int-1, %false, %false : !torch.vtensor<[2050,768],f32>, !torch.vtensor<[1,128],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[1,128,768],f32> loc(#loc27) | |
%209 = torch.prim.ListConstruct %int128, %int128 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc753) | |
%210 = torch.prim.NumToTensor.Scalar %float-3.402820e38 : !torch.float -> !torch.vtensor<[],f64> loc(#loc31) | |
%211 = torch.aten.to.dtype %210, %int6, %false, %false, %none : !torch.vtensor<[],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32> loc(#loc31) | |
%212 = torch.aten.broadcast_to %211, %209 : !torch.vtensor<[],f32>, !torch.list<int> -> !torch.vtensor<[128,128],f32> loc(#loc31) | |
%213 = torch.aten.arange.start_step %int0, %int128, %int1, %none, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[128],si64> loc(#loc32) | |
%214 = torch.aten.add.Scalar %213, %int1, %int1 : !torch.vtensor<[128],si64>, !torch.int, !torch.int -> !torch.vtensor<[128],si64> loc(#loc33) | |
%215 = torch.prim.ListConstruct %int128, %int1 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc754) | |
%216 = torch.aten.view %214, %215 : !torch.vtensor<[128],si64>, !torch.list<int> -> !torch.vtensor<[128,1],si64> loc(#loc36) | |
%217 = torch.aten.lt.Tensor %213, %216 : !torch.vtensor<[128],si64>, !torch.vtensor<[128,1],si64> -> !torch.vtensor<[128,128],i1> loc(#loc30) | |
%218 = torch.aten.masked_fill.Scalar %212, %217, %int0 : !torch.vtensor<[128,128],f32>, !torch.vtensor<[128,128],i1>, !torch.int -> !torch.vtensor<[128,128],f32> loc(#loc37) | |
%219 = torch.aten.unsqueeze %218, %int0 : !torch.vtensor<[128,128],f32>, !torch.int -> !torch.vtensor<[1,128,128],f32> loc(#loc38) | |
%220 = torch.aten.unsqueeze %219, %int1 : !torch.vtensor<[1,128,128],f32>, !torch.int -> !torch.vtensor<[1,1,128,128],f32> loc(#loc39) | |
%221 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc755) | |
%222 = torch.aten.broadcast_to %220, %221 : !torch.vtensor<[1,1,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],f32> loc(#loc42) | |
%223 = torch.aten.unsqueeze %202, %int1 : !torch.vtensor<[1,128],i1>, !torch.int -> !torch.vtensor<[1,1,128],i1> loc(#loc43) | |
%224 = torch.aten.unsqueeze %223, %int2 : !torch.vtensor<[1,1,128],i1>, !torch.int -> !torch.vtensor<[1,1,1,128],i1> loc(#loc44) | |
%225 = torch.aten.broadcast_to %224, %221 : !torch.vtensor<[1,1,1,128],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc45) | |
%226 = torch.aten.to.dtype %225, %int6, %false, %false, %none : !torch.vtensor<[1,1,128,128],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,1,128,128],f32> loc(#loc46) | |
%227 = torch.aten.rsub.Scalar %226, %float1.000000e00, %int1 : !torch.vtensor<[1,1,128,128],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,1,128,128],f32> loc(#loc47) | |
%228 = torch.aten.to.dtype %227, %int11, %false, %false, %none : !torch.vtensor<[1,1,128,128],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,1,128,128],i1> loc(#loc48) | |
%229 = torch.aten.masked_fill.Scalar %227, %228, %float-3.402820e38 : !torch.vtensor<[1,1,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.float -> !torch.vtensor<[1,1,128,128],f32> loc(#loc49) | |
%230 = torch.aten.add.Tensor %229, %222, %int1 : !torch.vtensor<[1,1,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,1,128,128],f32> loc(#loc50) | |
%231 = torch.aten.add.Tensor %200, %208, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc51) | |
%232 = torch.prim.ListConstruct %int768 : (!torch.int) -> !torch.list<int> loc(#loc756) | |
%result0, %result1, %result2 = torch.aten.native_layer_norm %231, %232, %195, %194, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc54) | |
%233 = torch.aten.transpose.int %193, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc55) | |
%234 = torch.prim.ListConstruct %int128, %int768 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc757) | |
%235 = torch.aten.view %result0, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc56) | |
%236 = torch.aten.mm %235, %233 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc53) | |
%237 = torch.aten.mul.Scalar %192, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc53) | |
%238 = torch.aten.add.Tensor %237, %236, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc53) | |
%239 = torch.prim.ListConstruct %int1, %int128, %int768 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc758) | |
%240 = torch.aten.view %238, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc57) | |
%241 = torch.aten.mul.Scalar %240, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc58) | |
%242 = torch.aten.transpose.int %191, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc59) | |
%243 = torch.aten.view %result0, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc60) | |
%244 = torch.aten.mm %243, %242 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc61) | |
%245 = torch.aten.mul.Scalar %190, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc61) | |
%246 = torch.aten.add.Tensor %245, %244, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc61) | |
%247 = torch.aten.view %246, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc62) | |
%248 = torch.prim.ListConstruct %int1, %int-1, %int12, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%249 = torch.aten.view %247, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc63) | |
%250 = torch.aten.transpose.int %249, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc64) | |
%251 = torch.aten.clone %250, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc65) | |
%252 = torch.aten.transpose.int %189, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc66) | |
%253 = torch.aten.view %result0, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc67) | |
%254 = torch.aten.mm %253, %252 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc68) | |
%255 = torch.aten.mul.Scalar %188, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc68) | |
%256 = torch.aten.add.Tensor %255, %254, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc68) | |
%257 = torch.aten.view %256, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc69) | |
%258 = torch.aten.view %257, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc70) | |
%259 = torch.aten.transpose.int %258, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc71) | |
%260 = torch.aten.clone %259, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc72) | |
%261 = torch.prim.ListConstruct %int1, %int128, %int12, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc759) | |
%262 = torch.aten.view %241, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc73) | |
%263 = torch.aten.transpose.int %262, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc74) | |
%264 = torch.aten.clone %263, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc75) | |
%265 = torch.prim.ListConstruct %int12, %int-1, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%266 = torch.aten.view %264, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc76) | |
%267 = torch.aten.view %251, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc77) | |
%268 = torch.aten.view %260, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc78) | |
%269 = torch.aten.transpose.int %267, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc79) | |
%270 = torch.aten.bmm %266, %269 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc80) | |
%271 = torch.prim.ListConstruct %int1, %int12, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc760) | |
%272 = torch.aten.view %270, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc81) | |
%273 = torch.aten.add.Tensor %272, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc82) | |
%274 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc83) | |
%275 = torch.aten.maximum %273, %274 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc84) | |
%276 = torch.prim.ListConstruct %int12, %int128, %int128 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc761) | |
%277 = torch.aten.view %275, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc85) | |
%278 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%values, %indices = torch.aten.max.dim %277, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc86) | |
%279 = torch.aten.sub.Tensor %277, %values, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc87) | |
%280 = torch.aten.exp %279 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc88) | |
%281 = torch.aten.sum.dim_IntList %280, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc89) | |
%282 = torch.aten.div.Tensor %280, %281 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc90) | |
%283 = torch.aten.bmm %282, %268 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc91) | |
%284 = torch.prim.ListConstruct %int1, %int12, %int128, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc762) | |
%285 = torch.aten.view %283, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc92) | |
%286 = torch.aten.transpose.int %285, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc93) | |
%287 = torch.aten.clone %286, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc94) | |
%288 = torch.aten.view %287, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc95) | |
%289 = torch.aten.transpose.int %186, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc96) | |
%290 = torch.aten.view %288, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc97) | |
%291 = torch.aten.mm %290, %289 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc98) | |
%292 = torch.aten.mul.Scalar %185, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc98) | |
%293 = torch.aten.add.Tensor %292, %291, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc98) | |
%294 = torch.aten.view %293, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc99) | |
%295 = torch.aten.add.Tensor %231, %294, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc100) | |
%296 = torch.aten.view %295, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc101) | |
%result0_0, %result1_1, %result2_2 = torch.aten.native_layer_norm %296, %232, %184, %183, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc102) | |
%297 = torch.aten.transpose.int %182, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc103) | |
%298 = torch.aten.mm %result0_0, %297 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc104) | |
%299 = torch.aten.mul.Scalar %181, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc104) | |
%300 = torch.aten.add.Tensor %299, %298, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc104) | |
%301 = torch.aten.relu %300 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc105) | |
%302 = torch.aten.transpose.int %180, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc106) | |
%303 = torch.aten.mm %301, %302 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc107) | |
%304 = torch.aten.mul.Scalar %179, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc107) | |
%305 = torch.aten.add.Tensor %304, %303, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc107) | |
%306 = torch.aten.add.Tensor %296, %305, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc108) | |
%307 = torch.aten.view %306, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc109) | |
%result0_3, %result1_4, %result2_5 = torch.aten.native_layer_norm %307, %232, %178, %177, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc110) | |
%308 = torch.aten.transpose.int %176, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc111) | |
%309 = torch.aten.view %result0_3, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc112) | |
%310 = torch.aten.mm %309, %308 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc113) | |
%311 = torch.aten.mul.Scalar %175, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc113) | |
%312 = torch.aten.add.Tensor %311, %310, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc113) | |
%313 = torch.aten.view %312, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc114) | |
%314 = torch.aten.mul.Scalar %313, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc115) | |
%315 = torch.aten.transpose.int %174, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc116) | |
%316 = torch.aten.view %result0_3, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc117) | |
%317 = torch.aten.mm %316, %315 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc118) | |
%318 = torch.aten.mul.Scalar %173, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc118) | |
%319 = torch.aten.add.Tensor %318, %317, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc118) | |
%320 = torch.aten.view %319, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc119) | |
%321 = torch.aten.view %320, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc120) | |
%322 = torch.aten.transpose.int %321, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc121) | |
%323 = torch.aten.clone %322, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc122) | |
%324 = torch.aten.transpose.int %172, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc123) | |
%325 = torch.aten.view %result0_3, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc124) | |
%326 = torch.aten.mm %325, %324 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc125) | |
%327 = torch.aten.mul.Scalar %171, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc125) | |
%328 = torch.aten.add.Tensor %327, %326, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc125) | |
%329 = torch.aten.view %328, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc126) | |
%330 = torch.aten.view %329, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc127) | |
%331 = torch.aten.transpose.int %330, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc128) | |
%332 = torch.aten.clone %331, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc129) | |
%333 = torch.aten.view %314, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc130) | |
%334 = torch.aten.transpose.int %333, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc131) | |
%335 = torch.aten.clone %334, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc132) | |
%336 = torch.aten.view %335, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc133) | |
%337 = torch.aten.view %323, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc134) | |
%338 = torch.aten.view %332, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc135) | |
%339 = torch.aten.transpose.int %337, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc136) | |
%340 = torch.aten.bmm %336, %339 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc137) | |
%341 = torch.aten.view %340, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc138) | |
%342 = torch.aten.add.Tensor %341, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc139) | |
%343 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc140) | |
%344 = torch.aten.maximum %342, %343 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc141) | |
%345 = torch.aten.view %344, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc142) | |
%values_6, %indices_7 = torch.aten.max.dim %345, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc143) | |
%346 = torch.aten.sub.Tensor %345, %values_6, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc144) | |
%347 = torch.aten.exp %346 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc145) | |
%348 = torch.aten.sum.dim_IntList %347, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc146) | |
%349 = torch.aten.div.Tensor %347, %348 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc147) | |
%350 = torch.aten.bmm %349, %338 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc148) | |
%351 = torch.aten.view %350, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc149) | |
%352 = torch.aten.transpose.int %351, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc150) | |
%353 = torch.aten.clone %352, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc151) | |
%354 = torch.aten.view %353, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc152) | |
%355 = torch.aten.transpose.int %170, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc153) | |
%356 = torch.aten.view %354, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc154) | |
%357 = torch.aten.mm %356, %355 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc155) | |
%358 = torch.aten.mul.Scalar %169, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc155) | |
%359 = torch.aten.add.Tensor %358, %357, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc155) | |
%360 = torch.aten.view %359, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc156) | |
%361 = torch.aten.add.Tensor %307, %360, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc157) | |
%362 = torch.aten.view %361, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc158) | |
%result0_8, %result1_9, %result2_10 = torch.aten.native_layer_norm %362, %232, %168, %167, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc159) | |
%363 = torch.aten.transpose.int %166, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc160) | |
%364 = torch.aten.mm %result0_8, %363 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc161) | |
%365 = torch.aten.mul.Scalar %165, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc161) | |
%366 = torch.aten.add.Tensor %365, %364, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc161) | |
%367 = torch.aten.relu %366 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc162) | |
%368 = torch.aten.transpose.int %164, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc163) | |
%369 = torch.aten.mm %367, %368 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc164) | |
%370 = torch.aten.mul.Scalar %163, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc164) | |
%371 = torch.aten.add.Tensor %370, %369, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc164) | |
%372 = torch.aten.add.Tensor %362, %371, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc165) | |
%373 = torch.aten.view %372, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc166) | |
%result0_11, %result1_12, %result2_13 = torch.aten.native_layer_norm %373, %232, %162, %161, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc167) | |
%374 = torch.aten.transpose.int %160, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc168) | |
%375 = torch.aten.view %result0_11, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc169) | |
%376 = torch.aten.mm %375, %374 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc170) | |
%377 = torch.aten.mul.Scalar %159, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc170) | |
%378 = torch.aten.add.Tensor %377, %376, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc170) | |
%379 = torch.aten.view %378, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc171) | |
%380 = torch.aten.mul.Scalar %379, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc172) | |
%381 = torch.aten.transpose.int %158, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc173) | |
%382 = torch.aten.view %result0_11, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc174) | |
%383 = torch.aten.mm %382, %381 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc175) | |
%384 = torch.aten.mul.Scalar %157, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc175) | |
%385 = torch.aten.add.Tensor %384, %383, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc175) | |
%386 = torch.aten.view %385, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc176) | |
%387 = torch.aten.view %386, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc177) | |
%388 = torch.aten.transpose.int %387, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc178) | |
%389 = torch.aten.clone %388, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc179) | |
%390 = torch.aten.transpose.int %156, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc180) | |
%391 = torch.aten.view %result0_11, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc181) | |
%392 = torch.aten.mm %391, %390 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc182) | |
%393 = torch.aten.mul.Scalar %155, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc182) | |
%394 = torch.aten.add.Tensor %393, %392, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc182) | |
%395 = torch.aten.view %394, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc183) | |
%396 = torch.aten.view %395, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc184) | |
%397 = torch.aten.transpose.int %396, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc185) | |
%398 = torch.aten.clone %397, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc186) | |
%399 = torch.aten.view %380, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc187) | |
%400 = torch.aten.transpose.int %399, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc188) | |
%401 = torch.aten.clone %400, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc189) | |
%402 = torch.aten.view %401, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc190) | |
%403 = torch.aten.view %389, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc191) | |
%404 = torch.aten.view %398, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc192) | |
%405 = torch.aten.transpose.int %403, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc193) | |
%406 = torch.aten.bmm %402, %405 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc194) | |
%407 = torch.aten.view %406, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc195) | |
%408 = torch.aten.add.Tensor %407, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc196) | |
%409 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc197) | |
%410 = torch.aten.maximum %408, %409 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc198) | |
%411 = torch.aten.view %410, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc199) | |
%values_14, %indices_15 = torch.aten.max.dim %411, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc200) | |
%412 = torch.aten.sub.Tensor %411, %values_14, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc201) | |
%413 = torch.aten.exp %412 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc202) | |
%414 = torch.aten.sum.dim_IntList %413, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc203) | |
%415 = torch.aten.div.Tensor %413, %414 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc204) | |
%416 = torch.aten.bmm %415, %404 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc205) | |
%417 = torch.aten.view %416, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc206) | |
%418 = torch.aten.transpose.int %417, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc207) | |
%419 = torch.aten.clone %418, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc208) | |
%420 = torch.aten.view %419, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc209) | |
%421 = torch.aten.transpose.int %154, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc210) | |
%422 = torch.aten.view %420, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc211) | |
%423 = torch.aten.mm %422, %421 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc212) | |
%424 = torch.aten.mul.Scalar %153, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc212) | |
%425 = torch.aten.add.Tensor %424, %423, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc212) | |
%426 = torch.aten.view %425, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc213) | |
%427 = torch.aten.add.Tensor %373, %426, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc214) | |
%428 = torch.aten.view %427, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc215) | |
%result0_16, %result1_17, %result2_18 = torch.aten.native_layer_norm %428, %232, %152, %151, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc216) | |
%429 = torch.aten.transpose.int %150, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc217) | |
%430 = torch.aten.mm %result0_16, %429 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc218) | |
%431 = torch.aten.mul.Scalar %149, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc218) | |
%432 = torch.aten.add.Tensor %431, %430, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc218) | |
%433 = torch.aten.relu %432 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc219) | |
%434 = torch.aten.transpose.int %148, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc220) | |
%435 = torch.aten.mm %433, %434 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc221) | |
%436 = torch.aten.mul.Scalar %147, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc221) | |
%437 = torch.aten.add.Tensor %436, %435, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc221) | |
%438 = torch.aten.add.Tensor %428, %437, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc222) | |
%439 = torch.aten.view %438, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc223) | |
// ---- Self-attention sub-block (12 heads x 64 dims, seq len 128, batch 1) ----
// Visible pattern: layer_norm; three 768->768 projections (Q scaled by 0.125 = 1/sqrt(64));
// reshape to heads; bmm(Q, K^T); add broadcast bias %230; clamp; numerically-stable softmax;
// bmm with V; merge heads; output projection; residual add.
%result0_19, %result1_20, %result2_21 = torch.aten.native_layer_norm %439, %232, %146, %145, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc224) | |
// Q projection (weight %144, bias %143).
%440 = torch.aten.transpose.int %144, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc225) | |
%441 = torch.aten.view %result0_19, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc226) | |
%442 = torch.aten.mm %441, %440 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc227) | |
%443 = torch.aten.mul.Scalar %143, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc227) | |
%444 = torch.aten.add.Tensor %443, %442, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc227) | |
%445 = torch.aten.view %444, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc228) | |
// Scale Q by 0.125, which matches 1/sqrt(head_dim=64).
%446 = torch.aten.mul.Scalar %445, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc229) | |
// K projection (weight %142, bias %141), then split into heads: [1,128,12,64] -> [1,12,128,64].
%447 = torch.aten.transpose.int %142, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc230) | |
%448 = torch.aten.view %result0_19, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc231) | |
%449 = torch.aten.mm %448, %447 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc232) | |
%450 = torch.aten.mul.Scalar %141, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc232) | |
%451 = torch.aten.add.Tensor %450, %449, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc232) | |
%452 = torch.aten.view %451, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc233) | |
%453 = torch.aten.view %452, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc234) | |
%454 = torch.aten.transpose.int %453, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc235) | |
%455 = torch.aten.clone %454, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc236) | |
// V projection (weight %140, bias %139), split into heads the same way.
%456 = torch.aten.transpose.int %140, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc237) | |
%457 = torch.aten.view %result0_19, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc238) | |
%458 = torch.aten.mm %457, %456 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc239) | |
%459 = torch.aten.mul.Scalar %139, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc239) | |
%460 = torch.aten.add.Tensor %459, %458, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc239) | |
%461 = torch.aten.view %460, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc240) | |
%462 = torch.aten.view %461, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc241) | |
%463 = torch.aten.transpose.int %462, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc242) | |
%464 = torch.aten.clone %463, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc243) | |
// Reshape scaled Q to heads, then fold batch*heads: [1,12,128,64] -> [12,128,64] for all of Q/K/V.
%465 = torch.aten.view %446, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc244) | |
%466 = torch.aten.transpose.int %465, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc245) | |
%467 = torch.aten.clone %466, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc246) | |
%468 = torch.aten.view %467, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc247) | |
%469 = torch.aten.view %455, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc248) | |
%470 = torch.aten.view %464, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc249) | |
// Attention scores: Q @ K^T -> [12,128,128].
%471 = torch.aten.transpose.int %469, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc250) | |
%472 = torch.aten.bmm %468, %471 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc251) | |
%473 = torch.aten.view %472, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc252) | |
// Add broadcast bias %230 [1,1,128,128] — presumably the attention mask; defined before this chunk.
%474 = torch.aten.add.Tensor %473, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc253) | |
// Elementwise max with scalar tensor %187 — looks like a lower clamp on masked scores; TODO confirm value.
%475 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc254) | |
%476 = torch.aten.maximum %474, %475 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc255) | |
%477 = torch.aten.view %476, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc256) | |
// Numerically-stable softmax over the last dim: subtract row max, exp, normalize by the sum.
%values_22, %indices_23 = torch.aten.max.dim %477, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc257) | |
%478 = torch.aten.sub.Tensor %477, %values_22, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc258) | |
%479 = torch.aten.exp %478 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc259) | |
%480 = torch.aten.sum.dim_IntList %479, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc260) | |
%481 = torch.aten.div.Tensor %479, %480 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc261) | |
// Weighted sum of values, then merge heads back to [1,128,768].
%482 = torch.aten.bmm %481, %470 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc262) | |
%483 = torch.aten.view %482, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc263) | |
%484 = torch.aten.transpose.int %483, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc264) | |
%485 = torch.aten.clone %484, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc265) | |
%486 = torch.aten.view %485, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc266) | |
// Output projection (weight %138, bias %137), residual add with %439, re-flatten to [128,768].
%487 = torch.aten.transpose.int %138, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc267) | |
%488 = torch.aten.view %486, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc268) | |
%489 = torch.aten.mm %488, %487 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc269) | |
%490 = torch.aten.mul.Scalar %137, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc269) | |
%491 = torch.aten.add.Tensor %490, %489, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc269) | |
%492 = torch.aten.view %491, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc270) | |
%493 = torch.aten.add.Tensor %439, %492, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc271) | |
%494 = torch.aten.view %493, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc272) | |
// ---- Feed-forward sub-block: layer_norm -> linear(768->3072) -> relu -> linear(3072->768) -> residual ----
// Bias mul.Scalar by %int1 is an identity scale (presumably an addmm decomposition artifact).
%result0_24, %result1_25, %result2_26 = torch.aten.native_layer_norm %494, %232, %136, %135, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc273) | |
%495 = torch.aten.transpose.int %134, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc274) | |
%496 = torch.aten.mm %result0_24, %495 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc275) | |
%497 = torch.aten.mul.Scalar %133, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc275) | |
%498 = torch.aten.add.Tensor %497, %496, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc275) | |
%499 = torch.aten.relu %498 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc276) | |
%500 = torch.aten.transpose.int %132, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc277) | |
%501 = torch.aten.mm %499, %500 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc278) | |
%502 = torch.aten.mul.Scalar %131, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc278) | |
%503 = torch.aten.add.Tensor %502, %501, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc278) | |
// Residual connection, then restore [1,128,768].
%504 = torch.aten.add.Tensor %494, %503, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc279) | |
%505 = torch.aten.view %504, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc280) | |
// ---- Self-attention sub-block (12 heads x 64 dims): layer_norm; Q/K/V projections with Q scaled
// by 0.125 = 1/sqrt(64); softmax(Q K^T + bias %230) @ V; output projection; residual add. ----
%result0_27, %result1_28, %result2_29 = torch.aten.native_layer_norm %505, %232, %130, %129, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc281) | |
// Q projection (weight %128, bias %127), scaled by 0.125.
%506 = torch.aten.transpose.int %128, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc282) | |
%507 = torch.aten.view %result0_27, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc283) | |
%508 = torch.aten.mm %507, %506 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc284) | |
%509 = torch.aten.mul.Scalar %127, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc284) | |
%510 = torch.aten.add.Tensor %509, %508, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc284) | |
%511 = torch.aten.view %510, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc285) | |
%512 = torch.aten.mul.Scalar %511, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc286) | |
// K projection (weight %126, bias %125), split into heads [1,12,128,64].
%513 = torch.aten.transpose.int %126, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc287) | |
%514 = torch.aten.view %result0_27, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc288) | |
%515 = torch.aten.mm %514, %513 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc289) | |
%516 = torch.aten.mul.Scalar %125, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc289) | |
%517 = torch.aten.add.Tensor %516, %515, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc289) | |
%518 = torch.aten.view %517, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc290) | |
%519 = torch.aten.view %518, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc291) | |
%520 = torch.aten.transpose.int %519, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc292) | |
%521 = torch.aten.clone %520, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc293) | |
// V projection (weight %124, bias %123), split into heads.
%522 = torch.aten.transpose.int %124, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc294) | |
%523 = torch.aten.view %result0_27, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc295) | |
%524 = torch.aten.mm %523, %522 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc296) | |
%525 = torch.aten.mul.Scalar %123, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc296) | |
%526 = torch.aten.add.Tensor %525, %524, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc296) | |
%527 = torch.aten.view %526, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc297) | |
%528 = torch.aten.view %527, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc298) | |
%529 = torch.aten.transpose.int %528, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc299) | |
%530 = torch.aten.clone %529, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc300) | |
// Fold batch*heads: [1,12,128,64] -> [12,128,64] for Q/K/V.
%531 = torch.aten.view %512, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc301) | |
%532 = torch.aten.transpose.int %531, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc302) | |
%533 = torch.aten.clone %532, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc303) | |
%534 = torch.aten.view %533, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc304) | |
%535 = torch.aten.view %521, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc305) | |
%536 = torch.aten.view %530, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc306) | |
// Scores Q @ K^T, add broadcast bias %230 (presumably the attention mask), clamp with %187.
%537 = torch.aten.transpose.int %535, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc307) | |
%538 = torch.aten.bmm %534, %537 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc308) | |
%539 = torch.aten.view %538, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc309) | |
%540 = torch.aten.add.Tensor %539, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc310) | |
%541 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc311) | |
%542 = torch.aten.maximum %540, %541 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc312) | |
%543 = torch.aten.view %542, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc313) | |
// Numerically-stable softmax (subtract row max, exp, normalize).
%values_30, %indices_31 = torch.aten.max.dim %543, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc314) | |
%544 = torch.aten.sub.Tensor %543, %values_30, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc315) | |
%545 = torch.aten.exp %544 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc316) | |
%546 = torch.aten.sum.dim_IntList %545, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc317) | |
%547 = torch.aten.div.Tensor %545, %546 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc318) | |
// Weighted sum of values; merge heads back to [1,128,768].
%548 = torch.aten.bmm %547, %536 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc319) | |
%549 = torch.aten.view %548, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc320) | |
%550 = torch.aten.transpose.int %549, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc321) | |
%551 = torch.aten.clone %550, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc322) | |
%552 = torch.aten.view %551, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc323) | |
// Output projection (weight %122, bias %121), residual add with %505, re-flatten to [128,768].
%553 = torch.aten.transpose.int %122, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc324) | |
%554 = torch.aten.view %552, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc325) | |
%555 = torch.aten.mm %554, %553 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc326) | |
%556 = torch.aten.mul.Scalar %121, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc326) | |
%557 = torch.aten.add.Tensor %556, %555, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc326) | |
%558 = torch.aten.view %557, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc327) | |
%559 = torch.aten.add.Tensor %505, %558, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc328) | |
%560 = torch.aten.view %559, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc329) | |
// ---- Feed-forward sub-block: layer_norm -> linear(768->3072) -> relu -> linear(3072->768) -> residual ----
%result0_32, %result1_33, %result2_34 = torch.aten.native_layer_norm %560, %232, %120, %119, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc330) | |
%561 = torch.aten.transpose.int %118, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc331) | |
%562 = torch.aten.mm %result0_32, %561 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc332) | |
%563 = torch.aten.mul.Scalar %117, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc332) | |
%564 = torch.aten.add.Tensor %563, %562, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc332) | |
%565 = torch.aten.relu %564 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc333) | |
%566 = torch.aten.transpose.int %116, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc334) | |
%567 = torch.aten.mm %565, %566 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc335) | |
%568 = torch.aten.mul.Scalar %115, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc335) | |
%569 = torch.aten.add.Tensor %568, %567, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc335) | |
// Residual connection, then restore [1,128,768].
%570 = torch.aten.add.Tensor %560, %569, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc336) | |
%571 = torch.aten.view %570, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc337) | |
// ---- Self-attention sub-block (12 heads x 64 dims): layer_norm; Q/K/V projections with Q scaled
// by 0.125 = 1/sqrt(64); softmax(Q K^T + bias %230) @ V; output projection; residual add. ----
%result0_35, %result1_36, %result2_37 = torch.aten.native_layer_norm %571, %232, %114, %113, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc338) | |
// Q projection (weight %112, bias %111), scaled by 0.125.
%572 = torch.aten.transpose.int %112, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc339) | |
%573 = torch.aten.view %result0_35, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc340) | |
%574 = torch.aten.mm %573, %572 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc341) | |
%575 = torch.aten.mul.Scalar %111, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc341) | |
%576 = torch.aten.add.Tensor %575, %574, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc341) | |
%577 = torch.aten.view %576, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc342) | |
%578 = torch.aten.mul.Scalar %577, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc343) | |
// K projection (weight %110, bias %109), split into heads [1,12,128,64].
%579 = torch.aten.transpose.int %110, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc344) | |
%580 = torch.aten.view %result0_35, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc345) | |
%581 = torch.aten.mm %580, %579 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc346) | |
%582 = torch.aten.mul.Scalar %109, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc346) | |
%583 = torch.aten.add.Tensor %582, %581, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc346) | |
%584 = torch.aten.view %583, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc347) | |
%585 = torch.aten.view %584, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc348) | |
%586 = torch.aten.transpose.int %585, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc349) | |
%587 = torch.aten.clone %586, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc350) | |
// V projection (weight %108, bias %107), split into heads.
%588 = torch.aten.transpose.int %108, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc351) | |
%589 = torch.aten.view %result0_35, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc352) | |
%590 = torch.aten.mm %589, %588 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc353) | |
%591 = torch.aten.mul.Scalar %107, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc353) | |
%592 = torch.aten.add.Tensor %591, %590, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc353) | |
%593 = torch.aten.view %592, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc354) | |
%594 = torch.aten.view %593, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc355) | |
%595 = torch.aten.transpose.int %594, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc356) | |
%596 = torch.aten.clone %595, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc357) | |
// Fold batch*heads: [1,12,128,64] -> [12,128,64] for Q/K/V.
%597 = torch.aten.view %578, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc358) | |
%598 = torch.aten.transpose.int %597, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc359) | |
%599 = torch.aten.clone %598, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc360) | |
%600 = torch.aten.view %599, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc361) | |
%601 = torch.aten.view %587, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc362) | |
%602 = torch.aten.view %596, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc363) | |
// Scores Q @ K^T, add broadcast bias %230 (presumably the attention mask), clamp with %187.
%603 = torch.aten.transpose.int %601, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc364) | |
%604 = torch.aten.bmm %600, %603 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc365) | |
%605 = torch.aten.view %604, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc366) | |
%606 = torch.aten.add.Tensor %605, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc367) | |
%607 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc368) | |
%608 = torch.aten.maximum %606, %607 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc369) | |
%609 = torch.aten.view %608, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc370) | |
// Numerically-stable softmax (subtract row max, exp, normalize).
%values_38, %indices_39 = torch.aten.max.dim %609, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc371) | |
%610 = torch.aten.sub.Tensor %609, %values_38, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc372) | |
%611 = torch.aten.exp %610 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc373) | |
%612 = torch.aten.sum.dim_IntList %611, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc374) | |
%613 = torch.aten.div.Tensor %611, %612 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc375) | |
// Weighted sum of values; merge heads back to [1,128,768].
%614 = torch.aten.bmm %613, %602 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc376) | |
%615 = torch.aten.view %614, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc377) | |
%616 = torch.aten.transpose.int %615, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc378) | |
%617 = torch.aten.clone %616, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc379) | |
%618 = torch.aten.view %617, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc380) | |
// Output projection (weight %106, bias %105), residual add with %571, re-flatten to [128,768].
%619 = torch.aten.transpose.int %106, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc381) | |
%620 = torch.aten.view %618, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc382) | |
%621 = torch.aten.mm %620, %619 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc383) | |
%622 = torch.aten.mul.Scalar %105, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc383) | |
%623 = torch.aten.add.Tensor %622, %621, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc383) | |
%624 = torch.aten.view %623, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc384) | |
%625 = torch.aten.add.Tensor %571, %624, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc385) | |
%626 = torch.aten.view %625, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc386) | |
// ---- Feed-forward sub-block: layer_norm -> linear(768->3072) -> relu -> linear(3072->768) -> residual ----
%result0_40, %result1_41, %result2_42 = torch.aten.native_layer_norm %626, %232, %104, %103, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc387) | |
%627 = torch.aten.transpose.int %102, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc388) | |
%628 = torch.aten.mm %result0_40, %627 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc389) | |
%629 = torch.aten.mul.Scalar %101, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc389) | |
%630 = torch.aten.add.Tensor %629, %628, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc389) | |
%631 = torch.aten.relu %630 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc390) | |
%632 = torch.aten.transpose.int %100, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc391) | |
%633 = torch.aten.mm %631, %632 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc392) | |
%634 = torch.aten.mul.Scalar %99, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc392) | |
%635 = torch.aten.add.Tensor %634, %633, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc392) | |
// Residual connection, then restore [1,128,768].
%636 = torch.aten.add.Tensor %626, %635, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc393) | |
%637 = torch.aten.view %636, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc394) | |
// ---- Start of the next self-attention sub-block (continues past this chunk) ----
// Visible here: layer_norm, Q projection scaled by 0.125 (= 1/sqrt(64)), K projection, and the
// beginning of the reshape of K into 12 heads of 64 dims.
%result0_43, %result1_44, %result2_45 = torch.aten.native_layer_norm %637, %232, %98, %97, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc395) | |
// Q projection (weight %96, bias %95), scaled by 0.125.
%638 = torch.aten.transpose.int %96, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc396) | |
%639 = torch.aten.view %result0_43, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc397) | |
%640 = torch.aten.mm %639, %638 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc398) | |
%641 = torch.aten.mul.Scalar %95, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc398) | |
%642 = torch.aten.add.Tensor %641, %640, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc398) | |
%643 = torch.aten.view %642, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc399) | |
%644 = torch.aten.mul.Scalar %643, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc400) | |
// K projection (weight %94, bias %93), reshape into heads begins here.
%645 = torch.aten.transpose.int %94, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc401) | |
%646 = torch.aten.view %result0_43, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc402) | |
%647 = torch.aten.mm %646, %645 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc403) | |
%648 = torch.aten.mul.Scalar %93, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc403) | |
%649 = torch.aten.add.Tensor %648, %647, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc404) | |
%650 = torch.aten.view %649, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc404) | |
%651 = torch.aten.view %650, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc405) | |
%652 = torch.aten.transpose.int %651, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc406) | |
%653 = torch.aten.clone %652, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc407) | |
%654 = torch.aten.transpose.int %92, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc408) | |
%655 = torch.aten.view %result0_43, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc409) | |
%656 = torch.aten.mm %655, %654 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc410) | |
%657 = torch.aten.mul.Scalar %91, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc410) | |
%658 = torch.aten.add.Tensor %657, %656, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc410) | |
%659 = torch.aten.view %658, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc411) | |
%660 = torch.aten.view %659, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc412) | |
%661 = torch.aten.transpose.int %660, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc413) | |
%662 = torch.aten.clone %661, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc414) | |
%663 = torch.aten.view %644, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc415) | |
%664 = torch.aten.transpose.int %663, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc416) | |
%665 = torch.aten.clone %664, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc417) | |
%666 = torch.aten.view %665, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc418) | |
%667 = torch.aten.view %653, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc419) | |
%668 = torch.aten.view %662, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc420) | |
%669 = torch.aten.transpose.int %667, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc421) | |
%670 = torch.aten.bmm %666, %669 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc422) | |
%671 = torch.aten.view %670, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc423) | |
%672 = torch.aten.add.Tensor %671, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc424) | |
%673 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc425) | |
%674 = torch.aten.maximum %672, %673 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc426) | |
%675 = torch.aten.view %674, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc427) | |
%values_46, %indices_47 = torch.aten.max.dim %675, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc428) | |
%676 = torch.aten.sub.Tensor %675, %values_46, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc429) | |
%677 = torch.aten.exp %676 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc430) | |
%678 = torch.aten.sum.dim_IntList %677, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc431) | |
%679 = torch.aten.div.Tensor %677, %678 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc432) | |
%680 = torch.aten.bmm %679, %668 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc433) | |
%681 = torch.aten.view %680, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc434) | |
%682 = torch.aten.transpose.int %681, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc435) | |
%683 = torch.aten.clone %682, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc436) | |
%684 = torch.aten.view %683, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc437) | |
%685 = torch.aten.transpose.int %90, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc438) | |
%686 = torch.aten.view %684, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc439) | |
%687 = torch.aten.mm %686, %685 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc440) | |
%688 = torch.aten.mul.Scalar %89, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc440) | |
%689 = torch.aten.add.Tensor %688, %687, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc440) | |
%690 = torch.aten.view %689, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc441) | |
%691 = torch.aten.add.Tensor %637, %690, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc442) | |
%692 = torch.aten.view %691, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc443) | |
%result0_48, %result1_49, %result2_50 = torch.aten.native_layer_norm %692, %232, %88, %87, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc444) | |
%693 = torch.aten.transpose.int %86, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc445) | |
%694 = torch.aten.mm %result0_48, %693 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc446) | |
%695 = torch.aten.mul.Scalar %85, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc446) | |
%696 = torch.aten.add.Tensor %695, %694, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc446) | |
%697 = torch.aten.relu %696 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc447) | |
%698 = torch.aten.transpose.int %84, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc448) | |
%699 = torch.aten.mm %697, %698 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc449) | |
%700 = torch.aten.mul.Scalar %83, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc449) | |
%701 = torch.aten.add.Tensor %700, %699, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc449) | |
%702 = torch.aten.add.Tensor %692, %701, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc450) | |
%703 = torch.aten.view %702, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc451) | |
%result0_51, %result1_52, %result2_53 = torch.aten.native_layer_norm %703, %232, %82, %81, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc452) | |
%704 = torch.aten.transpose.int %80, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc453) | |
%705 = torch.aten.view %result0_51, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc454) | |
%706 = torch.aten.mm %705, %704 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc455) | |
%707 = torch.aten.mul.Scalar %79, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc455) | |
%708 = torch.aten.add.Tensor %707, %706, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc455) | |
%709 = torch.aten.view %708, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc456) | |
%710 = torch.aten.mul.Scalar %709, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc457) | |
%711 = torch.aten.transpose.int %78, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc458) | |
%712 = torch.aten.view %result0_51, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc459) | |
%713 = torch.aten.mm %712, %711 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc460) | |
%714 = torch.aten.mul.Scalar %77, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc460) | |
%715 = torch.aten.add.Tensor %714, %713, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc460) | |
%716 = torch.aten.view %715, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc461) | |
%717 = torch.aten.view %716, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc462) | |
%718 = torch.aten.transpose.int %717, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc463) | |
%719 = torch.aten.clone %718, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc464) | |
%720 = torch.aten.transpose.int %76, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc465) | |
%721 = torch.aten.view %result0_51, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc466) | |
%722 = torch.aten.mm %721, %720 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc467) | |
%723 = torch.aten.mul.Scalar %75, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc467) | |
%724 = torch.aten.add.Tensor %723, %722, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc467) | |
%725 = torch.aten.view %724, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc468) | |
%726 = torch.aten.view %725, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc469) | |
%727 = torch.aten.transpose.int %726, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc470) | |
%728 = torch.aten.clone %727, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc471) | |
%729 = torch.aten.view %710, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc472) | |
%730 = torch.aten.transpose.int %729, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc473) | |
%731 = torch.aten.clone %730, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc474) | |
%732 = torch.aten.view %731, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc475) | |
%733 = torch.aten.view %719, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc476) | |
%734 = torch.aten.view %728, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc477) | |
%735 = torch.aten.transpose.int %733, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc478) | |
%736 = torch.aten.bmm %732, %735 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc479) | |
%737 = torch.aten.view %736, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc480) | |
%738 = torch.aten.add.Tensor %737, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc481) | |
%739 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc482) | |
%740 = torch.aten.maximum %738, %739 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc483) | |
%741 = torch.aten.view %740, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc484) | |
%values_54, %indices_55 = torch.aten.max.dim %741, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc485) | |
%742 = torch.aten.sub.Tensor %741, %values_54, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc486) | |
%743 = torch.aten.exp %742 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc487) | |
%744 = torch.aten.sum.dim_IntList %743, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc488) | |
%745 = torch.aten.div.Tensor %743, %744 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc489) | |
%746 = torch.aten.bmm %745, %734 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc490) | |
%747 = torch.aten.view %746, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc491) | |
%748 = torch.aten.transpose.int %747, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc492) | |
%749 = torch.aten.clone %748, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc493) | |
%750 = torch.aten.view %749, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc494) | |
%751 = torch.aten.transpose.int %74, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc495) | |
%752 = torch.aten.view %750, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc496) | |
%753 = torch.aten.mm %752, %751 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc497) | |
%754 = torch.aten.mul.Scalar %73, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc497) | |
%755 = torch.aten.add.Tensor %754, %753, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc497) | |
%756 = torch.aten.view %755, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc498) | |
%757 = torch.aten.add.Tensor %703, %756, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc499) | |
%758 = torch.aten.view %757, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc500) | |
%result0_56, %result1_57, %result2_58 = torch.aten.native_layer_norm %758, %232, %72, %71, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc501) | |
%759 = torch.aten.transpose.int %70, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc502) | |
%760 = torch.aten.mm %result0_56, %759 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc503) | |
%761 = torch.aten.mul.Scalar %69, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc503) | |
%762 = torch.aten.add.Tensor %761, %760, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc503) | |
%763 = torch.aten.relu %762 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc504) | |
%764 = torch.aten.transpose.int %68, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc505) | |
%765 = torch.aten.mm %763, %764 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc506) | |
%766 = torch.aten.mul.Scalar %67, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc506) | |
%767 = torch.aten.add.Tensor %766, %765, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc506) | |
%768 = torch.aten.add.Tensor %758, %767, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc507) | |
%769 = torch.aten.view %768, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc508) | |
%result0_59, %result1_60, %result2_61 = torch.aten.native_layer_norm %769, %232, %66, %65, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc509) | |
%770 = torch.aten.transpose.int %64, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc510) | |
%771 = torch.aten.view %result0_59, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc511) | |
%772 = torch.aten.mm %771, %770 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc512) | |
%773 = torch.aten.mul.Scalar %63, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc512) | |
%774 = torch.aten.add.Tensor %773, %772, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc512) | |
%775 = torch.aten.view %774, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc513) | |
%776 = torch.aten.mul.Scalar %775, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc514) | |
%777 = torch.aten.transpose.int %62, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc515) | |
%778 = torch.aten.view %result0_59, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc516) | |
%779 = torch.aten.mm %778, %777 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc517) | |
%780 = torch.aten.mul.Scalar %61, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc517) | |
%781 = torch.aten.add.Tensor %780, %779, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc517) | |
%782 = torch.aten.view %781, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc518) | |
%783 = torch.aten.view %782, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc519) | |
%784 = torch.aten.transpose.int %783, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc520) | |
%785 = torch.aten.clone %784, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc521) | |
%786 = torch.aten.transpose.int %60, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc522) | |
%787 = torch.aten.view %result0_59, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc523) | |
%788 = torch.aten.mm %787, %786 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc524) | |
%789 = torch.aten.mul.Scalar %59, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc524) | |
%790 = torch.aten.add.Tensor %789, %788, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc524) | |
%791 = torch.aten.view %790, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc525) | |
%792 = torch.aten.view %791, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc526) | |
%793 = torch.aten.transpose.int %792, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc527) | |
%794 = torch.aten.clone %793, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc528) | |
%795 = torch.aten.view %776, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc529) | |
%796 = torch.aten.transpose.int %795, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc530) | |
%797 = torch.aten.clone %796, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc531) | |
%798 = torch.aten.view %797, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc532) | |
%799 = torch.aten.view %785, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc533) | |
%800 = torch.aten.view %794, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc534) | |
%801 = torch.aten.transpose.int %799, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc535) | |
%802 = torch.aten.bmm %798, %801 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc536) | |
%803 = torch.aten.view %802, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc537) | |
%804 = torch.aten.add.Tensor %803, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc538) | |
%805 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc539) | |
%806 = torch.aten.maximum %804, %805 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc540) | |
%807 = torch.aten.view %806, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc541) | |
%values_62, %indices_63 = torch.aten.max.dim %807, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc542) | |
%808 = torch.aten.sub.Tensor %807, %values_62, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc543) | |
%809 = torch.aten.exp %808 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc544) | |
%810 = torch.aten.sum.dim_IntList %809, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc545) | |
%811 = torch.aten.div.Tensor %809, %810 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc546) | |
%812 = torch.aten.bmm %811, %800 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc547) | |
%813 = torch.aten.view %812, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc548) | |
%814 = torch.aten.transpose.int %813, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc549) | |
%815 = torch.aten.clone %814, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc550) | |
%816 = torch.aten.view %815, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc551) | |
%817 = torch.aten.transpose.int %58, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc552) | |
%818 = torch.aten.view %816, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc553) | |
%819 = torch.aten.mm %818, %817 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc554) | |
%820 = torch.aten.mul.Scalar %57, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc554) | |
%821 = torch.aten.add.Tensor %820, %819, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc554) | |
%822 = torch.aten.view %821, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc555) | |
%823 = torch.aten.add.Tensor %769, %822, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc556) | |
%824 = torch.aten.view %823, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc557) | |
%result0_64, %result1_65, %result2_66 = torch.aten.native_layer_norm %824, %232, %56, %55, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc558) | |
%825 = torch.aten.transpose.int %54, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc559) | |
%826 = torch.aten.mm %result0_64, %825 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc560) | |
%827 = torch.aten.mul.Scalar %53, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc560) | |
%828 = torch.aten.add.Tensor %827, %826, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc560) | |
%829 = torch.aten.relu %828 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc561) | |
%830 = torch.aten.transpose.int %52, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc562) | |
%831 = torch.aten.mm %829, %830 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc563) | |
%832 = torch.aten.mul.Scalar %51, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc563) | |
%833 = torch.aten.add.Tensor %832, %831, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc563) | |
%834 = torch.aten.add.Tensor %824, %833, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc564) | |
%835 = torch.aten.view %834, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc565) | |
%result0_67, %result1_68, %result2_69 = torch.aten.native_layer_norm %835, %232, %50, %49, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc566) | |
%836 = torch.aten.transpose.int %48, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc567) | |
%837 = torch.aten.view %result0_67, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc568) | |
%838 = torch.aten.mm %837, %836 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc569) | |
%839 = torch.aten.mul.Scalar %47, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc569) | |
%840 = torch.aten.add.Tensor %839, %838, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc569) | |
%841 = torch.aten.view %840, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc570) | |
%842 = torch.aten.mul.Scalar %841, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc571) | |
%843 = torch.aten.transpose.int %46, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc572) | |
%844 = torch.aten.view %result0_67, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc573) | |
%845 = torch.aten.mm %844, %843 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc574) | |
%846 = torch.aten.mul.Scalar %45, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc574) | |
%847 = torch.aten.add.Tensor %846, %845, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc574) | |
%848 = torch.aten.view %847, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc575) | |
%849 = torch.aten.view %848, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc576) | |
%850 = torch.aten.transpose.int %849, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc577) | |
%851 = torch.aten.clone %850, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc578) | |
%852 = torch.aten.transpose.int %44, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc579) | |
%853 = torch.aten.view %result0_67, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc580) | |
%854 = torch.aten.mm %853, %852 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc581) | |
%855 = torch.aten.mul.Scalar %43, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc581) | |
%856 = torch.aten.add.Tensor %855, %854, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc581) | |
%857 = torch.aten.view %856, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc582) | |
%858 = torch.aten.view %857, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc583) | |
%859 = torch.aten.transpose.int %858, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc584) | |
%860 = torch.aten.clone %859, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc585) | |
%861 = torch.aten.view %842, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc586) | |
%862 = torch.aten.transpose.int %861, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc587) | |
%863 = torch.aten.clone %862, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc588) | |
%864 = torch.aten.view %863, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc589) | |
%865 = torch.aten.view %851, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc590) | |
%866 = torch.aten.view %860, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc591) | |
%867 = torch.aten.transpose.int %865, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc592) | |
%868 = torch.aten.bmm %864, %867 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc593) | |
%869 = torch.aten.view %868, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc594) | |
%870 = torch.aten.add.Tensor %869, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc595) | |
%871 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc596) | |
%872 = torch.aten.maximum %870, %871 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc597) | |
%873 = torch.aten.view %872, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc598) | |
%values_70, %indices_71 = torch.aten.max.dim %873, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc599) | |
%874 = torch.aten.sub.Tensor %873, %values_70, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc600) | |
%875 = torch.aten.exp %874 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc601) | |
%876 = torch.aten.sum.dim_IntList %875, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc602) | |
%877 = torch.aten.div.Tensor %875, %876 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc603) | |
%878 = torch.aten.bmm %877, %866 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc604) | |
%879 = torch.aten.view %878, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc605) | |
%880 = torch.aten.transpose.int %879, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc606) | |
%881 = torch.aten.clone %880, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc607) | |
%882 = torch.aten.view %881, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc608) | |
%883 = torch.aten.transpose.int %42, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc609) | |
%884 = torch.aten.view %882, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc610) | |
%885 = torch.aten.mm %884, %883 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc611) | |
%886 = torch.aten.mul.Scalar %41, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc611) | |
%887 = torch.aten.add.Tensor %886, %885, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc611) | |
%888 = torch.aten.view %887, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc612) | |
%889 = torch.aten.add.Tensor %835, %888, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc613) | |
%890 = torch.aten.view %889, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc614) | |
%result0_72, %result1_73, %result2_74 = torch.aten.native_layer_norm %890, %232, %40, %39, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc615) | |
%891 = torch.aten.transpose.int %38, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc616) | |
%892 = torch.aten.mm %result0_72, %891 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc617) | |
%893 = torch.aten.mul.Scalar %37, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc617) | |
%894 = torch.aten.add.Tensor %893, %892, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc617) | |
%895 = torch.aten.relu %894 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc618) | |
%896 = torch.aten.transpose.int %36, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc619) | |
%897 = torch.aten.mm %895, %896 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc620) | |
%898 = torch.aten.mul.Scalar %35, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc620) | |
%899 = torch.aten.add.Tensor %898, %897, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc620) | |
%900 = torch.aten.add.Tensor %890, %899, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc621) | |
%901 = torch.aten.view %900, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc622) | |
%result0_75, %result1_76, %result2_77 = torch.aten.native_layer_norm %901, %232, %34, %33, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc623) | |
%902 = torch.aten.transpose.int %32, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc624) | |
%903 = torch.aten.view %result0_75, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc625) | |
%904 = torch.aten.mm %903, %902 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc626) | |
%905 = torch.aten.mul.Scalar %31, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc626) | |
%906 = torch.aten.add.Tensor %905, %904, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc626) | |
%907 = torch.aten.view %906, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc627) | |
%908 = torch.aten.mul.Scalar %907, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc628) | |
%909 = torch.aten.transpose.int %30, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc629) | |
%910 = torch.aten.view %result0_75, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc630) | |
%911 = torch.aten.mm %910, %909 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc631) | |
%912 = torch.aten.mul.Scalar %29, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc631) | |
%913 = torch.aten.add.Tensor %912, %911, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc631) | |
%914 = torch.aten.view %913, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc632) | |
%915 = torch.aten.view %914, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc633) | |
%916 = torch.aten.transpose.int %915, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc634) | |
%917 = torch.aten.clone %916, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc635) | |
%918 = torch.aten.transpose.int %28, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc636) | |
%919 = torch.aten.view %result0_75, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc637) | |
%920 = torch.aten.mm %919, %918 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc638) | |
%921 = torch.aten.mul.Scalar %27, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc638) | |
%922 = torch.aten.add.Tensor %921, %920, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc638) | |
%923 = torch.aten.view %922, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc639) | |
%924 = torch.aten.view %923, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc640) | |
%925 = torch.aten.transpose.int %924, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc641) | |
%926 = torch.aten.clone %925, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc642) | |
%927 = torch.aten.view %908, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc643) | |
%928 = torch.aten.transpose.int %927, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc644) | |
%929 = torch.aten.clone %928, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc645) | |
%930 = torch.aten.view %929, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc646) | |
%931 = torch.aten.view %917, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc647) | |
%932 = torch.aten.view %926, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc648) | |
%933 = torch.aten.transpose.int %931, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc649) | |
%934 = torch.aten.bmm %930, %933 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc650) | |
%935 = torch.aten.view %934, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc651) | |
%936 = torch.aten.add.Tensor %935, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc652) | |
%937 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc653) | |
%938 = torch.aten.maximum %936, %937 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc654) | |
%939 = torch.aten.view %938, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc655) | |
%values_78, %indices_79 = torch.aten.max.dim %939, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc656) | |
%940 = torch.aten.sub.Tensor %939, %values_78, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc657) | |
%941 = torch.aten.exp %940 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc658) | |
%942 = torch.aten.sum.dim_IntList %941, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc659) | |
%943 = torch.aten.div.Tensor %941, %942 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc660) | |
%944 = torch.aten.bmm %943, %932 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc661) | |
%945 = torch.aten.view %944, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc662) | |
%946 = torch.aten.transpose.int %945, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc663) | |
%947 = torch.aten.clone %946, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc664) | |
%948 = torch.aten.view %947, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc665) | |
%949 = torch.aten.transpose.int %26, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc666) | |
%950 = torch.aten.view %948, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc667) | |
%951 = torch.aten.mm %950, %949 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc668) | |
%952 = torch.aten.mul.Scalar %25, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc668) | |
%953 = torch.aten.add.Tensor %952, %951, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc668) | |
%954 = torch.aten.view %953, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc669) | |
%955 = torch.aten.add.Tensor %901, %954, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc670) | |
%956 = torch.aten.view %955, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc671) | |
%result0_80, %result1_81, %result2_82 = torch.aten.native_layer_norm %956, %232, %24, %23, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc672) | |
%957 = torch.aten.transpose.int %22, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc673) | |
%958 = torch.aten.mm %result0_80, %957 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc674) | |
%959 = torch.aten.mul.Scalar %21, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc674) | |
%960 = torch.aten.add.Tensor %959, %958, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc674) | |
%961 = torch.aten.relu %960 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc675) | |
%962 = torch.aten.transpose.int %20, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc676) | |
%963 = torch.aten.mm %961, %962 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc677) | |
%964 = torch.aten.mul.Scalar %19, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc677) | |
%965 = torch.aten.add.Tensor %964, %963, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc677) | |
%966 = torch.aten.add.Tensor %956, %965, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc678) | |
%967 = torch.aten.view %966, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc679) | |
%result0_83, %result1_84, %result2_85 = torch.aten.native_layer_norm %967, %232, %18, %17, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc680) | |
%968 = torch.aten.transpose.int %16, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc681) | |
%969 = torch.aten.view %result0_83, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc682) | |
%970 = torch.aten.mm %969, %968 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc683) | |
%971 = torch.aten.mul.Scalar %15, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc683) | |
%972 = torch.aten.add.Tensor %971, %970, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc683) | |
%973 = torch.aten.view %972, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc684) | |
%974 = torch.aten.mul.Scalar %973, %float1.250000e-01 : !torch.vtensor<[1,128,768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32> loc(#loc685) | |
%975 = torch.aten.transpose.int %14, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc686) | |
%976 = torch.aten.view %result0_83, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc687) | |
%977 = torch.aten.mm %976, %975 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc688) | |
%978 = torch.aten.mul.Scalar %13, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc688) | |
%979 = torch.aten.add.Tensor %978, %977, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc688) | |
%980 = torch.aten.view %979, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc689) | |
%981 = torch.aten.view %980, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc690) | |
%982 = torch.aten.transpose.int %981, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc691) | |
%983 = torch.aten.clone %982, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc692) | |
%984 = torch.aten.transpose.int %12, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc693) | |
%985 = torch.aten.view %result0_83, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc694) | |
%986 = torch.aten.mm %985, %984 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc695) | |
%987 = torch.aten.mul.Scalar %11, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc695) | |
%988 = torch.aten.add.Tensor %987, %986, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc695) | |
%989 = torch.aten.view %988, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc696) | |
%990 = torch.aten.view %989, %248 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc697) | |
%991 = torch.aten.transpose.int %990, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc698) | |
%992 = torch.aten.clone %991, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc699) | |
%993 = torch.aten.view %974, %261 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc700) | |
%994 = torch.aten.transpose.int %993, %int1, %int2 : !torch.vtensor<[1,128,12,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc701) | |
%995 = torch.aten.clone %994, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc702) | |
%996 = torch.aten.view %995, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc703) | |
%997 = torch.aten.view %983, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc704) | |
%998 = torch.aten.view %992, %265 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc705) | |
%999 = torch.aten.transpose.int %997, %int1, %int2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc706) | |
%1000 = torch.aten.bmm %996, %999 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc707) | |
%1001 = torch.aten.view %1000, %271 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc708) | |
%1002 = torch.aten.add.Tensor %1001, %230, %int1 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc709) | |
%1003 = torch.aten.clone %187, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc710) | |
%1004 = torch.aten.maximum %1002, %1003 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc711) | |
%1005 = torch.aten.view %1004, %276 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc712) | |
%values_86, %indices_87 = torch.aten.max.dim %1005, %int-1, %true : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[12,128,1],f32>, !torch.vtensor<[12,128,1],si64> loc(#loc713) | |
%1006 = torch.aten.sub.Tensor %1005, %values_86, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc714) | |
%1007 = torch.aten.exp %1006 : !torch.vtensor<[12,128,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc715) | |
%1008 = torch.aten.sum.dim_IntList %1007, %278, %true, %none : !torch.vtensor<[12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[12,128,1],f32> loc(#loc716) | |
%1009 = torch.aten.div.Tensor %1007, %1008 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,1],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc717) | |
%1010 = torch.aten.bmm %1009, %998 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc718) | |
%1011 = torch.aten.view %1010, %284 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc719) | |
%1012 = torch.aten.transpose.int %1011, %int1, %int2 : !torch.vtensor<[1,12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc720) | |
%1013 = torch.aten.clone %1012, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc721) | |
%1014 = torch.aten.view %1013, %239 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc722) | |
%1015 = torch.aten.transpose.int %10, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc723) | |
%1016 = torch.aten.view %1014, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc724) | |
%1017 = torch.aten.mm %1016, %1015 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc725) | |
%1018 = torch.aten.mul.Scalar %9, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc725) | |
%1019 = torch.aten.add.Tensor %1018, %1017, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc725) | |
%1020 = torch.aten.view %1019, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc726) | |
%1021 = torch.aten.add.Tensor %967, %1020, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc727) | |
%1022 = torch.aten.view %1021, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc728) | |
%result0_88, %result1_89, %result2_90 = torch.aten.native_layer_norm %1022, %232, %8, %7, %float1.000000e-05 : !torch.vtensor<[128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,1],f32>, !torch.vtensor<[128,1],f32> loc(#loc729) | |
%1023 = torch.aten.transpose.int %6, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc730) | |
%1024 = torch.aten.mm %result0_88, %1023 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc731) | |
%1025 = torch.aten.mul.Scalar %5, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc731) | |
%1026 = torch.aten.add.Tensor %1025, %1024, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc731) | |
%1027 = torch.aten.relu %1026 : !torch.vtensor<[128,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc732) | |
%1028 = torch.aten.transpose.int %4, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc733) | |
%1029 = torch.aten.mm %1027, %1028 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc734) | |
%1030 = torch.aten.mul.Scalar %3, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc734) | |
%1031 = torch.aten.add.Tensor %1030, %1029, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc734) | |
%1032 = torch.aten.add.Tensor %1022, %1031, %int1 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc735) | |
%1033 = torch.aten.view %1032, %239 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc736) | |
%result0_91, %result1_92, %result2_93 = torch.aten.native_layer_norm %1033, %232, %2, %1, %float1.000000e-05 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc737) | |
%1034 = torch.aten.transpose.int %0, %int0, %int1 : !torch.vtensor<[2,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,2],f32> loc(#loc738) | |
%1035 = torch.aten.view %result0_91, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc739) | |
%1036 = torch.aten.mm %1035, %1034 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,2],f32> -> !torch.vtensor<[128,2],f32> loc(#loc740) | |
%1037 = torch.prim.ListConstruct %int1, %int128, %int2 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc763) | |
%1038 = torch.aten.view %1036, %1037 : !torch.vtensor<[128,2],f32>, !torch.list<int> -> !torch.vtensor<[1,128,2],f32> loc(#loc741) | |
%1039 = torch.aten.ne.Scalar %arg0, %int1 : !torch.vtensor<[1,128],si64>, !torch.int -> !torch.vtensor<[1,128],i1> loc(#loc742) | |
%1040 = torch.aten.sum.dim_IntList %1039, %278, %false, %none : !torch.vtensor<[1,128],i1>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1],si64> loc(#loc743) | |
%1041 = torch.aten.sub.Scalar %1040, %int1, %int1 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64> loc(#loc744) | |
%1042 = torch.aten.arange.start_step %int0, %int1, %int1, %none, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[1],si64> loc(#loc745) | |
%1043 = torch.prim.ListConstruct %1042, %1041 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor> loc(#loc746) | |
%1044 = torch.aten.index.Tensor %1038, %1043 : !torch.vtensor<[1,128,2],f32>, !torch.list<vtensor> -> !torch.vtensor<[1,2],f32> loc(#loc746) | |
return %1044 : !torch.vtensor<[1,2],f32> loc(#loc) | |
} loc(#loc) | |
} loc(#loc) | |
#loc1 = loc("<eval_with_key>.2":37:55) | |
#loc2 = loc("<eval_with_key>.2":13:40) | |
#loc3 = loc("<eval_with_key>.2":5:41) | |
#loc4 = loc("<eval_with_key>.2":5:40) | |
#loc5 = loc("<eval_with_key>.2":87:46) | |
#loc6 = loc("<eval_with_key>.2":36:76) | |
#loc7 = loc("<eval_with_key>.2":5:44) | |
#loc8 = loc("<eval_with_key>.2":9:70) | |
#loc9 = loc("<eval_with_key>.2":20:43) | |
#loc10 = loc("<eval_with_key>.2":8:49) | |
#loc11 = loc("<eval_with_key>.2":15:38) | |
#loc12 = loc("<eval_with_key>.2":44:65) | |
#loc13 = loc("<eval_with_key>.2":44:107) | |
#loc14 = loc("<eval_with_key>.2":54:39) | |
#loc15 = loc("<eval_with_key>.2":61:49) | |
#loc16 = loc("<eval_with_key>.2":61:53) | |
#loc17 = loc("<eval_with_key>.2":5:11) | |
#loc18 = loc("<eval_with_key>.2":7:16) | |
#loc19 = loc("-":9:12) | |
#loc20 = loc("-":5852:10) | |
#loc21 = loc("<eval_with_key>.2":9:27) | |
#loc22 = loc("<eval_with_key>.2":8:11) | |
#loc23 = loc("<eval_with_key>.2":10:13) | |
#loc24 = loc("<eval_with_key>.2":11:10) | |
#loc25 = loc("<eval_with_key>.2":12:10) | |
#loc26 = loc("<eval_with_key>.2":15:10) | |
#loc27 = loc("<eval_with_key>.2":17:18) | |
#loc28 = loc("-":1238:13) | |
#loc29 = loc("-":6765:10) | |
#loc30 = loc("<eval_with_key>.2":24:9) | |
#loc31 = loc("<eval_with_key>.2":20:11) | |
#loc32 = loc("<eval_with_key>.2":21:13) | |
#loc33 = loc("<eval_with_key>.2":22:12) | |
#loc34 = loc("-":4446:13) | |
#loc35 = loc("-":6341:10) | |
#loc36 = loc("<eval_with_key>.2":23:13) | |
#loc37 = loc("<eval_with_key>.2":25:19) | |
#loc38 = loc("<eval_with_key>.2":26:16) | |
#loc39 = loc("<eval_with_key>.2":27:18) | |
#loc40 = loc("-":4614:15) | |
#loc41 = loc("-":6329:10) | |
#loc42 = loc("<eval_with_key>.2":30:13) | |
#loc43 = loc("<eval_with_key>.2":32:18) | |
#loc44 = loc("<eval_with_key>.2":33:18) | |
#loc45 = loc("<eval_with_key>.2":35:15) | |
#loc46 = loc("<eval_with_key>.2":36:29) | |
#loc47 = loc("<eval_with_key>.2":37:11) | |
#loc48 = loc("<eval_with_key>.2":38:29) | |
#loc49 = loc("<eval_with_key>.2":39:18) | |
#loc50 = loc("<eval_with_key>.2":40:12) | |
#loc51 = loc("<eval_with_key>.2":41:12) | |
#loc52 = loc("-":5931:10) | |
#loc53 = loc("<eval_with_key>.2":52:12) | |
#loc54 = loc("<eval_with_key>.2":44:24) | |
#loc55 = loc("<eval_with_key>.2":49:8) | |
#loc56 = loc("<eval_with_key>.2":50:13) | |
#loc57 = loc("<eval_with_key>.2":53:13) | |
#loc58 = loc("<eval_with_key>.2":54:12) | |
#loc59 = loc("<eval_with_key>.2":56:10) | |
#loc60 = loc("<eval_with_key>.2":57:13) | |
#loc61 = loc("<eval_with_key>.2":59:14) | |
#loc62 = loc("<eval_with_key>.2":60:13) | |
#loc63 = loc("<eval_with_key>.2":61:13) | |
#loc64 = loc("<eval_with_key>.2":62:16) | |
#loc65 = loc("<eval_with_key>.2":63:12) | |
#loc66 = loc("<eval_with_key>.2":65:10) | |
#loc67 = loc("<eval_with_key>.2":66:13) | |
#loc68 = loc("<eval_with_key>.2":68:14) | |
#loc69 = loc("<eval_with_key>.2":69:13) | |
#loc70 = loc("<eval_with_key>.2":70:13) | |
#loc71 = loc("<eval_with_key>.2":71:18) | |
#loc72 = loc("<eval_with_key>.2":72:14) | |
#loc73 = loc("<eval_with_key>.2":73:14) | |
#loc74 = loc("<eval_with_key>.2":74:18) | |
#loc75 = loc("<eval_with_key>.2":75:14) | |
#loc76 = loc("<eval_with_key>.2":76:14) | |
#loc77 = loc("<eval_with_key>.2":77:14) | |
#loc78 = loc("<eval_with_key>.2":78:14) | |
#loc79 = loc("<eval_with_key>.2":79:18) | |
#loc80 = loc("<eval_with_key>.2":80:10) | |
#loc81 = loc("<eval_with_key>.2":81:14) | |
#loc82 = loc("<eval_with_key>.2":82:12) | |
#loc83 = loc("<eval_with_key>.2":84:24) | |
#loc84 = loc("<eval_with_key>.2":85:14) | |
#loc85 = loc("<eval_with_key>.2":86:14) | |
#loc86 = loc("<eval_with_key>.2":87:11) | |
#loc87 = loc("<eval_with_key>.2":88:12) | |
#loc88 = loc("<eval_with_key>.2":89:10) | |
#loc89 = loc("<eval_with_key>.2":90:12) | |
#loc90 = loc("<eval_with_key>.2":91:10) | |
#loc91 = loc("<eval_with_key>.2":93:12) | |
#loc92 = loc("<eval_with_key>.2":94:14) | |
#loc93 = loc("<eval_with_key>.2":95:18) | |
#loc94 = loc("<eval_with_key>.2":96:14) | |
#loc95 = loc("<eval_with_key>.2":97:19) | |
#loc96 = loc("<eval_with_key>.2":99:10) | |
#loc97 = loc("<eval_with_key>.2":100:14) | |
#loc98 = loc("<eval_with_key>.2":102:14) | |
#loc99 = loc("<eval_with_key>.2":103:14) | |
#loc100 = loc("<eval_with_key>.2":104:12) | |
#loc101 = loc("<eval_with_key>.2":105:14) | |
#loc102 = loc("<eval_with_key>.2":108:26) | |
#loc103 = loc("<eval_with_key>.2":113:10) | |
#loc104 = loc("<eval_with_key>.2":115:14) | |
#loc105 = loc("<eval_with_key>.2":116:11) | |
#loc106 = loc("<eval_with_key>.2":119:10) | |
#loc107 = loc("<eval_with_key>.2":121:14) | |
#loc108 = loc("<eval_with_key>.2":122:12) | |
#loc109 = loc("<eval_with_key>.2":123:14) | |
#loc110 = loc("<eval_with_key>.2":126:26) | |
#loc111 = loc("<eval_with_key>.2":131:10) | |
#loc112 = loc("<eval_with_key>.2":132:14) | |
#loc113 = loc("<eval_with_key>.2":134:14) | |
#loc114 = loc("<eval_with_key>.2":135:14) | |
#loc115 = loc("<eval_with_key>.2":136:12) | |
#loc116 = loc("<eval_with_key>.2":138:10) | |
#loc117 = loc("<eval_with_key>.2":139:14) | |
#loc118 = loc("<eval_with_key>.2":141:14) | |
#loc119 = loc("<eval_with_key>.2":142:14) | |
#loc120 = loc("<eval_with_key>.2":143:14) | |
#loc121 = loc("<eval_with_key>.2":144:18) | |
#loc122 = loc("<eval_with_key>.2":145:14) | |
#loc123 = loc("<eval_with_key>.2":147:10) | |
#loc124 = loc("<eval_with_key>.2":148:14) | |
#loc125 = loc("<eval_with_key>.2":150:14) | |
#loc126 = loc("<eval_with_key>.2":151:14) | |
#loc127 = loc("<eval_with_key>.2":152:14) | |
#loc128 = loc("<eval_with_key>.2":153:18) | |
#loc129 = loc("<eval_with_key>.2":154:14) | |
#loc130 = loc("<eval_with_key>.2":155:14) | |
#loc131 = loc("<eval_with_key>.2":156:18) | |
#loc132 = loc("<eval_with_key>.2":157:14) | |
#loc133 = loc("<eval_with_key>.2":158:14) | |
#loc134 = loc("<eval_with_key>.2":159:14) | |
#loc135 = loc("<eval_with_key>.2":160:14) | |
#loc136 = loc("<eval_with_key>.2":161:18) | |
#loc137 = loc("<eval_with_key>.2":162:12) | |
#loc138 = loc("<eval_with_key>.2":163:14) | |
#loc139 = loc("<eval_with_key>.2":164:12) | |
#loc140 = loc("<eval_with_key>.2":166:24) | |
#loc141 = loc("<eval_with_key>.2":167:16) | |
#loc142 = loc("<eval_with_key>.2":168:14) | |
#loc143 = loc("<eval_with_key>.2":169:13) | |
#loc144 = loc("<eval_with_key>.2":170:12) | |
#loc145 = loc("<eval_with_key>.2":171:12) | |
#loc146 = loc("<eval_with_key>.2":172:12) | |
#loc147 = loc("<eval_with_key>.2":173:12) | |
#loc148 = loc("<eval_with_key>.2":175:12) | |
#loc149 = loc("<eval_with_key>.2":176:14) | |
#loc150 = loc("<eval_with_key>.2":177:18) | |
#loc151 = loc("<eval_with_key>.2":178:14) | |
#loc152 = loc("<eval_with_key>.2":179:21) | |
#loc153 = loc("<eval_with_key>.2":181:10) | |
#loc154 = loc("<eval_with_key>.2":182:14) | |
#loc155 = loc("<eval_with_key>.2":184:14) | |
#loc156 = loc("<eval_with_key>.2":185:14) | |
#loc157 = loc("<eval_with_key>.2":186:12) | |
#loc158 = loc("<eval_with_key>.2":187:14) | |
#loc159 = loc("<eval_with_key>.2":190:26) | |
#loc160 = loc("<eval_with_key>.2":195:11) | |
#loc161 = loc("<eval_with_key>.2":197:15) | |
#loc162 = loc("<eval_with_key>.2":198:13) | |
#loc163 = loc("<eval_with_key>.2":201:11) | |
#loc164 = loc("<eval_with_key>.2":203:15) | |
#loc165 = loc("<eval_with_key>.2":204:12) | |
#loc166 = loc("<eval_with_key>.2":205:14) | |
#loc167 = loc("<eval_with_key>.2":208:26) | |
#loc168 = loc("<eval_with_key>.2":213:11) | |
#loc169 = loc("<eval_with_key>.2":214:14) | |
#loc170 = loc("<eval_with_key>.2":216:15) | |
#loc171 = loc("<eval_with_key>.2":217:14) | |
#loc172 = loc("<eval_with_key>.2":218:12) | |
#loc173 = loc("<eval_with_key>.2":220:11) | |
#loc174 = loc("<eval_with_key>.2":221:14) | |
#loc175 = loc("<eval_with_key>.2":223:15) | |
#loc176 = loc("<eval_with_key>.2":224:14) | |
#loc177 = loc("<eval_with_key>.2":225:14) | |
#loc178 = loc("<eval_with_key>.2":226:19) | |
#loc179 = loc("<eval_with_key>.2":227:14) | |
#loc180 = loc("<eval_with_key>.2":229:11) | |
#loc181 = loc("<eval_with_key>.2":230:14) | |
#loc182 = loc("<eval_with_key>.2":232:15) | |
#loc183 = loc("<eval_with_key>.2":233:14) | |
#loc184 = loc("<eval_with_key>.2":234:14) | |
#loc185 = loc("<eval_with_key>.2":235:19) | |
#loc186 = loc("<eval_with_key>.2":236:14) | |
#loc187 = loc("<eval_with_key>.2":237:14) | |
#loc188 = loc("<eval_with_key>.2":238:19) | |
#loc189 = loc("<eval_with_key>.2":239:15) | |
#loc190 = loc("<eval_with_key>.2":240:14) | |
#loc191 = loc("<eval_with_key>.2":241:14) | |
#loc192 = loc("<eval_with_key>.2":242:14) | |
#loc193 = loc("<eval_with_key>.2":243:19) | |
#loc194 = loc("<eval_with_key>.2":244:12) | |
#loc195 = loc("<eval_with_key>.2":245:14) | |
#loc196 = loc("<eval_with_key>.2":246:13) | |
#loc197 = loc("<eval_with_key>.2":248:24) | |
#loc198 = loc("<eval_with_key>.2":249:16) | |
#loc199 = loc("<eval_with_key>.2":250:14) | |
#loc200 = loc("<eval_with_key>.2":251:13) | |
#loc201 = loc("<eval_with_key>.2":252:12) | |
#loc202 = loc("<eval_with_key>.2":253:12) | |
#loc203 = loc("<eval_with_key>.2":254:12) | |
#loc204 = loc("<eval_with_key>.2":255:12) | |
#loc205 = loc("<eval_with_key>.2":257:12) | |
#loc206 = loc("<eval_with_key>.2":258:14) | |
#loc207 = loc("<eval_with_key>.2":259:19) | |
#loc208 = loc("<eval_with_key>.2":260:15) | |
#loc209 = loc("<eval_with_key>.2":261:21) | |
#loc210 = loc("<eval_with_key>.2":263:11) | |
#loc211 = loc("<eval_with_key>.2":264:14) | |
#loc212 = loc("<eval_with_key>.2":266:15) | |
#loc213 = loc("<eval_with_key>.2":267:14) | |
#loc214 = loc("<eval_with_key>.2":268:13) | |
#loc215 = loc("<eval_with_key>.2":269:14) | |
#loc216 = loc("<eval_with_key>.2":272:26) | |
#loc217 = loc("<eval_with_key>.2":277:11) | |
#loc218 = loc("<eval_with_key>.2":279:15) | |
#loc219 = loc("<eval_with_key>.2":280:13) | |
#loc220 = loc("<eval_with_key>.2":283:11) | |
#loc221 = loc("<eval_with_key>.2":285:15) | |
#loc222 = loc("<eval_with_key>.2":286:13) | |
#loc223 = loc("<eval_with_key>.2":287:14) | |
#loc224 = loc("<eval_with_key>.2":290:26) | |
#loc225 = loc("<eval_with_key>.2":295:11) | |
#loc226 = loc("<eval_with_key>.2":296:14) | |
#loc227 = loc("<eval_with_key>.2":298:15) | |
#loc228 = loc("<eval_with_key>.2":299:14) | |
#loc229 = loc("<eval_with_key>.2":300:12) | |
#loc230 = loc("<eval_with_key>.2":302:11) | |
#loc231 = loc("<eval_with_key>.2":303:14) | |
#loc232 = loc("<eval_with_key>.2":305:15) | |
#loc233 = loc("<eval_with_key>.2":306:14) | |
#loc234 = loc("<eval_with_key>.2":307:14) | |
#loc235 = loc("<eval_with_key>.2":308:19) | |
#loc236 = loc("<eval_with_key>.2":309:15) | |
#loc237 = loc("<eval_with_key>.2":311:11) | |
#loc238 = loc("<eval_with_key>.2":312:14) | |
#loc239 = loc("<eval_with_key>.2":314:15) | |
#loc240 = loc("<eval_with_key>.2":315:14) | |
#loc241 = loc("<eval_with_key>.2":316:14) | |
#loc242 = loc("<eval_with_key>.2":317:19) | |
#loc243 = loc("<eval_with_key>.2":318:15) | |
#loc244 = loc("<eval_with_key>.2":319:14) | |
#loc245 = loc("<eval_with_key>.2":320:19) | |
#loc246 = loc("<eval_with_key>.2":321:15) | |
#loc247 = loc("<eval_with_key>.2":322:14) | |
#loc248 = loc("<eval_with_key>.2":323:14) | |
#loc249 = loc("<eval_with_key>.2":324:14) | |
#loc250 = loc("<eval_with_key>.2":325:19) | |
#loc251 = loc("<eval_with_key>.2":326:12) | |
#loc252 = loc("<eval_with_key>.2":327:14) | |
#loc253 = loc("<eval_with_key>.2":328:13) | |
#loc254 = loc("<eval_with_key>.2":330:24) | |
#loc255 = loc("<eval_with_key>.2":331:16) | |
#loc256 = loc("<eval_with_key>.2":332:14) | |
#loc257 = loc("<eval_with_key>.2":333:13) | |
#loc258 = loc("<eval_with_key>.2":334:12) | |
#loc259 = loc("<eval_with_key>.2":335:12) | |
#loc260 = loc("<eval_with_key>.2":336:12) | |
#loc261 = loc("<eval_with_key>.2":337:12) | |
#loc262 = loc("<eval_with_key>.2":339:12) | |
#loc263 = loc("<eval_with_key>.2":340:14) | |
#loc264 = loc("<eval_with_key>.2":341:19) | |
#loc265 = loc("<eval_with_key>.2":342:15) | |
#loc266 = loc("<eval_with_key>.2":343:21) | |
#loc267 = loc("<eval_with_key>.2":345:11) | |
#loc268 = loc("<eval_with_key>.2":346:14) | |
#loc269 = loc("<eval_with_key>.2":348:15) | |
#loc270 = loc("<eval_with_key>.2":349:14) | |
#loc271 = loc("<eval_with_key>.2":350:13) | |
#loc272 = loc("<eval_with_key>.2":351:14) | |
#loc273 = loc("<eval_with_key>.2":354:26) | |
#loc274 = loc("<eval_with_key>.2":359:11) | |
#loc275 = loc("<eval_with_key>.2":361:15) | |
#loc276 = loc("<eval_with_key>.2":362:13) | |
#loc277 = loc("<eval_with_key>.2":365:11) | |
#loc278 = loc("<eval_with_key>.2":367:15) | |
#loc279 = loc("<eval_with_key>.2":368:13) | |
#loc280 = loc("<eval_with_key>.2":369:14) | |
#loc281 = loc("<eval_with_key>.2":372:26) | |
#loc282 = loc("<eval_with_key>.2":377:11) | |
#loc283 = loc("<eval_with_key>.2":378:14) | |
#loc284 = loc("<eval_with_key>.2":380:15) | |
#loc285 = loc("<eval_with_key>.2":381:14) | |
#loc286 = loc("<eval_with_key>.2":382:12) | |
#loc287 = loc("<eval_with_key>.2":384:11) | |
#loc288 = loc("<eval_with_key>.2":385:14) | |
#loc289 = loc("<eval_with_key>.2":387:15) | |
#loc290 = loc("<eval_with_key>.2":388:14) | |
#loc291 = loc("<eval_with_key>.2":389:14) | |
#loc292 = loc("<eval_with_key>.2":390:19) | |
#loc293 = loc("<eval_with_key>.2":391:15) | |
#loc294 = loc("<eval_with_key>.2":393:11) | |
#loc295 = loc("<eval_with_key>.2":394:14) | |
#loc296 = loc("<eval_with_key>.2":396:15) | |
#loc297 = loc("<eval_with_key>.2":397:14) | |
#loc298 = loc("<eval_with_key>.2":398:14) | |
#loc299 = loc("<eval_with_key>.2":399:19) | |
#loc300 = loc("<eval_with_key>.2":400:15) | |
#loc301 = loc("<eval_with_key>.2":401:14) | |
#loc302 = loc("<eval_with_key>.2":402:19) | |
#loc303 = loc("<eval_with_key>.2":403:15) | |
#loc304 = loc("<eval_with_key>.2":404:14) | |
#loc305 = loc("<eval_with_key>.2":405:14) | |
#loc306 = loc("<eval_with_key>.2":406:14) | |
#loc307 = loc("<eval_with_key>.2":407:19) | |
#loc308 = loc("<eval_with_key>.2":408:12) | |
#loc309 = loc("<eval_with_key>.2":409:14) | |
#loc310 = loc("<eval_with_key>.2":410:13) | |
#loc311 = loc("<eval_with_key>.2":412:24) | |
#loc312 = loc("<eval_with_key>.2":413:16) | |
#loc313 = loc("<eval_with_key>.2":414:14) | |
#loc314 = loc("<eval_with_key>.2":415:13) | |
#loc315 = loc("<eval_with_key>.2":416:12) | |
#loc316 = loc("<eval_with_key>.2":417:12) | |
#loc317 = loc("<eval_with_key>.2":418:12) | |
#loc318 = loc("<eval_with_key>.2":419:12) | |
#loc319 = loc("<eval_with_key>.2":421:12) | |
#loc320 = loc("<eval_with_key>.2":422:14) | |
#loc321 = loc("<eval_with_key>.2":423:19) | |
#loc322 = loc("<eval_with_key>.2":424:15) | |
#loc323 = loc("<eval_with_key>.2":425:21) | |
#loc324 = loc("<eval_with_key>.2":427:11) | |
#loc325 = loc("<eval_with_key>.2":428:14) | |
#loc326 = loc("<eval_with_key>.2":430:15) | |
#loc327 = loc("<eval_with_key>.2":431:14) | |
#loc328 = loc("<eval_with_key>.2":432:13) | |
#loc329 = loc("<eval_with_key>.2":433:14) | |
#loc330 = loc("<eval_with_key>.2":436:26) | |
#loc331 = loc("<eval_with_key>.2":441:11) | |
#loc332 = loc("<eval_with_key>.2":443:15) | |
#loc333 = loc("<eval_with_key>.2":444:13) | |
#loc334 = loc("<eval_with_key>.2":447:11) | |
#loc335 = loc("<eval_with_key>.2":449:15) | |
#loc336 = loc("<eval_with_key>.2":450:13) | |
#loc337 = loc("<eval_with_key>.2":451:14) | |
#loc338 = loc("<eval_with_key>.2":454:27) | |
#loc339 = loc("<eval_with_key>.2":459:11) | |
#loc340 = loc("<eval_with_key>.2":460:14) | |
#loc341 = loc("<eval_with_key>.2":462:15) | |
#loc342 = loc("<eval_with_key>.2":463:14) | |
#loc343 = loc("<eval_with_key>.2":464:12) | |
#loc344 = loc("<eval_with_key>.2":466:11) | |
#loc345 = loc("<eval_with_key>.2":467:14) | |
#loc346 = loc("<eval_with_key>.2":469:15) | |
#loc347 = loc("<eval_with_key>.2":470:15) | |
#loc348 = loc("<eval_with_key>.2":471:15) | |
#loc349 = loc("<eval_with_key>.2":472:19) | |
#loc350 = loc("<eval_with_key>.2":473:15) | |
#loc351 = loc("<eval_with_key>.2":475:11) | |
#loc352 = loc("<eval_with_key>.2":476:15) | |
#loc353 = loc("<eval_with_key>.2":478:15) | |
#loc354 = loc("<eval_with_key>.2":479:15) | |
#loc355 = loc("<eval_with_key>.2":480:15) | |
#loc356 = loc("<eval_with_key>.2":481:19) | |
#loc357 = loc("<eval_with_key>.2":482:15) | |
#loc358 = loc("<eval_with_key>.2":483:15) | |
#loc359 = loc("<eval_with_key>.2":484:19) | |
#loc360 = loc("<eval_with_key>.2":485:15) | |
#loc361 = loc("<eval_with_key>.2":486:15) | |
#loc362 = loc("<eval_with_key>.2":487:15) | |
#loc363 = loc("<eval_with_key>.2":488:15) | |
#loc364 = loc("<eval_with_key>.2":489:19) | |
#loc365 = loc("<eval_with_key>.2":490:13) | |
#loc366 = loc("<eval_with_key>.2":491:15) | |
#loc367 = loc("<eval_with_key>.2":492:13) | |
#loc368 = loc("<eval_with_key>.2":494:24) | |
#loc369 = loc("<eval_with_key>.2":495:16) | |
#loc370 = loc("<eval_with_key>.2":496:15) | |
#loc371 = loc("<eval_with_key>.2":497:13) | |
#loc372 = loc("<eval_with_key>.2":498:12) | |
#loc373 = loc("<eval_with_key>.2":499:12) | |
#loc374 = loc("<eval_with_key>.2":500:12) | |
#loc375 = loc("<eval_with_key>.2":501:12) | |
#loc376 = loc("<eval_with_key>.2":503:13) | |
#loc377 = loc("<eval_with_key>.2":504:15) | |
#loc378 = loc("<eval_with_key>.2":505:19) | |
#loc379 = loc("<eval_with_key>.2":506:15) | |
#loc380 = loc("<eval_with_key>.2":507:21) | |
#loc381 = loc("<eval_with_key>.2":509:11) | |
#loc382 = loc("<eval_with_key>.2":510:15) | |
#loc383 = loc("<eval_with_key>.2":512:15) | |
#loc384 = loc("<eval_with_key>.2":513:15) | |
#loc385 = loc("<eval_with_key>.2":514:13) | |
#loc386 = loc("<eval_with_key>.2":515:15) | |
#loc387 = loc("<eval_with_key>.2":518:27) | |
#loc388 = loc("<eval_with_key>.2":523:11) | |
#loc389 = loc("<eval_with_key>.2":525:15) | |
#loc390 = loc("<eval_with_key>.2":526:13) | |
#loc391 = loc("<eval_with_key>.2":529:11) | |
#loc392 = loc("<eval_with_key>.2":531:15) | |
#loc393 = loc("<eval_with_key>.2":532:13) | |
#loc394 = loc("<eval_with_key>.2":533:15) | |
#loc395 = loc("<eval_with_key>.2":536:27) | |
#loc396 = loc("<eval_with_key>.2":541:11) | |
#loc397 = loc("<eval_with_key>.2":542:15) | |
#loc398 = loc("<eval_with_key>.2":544:15) | |
#loc399 = loc("<eval_with_key>.2":545:15) | |
#loc400 = loc("<eval_with_key>.2":546:12) | |
#loc401 = loc("<eval_with_key>.2":548:11) | |
#loc402 = loc("<eval_with_key>.2":549:15) | |
#loc403 = loc("<eval_with_key>.2":551:15) | |
#loc404 = loc("<eval_with_key>.2":552:15) | |
#loc405 = loc("<eval_with_key>.2":553:15) | |
#loc406 = loc("<eval_with_key>.2":554:19) | |
#loc407 = loc("<eval_with_key>.2":555:15) | |
#loc408 = loc("<eval_with_key>.2":557:11) | |
#loc409 = loc("<eval_with_key>.2":558:15) | |
#loc410 = loc("<eval_with_key>.2":560:15) | |
#loc411 = loc("<eval_with_key>.2":561:15) | |
#loc412 = loc("<eval_with_key>.2":562:15) | |
#loc413 = loc("<eval_with_key>.2":563:19) | |
#loc414 = loc("<eval_with_key>.2":564:15) | |
#loc415 = loc("<eval_with_key>.2":565:15) | |
#loc416 = loc("<eval_with_key>.2":566:19) | |
#loc417 = loc("<eval_with_key>.2":567:15) | |
#loc418 = loc("<eval_with_key>.2":568:15) | |
#loc419 = loc("<eval_with_key>.2":569:15) | |
#loc420 = loc("<eval_with_key>.2":570:15) | |
#loc421 = loc("<eval_with_key>.2":571:19) | |
#loc422 = loc("<eval_with_key>.2":572:13) | |
#loc423 = loc("<eval_with_key>.2":573:15) | |
#loc424 = loc("<eval_with_key>.2":574:13) | |
#loc425 = loc("<eval_with_key>.2":576:24) | |
#loc426 = loc("<eval_with_key>.2":577:16) | |
#loc427 = loc("<eval_with_key>.2":578:15) | |
#loc428 = loc("<eval_with_key>.2":579:13) | |
#loc429 = loc("<eval_with_key>.2":580:12) | |
#loc430 = loc("<eval_with_key>.2":581:12) | |
#loc431 = loc("<eval_with_key>.2":582:12) | |
#loc432 = loc("<eval_with_key>.2":583:12) | |
#loc433 = loc("<eval_with_key>.2":585:13) | |
#loc434 = loc("<eval_with_key>.2":586:15) | |
#loc435 = loc("<eval_with_key>.2":587:19) | |
#loc436 = loc("<eval_with_key>.2":588:15) | |
#loc437 = loc("<eval_with_key>.2":589:21) | |
#loc438 = loc("<eval_with_key>.2":591:11) | |
#loc439 = loc("<eval_with_key>.2":592:15) | |
#loc440 = loc("<eval_with_key>.2":594:15) | |
#loc441 = loc("<eval_with_key>.2":595:15) | |
#loc442 = loc("<eval_with_key>.2":596:13) | |
#loc443 = loc("<eval_with_key>.2":597:15) | |
#loc444 = loc("<eval_with_key>.2":600:27) | |
#loc445 = loc("<eval_with_key>.2":605:11) | |
#loc446 = loc("<eval_with_key>.2":607:15) | |
#loc447 = loc("<eval_with_key>.2":608:13) | |
#loc448 = loc("<eval_with_key>.2":611:11) | |
#loc449 = loc("<eval_with_key>.2":613:15) | |
#loc450 = loc("<eval_with_key>.2":614:13) | |
#loc451 = loc("<eval_with_key>.2":615:15) | |
#loc452 = loc("<eval_with_key>.2":618:27) | |
#loc453 = loc("<eval_with_key>.2":623:11) | |
#loc454 = loc("<eval_with_key>.2":624:15) | |
#loc455 = loc("<eval_with_key>.2":626:15) | |
#loc456 = loc("<eval_with_key>.2":627:15) | |
#loc457 = loc("<eval_with_key>.2":628:12) | |
#loc458 = loc("<eval_with_key>.2":630:11) | |
#loc459 = loc("<eval_with_key>.2":631:15) | |
#loc460 = loc("<eval_with_key>.2":633:15) | |
#loc461 = loc("<eval_with_key>.2":634:15) | |
#loc462 = loc("<eval_with_key>.2":635:15) | |
#loc463 = loc("<eval_with_key>.2":636:19) | |
#loc464 = loc("<eval_with_key>.2":637:15) | |
#loc465 = loc("<eval_with_key>.2":639:11) | |
#loc466 = loc("<eval_with_key>.2":640:15) | |
#loc467 = loc("<eval_with_key>.2":642:15) | |
#loc468 = loc("<eval_with_key>.2":643:15) | |
#loc469 = loc("<eval_with_key>.2":644:15) | |
#loc470 = loc("<eval_with_key>.2":645:19) | |
#loc471 = loc("<eval_with_key>.2":646:15) | |
#loc472 = loc("<eval_with_key>.2":647:15) | |
#loc473 = loc("<eval_with_key>.2":648:19) | |
#loc474 = loc("<eval_with_key>.2":649:15) | |
#loc475 = loc("<eval_with_key>.2":650:15) | |
#loc476 = loc("<eval_with_key>.2":651:15) | |
#loc477 = loc("<eval_with_key>.2":652:15) | |
#loc478 = loc("<eval_with_key>.2":653:19) | |
#loc479 = loc("<eval_with_key>.2":654:13) | |
#loc480 = loc("<eval_with_key>.2":655:15) | |
#loc481 = loc("<eval_with_key>.2":656:13) | |
#loc482 = loc("<eval_with_key>.2":658:24) | |
#loc483 = loc("<eval_with_key>.2":659:16) | |
#loc484 = loc("<eval_with_key>.2":660:15) | |
#loc485 = loc("<eval_with_key>.2":661:13) | |
#loc486 = loc("<eval_with_key>.2":662:12) | |
#loc487 = loc("<eval_with_key>.2":663:12) | |
#loc488 = loc("<eval_with_key>.2":664:12) | |
#loc489 = loc("<eval_with_key>.2":665:12) | |
#loc490 = loc("<eval_with_key>.2":667:13) | |
#loc491 = loc("<eval_with_key>.2":668:15) | |
#loc492 = loc("<eval_with_key>.2":669:19) | |
#loc493 = loc("<eval_with_key>.2":670:15) | |
#loc494 = loc("<eval_with_key>.2":671:21) | |
#loc495 = loc("<eval_with_key>.2":673:11) | |
#loc496 = loc("<eval_with_key>.2":674:15) | |
#loc497 = loc("<eval_with_key>.2":676:15) | |
#loc498 = loc("<eval_with_key>.2":677:15) | |
#loc499 = loc("<eval_with_key>.2":678:13) | |
#loc500 = loc("<eval_with_key>.2":679:15) | |
#loc501 = loc("<eval_with_key>.2":682:27) | |
#loc502 = loc("<eval_with_key>.2":687:11) | |
#loc503 = loc("<eval_with_key>.2":689:15) | |
#loc504 = loc("<eval_with_key>.2":690:13) | |
#loc505 = loc("<eval_with_key>.2":693:11) | |
#loc506 = loc("<eval_with_key>.2":695:15) | |
#loc507 = loc("<eval_with_key>.2":696:13) | |
#loc508 = loc("<eval_with_key>.2":697:15) | |
#loc509 = loc("<eval_with_key>.2":700:27) | |
#loc510 = loc("<eval_with_key>.2":705:11) | |
#loc511 = loc("<eval_with_key>.2":706:15) | |
#loc512 = loc("<eval_with_key>.2":708:15) | |
#loc513 = loc("<eval_with_key>.2":709:15) | |
#loc514 = loc("<eval_with_key>.2":710:12) | |
#loc515 = loc("<eval_with_key>.2":712:11) | |
#loc516 = loc("<eval_with_key>.2":713:15) | |
#loc517 = loc("<eval_with_key>.2":715:15) | |
#loc518 = loc("<eval_with_key>.2":716:15) | |
#loc519 = loc("<eval_with_key>.2":717:15) | |
#loc520 = loc("<eval_with_key>.2":718:19) | |
#loc521 = loc("<eval_with_key>.2":719:15) | |
#loc522 = loc("<eval_with_key>.2":721:11) | |
#loc523 = loc("<eval_with_key>.2":722:15) | |
#loc524 = loc("<eval_with_key>.2":724:15) | |
#loc525 = loc("<eval_with_key>.2":725:15) | |
#loc526 = loc("<eval_with_key>.2":726:15) | |
#loc527 = loc("<eval_with_key>.2":727:19) | |
#loc528 = loc("<eval_with_key>.2":728:15) | |
#loc529 = loc("<eval_with_key>.2":729:15) | |
#loc530 = loc("<eval_with_key>.2":730:19) | |
#loc531 = loc("<eval_with_key>.2":731:15) | |
#loc532 = loc("<eval_with_key>.2":732:15) | |
#loc533 = loc("<eval_with_key>.2":733:15) | |
#loc534 = loc("<eval_with_key>.2":734:15) | |
#loc535 = loc("<eval_with_key>.2":735:19) | |
#loc536 = loc("<eval_with_key>.2":736:13) | |
#loc537 = loc("<eval_with_key>.2":737:15) | |
#loc538 = loc("<eval_with_key>.2":738:13) | |
#loc539 = loc("<eval_with_key>.2":740:24) | |
#loc540 = loc("<eval_with_key>.2":741:16) | |
#loc541 = loc("<eval_with_key>.2":742:15) | |
#loc542 = loc("<eval_with_key>.2":743:13) | |
#loc543 = loc("<eval_with_key>.2":744:12) | |
#loc544 = loc("<eval_with_key>.2":745:12) | |
#loc545 = loc("<eval_with_key>.2":746:12) | |
#loc546 = loc("<eval_with_key>.2":747:12) | |
#loc547 = loc("<eval_with_key>.2":749:13) | |
#loc548 = loc("<eval_with_key>.2":750:15) | |
#loc549 = loc("<eval_with_key>.2":751:19) | |
#loc550 = loc("<eval_with_key>.2":752:15) | |
#loc551 = loc("<eval_with_key>.2":753:21) | |
#loc552 = loc("<eval_with_key>.2":755:11) | |
#loc553 = loc("<eval_with_key>.2":756:15) | |
#loc554 = loc("<eval_with_key>.2":758:15) | |
#loc555 = loc("<eval_with_key>.2":759:15) | |
#loc556 = loc("<eval_with_key>.2":760:13) | |
#loc557 = loc("<eval_with_key>.2":761:15) | |
#loc558 = loc("<eval_with_key>.2":764:27) | |
#loc559 = loc("<eval_with_key>.2":769:11) | |
#loc560 = loc("<eval_with_key>.2":771:15) | |
#loc561 = loc("<eval_with_key>.2":772:13) | |
#loc562 = loc("<eval_with_key>.2":775:11) | |
#loc563 = loc("<eval_with_key>.2":777:15) | |
#loc564 = loc("<eval_with_key>.2":778:13) | |
#loc565 = loc("<eval_with_key>.2":779:15) | |
#loc566 = loc("<eval_with_key>.2":782:27) | |
#loc567 = loc("<eval_with_key>.2":787:11) | |
#loc568 = loc("<eval_with_key>.2":788:15) | |
#loc569 = loc("<eval_with_key>.2":790:15) | |
#loc570 = loc("<eval_with_key>.2":791:15) | |
#loc571 = loc("<eval_with_key>.2":792:13) | |
#loc572 = loc("<eval_with_key>.2":794:11) | |
#loc573 = loc("<eval_with_key>.2":795:15) | |
#loc574 = loc("<eval_with_key>.2":797:15) | |
#loc575 = loc("<eval_with_key>.2":798:15) | |
#loc576 = loc("<eval_with_key>.2":799:15) | |
#loc577 = loc("<eval_with_key>.2":800:19) | |
#loc578 = loc("<eval_with_key>.2":801:15) | |
#loc579 = loc("<eval_with_key>.2":803:11) | |
#loc580 = loc("<eval_with_key>.2":804:15) | |
#loc581 = loc("<eval_with_key>.2":806:15) | |
#loc582 = loc("<eval_with_key>.2":807:15) | |
#loc583 = loc("<eval_with_key>.2":808:15) | |
#loc584 = loc("<eval_with_key>.2":809:19) | |
#loc585 = loc("<eval_with_key>.2":810:15) | |
#loc586 = loc("<eval_with_key>.2":811:15) | |
#loc587 = loc("<eval_with_key>.2":812:19) | |
#loc588 = loc("<eval_with_key>.2":813:15) | |
#loc589 = loc("<eval_with_key>.2":814:15) | |
#loc590 = loc("<eval_with_key>.2":815:15) | |
#loc591 = loc("<eval_with_key>.2":816:15) | |
#loc592 = loc("<eval_with_key>.2":817:19) | |
#loc593 = loc("<eval_with_key>.2":818:13) | |
#loc594 = loc("<eval_with_key>.2":819:15) | |
#loc595 = loc("<eval_with_key>.2":820:13) | |
#loc596 = loc("<eval_with_key>.2":822:25) | |
#loc597 = loc("<eval_with_key>.2":823:16) | |
#loc598 = loc("<eval_with_key>.2":824:15) | |
#loc599 = loc("<eval_with_key>.2":825:13) | |
#loc600 = loc("<eval_with_key>.2":826:13) | |
#loc601 = loc("<eval_with_key>.2":827:12) | |
#loc602 = loc("<eval_with_key>.2":828:13) | |
#loc603 = loc("<eval_with_key>.2":829:12) | |
#loc604 = loc("<eval_with_key>.2":831:13) | |
#loc605 = loc("<eval_with_key>.2":832:15) | |
#loc606 = loc("<eval_with_key>.2":833:19) | |
#loc607 = loc("<eval_with_key>.2":834:15) | |
#loc608 = loc("<eval_with_key>.2":835:21) | |
#loc609 = loc("<eval_with_key>.2":837:11) | |
#loc610 = loc("<eval_with_key>.2":838:15) | |
#loc611 = loc("<eval_with_key>.2":840:15) | |
#loc612 = loc("<eval_with_key>.2":841:15) | |
#loc613 = loc("<eval_with_key>.2":842:13) | |
#loc614 = loc("<eval_with_key>.2":843:15) | |
#loc615 = loc("<eval_with_key>.2":846:27) | |
#loc616 = loc("<eval_with_key>.2":851:11) | |
#loc617 = loc("<eval_with_key>.2":853:15) | |
#loc618 = loc("<eval_with_key>.2":854:13) | |
#loc619 = loc("<eval_with_key>.2":857:11) | |
#loc620 = loc("<eval_with_key>.2":859:15) | |
#loc621 = loc("<eval_with_key>.2":860:13) | |
#loc622 = loc("<eval_with_key>.2":861:15) | |
#loc623 = loc("<eval_with_key>.2":864:27) | |
#loc624 = loc("<eval_with_key>.2":869:11) | |
#loc625 = loc("<eval_with_key>.2":870:15) | |
#loc626 = loc("<eval_with_key>.2":872:15) | |
#loc627 = loc("<eval_with_key>.2":873:15) | |
#loc628 = loc("<eval_with_key>.2":874:13) | |
#loc629 = loc("<eval_with_key>.2":876:11) | |
#loc630 = loc("<eval_with_key>.2":877:15) | |
#loc631 = loc("<eval_with_key>.2":879:15) | |
#loc632 = loc("<eval_with_key>.2":880:15) | |
#loc633 = loc("<eval_with_key>.2":881:15) | |
#loc634 = loc("<eval_with_key>.2":882:19) | |
#loc635 = loc("<eval_with_key>.2":883:15) | |
#loc636 = loc("<eval_with_key>.2":885:11) | |
#loc637 = loc("<eval_with_key>.2":886:15) | |
#loc638 = loc("<eval_with_key>.2":888:15) | |
#loc639 = loc("<eval_with_key>.2":889:15) | |
#loc640 = loc("<eval_with_key>.2":890:15) | |
#loc641 = loc("<eval_with_key>.2":891:19) | |
#loc642 = loc("<eval_with_key>.2":892:15) | |
#loc643 = loc("<eval_with_key>.2":893:15) | |
#loc644 = loc("<eval_with_key>.2":894:19) | |
#loc645 = loc("<eval_with_key>.2":895:15) | |
#loc646 = loc("<eval_with_key>.2":896:15) | |
#loc647 = loc("<eval_with_key>.2":897:15) | |
#loc648 = loc("<eval_with_key>.2":898:15) | |
#loc649 = loc("<eval_with_key>.2":899:19) | |
#loc650 = loc("<eval_with_key>.2":900:13) | |
#loc651 = loc("<eval_with_key>.2":901:15) | |
#loc652 = loc("<eval_with_key>.2":902:13) | |
#loc653 = loc("<eval_with_key>.2":904:25) | |
#loc654 = loc("<eval_with_key>.2":905:17) | |
#loc655 = loc("<eval_with_key>.2":906:15) | |
#loc656 = loc("<eval_with_key>.2":907:14) | |
#loc657 = loc("<eval_with_key>.2":908:13) | |
#loc658 = loc("<eval_with_key>.2":909:13) | |
#loc659 = loc("<eval_with_key>.2":910:13) | |
#loc660 = loc("<eval_with_key>.2":911:13) | |
#loc661 = loc("<eval_with_key>.2":913:13) | |
#loc662 = loc("<eval_with_key>.2":914:15) | |
#loc663 = loc("<eval_with_key>.2":915:19) | |
#loc664 = loc("<eval_with_key>.2":916:15) | |
#loc665 = loc("<eval_with_key>.2":917:22) | |
#loc666 = loc("<eval_with_key>.2":919:11) | |
#loc667 = loc("<eval_with_key>.2":920:15) | |
#loc668 = loc("<eval_with_key>.2":922:15) | |
#loc669 = loc("<eval_with_key>.2":923:15) | |
#loc670 = loc("<eval_with_key>.2":924:13) | |
#loc671 = loc("<eval_with_key>.2":925:15) | |
#loc672 = loc("<eval_with_key>.2":928:27) | |
#loc673 = loc("<eval_with_key>.2":933:11) | |
#loc674 = loc("<eval_with_key>.2":935:15) | |
#loc675 = loc("<eval_with_key>.2":936:14) | |
#loc676 = loc("<eval_with_key>.2":939:11) | |
#loc677 = loc("<eval_with_key>.2":941:15) | |
#loc678 = loc("<eval_with_key>.2":942:13) | |
#loc679 = loc("<eval_with_key>.2":943:15) | |
#loc680 = loc("<eval_with_key>.2":946:27) | |
#loc681 = loc("<eval_with_key>.2":951:11) | |
#loc682 = loc("<eval_with_key>.2":952:15) | |
#loc683 = loc("<eval_with_key>.2":954:15) | |
#loc684 = loc("<eval_with_key>.2":955:15) | |
#loc685 = loc("<eval_with_key>.2":956:13) | |
#loc686 = loc("<eval_with_key>.2":958:11) | |
#loc687 = loc("<eval_with_key>.2":959:15) | |
#loc688 = loc("<eval_with_key>.2":961:15) | |
#loc689 = loc("<eval_with_key>.2":962:15) | |
#loc690 = loc("<eval_with_key>.2":963:15) | |
#loc691 = loc("<eval_with_key>.2":964:19) | |
#loc692 = loc("<eval_with_key>.2":965:15) | |
#loc693 = loc("<eval_with_key>.2":967:11) | |
#loc694 = loc("<eval_with_key>.2":968:15) | |
#loc695 = loc("<eval_with_key>.2":970:15) | |
#loc696 = loc("<eval_with_key>.2":971:15) | |
#loc697 = loc("<eval_with_key>.2":972:15) | |
#loc698 = loc("<eval_with_key>.2":973:19) | |
#loc699 = loc("<eval_with_key>.2":974:15) | |
#loc700 = loc("<eval_with_key>.2":975:15) | |
#loc701 = loc("<eval_with_key>.2":976:19) | |
#loc702 = loc("<eval_with_key>.2":977:15) | |
#loc703 = loc("<eval_with_key>.2":978:15) | |
#loc704 = loc("<eval_with_key>.2":979:15) | |
#loc705 = loc("<eval_with_key>.2":980:15) | |
#loc706 = loc("<eval_with_key>.2":981:19) | |
#loc707 = loc("<eval_with_key>.2":982:13) | |
#loc708 = loc("<eval_with_key>.2":983:15) | |
#loc709 = loc("<eval_with_key>.2":984:13) | |
#loc710 = loc("<eval_with_key>.2":986:25) | |
#loc711 = loc("<eval_with_key>.2":987:17) | |
#loc712 = loc("<eval_with_key>.2":988:15) | |
#loc713 = loc("<eval_with_key>.2":989:14) | |
#loc714 = loc("<eval_with_key>.2":990:13) | |
#loc715 = loc("<eval_with_key>.2":991:13) | |
#loc716 = loc("<eval_with_key>.2":992:13) | |
#loc717 = loc("<eval_with_key>.2":993:13) | |
#loc718 = loc("<eval_with_key>.2":995:13) | |
#loc719 = loc("<eval_with_key>.2":996:15) | |
#loc720 = loc("<eval_with_key>.2":997:19) | |
#loc721 = loc("<eval_with_key>.2":998:15) | |
#loc722 = loc("<eval_with_key>.2":999:22) | |
#loc723 = loc("<eval_with_key>.2":1001:11) | |
#loc724 = loc("<eval_with_key>.2":1002:15) | |
#loc725 = loc("<eval_with_key>.2":1004:15) | |
#loc726 = loc("<eval_with_key>.2":1005:15) | |
#loc727 = loc("<eval_with_key>.2":1006:13) | |
#loc728 = loc("<eval_with_key>.2":1007:15) | |
#loc729 = loc("<eval_with_key>.2":1010:27) | |
#loc730 = loc("<eval_with_key>.2":1015:11) | |
#loc731 = loc("<eval_with_key>.2":1017:15) | |
#loc732 = loc("<eval_with_key>.2":1018:14) | |
#loc733 = loc("<eval_with_key>.2":1021:11) | |
#loc734 = loc("<eval_with_key>.2":1023:15) | |
#loc735 = loc("<eval_with_key>.2":1024:13) | |
#loc736 = loc("<eval_with_key>.2":1025:15) | |
#loc737 = loc("<eval_with_key>.2":1028:27) | |
#loc738 = loc("<eval_with_key>.2":1033:11) | |
#loc739 = loc("<eval_with_key>.2":1034:15) | |
#loc740 = loc("<eval_with_key>.2":1035:9) | |
#loc741 = loc("<eval_with_key>.2":1036:22) | |
#loc742 = loc("<eval_with_key>.2":1037:9) | |
#loc743 = loc("<eval_with_key>.2":1038:13) | |
#loc744 = loc("<eval_with_key>.2":1039:13) | |
#loc745 = loc("<eval_with_key>.2":1040:15) | |
#loc746 = loc("<eval_with_key>.2":1041:12) | |
#loc747 = loc(callsite(#loc19 at #loc20)) | |
#loc748 = loc(callsite(#loc28 at #loc29)) | |
#loc749 = loc(callsite(#loc34 at #loc35)) | |
#loc750 = loc(callsite(#loc40 at #loc41)) | |
#loc751 = loc(callsite(#loc19 at #loc52)) | |
#loc752 = loc(callsite(#loc747 at #loc21)) | |
#loc753 = loc(callsite(#loc748 at #loc30)) | |
#loc754 = loc(callsite(#loc749 at #loc36)) | |
#loc755 = loc(callsite(#loc750 at #loc42)) | |
#loc756 = loc(callsite(#loc751 at #loc53)) | |
#loc757 = loc(callsite(#loc749 at #loc56)) | |
#loc758 = loc(callsite(#loc749 at #loc57)) | |
#loc759 = loc(callsite(#loc749 at #loc73)) | |
#loc760 = loc(callsite(#loc749 at #loc81)) | |
#loc761 = loc(callsite(#loc749 at #loc85)) | |
#loc762 = loc(callsite(#loc749 at #loc92)) | |
#loc763 = loc(callsite(#loc749 at #loc741)) |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment