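// Torch-MLIR ("torch" dialect) IR for a sequence-classification forward pass:
// token ids [1,128] (si64) -> logits [1,2] (f32). The shapes (768 hidden, 12 heads,
// 3072 FFN, 512 relative positions, 128100 vocab) and the gather-based attention
// below are consistent with a DeBERTa-style encoder; the module itself is only
// named "_lambda". Weight payloads are elided (dense_resource<__elided__>), so
// this listing is for inspection rather than execution.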
#loc = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: !torch.vtensor<[1,128],si64> loc(unknown)) -> !torch.vtensor<[1,2],f32> {
    %int0 = torch.constant.int 0 loc(#loc1)
    %int1 = torch.constant.int 1 loc(#loc2)
    %true = torch.constant.bool true loc(#loc3)
    %float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc3)
    %none = torch.constant.none loc(#loc)
    %int11 = torch.constant.int 11 loc(#loc4)
    %false = torch.constant.bool false loc(#loc5)
    %int128 = torch.constant.int 128 loc(#loc6)
    %int12 = torch.constant.int 12 loc(#loc7)
    %int512 = torch.constant.int 512 loc(#loc8)
    %int64 = torch.constant.int 64 loc(#loc9)
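    // %0-%203 are the model's weights and biases; payloads were elided at print
    // time, leaving only shapes and dtypes.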
    %0 = torch.vtensor.literal(dense<0.000000e+00> : tensor<2xf32>) : !torch.vtensor<[2],f32> loc(#loc)
    %1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2x768xf32>) : !torch.vtensor<[2,768],f32> loc(#loc)
    %2 = torch.vtensor.literal(dense<0.000000e+00> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %131 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %132 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %133 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %134 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %135 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %136 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %137 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %138 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %139 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %140 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %141 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %142 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %143 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %144 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %145 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %146 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %147 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %148 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %149 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %150 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %151 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %152 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %153 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %154 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %155 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %156 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %157 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %158 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %159 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %160 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %161 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %162 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %163 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %164 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %165 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %166 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %167 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %168 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %169 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %170 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %171 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %172 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %173 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %174 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %175 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %176 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %177 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %178 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %179 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %180 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %181 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %182 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %183 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x3072xf32>) : !torch.vtensor<[768,3072],f32> loc(#loc)
    %184 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> loc(#loc)
    %185 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x768xf32>) : !torch.vtensor<[3072,768],f32> loc(#loc)
    %186 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %187 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %188 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %189 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %190 = torch.vtensor.literal(dense<-3.40282347E+38> : tensor<f32>) : !torch.vtensor<[],f32> loc(#loc)
    %191 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %192 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %193 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %194 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %195 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %196 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768x768xf32>) : !torch.vtensor<[768,768],f32> loc(#loc)
    %197 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %198 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %199 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x768xf32>) : !torch.vtensor<[512,768],f32> loc(#loc)
    %200 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128xsi64>) : !torch.vtensor<[128,128],si64> loc(#loc)
    %201 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %202 = torch.vtensor.literal(dense_resource<__elided__> : tensor<768xf32>) : !torch.vtensor<[768],f32> loc(#loc)
    %203 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128100x768xf32>) : !torch.vtensor<[128100,768],f32> loc(#loc)
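    // Literals worth noting: %190 is -FLT_MAX (the attention-mask fill value),
    // %199 (512x768) appears to be a relative-position embedding table, %200
    // (128x128, si64) a precomputed relative-position index matrix, and %203
    // (128100x768) the word-embedding table (128100 matches the DeBERTa-v3
    // vocabulary size).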
    %int-2 = torch.constant.int -2 loc(#loc10)
    %int-1 = torch.constant.int -1 loc(#loc11)
    %str = torch.constant.str "none" loc(#loc)
    %int768 = torch.constant.int 768 loc(#loc12)
    %float9.999990e-08 = torch.constant.float 9.9999999999999995E-8 loc(#loc13)
    %int2 = torch.constant.int 2 loc(#loc14)
    %int3 = torch.constant.int 3 loc(#loc15)
    %float1.385640e01 = torch.constant.float 13.856406460551018 loc(#loc16)
    %int256 = torch.constant.int 256 loc(#loc17)
    %int511 = torch.constant.int 511 loc(#loc18)
    %int3072 = torch.constant.int 3072 loc(#loc19)
    %cpu = torch.constant.device "cpu" loc(#loc)
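    // 13.856406460551018 = sqrt(3*64): DeBERTa-style scaling for 64-dim heads with
    // two extra score terms (c2p and p2c); 256 and 511 bound the relative-distance
    // indices used in the gathers below.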
    %204 = torch.prim.ListConstruct %int1, %int128 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %205 = torch.aten.ones %204, %none, %none, %cpu, %false : !torch.list<int>, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[1,128],f32> loc(#loc20)
    %206 = torch.aten.embedding %203, %arg0, %int0, %false, %false : !torch.vtensor<[128100,768],f32>, !torch.vtensor<[1,128],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[1,128,768],f32> loc(#loc21)
    %207 = torch.prim.ListConstruct %int768 : (!torch.int) -> !torch.list<int> loc(#loc)
    %result0, %result1, %result2 = torch.aten.native_layer_norm %206, %207, %202, %201, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc22)
    %208 = torch.aten.unsqueeze %205, %int2 : !torch.vtensor<[1,128],f32>, !torch.int -> !torch.vtensor<[1,128,1],f32> loc(#loc23)
    %209 = torch.aten.mul.Tensor %result0, %208 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32> -> !torch.vtensor<[1,128,768],f32> loc(#loc24)
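    // Input pipeline: all-ones attention mask (%205), word-embedding lookup (%206),
    // LayerNorm with eps ~1e-7, then a multiply by the mask (%209), which is a
    // no-op for an all-ones mask.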
    %210 = torch.aten.unsqueeze %205, %int1 : !torch.vtensor<[1,128],f32>, !torch.int -> !torch.vtensor<[1,1,128],f32> loc(#loc25)
    %211 = torch.aten.unsqueeze %210, %int2 : !torch.vtensor<[1,1,128],f32>, !torch.int -> !torch.vtensor<[1,1,1,128],f32> loc(#loc26)
    %212 = torch.aten.squeeze.dim %211, %int-2 : !torch.vtensor<[1,1,1,128],f32>, !torch.int -> !torch.vtensor<[1,1,128],f32> loc(#loc27)
    %213 = torch.aten.unsqueeze %212, %int-1 : !torch.vtensor<[1,1,128],f32>, !torch.int -> !torch.vtensor<[1,1,128,1],f32> loc(#loc28)
    %214 = torch.aten.mul.Tensor %211, %213 : !torch.vtensor<[1,1,1,128],f32>, !torch.vtensor<[1,1,128,1],f32> -> !torch.vtensor<[1,1,128,128],f32> loc(#loc29)
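    // Pairwise attention mask: the [1,128] mask is expanded and multiplied by its
    // own transpose, yielding a [1,1,128,128] visibility mask.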
    %215 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc30)
    %216 = torch.aten.to.dtype %215, %int1, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],si8> loc(#loc30)
    %217 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc30)
    %218 = torch.aten.broadcast_to %216, %217 : !torch.vtensor<[],si8>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],si8> loc(#loc30)
    %219 = torch.aten.copy %218, %214, %false : !torch.vtensor<[1,1,128,128],si8>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],si8> loc(#loc30)
    %220 = torch.aten.clone %200, %none : !torch.vtensor<[128,128],si64>, !torch.none -> !torch.vtensor<[128,128],si64> loc(#loc31)
    %221 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc32)
    %222 = torch.prim.ListConstruct %int128, %int128 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc32)
    %223 = torch.aten.broadcast_to %221, %222 : !torch.vtensor<[],si64>, !torch.list<int> -> !torch.vtensor<[128,128],si64> loc(#loc32)
    %224 = torch.aten.copy %223, %220, %false : !torch.vtensor<[128,128],si64>, !torch.vtensor<[128,128],si64>, !torch.bool -> !torch.vtensor<[128,128],si64> loc(#loc32)
    %225 = torch.aten.unsqueeze %224, %int0 : !torch.vtensor<[128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc33)
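    // The pairwise mask is stored as si8 (%219) for a later cast to i1, and the
    // relative-position index matrix %200 is copied and unsqueezed to
    // [1,128,128] (%225).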
    %result0_0, %result1_1, %result2_2 = torch.aten.native_layer_norm %199, %207, %198, %197, %float9.999990e-08 : !torch.vtensor<[512,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[512,768],f32>, !torch.vtensor<[512,1],f32>, !torch.vtensor<[512,1],f32> loc(#loc34)
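    // The relative-position embedding table is LayerNorm-ed once here and reused
    // by every layer.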
    %226 = torch.aten.transpose.int %196, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc35)
    %227 = torch.prim.ListConstruct %int128, %int768 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc1222)
    %228 = torch.aten.view %209, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc38)
    %229 = torch.aten.mm %228, %226 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc39)
    %230 = torch.aten.mul.Scalar %195, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc39)
    %231 = torch.aten.add.Tensor %230, %229, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc39)
    %232 = torch.prim.ListConstruct %int1, %int128, %int768 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1223)
    %233 = torch.aten.view %231, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc40)
    %234 = torch.prim.ListConstruct %int1, %int128, %int12, %int-1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %235 = torch.aten.view %233, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc41)
    %236 = torch.prim.ListConstruct %int0, %int2, %int1, %int3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %237 = torch.aten.permute %235, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc42)
    %238 = torch.aten.clone %237, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc43)
    %239 = torch.prim.ListConstruct %int-1, %int128, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %240 = torch.aten.view %238, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc44)
    %241 = torch.aten.transpose.int %194, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc45)
    %242 = torch.aten.view %209, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc46)
    %243 = torch.aten.mm %242, %241 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc47)
    %244 = torch.aten.mul.Scalar %193, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc47)
    %245 = torch.aten.add.Tensor %244, %243, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc47)
    %246 = torch.aten.view %245, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc48)
    %247 = torch.aten.view %246, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc49)
    %248 = torch.aten.permute %247, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc50)
    %249 = torch.aten.clone %248, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc51)
    %250 = torch.aten.view %249, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc52)
    %251 = torch.aten.transpose.int %192, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc53)
    %252 = torch.aten.view %209, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc54)
    %253 = torch.aten.mm %252, %251 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc55)
    %254 = torch.aten.mul.Scalar %191, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc55)
    %255 = torch.aten.add.Tensor %254, %253, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc55)
    %256 = torch.aten.view %255, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc56)
    %257 = torch.aten.view %256, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc57)
    %258 = torch.aten.permute %257, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc58)
    %259 = torch.aten.clone %258, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc59)
    %260 = torch.aten.view %259, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc60)
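    // Q/K/V projections of the hidden states. Each linear layer shows up as a
    // decomposed addmm (mm plus a bias add spelled mul.Scalar/add.Tensor), then a
    // reshape to [1,128,12,64], a permute to [1,12,128,64], and a flatten to
    // [12,128,64] (12 heads of 64 dims).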
    %261 = torch.aten.transpose.int %250, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc61)
    %262 = torch.aten.bmm %240, %261 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc62)
    %263 = torch.aten.div.Scalar %262, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc63)
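    // Content-to-content scores: Q @ K^T scaled by sqrt(192) -> [12,128,128].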
    %264 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc64)
    %265 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc65)
    %266 = torch.aten.transpose.int %196, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc66)
    %267 = torch.prim.ListConstruct %int512, %int768 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc1224)
    %268 = torch.aten.view %265, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc67)
    %269 = torch.aten.mm %268, %266 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc68)
    %270 = torch.aten.mul.Scalar %195, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc68)
    %271 = torch.aten.add.Tensor %270, %269, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc68)
    %272 = torch.prim.ListConstruct %int1, %int512, %int768 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1225)
    %273 = torch.aten.view %271, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc69)
    %274 = torch.prim.ListConstruct %int1, %int512, %int12, %int-1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %275 = torch.aten.view %273, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc70)
    %276 = torch.aten.permute %275, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc71)
    %277 = torch.aten.clone %276, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc72)
    %278 = torch.prim.ListConstruct %int-1, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %279 = torch.aten.view %277, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc73)
    %280 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc74)
    %281 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc74)
    %282 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc74)
    %283 = torch.aten.view %279, %280 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc74)
    %284 = torch.aten.broadcast_to %283, %281 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc74)
    %285 = torch.aten.view %284, %282 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc74)
    %286 = torch.aten.transpose.int %194, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc75)
    %287 = torch.aten.view %265, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc76)
    %288 = torch.aten.mm %287, %286 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc77)
    %289 = torch.aten.mul.Scalar %193, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc77)
    %290 = torch.aten.add.Tensor %289, %288, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc77)
    %291 = torch.aten.view %290, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc78)
    %292 = torch.aten.view %291, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc79)
    %293 = torch.aten.permute %292, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc80)
    %294 = torch.aten.clone %293, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc81)
    %295 = torch.aten.view %294, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc82)
    %296 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc83)
    %297 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc83)
    %298 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc83)
    %299 = torch.aten.view %295, %296 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc83)
    %300 = torch.aten.broadcast_to %299, %297 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc83)
    %301 = torch.aten.view %300, %298 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc83)
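    // Position-query (%285) and position-key (%301) projections of the 512
    // relative-position embeddings, reusing the content Q/K weights (%196, %194).
    // The view/broadcast_to/view triples look like a decomposed aten.repeat that
    // is a no-op here, since every repeat factor is 1.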
    %302 = torch.aten.transpose.int %301, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc84)
    %303 = torch.aten.bmm %240, %302 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc85)
    %304 = torch.aten.add.Scalar %264, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc86)
    %305 = torch.aten.clamp %304, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc87)
    %306 = torch.aten.squeeze.dim %305, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc88)
    %307 = torch.prim.ListConstruct %int12, %int128, %int128 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1226)
    %308 = torch.aten.broadcast_to %306, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc91)
    %309 = torch.aten.gather %303, %int-1, %308, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc92)
    %310 = torch.aten.div.Scalar %309, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc93)
    %311 = torch.aten.add.Scalar %310, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc94)
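    // Content-to-position (c2p) term: Q @ pos_K^T -> [12,128,512]; the relative
    // distances are shifted by +256, clamped to [0,511], and used to gather the
    // matching scores -> [12,128,128], scaled by the same sqrt(192). The add of 0
    // at %311 is a leftover no-op.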
    %312 = torch.aten.neg %264 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc95)
    %313 = torch.aten.add.Scalar %312, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc96)
    %314 = torch.aten.clamp %313, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc97)
    %315 = torch.aten.transpose.int %285, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc98)
    %316 = torch.aten.bmm %250, %315 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc99)
    %317 = torch.aten.squeeze.dim %314, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc100)
    %318 = torch.aten.broadcast_to %317, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc101)
    %319 = torch.aten.gather %316, %int-1, %318, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc102)
    %320 = torch.aten.transpose.int %319, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc103)
    %321 = torch.aten.div.Scalar %320, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc104)
    %322 = torch.aten.add.Tensor %311, %321, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc105)
    %323 = torch.aten.add.Tensor %263, %322, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc106)
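    // Position-to-content (p2c) term: the same gather with negated relative
    // distances against K @ pos_Q^T, then transposed and scaled; c2p + p2c (%322)
    // is added to the content scores (%323).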
    %324 = torch.prim.ListConstruct %int-1, %int12, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %325 = torch.aten.view %323, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc107)
    %326 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc108)
    %327 = torch.aten.to.dtype %326, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc108)
    %328 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc108)
    %329 = torch.aten.broadcast_to %327, %328 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc108)
    %330 = torch.aten.copy %329, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc108)
    %331 = torch.aten.bitwise_not %330 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc109)
    %332 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc110)
    %333 = torch.aten.masked_fill.Tensor %325, %331, %332 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc111)
    %values, %indices = torch.aten.max.dim %333, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc3)
    %334 = torch.aten.sub.Tensor %333, %values, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc3)
    %335 = torch.aten.exp %334 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc3)
    %336 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc3)
    %337 = torch.aten.sum.dim_IntList %335, %336, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc3)
    %338 = torch.aten.div.Tensor %335, %337 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc3)
    %339 = torch.aten.masked_fill.Scalar %338, %331, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc112)
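    // Masking and softmax: the si8 mask is cast to i1 and inverted, masked
    // positions are filled with -FLT_MAX, softmax over the last dim is decomposed
    // into max/subtract/exp/sum/divide for numerical stability, and masked
    // positions are finally re-zeroed (%339), as in DeBERTa's XSoftmax.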
    %340 = torch.prim.ListConstruct %int-1, %int128, %int128 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %341 = torch.aten.view %339, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc113)
    %342 = torch.aten.bmm %341, %260 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc114)
    %343 = torch.prim.ListConstruct %int-1, %int12, %int128, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %344 = torch.aten.view %342, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc115)
    %345 = torch.aten.permute %344, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc116)
    %346 = torch.aten.clone %345, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc117)
    %347 = torch.prim.ListConstruct %int1, %int128, %int-1 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
    %348 = torch.aten.view %346, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc118)
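    // Attention output: probs @ V -> [12,128,64], then heads are un-flattened and
    // permuted back to [1,128,768].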
    %349 = torch.aten.transpose.int %189, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc119)
    %350 = torch.aten.view %348, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc120)
    %351 = torch.aten.mm %350, %349 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc121)
    %352 = torch.aten.mul.Scalar %188, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc121)
    %353 = torch.aten.add.Tensor %352, %351, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc121)
    %354 = torch.aten.view %353, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc122)
    %355 = torch.aten.add.Tensor %354, %209, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc123)
    %result0_3, %result1_4, %result2_5 = torch.aten.native_layer_norm %355, %207, %187, %186, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc124)
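    // Output projection, residual add with the block input (%209), and LayerNorm.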
    %356 = torch.aten.transpose.int %185, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc125)
    %357 = torch.aten.view %result0_3, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc126)
    %358 = torch.aten.mm %357, %356 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc127)
    %359 = torch.aten.mul.Scalar %184, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc127)
    %360 = torch.aten.add.Tensor %359, %358, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc127)
    %361 = torch.prim.ListConstruct %int1, %int128, %int3072 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1227)
    %362 = torch.aten.view %360, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc128)
    %363 = torch.aten.gelu %362, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc129)
    %364 = torch.aten.transpose.int %183, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc130)
    %365 = torch.prim.ListConstruct %int128, %int3072 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc1228)
    %366 = torch.aten.view %363, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc131)
    %367 = torch.aten.mm %366, %364 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc132)
    %368 = torch.aten.mul.Scalar %182, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc132)
    %369 = torch.aten.add.Tensor %368, %367, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc132)
    %370 = torch.aten.view %369, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc133)
    %371 = torch.aten.add.Tensor %370, %result0_3, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc134)
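    // Feed-forward network: 768 -> 3072, GELU ("none" approximation), 3072 -> 768,
    // plus a residual from the post-attention LayerNorm output.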
    %result0_6, %result1_7, %result2_8 = torch.aten.native_layer_norm %371, %207, %181, %180, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc135)
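    // End of encoder layer 1. The same attention + FFN pattern repeats below for
    // the next layer with its own weights.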
%372 = torch.aten.transpose.int %179, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc136) | |
%373 = torch.aten.view %result0_6, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc137) | |
%374 = torch.aten.mm %373, %372 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc138) | |
%375 = torch.aten.mul.Scalar %178, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc138) | |
%376 = torch.aten.add.Tensor %375, %374, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc138) | |
%377 = torch.aten.view %376, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc139) | |
%378 = torch.aten.view %377, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc140) | |
%379 = torch.aten.permute %378, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc141) | |
%380 = torch.aten.clone %379, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc142) | |
%381 = torch.aten.view %380, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc143) | |
%382 = torch.aten.transpose.int %177, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc144) | |
%383 = torch.aten.view %result0_6, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc145) | |
%384 = torch.aten.mm %383, %382 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc146) | |
%385 = torch.aten.mul.Scalar %176, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc146) | |
%386 = torch.aten.add.Tensor %385, %384, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc146) | |
%387 = torch.aten.view %386, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc147) | |
%388 = torch.aten.view %387, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc148) | |
%389 = torch.aten.permute %388, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc149) | |
%390 = torch.aten.clone %389, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc150) | |
%391 = torch.aten.view %390, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc151) | |
%392 = torch.aten.transpose.int %175, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc152) | |
%393 = torch.aten.view %result0_6, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc153) | |
%394 = torch.aten.mm %393, %392 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc154) | |
%395 = torch.aten.mul.Scalar %174, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc154) | |
%396 = torch.aten.add.Tensor %395, %394, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc154) | |
%397 = torch.aten.view %396, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc155) | |
%398 = torch.aten.view %397, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc156) | |
%399 = torch.aten.permute %398, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc157) | |
%400 = torch.aten.clone %399, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc158) | |
%401 = torch.aten.view %400, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc159) | |
%402 = torch.aten.transpose.int %391, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc160) | |
%403 = torch.aten.bmm %381, %402 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc161) | |
%404 = torch.aten.div.Scalar %403, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc162) | |
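// Relative-position branch: %405 adds a head axis to the precomputed relative-distance | |
// index %225, and %406 lifts the position-embedding table %result0_0 ([512,768]) to a | |
// batch of 1. The table is then projected twice, with what appear to be the shared | |
// query weights (%179/%178) and key weights (%177/%176). | |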
%405 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc163) | |
%406 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc164) | |
%407 = torch.aten.transpose.int %179, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc165) | |
%408 = torch.aten.view %406, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc166) | |
%409 = torch.aten.mm %408, %407 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc167) | |
%410 = torch.aten.mul.Scalar %178, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc167) | |
%411 = torch.aten.add.Tensor %410, %409, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc167) | |
%412 = torch.aten.view %411, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc168) | |
%413 = torch.aten.view %412, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc169) | |
%414 = torch.aten.permute %413, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc170) | |
%415 = torch.aten.clone %414, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc171) | |
%416 = torch.aten.view %415, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc172) | |
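// The ListConstruct / view / broadcast_to / view triple below expands [12,512,64] through | |
// [1,12,1,512,1,64] and back; all repeat factors are 1, so this looks like the lowering | |
// of an aten.repeat that is a no-op at this batch size. The same pattern recurs at | |
// %433-%438 for the second position projection. | |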
%417 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc173) | |
%418 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc173) | |
%419 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc173) | |
%420 = torch.aten.view %416, %417 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc173) | |
%421 = torch.aten.broadcast_to %420, %418 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc173) | |
%422 = torch.aten.view %421, %419 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc173) | |
%423 = torch.aten.transpose.int %177, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc174) | |
%424 = torch.aten.view %406, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc175) | |
%425 = torch.aten.mm %424, %423 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc176) | |
%426 = torch.aten.mul.Scalar %176, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc176) | |
%427 = torch.aten.add.Tensor %426, %425, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc176) | |
%428 = torch.aten.view %427, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc177) | |
%429 = torch.aten.view %428, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc178) | |
%430 = torch.aten.permute %429, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc179) | |
%431 = torch.aten.clone %430, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc180) | |
%432 = torch.aten.view %431, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc181) | |
%433 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc182) | |
%434 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc182) | |
%435 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc182) | |
%436 = torch.aten.view %432, %433 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc182) | |
%437 = torch.aten.broadcast_to %436, %434 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc182) | |
%438 = torch.aten.view %437, %435 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc182) | |
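// Content-to-position (c2p) scores: Q (%381) against the transposed position keys gives | |
// [12,128,512]; the relative index is shifted by +256, clamped to [0,511], and gather | |
// then selects, per query position, the 128 relevant columns out of 512 position buckets. | |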
%439 = torch.aten.transpose.int %438, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc183) | |
%440 = torch.aten.bmm %381, %439 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc184) | |
%441 = torch.aten.add.Scalar %405, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc185) | |
%442 = torch.aten.clamp %441, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc186) | |
%443 = torch.aten.squeeze.dim %442, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc187) | |
%444 = torch.aten.broadcast_to %443, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc188) | |
%445 = torch.aten.gather %440, %int-1, %444, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc189) | |
%446 = torch.aten.div.Scalar %445, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc190) | |
%447 = torch.aten.add.Scalar %446, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc191) | |
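// Position-to-content (p2c) scores: the relative index is negated before the same +256 | |
// shift and clamp; content keys (%391) are scored against the position-query projection | |
// (%422), gathered, and transposed back so rows and columns align with [query, key]. | |
// (%447 above adds scalar 0 with unit alpha, apparently a traced no-op from the exporter.) | |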
%448 = torch.aten.neg %405 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc192) | |
%449 = torch.aten.add.Scalar %448, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc193) | |
%450 = torch.aten.clamp %449, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc194) | |
%451 = torch.aten.transpose.int %422, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc195) | |
%452 = torch.aten.bmm %391, %451 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc196) | |
%453 = torch.aten.squeeze.dim %450, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc197) | |
%454 = torch.aten.broadcast_to %453, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc198) | |
%455 = torch.aten.gather %452, %int-1, %454, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc199) | |
%456 = torch.aten.transpose.int %455, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc200) | |
%457 = torch.aten.div.Scalar %456, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc201) | |
%458 = torch.aten.add.Tensor %447, %457, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc202) | |
%459 = torch.aten.add.Tensor %404, %458, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc203) | |
%460 = torch.aten.view %459, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc204) | |
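// Attention-mask materialization: a scalar 0 is cast to i1 (%462), broadcast to | |
// [1,1,128,128], and overwritten from the si8 mask %219 via aten.copy; bitwise_not flips | |
// it so true marks disallowed positions, which masked_fill sets to the fill constant %190 | |
// (a scalar f32 defined earlier, presumably a large negative value) before the softmax. | |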
%461 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc205) | |
%462 = torch.aten.to.dtype %461, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc205) | |
%463 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc205) | |
%464 = torch.aten.broadcast_to %462, %463 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc205) | |
%465 = torch.aten.copy %464, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc205) | |
%466 = torch.aten.bitwise_not %465 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc206) | |
%467 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc207) | |
%468 = torch.aten.masked_fill.Tensor %460, %466, %467 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc208) | |
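// Numerically stable softmax over the last dim, fully decomposed: | |
//   softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))) | |
// via aten.max.dim, sub, exp, and sum.dim_IntList; the trailing masked_fill.Scalar then | |
// zeroes the probabilities at masked positions. | |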
%values_9, %indices_10 = torch.aten.max.dim %468, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc209) | |
%469 = torch.aten.sub.Tensor %468, %values_9, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc209) | |
%470 = torch.aten.exp %469 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc209) | |
%471 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc209) | |
%472 = torch.aten.sum.dim_IntList %470, %471, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc209) | |
%473 = torch.aten.div.Tensor %470, %472 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc209) | |
%474 = torch.aten.masked_fill.Scalar %473, %466, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc210) | |
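// Context: attention probabilities @ V (%401), then permute / clone / view merges the | |
// 12 heads of 64 back into [1,128,768], followed by the output projection (%173/%172), | |
// the residual add with %result0_6, and layer norm (eps = 9.999990e-08). | |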
%475 = torch.aten.view %474, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc211) | |
%476 = torch.aten.bmm %475, %401 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc212) | |
%477 = torch.aten.view %476, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc213) | |
%478 = torch.aten.permute %477, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc214) | |
%479 = torch.aten.clone %478, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc215) | |
%480 = torch.aten.view %479, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc216) | |
%481 = torch.aten.transpose.int %173, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc217) | |
%482 = torch.aten.view %480, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc218) | |
%483 = torch.aten.mm %482, %481 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc219) | |
%484 = torch.aten.mul.Scalar %172, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc219) | |
%485 = torch.aten.add.Tensor %484, %483, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc219) | |
%486 = torch.aten.view %485, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc220) | |
%487 = torch.aten.add.Tensor %486, %result0_6, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc221) | |
%result0_11, %result1_12, %result2_13 = torch.aten.native_layer_norm %487, %207, %171, %170, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc222) | |
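// Feed-forward block: linear 768 -> 3072 (%169/%168), aten.gelu with the %str | |
// approximation flag defined earlier, linear 3072 -> 768 (%167/%166), a residual add, | |
// and a second layer norm - the standard transformer FFN. | |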
%488 = torch.aten.transpose.int %169, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc223) | |
%489 = torch.aten.view %result0_11, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc224) | |
%490 = torch.aten.mm %489, %488 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc225) | |
%491 = torch.aten.mul.Scalar %168, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc225) | |
%492 = torch.aten.add.Tensor %491, %490, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc225) | |
%493 = torch.aten.view %492, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc226) | |
%494 = torch.aten.gelu %493, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc227) | |
%495 = torch.aten.transpose.int %167, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc228) | |
%496 = torch.aten.view %494, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc229) | |
%497 = torch.aten.mm %496, %495 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc230) | |
%498 = torch.aten.mul.Scalar %166, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc230) | |
%499 = torch.aten.add.Tensor %498, %497, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc230) | |
%500 = torch.aten.view %499, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc231) | |
%501 = torch.aten.add.Tensor %500, %result0_11, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc232) | |
%result0_14, %result1_15, %result2_16 = torch.aten.native_layer_norm %501, %207, %165, %164, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc233) | |
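// Next encoder layer: the same disentangled-attention + FFN pattern repeats below with | |
// this layer's weight literals (%163 down through %148). | |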
%502 = torch.aten.transpose.int %163, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc234) | |
%503 = torch.aten.view %result0_14, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc235) | |
%504 = torch.aten.mm %503, %502 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc236) | |
%505 = torch.aten.mul.Scalar %162, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc236) | |
%506 = torch.aten.add.Tensor %505, %504, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc236) | |
%507 = torch.aten.view %506, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc237) | |
%508 = torch.aten.view %507, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc238) | |
%509 = torch.aten.permute %508, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc239) | |
%510 = torch.aten.clone %509, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc240) | |
%511 = torch.aten.view %510, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc241) | |
%512 = torch.aten.transpose.int %161, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc242) | |
%513 = torch.aten.view %result0_14, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc243) | |
%514 = torch.aten.mm %513, %512 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc244) | |
%515 = torch.aten.mul.Scalar %160, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc244) | |
%516 = torch.aten.add.Tensor %515, %514, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc244) | |
%517 = torch.aten.view %516, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc245) | |
%518 = torch.aten.view %517, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc246) | |
%519 = torch.aten.permute %518, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc247) | |
%520 = torch.aten.clone %519, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc248) | |
%521 = torch.aten.view %520, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc249) | |
%522 = torch.aten.transpose.int %159, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc250) | |
%523 = torch.aten.view %result0_14, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc251) | |
%524 = torch.aten.mm %523, %522 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc252) | |
%525 = torch.aten.mul.Scalar %158, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc252) | |
%526 = torch.aten.add.Tensor %525, %524, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc252) | |
%527 = torch.aten.view %526, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc253) | |
%528 = torch.aten.view %527, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc254) | |
%529 = torch.aten.permute %528, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc255) | |
%530 = torch.aten.clone %529, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc256) | |
%531 = torch.aten.view %530, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc257) | |
%532 = torch.aten.transpose.int %521, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc258) | |
%533 = torch.aten.bmm %511, %532 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc259) | |
%534 = torch.aten.div.Scalar %533, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc260) | |
%535 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc261) | |
%536 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc262) | |
%537 = torch.aten.transpose.int %163, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc263) | |
%538 = torch.aten.view %536, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc264) | |
%539 = torch.aten.mm %538, %537 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc265) | |
%540 = torch.aten.mul.Scalar %162, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc265) | |
%541 = torch.aten.add.Tensor %540, %539, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc265) | |
%542 = torch.aten.view %541, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc266) | |
%543 = torch.aten.view %542, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc267) | |
%544 = torch.aten.permute %543, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc268) | |
%545 = torch.aten.clone %544, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc269) | |
%546 = torch.aten.view %545, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc270) | |
%547 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc271) | |
%548 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc271) | |
%549 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc271) | |
%550 = torch.aten.view %546, %547 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc271) | |
%551 = torch.aten.broadcast_to %550, %548 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc271) | |
%552 = torch.aten.view %551, %549 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc271) | |
%553 = torch.aten.transpose.int %161, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc272) | |
%554 = torch.aten.view %536, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc273) | |
%555 = torch.aten.mm %554, %553 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc274) | |
%556 = torch.aten.mul.Scalar %160, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc274) | |
%557 = torch.aten.add.Tensor %556, %555, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc274) | |
%558 = torch.aten.view %557, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc275) | |
%559 = torch.aten.view %558, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc276) | |
%560 = torch.aten.permute %559, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc277) | |
%561 = torch.aten.clone %560, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc278) | |
%562 = torch.aten.view %561, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc279) | |
%563 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc280) | |
%564 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc280) | |
%565 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc280) | |
%566 = torch.aten.view %562, %563 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc280) | |
%567 = torch.aten.broadcast_to %566, %564 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc280) | |
%568 = torch.aten.view %567, %565 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc280) | |
%569 = torch.aten.transpose.int %568, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc281) | |
%570 = torch.aten.bmm %511, %569 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc282) | |
%571 = torch.aten.add.Scalar %535, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc283) | |
%572 = torch.aten.clamp %571, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc284) | |
%573 = torch.aten.squeeze.dim %572, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc285) | |
%574 = torch.aten.broadcast_to %573, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc286) | |
%575 = torch.aten.gather %570, %int-1, %574, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc287) | |
%576 = torch.aten.div.Scalar %575, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc288) | |
%577 = torch.aten.add.Scalar %576, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc289) | |
%578 = torch.aten.neg %535 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc290) | |
%579 = torch.aten.add.Scalar %578, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc291) | |
%580 = torch.aten.clamp %579, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc292) | |
%581 = torch.aten.transpose.int %552, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc293) | |
%582 = torch.aten.bmm %521, %581 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc294) | |
%583 = torch.aten.squeeze.dim %580, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc295) | |
%584 = torch.aten.broadcast_to %583, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc296) | |
%585 = torch.aten.gather %582, %int-1, %584, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc297) | |
%586 = torch.aten.transpose.int %585, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc298) | |
%587 = torch.aten.div.Scalar %586, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc299) | |
%588 = torch.aten.add.Tensor %577, %587, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc300) | |
%589 = torch.aten.add.Tensor %534, %588, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc301) | |
%590 = torch.aten.view %589, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc302) | |
%591 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc303) | |
%592 = torch.aten.to.dtype %591, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc303) | |
%593 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc303) | |
%594 = torch.aten.broadcast_to %592, %593 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc303) | |
%595 = torch.aten.copy %594, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc303) | |
%596 = torch.aten.bitwise_not %595 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc304) | |
%597 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc305) | |
%598 = torch.aten.masked_fill.Tensor %590, %596, %597 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc306) | |
%values_17, %indices_18 = torch.aten.max.dim %598, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc307) | |
%599 = torch.aten.sub.Tensor %598, %values_17, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc307) | |
%600 = torch.aten.exp %599 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc307) | |
%601 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc307) | |
%602 = torch.aten.sum.dim_IntList %600, %601, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc307) | |
%603 = torch.aten.div.Tensor %600, %602 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc307) | |
%604 = torch.aten.masked_fill.Scalar %603, %596, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc308) | |
%605 = torch.aten.view %604, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc309) | |
%606 = torch.aten.bmm %605, %531 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc310) | |
%607 = torch.aten.view %606, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc311) | |
%608 = torch.aten.permute %607, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc312) | |
%609 = torch.aten.clone %608, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc313) | |
%610 = torch.aten.view %609, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc314) | |
%611 = torch.aten.transpose.int %157, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc315) | |
%612 = torch.aten.view %610, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc316) | |
%613 = torch.aten.mm %612, %611 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc317) | |
%614 = torch.aten.mul.Scalar %156, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc317) | |
%615 = torch.aten.add.Tensor %614, %613, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc317) | |
%616 = torch.aten.view %615, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc318) | |
%617 = torch.aten.add.Tensor %616, %result0_14, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc319) | |
%result0_19, %result1_20, %result2_21 = torch.aten.native_layer_norm %617, %207, %155, %154, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc320) | |
%618 = torch.aten.transpose.int %153, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc321) | |
%619 = torch.aten.view %result0_19, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc322) | |
%620 = torch.aten.mm %619, %618 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc323) | |
%621 = torch.aten.mul.Scalar %152, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc323) | |
%622 = torch.aten.add.Tensor %621, %620, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc323) | |
%623 = torch.aten.view %622, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc324) | |
%624 = torch.aten.gelu %623, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc325) | |
%625 = torch.aten.transpose.int %151, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc326) | |
%626 = torch.aten.view %624, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc327) | |
%627 = torch.aten.mm %626, %625 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc328) | |
%628 = torch.aten.mul.Scalar %150, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc328) | |
%629 = torch.aten.add.Tensor %628, %627, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc328) | |
%630 = torch.aten.view %629, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc329) | |
%631 = torch.aten.add.Tensor %630, %result0_19, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc330) | |
%result0_22, %result1_23, %result2_24 = torch.aten.native_layer_norm %631, %207, %149, %148, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc331) | |
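// Next encoder layer (weights %147 down through %132); structure identical to the | |
// layers above. | |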
%632 = torch.aten.transpose.int %147, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc332) | |
%633 = torch.aten.view %result0_22, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc333) | |
%634 = torch.aten.mm %633, %632 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc334) | |
%635 = torch.aten.mul.Scalar %146, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc334) | |
%636 = torch.aten.add.Tensor %635, %634, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc334) | |
%637 = torch.aten.view %636, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc335) | |
%638 = torch.aten.view %637, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc336) | |
%639 = torch.aten.permute %638, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc337) | |
%640 = torch.aten.clone %639, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc338) | |
%641 = torch.aten.view %640, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc339) | |
%642 = torch.aten.transpose.int %145, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc340) | |
%643 = torch.aten.view %result0_22, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc341) | |
%644 = torch.aten.mm %643, %642 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc342) | |
%645 = torch.aten.mul.Scalar %144, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc342) | |
%646 = torch.aten.add.Tensor %645, %644, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc342) | |
%647 = torch.aten.view %646, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc343) | |
%648 = torch.aten.view %647, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc344) | |
%649 = torch.aten.permute %648, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc345) | |
%650 = torch.aten.clone %649, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc346) | |
%651 = torch.aten.view %650, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc347) | |
%652 = torch.aten.transpose.int %143, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc348) | |
%653 = torch.aten.view %result0_22, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc349) | |
%654 = torch.aten.mm %653, %652 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc350) | |
%655 = torch.aten.mul.Scalar %142, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc350) | |
%656 = torch.aten.add.Tensor %655, %654, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc350) | |
%657 = torch.aten.view %656, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc351) | |
%658 = torch.aten.view %657, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc352) | |
%659 = torch.aten.permute %658, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc353) | |
%660 = torch.aten.clone %659, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc354) | |
%661 = torch.aten.view %660, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc355) | |
%662 = torch.aten.transpose.int %651, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc356) | |
%663 = torch.aten.bmm %641, %662 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc357) | |
%664 = torch.aten.div.Scalar %663, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc358) | |
%665 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc359) | |
%666 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc360) | |
%667 = torch.aten.transpose.int %147, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc361) | |
%668 = torch.aten.view %666, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc362) | |
%669 = torch.aten.mm %668, %667 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc363) | |
%670 = torch.aten.mul.Scalar %146, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc363) | |
%671 = torch.aten.add.Tensor %670, %669, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc363) | |
%672 = torch.aten.view %671, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc364) | |
%673 = torch.aten.view %672, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc365) | |
%674 = torch.aten.permute %673, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc366) | |
%675 = torch.aten.clone %674, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc367) | |
%676 = torch.aten.view %675, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc368) | |
%677 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc369) | |
%678 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc369) | |
%679 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc369) | |
%680 = torch.aten.view %676, %677 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc369) | |
%681 = torch.aten.broadcast_to %680, %678 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc369) | |
%682 = torch.aten.view %681, %679 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc369) | |
%683 = torch.aten.transpose.int %145, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc370) | |
%684 = torch.aten.view %666, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc371) | |
%685 = torch.aten.mm %684, %683 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc372) | |
%686 = torch.aten.mul.Scalar %144, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc372) | |
%687 = torch.aten.add.Tensor %686, %685, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc372) | |
%688 = torch.aten.view %687, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc373) | |
%689 = torch.aten.view %688, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc374) | |
%690 = torch.aten.permute %689, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc375) | |
%691 = torch.aten.clone %690, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc376) | |
%692 = torch.aten.view %691, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc377) | |
%693 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc378) | |
%694 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc378) | |
%695 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc378) | |
%696 = torch.aten.view %692, %693 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc378) | |
%697 = torch.aten.broadcast_to %696, %694 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc378) | |
%698 = torch.aten.view %697, %695 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc378) | |
%699 = torch.aten.transpose.int %698, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc379) | |
%700 = torch.aten.bmm %641, %699 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc380) | |
%701 = torch.aten.add.Scalar %665, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc381) | |
%702 = torch.aten.clamp %701, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc382) | |
%703 = torch.aten.squeeze.dim %702, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc383) | |
%704 = torch.aten.broadcast_to %703, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc384) | |
%705 = torch.aten.gather %700, %int-1, %704, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc385) | |
%706 = torch.aten.div.Scalar %705, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc386) | |
%707 = torch.aten.add.Scalar %706, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc387) | |
%708 = torch.aten.neg %665 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc388) | |
%709 = torch.aten.add.Scalar %708, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc389) | |
%710 = torch.aten.clamp %709, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc390) | |
%711 = torch.aten.transpose.int %682, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc391) | |
%712 = torch.aten.bmm %651, %711 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc392) | |
%713 = torch.aten.squeeze.dim %710, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc393) | |
%714 = torch.aten.broadcast_to %713, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc394) | |
%715 = torch.aten.gather %712, %int-1, %714, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc395) | |
%716 = torch.aten.transpose.int %715, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc396) | |
%717 = torch.aten.div.Scalar %716, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc397) | |
%718 = torch.aten.add.Tensor %707, %717, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc398) | |
%719 = torch.aten.add.Tensor %664, %718, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc399) | |
%720 = torch.aten.view %719, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc400) | |
%721 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc401) | |
%722 = torch.aten.to.dtype %721, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc401) | |
%723 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc401) | |
%724 = torch.aten.broadcast_to %722, %723 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc401) | |
%725 = torch.aten.copy %724, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc401) | |
%726 = torch.aten.bitwise_not %725 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc402) | |
%727 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc403) | |
%728 = torch.aten.masked_fill.Tensor %720, %726, %727 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc404) | |
%values_25, %indices_26 = torch.aten.max.dim %728, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc405) | |
%729 = torch.aten.sub.Tensor %728, %values_25, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc405) | |
%730 = torch.aten.exp %729 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc405) | |
%731 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc405) | |
%732 = torch.aten.sum.dim_IntList %730, %731, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc405) | |
%733 = torch.aten.div.Tensor %730, %732 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc405) | |
%734 = torch.aten.masked_fill.Scalar %733, %726, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc406) | |
%735 = torch.aten.view %734, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc407) | |
%736 = torch.aten.bmm %735, %661 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc408) | |
%737 = torch.aten.view %736, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc409) | |
%738 = torch.aten.permute %737, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc410) | |
%739 = torch.aten.clone %738, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc411) | |
%740 = torch.aten.view %739, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc412) | |
%741 = torch.aten.transpose.int %141, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc413) | |
%742 = torch.aten.view %740, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc414) | |
%743 = torch.aten.mm %742, %741 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc415) | |
%744 = torch.aten.mul.Scalar %140, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc415) | |
%745 = torch.aten.add.Tensor %744, %743, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc415) | |
%746 = torch.aten.view %745, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc416) | |
%747 = torch.aten.add.Tensor %746, %result0_22, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc417) | |
%result0_27, %result1_28, %result2_29 = torch.aten.native_layer_norm %747, %207, %139, %138, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc418) | |
%748 = torch.aten.transpose.int %137, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc419) | |
%749 = torch.aten.view %result0_27, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc420) | |
%750 = torch.aten.mm %749, %748 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc421) | |
%751 = torch.aten.mul.Scalar %136, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc421) | |
%752 = torch.aten.add.Tensor %751, %750, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc421) | |
%753 = torch.aten.view %752, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc422) | |
%754 = torch.aten.gelu %753, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc423) | |
%755 = torch.aten.transpose.int %135, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc424) | |
%756 = torch.aten.view %754, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc425) | |
%757 = torch.aten.mm %756, %755 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc426) | |
%758 = torch.aten.mul.Scalar %134, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc426) | |
%759 = torch.aten.add.Tensor %758, %757, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc426) | |
%760 = torch.aten.view %759, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc427) | |
%761 = torch.aten.add.Tensor %760, %result0_27, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc428) | |
%result0_30, %result1_31, %result2_32 = torch.aten.native_layer_norm %761, %207, %133, %132, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc429) | |
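// Next encoder layer (weights %131 onward); same pattern as above. | |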
%762 = torch.aten.transpose.int %131, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc430) | |
%763 = torch.aten.view %result0_30, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc431) | |
%764 = torch.aten.mm %763, %762 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc432) | |
%765 = torch.aten.mul.Scalar %130, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc432) | |
%766 = torch.aten.add.Tensor %765, %764, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc432) | |
%767 = torch.aten.view %766, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc433) | |
%768 = torch.aten.view %767, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc434) | |
%769 = torch.aten.permute %768, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc435) | |
%770 = torch.aten.clone %769, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc436) | |
%771 = torch.aten.view %770, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc437) | |
%772 = torch.aten.transpose.int %129, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc438) | |
%773 = torch.aten.view %result0_30, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc439) | |
%774 = torch.aten.mm %773, %772 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc440) | |
%775 = torch.aten.mul.Scalar %128, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc440) | |
%776 = torch.aten.add.Tensor %775, %774, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc440) | |
%777 = torch.aten.view %776, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc441) | |
%778 = torch.aten.view %777, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc442) | |
%779 = torch.aten.permute %778, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc443) | |
%780 = torch.aten.clone %779, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc444) | |
%781 = torch.aten.view %780, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc445) | |
%782 = torch.aten.transpose.int %127, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc446) | |
%783 = torch.aten.view %result0_30, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc447) | |
%784 = torch.aten.mm %783, %782 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc448) | |
%785 = torch.aten.mul.Scalar %126, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc448) | |
%786 = torch.aten.add.Tensor %785, %784, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc448) | |
%787 = torch.aten.view %786, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc449) | |
%788 = torch.aten.view %787, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc450) | |
%789 = torch.aten.permute %788, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc451) | |
%790 = torch.aten.clone %789, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc452) | |
%791 = torch.aten.view %790, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc453) | |
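// [annotation] Content-to-content attention scores: Q x K^T (12x128x128), divided
// by 13.8564 ~= sqrt(3*64). The factor 3 is consistent with DeBERTa-style
// disentangled attention, where two position-based score terms are added below.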
%792 = torch.aten.transpose.int %781, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc454) | |
%793 = torch.aten.bmm %771, %792 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc455) | |
%794 = torch.aten.div.Scalar %793, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc456) | |
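// [annotation] Relative-position branch: %225 holds the 128x128 relative-position
// index map, and %result0_0 appears to be the layer-normed 512x768
// relative-position embedding table (both defined earlier). The table is
// re-projected with this layer's query (%131) and key (%129) weights; each
// view/broadcast_to/view triple is a decomposed expand that leaves the
// 12x512x64 shape unchanged here because the batch size is 1.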
%795 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc457) | |
%796 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc458) | |
%797 = torch.aten.transpose.int %131, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc459) | |
%798 = torch.aten.view %796, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc460) | |
%799 = torch.aten.mm %798, %797 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc461) | |
%800 = torch.aten.mul.Scalar %130, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc461) | |
%801 = torch.aten.add.Tensor %800, %799, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc461) | |
%802 = torch.aten.view %801, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc462) | |
%803 = torch.aten.view %802, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc463) | |
%804 = torch.aten.permute %803, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc464) | |
%805 = torch.aten.clone %804, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc465) | |
%806 = torch.aten.view %805, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc466) | |
%807 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc467) | |
%808 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc467) | |
%809 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc467) | |
%810 = torch.aten.view %806, %807 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc467) | |
%811 = torch.aten.broadcast_to %810, %808 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc467) | |
%812 = torch.aten.view %811, %809 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc467) | |
%813 = torch.aten.transpose.int %129, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc468) | |
%814 = torch.aten.view %796, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc469) | |
%815 = torch.aten.mm %814, %813 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc470) | |
%816 = torch.aten.mul.Scalar %128, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc470) | |
%817 = torch.aten.add.Tensor %816, %815, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc470) | |
%818 = torch.aten.view %817, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc471) | |
%819 = torch.aten.view %818, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc472) | |
%820 = torch.aten.permute %819, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc473) | |
%821 = torch.aten.clone %820, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc474) | |
%822 = torch.aten.view %821, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc475) | |
%823 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc476) | |
%824 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc476) | |
%825 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc476) | |
%826 = torch.aten.view %822, %823 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc476) | |
%827 = torch.aten.broadcast_to %826, %824 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc476) | |
%828 = torch.aten.view %827, %825 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc476) | |
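// [annotation] c2p term: Q x pos_K^T (12x128x512), gathered at the relative
// indices shifted by +256 and clamped to [0,511] (512 position buckets). p2c
// term below: K x pos_Q^T, gathered at the negated, shifted indices and then
// transposed back. Both are rescaled by 1/13.8564 and summed into the
// content-to-content scores.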
%829 = torch.aten.transpose.int %828, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc477) | |
%830 = torch.aten.bmm %771, %829 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc478) | |
%831 = torch.aten.add.Scalar %795, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc479) | |
%832 = torch.aten.clamp %831, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc480) | |
%833 = torch.aten.squeeze.dim %832, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc481) | |
%834 = torch.aten.broadcast_to %833, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc482) | |
%835 = torch.aten.gather %830, %int-1, %834, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc483) | |
%836 = torch.aten.div.Scalar %835, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc484) | |
%837 = torch.aten.add.Scalar %836, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc485) | |
%838 = torch.aten.neg %795 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc486) | |
%839 = torch.aten.add.Scalar %838, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc487) | |
%840 = torch.aten.clamp %839, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc488) | |
%841 = torch.aten.transpose.int %812, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc489) | |
%842 = torch.aten.bmm %781, %841 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc490) | |
%843 = torch.aten.squeeze.dim %840, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc491) | |
%844 = torch.aten.broadcast_to %843, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc492) | |
%845 = torch.aten.gather %842, %int-1, %844, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc493) | |
%846 = torch.aten.transpose.int %845, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc494) | |
%847 = torch.aten.div.Scalar %846, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc495) | |
%848 = torch.aten.add.Tensor %837, %847, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc496) | |
%849 = torch.aten.add.Tensor %794, %848, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc497) | |
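// [annotation] Masked softmax over the last dim, fully decomposed: the si8
// attention mask %219 is materialized as i1 (NumToTensor / to.dtype / broadcast /
// copy), inverted with bitwise_not, and masked positions are filled with the
// scalar %190 (defined earlier, elided here). max/sub/exp/sum/div then implement
// a numerically stable softmax, and masked positions are zeroed again afterwards.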
%850 = torch.aten.view %849, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc498) | |
%851 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc499) | |
%852 = torch.aten.to.dtype %851, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc499) | |
%853 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc499) | |
%854 = torch.aten.broadcast_to %852, %853 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc499) | |
%855 = torch.aten.copy %854, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc499) | |
%856 = torch.aten.bitwise_not %855 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc500) | |
%857 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc501) | |
%858 = torch.aten.masked_fill.Tensor %850, %856, %857 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc502) | |
%values_33, %indices_34 = torch.aten.max.dim %858, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc503) | |
%859 = torch.aten.sub.Tensor %858, %values_33, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc503) | |
%860 = torch.aten.exp %859 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc503) | |
%861 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc503) | |
%862 = torch.aten.sum.dim_IntList %860, %861, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc503) | |
%863 = torch.aten.div.Tensor %860, %862 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc503) | |
%864 = torch.aten.masked_fill.Scalar %863, %856, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc504) | |
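// [annotation] Attention context: probs x V (12x128x64), heads re-merged via
// permute + clone + view back to 1x128x768, followed by the output projection,
// a residual add with the block input, and layer norm.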
%865 = torch.aten.view %864, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc505) | |
%866 = torch.aten.bmm %865, %791 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc506) | |
%867 = torch.aten.view %866, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc507) | |
%868 = torch.aten.permute %867, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc508) | |
%869 = torch.aten.clone %868, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc509) | |
%870 = torch.aten.view %869, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc510) | |
%871 = torch.aten.transpose.int %125, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc511) | |
%872 = torch.aten.view %870, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc512) | |
%873 = torch.aten.mm %872, %871 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc513) | |
%874 = torch.aten.mul.Scalar %124, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc513) | |
%875 = torch.aten.add.Tensor %874, %873, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc513) | |
%876 = torch.aten.view %875, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc514) | |
%877 = torch.aten.add.Tensor %876, %result0_30, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc515) | |
%result0_35, %result1_36, %result2_37 = torch.aten.native_layer_norm %877, %207, %123, %122, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc516) | |
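// [annotation] Feed-forward sub-block for this layer: the same decomposed
// 768 -> 3072 -> 768 pattern with GELU, residual add, and layer norm.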
%878 = torch.aten.transpose.int %121, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc517) | |
%879 = torch.aten.view %result0_35, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc518) | |
%880 = torch.aten.mm %879, %878 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc519) | |
%881 = torch.aten.mul.Scalar %120, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc519) | |
%882 = torch.aten.add.Tensor %881, %880, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc519) | |
%883 = torch.aten.view %882, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc520) | |
%884 = torch.aten.gelu %883, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc521) | |
%885 = torch.aten.transpose.int %119, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc522) | |
%886 = torch.aten.view %884, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc523) | |
%887 = torch.aten.mm %886, %885 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc524) | |
%888 = torch.aten.mul.Scalar %118, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc524) | |
%889 = torch.aten.add.Tensor %888, %887, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc524) | |
%890 = torch.aten.view %889, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc525) | |
%891 = torch.aten.add.Tensor %890, %result0_35, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc526) | |
%result0_38, %result1_39, %result2_40 = torch.aten.native_layer_norm %891, %207, %117, %116, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc527) | |
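// [annotation] The disentangled-attention pattern above repeats verbatim for the
// following layer, with that layer's weights (%115/%113/%111 for Q/K/V and %109
// for the output projection).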
%892 = torch.aten.transpose.int %115, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc528) | |
%893 = torch.aten.view %result0_38, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc529) | |
%894 = torch.aten.mm %893, %892 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc530) | |
%895 = torch.aten.mul.Scalar %114, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc530) | |
%896 = torch.aten.add.Tensor %895, %894, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc530) | |
%897 = torch.aten.view %896, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc531) | |
%898 = torch.aten.view %897, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc532) | |
%899 = torch.aten.permute %898, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc533) | |
%900 = torch.aten.clone %899, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc534) | |
%901 = torch.aten.view %900, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc535) | |
%902 = torch.aten.transpose.int %113, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc536) | |
%903 = torch.aten.view %result0_38, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc537) | |
%904 = torch.aten.mm %903, %902 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc538) | |
%905 = torch.aten.mul.Scalar %112, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc538) | |
%906 = torch.aten.add.Tensor %905, %904, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc538) | |
%907 = torch.aten.view %906, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc539) | |
%908 = torch.aten.view %907, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc540) | |
%909 = torch.aten.permute %908, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc541) | |
%910 = torch.aten.clone %909, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc542) | |
%911 = torch.aten.view %910, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc543) | |
%912 = torch.aten.transpose.int %111, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc544) | |
%913 = torch.aten.view %result0_38, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc545) | |
%914 = torch.aten.mm %913, %912 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc546) | |
%915 = torch.aten.mul.Scalar %110, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc546) | |
%916 = torch.aten.add.Tensor %915, %914, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc546) | |
%917 = torch.aten.view %916, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc547) | |
%918 = torch.aten.view %917, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc548) | |
%919 = torch.aten.permute %918, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc549) | |
%920 = torch.aten.clone %919, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc550) | |
%921 = torch.aten.view %920, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc551) | |
%922 = torch.aten.transpose.int %911, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc552) | |
%923 = torch.aten.bmm %901, %922 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc553) | |
%924 = torch.aten.div.Scalar %923, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc554) | |
%925 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc555) | |
%926 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc556) | |
%927 = torch.aten.transpose.int %115, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc557) | |
%928 = torch.aten.view %926, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc558) | |
%929 = torch.aten.mm %928, %927 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc559) | |
%930 = torch.aten.mul.Scalar %114, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc559) | |
%931 = torch.aten.add.Tensor %930, %929, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc559) | |
%932 = torch.aten.view %931, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc560) | |
%933 = torch.aten.view %932, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc561) | |
%934 = torch.aten.permute %933, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc562) | |
%935 = torch.aten.clone %934, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc563) | |
%936 = torch.aten.view %935, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc564) | |
%937 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc565) | |
%938 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc565) | |
%939 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc565) | |
%940 = torch.aten.view %936, %937 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc565) | |
%941 = torch.aten.broadcast_to %940, %938 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc565) | |
%942 = torch.aten.view %941, %939 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc565) | |
%943 = torch.aten.transpose.int %113, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc566) | |
%944 = torch.aten.view %926, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc567) | |
%945 = torch.aten.mm %944, %943 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc568) | |
%946 = torch.aten.mul.Scalar %112, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc568) | |
%947 = torch.aten.add.Tensor %946, %945, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc568) | |
%948 = torch.aten.view %947, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc569) | |
%949 = torch.aten.view %948, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc570) | |
%950 = torch.aten.permute %949, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc571) | |
%951 = torch.aten.clone %950, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc572) | |
%952 = torch.aten.view %951, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc573) | |
%953 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc574) | |
%954 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc574) | |
%955 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc574) | |
%956 = torch.aten.view %952, %953 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc574) | |
%957 = torch.aten.broadcast_to %956, %954 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc574) | |
%958 = torch.aten.view %957, %955 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc574) | |
%959 = torch.aten.transpose.int %958, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc575) | |
%960 = torch.aten.bmm %901, %959 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc576) | |
%961 = torch.aten.add.Scalar %925, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc577) | |
%962 = torch.aten.clamp %961, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc578) | |
%963 = torch.aten.squeeze.dim %962, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc579) | |
%964 = torch.aten.broadcast_to %963, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc580) | |
%965 = torch.aten.gather %960, %int-1, %964, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc581) | |
%966 = torch.aten.div.Scalar %965, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc582) | |
%967 = torch.aten.add.Scalar %966, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc583) | |
%968 = torch.aten.neg %925 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc584) | |
%969 = torch.aten.add.Scalar %968, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc585) | |
%970 = torch.aten.clamp %969, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc586) | |
%971 = torch.aten.transpose.int %942, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc587) | |
%972 = torch.aten.bmm %911, %971 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc588) | |
%973 = torch.aten.squeeze.dim %970, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc589) | |
%974 = torch.aten.broadcast_to %973, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc590) | |
%975 = torch.aten.gather %972, %int-1, %974, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc591) | |
%976 = torch.aten.transpose.int %975, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc592) | |
%977 = torch.aten.div.Scalar %976, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc593) | |
%978 = torch.aten.add.Tensor %967, %977, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc594) | |
%979 = torch.aten.add.Tensor %924, %978, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc595) | |
%980 = torch.aten.view %979, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc596) | |
%981 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc597) | |
%982 = torch.aten.to.dtype %981, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc597) | |
%983 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc597) | |
%984 = torch.aten.broadcast_to %982, %983 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc597) | |
%985 = torch.aten.copy %984, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc597) | |
%986 = torch.aten.bitwise_not %985 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc598) | |
%987 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc599) | |
%988 = torch.aten.masked_fill.Tensor %980, %986, %987 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc600) | |
%values_41, %indices_42 = torch.aten.max.dim %988, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc601) | |
%989 = torch.aten.sub.Tensor %988, %values_41, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc601) | |
%990 = torch.aten.exp %989 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc601) | |
%991 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc601) | |
%992 = torch.aten.sum.dim_IntList %990, %991, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc601) | |
%993 = torch.aten.div.Tensor %990, %992 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc601) | |
%994 = torch.aten.masked_fill.Scalar %993, %986, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc602) | |
%995 = torch.aten.view %994, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc603) | |
%996 = torch.aten.bmm %995, %921 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc604) | |
%997 = torch.aten.view %996, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc605) | |
%998 = torch.aten.permute %997, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc606) | |
%999 = torch.aten.clone %998, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc607) | |
%1000 = torch.aten.view %999, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc608) | |
%1001 = torch.aten.transpose.int %109, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc609) | |
%1002 = torch.aten.view %1000, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc610) | |
%1003 = torch.aten.mm %1002, %1001 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc611) | |
%1004 = torch.aten.mul.Scalar %108, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc611) | |
%1005 = torch.aten.add.Tensor %1004, %1003, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc611) | |
%1006 = torch.aten.view %1005, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc612) | |
%1007 = torch.aten.add.Tensor %1006, %result0_38, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc613) | |
%result0_43, %result1_44, %result2_45 = torch.aten.native_layer_norm %1007, %207, %107, %106, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc614) | |
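// [annotation] Feed-forward sub-block (768 -> 3072 -> 768, GELU) and layer norm
// for this layer.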
%1008 = torch.aten.transpose.int %105, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc615) | |
%1009 = torch.aten.view %result0_43, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc616) | |
%1010 = torch.aten.mm %1009, %1008 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc617) | |
%1011 = torch.aten.mul.Scalar %104, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc617) | |
%1012 = torch.aten.add.Tensor %1011, %1010, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc617) | |
%1013 = torch.aten.view %1012, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc618) | |
%1014 = torch.aten.gelu %1013, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc619) | |
%1015 = torch.aten.transpose.int %103, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc620) | |
%1016 = torch.aten.view %1014, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc621) | |
%1017 = torch.aten.mm %1016, %1015 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc622) | |
%1018 = torch.aten.mul.Scalar %102, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc622) | |
%1019 = torch.aten.add.Tensor %1018, %1017, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc622) | |
%1020 = torch.aten.view %1019, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc623) | |
%1021 = torch.aten.add.Tensor %1020, %result0_43, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc624) | |
%result0_46, %result1_47, %result2_48 = torch.aten.native_layer_norm %1021, %207, %101, %100, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc625) | |
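// [annotation] Next layer: identical attention structure with weights
// %99/%97/%95 (Q/K/V) and %93 (output projection).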
%1022 = torch.aten.transpose.int %99, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc626) | |
%1023 = torch.aten.view %result0_46, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc627) | |
%1024 = torch.aten.mm %1023, %1022 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc628) | |
%1025 = torch.aten.mul.Scalar %98, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc628) | |
%1026 = torch.aten.add.Tensor %1025, %1024, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc628) | |
%1027 = torch.aten.view %1026, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc629) | |
%1028 = torch.aten.view %1027, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc630) | |
%1029 = torch.aten.permute %1028, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc631) | |
%1030 = torch.aten.clone %1029, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc632) | |
%1031 = torch.aten.view %1030, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc633) | |
%1032 = torch.aten.transpose.int %97, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc634) | |
%1033 = torch.aten.view %result0_46, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc635) | |
%1034 = torch.aten.mm %1033, %1032 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc636) | |
%1035 = torch.aten.mul.Scalar %96, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc636) | |
%1036 = torch.aten.add.Tensor %1035, %1034, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc636) | |
%1037 = torch.aten.view %1036, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc637) | |
%1038 = torch.aten.view %1037, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc638) | |
%1039 = torch.aten.permute %1038, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc639) | |
%1040 = torch.aten.clone %1039, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc640) | |
%1041 = torch.aten.view %1040, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc641) | |
%1042 = torch.aten.transpose.int %95, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc642) | |
%1043 = torch.aten.view %result0_46, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc643) | |
%1044 = torch.aten.mm %1043, %1042 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc644) | |
%1045 = torch.aten.mul.Scalar %94, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc644) | |
%1046 = torch.aten.add.Tensor %1045, %1044, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc644) | |
%1047 = torch.aten.view %1046, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc645) | |
%1048 = torch.aten.view %1047, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc646) | |
%1049 = torch.aten.permute %1048, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc647) | |
%1050 = torch.aten.clone %1049, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc648) | |
%1051 = torch.aten.view %1050, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc649) | |
%1052 = torch.aten.transpose.int %1041, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc650) | |
%1053 = torch.aten.bmm %1031, %1052 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc651) | |
%1054 = torch.aten.div.Scalar %1053, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc652) | |
%1055 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc653) | |
%1056 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc654) | |
%1057 = torch.aten.transpose.int %99, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc655) | |
%1058 = torch.aten.view %1056, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc656) | |
%1059 = torch.aten.mm %1058, %1057 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc657) | |
%1060 = torch.aten.mul.Scalar %98, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc657) | |
%1061 = torch.aten.add.Tensor %1060, %1059, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc657) | |
%1062 = torch.aten.view %1061, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc658) | |
%1063 = torch.aten.view %1062, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc659) | |
%1064 = torch.aten.permute %1063, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc660) | |
%1065 = torch.aten.clone %1064, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc661) | |
%1066 = torch.aten.view %1065, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc662) | |
%1067 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc663) | |
%1068 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc663) | |
%1069 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc663) | |
%1070 = torch.aten.view %1066, %1067 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc663) | |
%1071 = torch.aten.broadcast_to %1070, %1068 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc663) | |
%1072 = torch.aten.view %1071, %1069 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc663) | |
%1073 = torch.aten.transpose.int %97, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc664) | |
%1074 = torch.aten.view %1056, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc665) | |
%1075 = torch.aten.mm %1074, %1073 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc666) | |
%1076 = torch.aten.mul.Scalar %96, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc666) | |
%1077 = torch.aten.add.Tensor %1076, %1075, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc666) | |
%1078 = torch.aten.view %1077, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc667) | |
%1079 = torch.aten.view %1078, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc668) | |
%1080 = torch.aten.permute %1079, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc669) | |
%1081 = torch.aten.clone %1080, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc670) | |
%1082 = torch.aten.view %1081, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc671) | |
%1083 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc672) | |
%1084 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc672) | |
%1085 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc672) | |
%1086 = torch.aten.view %1082, %1083 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc672) | |
%1087 = torch.aten.broadcast_to %1086, %1084 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc672) | |
%1088 = torch.aten.view %1087, %1085 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc672) | |
%1089 = torch.aten.transpose.int %1088, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc673) | |
%1090 = torch.aten.bmm %1031, %1089 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc674) | |
%1091 = torch.aten.add.Scalar %1055, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc675) | |
%1092 = torch.aten.clamp %1091, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc676) | |
%1093 = torch.aten.squeeze.dim %1092, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc677) | |
%1094 = torch.aten.broadcast_to %1093, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc678) | |
%1095 = torch.aten.gather %1090, %int-1, %1094, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc679) | |
%1096 = torch.aten.div.Scalar %1095, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc680) | |
%1097 = torch.aten.add.Scalar %1096, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc681) | |
%1098 = torch.aten.neg %1055 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc682) | |
%1099 = torch.aten.add.Scalar %1098, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc683) | |
%1100 = torch.aten.clamp %1099, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc684) | |
%1101 = torch.aten.transpose.int %1072, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc685) | |
%1102 = torch.aten.bmm %1041, %1101 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc686) | |
%1103 = torch.aten.squeeze.dim %1100, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc687) | |
%1104 = torch.aten.broadcast_to %1103, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc688) | |
%1105 = torch.aten.gather %1102, %int-1, %1104, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc689) | |
%1106 = torch.aten.transpose.int %1105, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc690) | |
%1107 = torch.aten.div.Scalar %1106, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc691) | |
%1108 = torch.aten.add.Tensor %1097, %1107, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc692) | |
%1109 = torch.aten.add.Tensor %1054, %1108, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc693) | |
%1110 = torch.aten.view %1109, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc694) | |
%1111 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc695) | |
%1112 = torch.aten.to.dtype %1111, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc695) | |
%1113 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc695) | |
%1114 = torch.aten.broadcast_to %1112, %1113 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc695) | |
%1115 = torch.aten.copy %1114, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc695) | |
%1116 = torch.aten.bitwise_not %1115 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc696) | |
%1117 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc697) | |
%1118 = torch.aten.masked_fill.Tensor %1110, %1116, %1117 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc698) | |
%values_49, %indices_50 = torch.aten.max.dim %1118, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc699) | |
%1119 = torch.aten.sub.Tensor %1118, %values_49, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc699) | |
%1120 = torch.aten.exp %1119 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc699) | |
%1121 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc699) | |
%1122 = torch.aten.sum.dim_IntList %1120, %1121, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc699) | |
%1123 = torch.aten.div.Tensor %1120, %1122 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc699) | |
%1124 = torch.aten.masked_fill.Scalar %1123, %1116, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc700) | |
%1125 = torch.aten.view %1124, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc701) | |
%1126 = torch.aten.bmm %1125, %1051 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc702) | |
%1127 = torch.aten.view %1126, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc703) | |
%1128 = torch.aten.permute %1127, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc704) | |
%1129 = torch.aten.clone %1128, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc705) | |
%1130 = torch.aten.view %1129, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc706) | |
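    // Output projection -- torch-mlir's addmm-style decomposition of a linear layer: the
    // weight %93 is transposed, the input is flattened to 2-D for the matmul, and the
    // bias %92 is folded in as y = 1*b + x@W^T (mul.Scalar by %int1 carries addmm's beta).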
%1131 = torch.aten.transpose.int %93, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc707) | |
%1132 = torch.aten.view %1130, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc708) | |
%1133 = torch.aten.mm %1132, %1131 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc709) | |
%1134 = torch.aten.mul.Scalar %92, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc709) | |
%1135 = torch.aten.add.Tensor %1134, %1133, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc709) | |
%1136 = torch.aten.view %1135, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc710) | |
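    // Residual connection with the attention-block input %result0_46, then LayerNorm with
    // eps = 9.999990e-08 (~1e-7, consistent with DeBERTa's default layer_norm_eps).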
%1137 = torch.aten.add.Tensor %1136, %result0_46, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc711) | |
%result0_51, %result1_52, %result2_53 = torch.aten.native_layer_norm %1137, %207, %91, %90, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc712) | |
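    // Feed-forward block: 768 -> 3072 linear, GELU (approximate mode chosen by %str,
    // defined earlier), 3072 -> 768 linear, then residual add and LayerNorm -- the
    // standard transformer FFN.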
%1138 = torch.aten.transpose.int %89, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc713) | |
%1139 = torch.aten.view %result0_51, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc714) | |
%1140 = torch.aten.mm %1139, %1138 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc715) | |
%1141 = torch.aten.mul.Scalar %88, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc715) | |
%1142 = torch.aten.add.Tensor %1141, %1140, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc715) | |
%1143 = torch.aten.view %1142, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc716) | |
%1144 = torch.aten.gelu %1143, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc717) | |
%1145 = torch.aten.transpose.int %87, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc718) | |
%1146 = torch.aten.view %1144, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc719) | |
%1147 = torch.aten.mm %1146, %1145 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc720) | |
%1148 = torch.aten.mul.Scalar %86, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc720) | |
%1149 = torch.aten.add.Tensor %1148, %1147, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc720) | |
%1150 = torch.aten.view %1149, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc721) | |
%1151 = torch.aten.add.Tensor %1150, %result0_51, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc722) | |
%result0_54, %result1_55, %result2_56 = torch.aten.native_layer_norm %1151, %207, %85, %84, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc723) | |
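    // Next encoder layer. Three linear projections of %result0_54 with weights %83/%81/%79
    // (biases %82/%80/%78) produce the query (%1161), key (%1171), and value (%1181)
    // streams, each reshaped to [12,128,64]: 12 heads x 64 dims per head.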
%1152 = torch.aten.transpose.int %83, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc724) | |
%1153 = torch.aten.view %result0_54, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc725) | |
%1154 = torch.aten.mm %1153, %1152 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc726) | |
%1155 = torch.aten.mul.Scalar %82, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc726) | |
%1156 = torch.aten.add.Tensor %1155, %1154, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc726) | |
%1157 = torch.aten.view %1156, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc727) | |
%1158 = torch.aten.view %1157, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc728) | |
%1159 = torch.aten.permute %1158, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc729) | |
%1160 = torch.aten.clone %1159, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc730) | |
%1161 = torch.aten.view %1160, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc731) | |
%1162 = torch.aten.transpose.int %81, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc732) | |
%1163 = torch.aten.view %result0_54, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc733) | |
%1164 = torch.aten.mm %1163, %1162 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc734) | |
%1165 = torch.aten.mul.Scalar %80, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc734) | |
%1166 = torch.aten.add.Tensor %1165, %1164, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc734) | |
%1167 = torch.aten.view %1166, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc735) | |
%1168 = torch.aten.view %1167, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc736) | |
%1169 = torch.aten.permute %1168, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc737) | |
%1170 = torch.aten.clone %1169, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc738) | |
%1171 = torch.aten.view %1170, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc739) | |
%1172 = torch.aten.transpose.int %79, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc740) | |
%1173 = torch.aten.view %result0_54, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc741) | |
%1174 = torch.aten.mm %1173, %1172 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc742) | |
%1175 = torch.aten.mul.Scalar %78, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc742) | |
%1176 = torch.aten.add.Tensor %1175, %1174, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc742) | |
%1177 = torch.aten.view %1176, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc743) | |
%1178 = torch.aten.view %1177, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc744) | |
%1179 = torch.aten.permute %1178, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc745) | |
%1180 = torch.aten.clone %1179, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc746) | |
%1181 = torch.aten.view %1180, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc747) | |
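    // Content-to-content scores for this layer: Q @ K^T, scaled by the same
    // 1/sqrt(3*64) ~= 1/13.8564 factor noted above.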
%1182 = torch.aten.transpose.int %1171, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc748) | |
%1183 = torch.aten.bmm %1161, %1182 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc749) | |
%1184 = torch.aten.div.Scalar %1183, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc750) | |
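    // Relative-position branch: the relative-position index map %225 gains a head dim and
    // %result0_0 (presumably the normalized 512-entry position-embedding table) gains a
    // batch dim; it is then projected with this layer's own query (%83) and key (%81)
    // weights to yield position-query (%1202) and position-key (%1218) heads.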
%1185 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc751) | |
%1186 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc752) | |
%1187 = torch.aten.transpose.int %83, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc753) | |
%1188 = torch.aten.view %1186, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc754) | |
%1189 = torch.aten.mm %1188, %1187 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc755) | |
%1190 = torch.aten.mul.Scalar %82, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc755) | |
%1191 = torch.aten.add.Tensor %1190, %1189, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc755) | |
%1192 = torch.aten.view %1191, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc756) | |
%1193 = torch.aten.view %1192, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc757) | |
%1194 = torch.aten.permute %1193, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc758) | |
%1195 = torch.aten.clone %1194, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc759) | |
%1196 = torch.aten.view %1195, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc760) | |
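    // The view -> broadcast_to -> view triple below is torch-mlir's expand/repeat
    // decomposition. Every inserted dim is broadcast to size 1, so %1202 is
    // shape-identical to %1196; with batch size 1 the repeat degenerates to a copy.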
%1197 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc761) | |
%1198 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc761) | |
%1199 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc761) | |
%1200 = torch.aten.view %1196, %1197 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc761) | |
%1201 = torch.aten.broadcast_to %1200, %1198 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc761) | |
%1202 = torch.aten.view %1201, %1199 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc761) | |
%1203 = torch.aten.transpose.int %81, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc762) | |
%1204 = torch.aten.view %1186, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc763) | |
%1205 = torch.aten.mm %1204, %1203 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc764) | |
%1206 = torch.aten.mul.Scalar %80, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc764) | |
%1207 = torch.aten.add.Tensor %1206, %1205, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc764) | |
%1208 = torch.aten.view %1207, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc765) | |
%1209 = torch.aten.view %1208, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc766) | |
%1210 = torch.aten.permute %1209, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc767) | |
%1211 = torch.aten.clone %1210, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc768) | |
%1212 = torch.aten.view %1211, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc769) | |
%1213 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc770) | |
%1214 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc770) | |
%1215 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc770) | |
%1216 = torch.aten.view %1212, %1213 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc770) | |
%1217 = torch.aten.broadcast_to %1216, %1214 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc770) | |
%1218 = torch.aten.view %1217, %1215 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc770) | |
%1219 = torch.aten.transpose.int %1218, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc771) | |
%1220 = torch.aten.bmm %1161, %1219 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc772) | |
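    // Content-to-position scores: queries are matched against all 512 position keys, then
    // aten.gather selects, per query/key pair, the column at its relative distance --
    // offset by +256 (half of the 512 position buckets) and clamped to [0,511] --
    // reducing [12,128,512] to [12,128,128].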
%1221 = torch.aten.add.Scalar %1185, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc773) | |
%1222 = torch.aten.clamp %1221, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc774) | |
%1223 = torch.aten.squeeze.dim %1222, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc775) | |
%1224 = torch.aten.broadcast_to %1223, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc776) | |
%1225 = torch.aten.gather %1220, %int-1, %1224, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc777) | |
%1226 = torch.aten.div.Scalar %1225, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc778) | |
%1227 = torch.aten.add.Scalar %1226, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc779) | |
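    // Position-to-content scores, the mirrored path: relative distances are negated before
    // the same +256/clamp bucketing; keys (%1171) are matched against the position queries
    // (%1202), gathered, transposed back to query-major order, and scaled identically.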
%1228 = torch.aten.neg %1185 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc780) | |
%1229 = torch.aten.add.Scalar %1228, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc781) | |
%1230 = torch.aten.clamp %1229, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc782) | |
%1231 = torch.aten.transpose.int %1202, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc783) | |
%1232 = torch.aten.bmm %1171, %1231 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc784) | |
%1233 = torch.aten.squeeze.dim %1230, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc785) | |
%1234 = torch.aten.broadcast_to %1233, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc786) | |
%1235 = torch.aten.gather %1232, %int-1, %1234, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc787) | |
%1236 = torch.aten.transpose.int %1235, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc788) | |
%1237 = torch.aten.div.Scalar %1236, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc789) | |
%1238 = torch.aten.add.Tensor %1227, %1237, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc790) | |
%1239 = torch.aten.add.Tensor %1184, %1238, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc791) | |
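    // %1239 again sums c2c + (c2p + p2c). From here the layer repeats the masked-softmax,
    // context, output-projection, and feed-forward sequence annotated in the previous
    // layer, ending in the LayerNorm %result0_62 that feeds the next layer.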
%1240 = torch.aten.view %1239, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc792) | |
%1241 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc793) | |
%1242 = torch.aten.to.dtype %1241, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc793) | |
%1243 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc793) | |
%1244 = torch.aten.broadcast_to %1242, %1243 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc793) | |
%1245 = torch.aten.copy %1244, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc793) | |
%1246 = torch.aten.bitwise_not %1245 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc794) | |
%1247 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc795) | |
%1248 = torch.aten.masked_fill.Tensor %1240, %1246, %1247 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc796) | |
%values_57, %indices_58 = torch.aten.max.dim %1248, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc797) | |
%1249 = torch.aten.sub.Tensor %1248, %values_57, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc797) | |
%1250 = torch.aten.exp %1249 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc797) | |
%1251 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc797) | |
%1252 = torch.aten.sum.dim_IntList %1250, %1251, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc797) | |
%1253 = torch.aten.div.Tensor %1250, %1252 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc797) | |
%1254 = torch.aten.masked_fill.Scalar %1253, %1246, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc798) | |
%1255 = torch.aten.view %1254, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc799) | |
%1256 = torch.aten.bmm %1255, %1181 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc800) | |
%1257 = torch.aten.view %1256, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc801) | |
%1258 = torch.aten.permute %1257, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc802) | |
%1259 = torch.aten.clone %1258, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc803) | |
%1260 = torch.aten.view %1259, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc804) | |
%1261 = torch.aten.transpose.int %77, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc805) | |
%1262 = torch.aten.view %1260, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc806) | |
%1263 = torch.aten.mm %1262, %1261 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc807) | |
%1264 = torch.aten.mul.Scalar %76, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc807) | |
%1265 = torch.aten.add.Tensor %1264, %1263, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc807) | |
%1266 = torch.aten.view %1265, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc808) | |
%1267 = torch.aten.add.Tensor %1266, %result0_54, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc809) | |
%result0_59, %result1_60, %result2_61 = torch.aten.native_layer_norm %1267, %207, %75, %74, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc810) | |
%1268 = torch.aten.transpose.int %73, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc811) | |
%1269 = torch.aten.view %result0_59, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc812) | |
%1270 = torch.aten.mm %1269, %1268 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc813) | |
%1271 = torch.aten.mul.Scalar %72, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc813) | |
%1272 = torch.aten.add.Tensor %1271, %1270, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc813) | |
%1273 = torch.aten.view %1272, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc814) | |
%1274 = torch.aten.gelu %1273, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc815) | |
%1275 = torch.aten.transpose.int %71, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc816) | |
%1276 = torch.aten.view %1274, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc817) | |
%1277 = torch.aten.mm %1276, %1275 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc818) | |
%1278 = torch.aten.mul.Scalar %70, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc818) | |
%1279 = torch.aten.add.Tensor %1278, %1277, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc818) | |
%1280 = torch.aten.view %1279, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc819) | |
%1281 = torch.aten.add.Tensor %1280, %result0_59, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc820) | |
%result0_62, %result1_63, %result2_64 = torch.aten.native_layer_norm %1281, %207, %69, %68, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc821) | |
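    // A structurally identical encoder layer begins here, consuming %result0_62 and the
    // weight/bias pairs %67 down through %52; only the SSA numbering differs.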
%1282 = torch.aten.transpose.int %67, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc822) | |
%1283 = torch.aten.view %result0_62, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc823) | |
%1284 = torch.aten.mm %1283, %1282 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc824) | |
%1285 = torch.aten.mul.Scalar %66, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc824) | |
%1286 = torch.aten.add.Tensor %1285, %1284, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc824) | |
%1287 = torch.aten.view %1286, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc825) | |
%1288 = torch.aten.view %1287, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc826) | |
%1289 = torch.aten.permute %1288, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc827) | |
%1290 = torch.aten.clone %1289, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc828) | |
%1291 = torch.aten.view %1290, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc829) | |
%1292 = torch.aten.transpose.int %65, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc830) | |
%1293 = torch.aten.view %result0_62, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc831) | |
%1294 = torch.aten.mm %1293, %1292 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc832) | |
%1295 = torch.aten.mul.Scalar %64, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc832) | |
%1296 = torch.aten.add.Tensor %1295, %1294, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc832) | |
%1297 = torch.aten.view %1296, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc833) | |
%1298 = torch.aten.view %1297, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc834) | |
%1299 = torch.aten.permute %1298, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc835) | |
%1300 = torch.aten.clone %1299, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc836) | |
%1301 = torch.aten.view %1300, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc837) | |
%1302 = torch.aten.transpose.int %63, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc838) | |
%1303 = torch.aten.view %result0_62, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc839) | |
%1304 = torch.aten.mm %1303, %1302 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc840) | |
%1305 = torch.aten.mul.Scalar %62, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc840) | |
%1306 = torch.aten.add.Tensor %1305, %1304, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc840) | |
%1307 = torch.aten.view %1306, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc841) | |
%1308 = torch.aten.view %1307, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc842) | |
%1309 = torch.aten.permute %1308, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc843) | |
%1310 = torch.aten.clone %1309, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc844) | |
%1311 = torch.aten.view %1310, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc845) | |
%1312 = torch.aten.transpose.int %1301, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc846) | |
%1313 = torch.aten.bmm %1291, %1312 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc847) | |
%1314 = torch.aten.div.Scalar %1313, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc848) | |
%1315 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc849) | |
%1316 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc850) | |
%1317 = torch.aten.transpose.int %67, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc851) | |
%1318 = torch.aten.view %1316, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc852) | |
%1319 = torch.aten.mm %1318, %1317 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc853) | |
%1320 = torch.aten.mul.Scalar %66, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc853) | |
%1321 = torch.aten.add.Tensor %1320, %1319, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc853) | |
%1322 = torch.aten.view %1321, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc854) | |
%1323 = torch.aten.view %1322, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc855) | |
%1324 = torch.aten.permute %1323, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc856) | |
%1325 = torch.aten.clone %1324, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc857) | |
%1326 = torch.aten.view %1325, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc858) | |
%1327 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc859) | |
%1328 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc859) | |
%1329 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc859) | |
%1330 = torch.aten.view %1326, %1327 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc859) | |
%1331 = torch.aten.broadcast_to %1330, %1328 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc859) | |
%1332 = torch.aten.view %1331, %1329 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc859) | |
%1333 = torch.aten.transpose.int %65, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc860) | |
%1334 = torch.aten.view %1316, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc861) | |
%1335 = torch.aten.mm %1334, %1333 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc862) | |
%1336 = torch.aten.mul.Scalar %64, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc862) | |
%1337 = torch.aten.add.Tensor %1336, %1335, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc862) | |
%1338 = torch.aten.view %1337, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc863) | |
%1339 = torch.aten.view %1338, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc864) | |
%1340 = torch.aten.permute %1339, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc865) | |
%1341 = torch.aten.clone %1340, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc866) | |
%1342 = torch.aten.view %1341, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc867) | |
%1343 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc868) | |
%1344 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc868) | |
%1345 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc868) | |
%1346 = torch.aten.view %1342, %1343 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc868) | |
%1347 = torch.aten.broadcast_to %1346, %1344 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc868) | |
%1348 = torch.aten.view %1347, %1345 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc868) | |
%1349 = torch.aten.transpose.int %1348, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc869) | |
%1350 = torch.aten.bmm %1291, %1349 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc870) | |
%1351 = torch.aten.add.Scalar %1315, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc871) | |
%1352 = torch.aten.clamp %1351, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc872) | |
%1353 = torch.aten.squeeze.dim %1352, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc873) | |
%1354 = torch.aten.broadcast_to %1353, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc874) | |
%1355 = torch.aten.gather %1350, %int-1, %1354, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc875) | |
%1356 = torch.aten.div.Scalar %1355, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc876) | |
%1357 = torch.aten.add.Scalar %1356, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc877) | |
%1358 = torch.aten.neg %1315 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc878) | |
%1359 = torch.aten.add.Scalar %1358, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc879) | |
%1360 = torch.aten.clamp %1359, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc880) | |
%1361 = torch.aten.transpose.int %1332, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc881) | |
%1362 = torch.aten.bmm %1301, %1361 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc882) | |
%1363 = torch.aten.squeeze.dim %1360, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc883) | |
%1364 = torch.aten.broadcast_to %1363, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc884) | |
%1365 = torch.aten.gather %1362, %int-1, %1364, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc885) | |
%1366 = torch.aten.transpose.int %1365, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc886) | |
%1367 = torch.aten.div.Scalar %1366, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc887) | |
%1368 = torch.aten.add.Tensor %1357, %1367, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc888) | |
%1369 = torch.aten.add.Tensor %1314, %1368, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc889) | |
%1370 = torch.aten.view %1369, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc890) | |
%1371 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc891) | |
%1372 = torch.aten.to.dtype %1371, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc891) | |
%1373 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc891) | |
%1374 = torch.aten.broadcast_to %1372, %1373 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc891) | |
%1375 = torch.aten.copy %1374, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc891) | |
%1376 = torch.aten.bitwise_not %1375 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc892) | |
%1377 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc893) | |
%1378 = torch.aten.masked_fill.Tensor %1370, %1376, %1377 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc894) | |
%values_65, %indices_66 = torch.aten.max.dim %1378, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc895) | |
%1379 = torch.aten.sub.Tensor %1378, %values_65, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc895) | |
%1380 = torch.aten.exp %1379 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc895) | |
%1381 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc895) | |
%1382 = torch.aten.sum.dim_IntList %1380, %1381, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc895) | |
%1383 = torch.aten.div.Tensor %1380, %1382 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc895) | |
%1384 = torch.aten.masked_fill.Scalar %1383, %1376, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc896) | |
%1385 = torch.aten.view %1384, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc897) | |
%1386 = torch.aten.bmm %1385, %1311 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc898) | |
%1387 = torch.aten.view %1386, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc899) | |
%1388 = torch.aten.permute %1387, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc900) | |
%1389 = torch.aten.clone %1388, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc901) | |
%1390 = torch.aten.view %1389, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc902) | |
%1391 = torch.aten.transpose.int %61, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc903) | |
%1392 = torch.aten.view %1390, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc904) | |
%1393 = torch.aten.mm %1392, %1391 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc905) | |
%1394 = torch.aten.mul.Scalar %60, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc905) | |
%1395 = torch.aten.add.Tensor %1394, %1393, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc905) | |
%1396 = torch.aten.view %1395, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc906) | |
%1397 = torch.aten.add.Tensor %1396, %result0_62, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc907) | |
%result0_67, %result1_68, %result2_69 = torch.aten.native_layer_norm %1397, %207, %59, %58, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc908) | |
%1398 = torch.aten.transpose.int %57, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc909) | |
%1399 = torch.aten.view %result0_67, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc910) | |
%1400 = torch.aten.mm %1399, %1398 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc911) | |
%1401 = torch.aten.mul.Scalar %56, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc911) | |
%1402 = torch.aten.add.Tensor %1401, %1400, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc911) | |
%1403 = torch.aten.view %1402, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc912) | |
%1404 = torch.aten.gelu %1403, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc913) | |
%1405 = torch.aten.transpose.int %55, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc914) | |
%1406 = torch.aten.view %1404, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc915) | |
%1407 = torch.aten.mm %1406, %1405 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc916) | |
%1408 = torch.aten.mul.Scalar %54, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc916) | |
%1409 = torch.aten.add.Tensor %1408, %1407, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc916) | |
%1410 = torch.aten.view %1409, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc917) | |
%1411 = torch.aten.add.Tensor %1410, %result0_67, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc918) | |
%result0_70, %result1_71, %result2_72 = torch.aten.native_layer_norm %1411, %207, %53, %52, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc919) | |
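    // A further identical layer starts from %result0_70, using weights %51 onward
    // (query %51/%50, key %49/%48, value %47/%46), and again builds the disentangled
    // scores and masked softmax.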
%1412 = torch.aten.transpose.int %51, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc920) | |
%1413 = torch.aten.view %result0_70, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc921) | |
%1414 = torch.aten.mm %1413, %1412 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc922) | |
%1415 = torch.aten.mul.Scalar %50, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc922) | |
%1416 = torch.aten.add.Tensor %1415, %1414, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc922) | |
%1417 = torch.aten.view %1416, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc923) | |
%1418 = torch.aten.view %1417, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc924) | |
%1419 = torch.aten.permute %1418, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc925) | |
%1420 = torch.aten.clone %1419, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc926) | |
%1421 = torch.aten.view %1420, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc927) | |
%1422 = torch.aten.transpose.int %49, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc928) | |
%1423 = torch.aten.view %result0_70, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc929) | |
%1424 = torch.aten.mm %1423, %1422 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc930) | |
%1425 = torch.aten.mul.Scalar %48, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc930) | |
%1426 = torch.aten.add.Tensor %1425, %1424, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc930) | |
%1427 = torch.aten.view %1426, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc931) | |
%1428 = torch.aten.view %1427, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc932) | |
%1429 = torch.aten.permute %1428, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc933) | |
%1430 = torch.aten.clone %1429, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc934) | |
%1431 = torch.aten.view %1430, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc935) | |
%1432 = torch.aten.transpose.int %47, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc936) | |
%1433 = torch.aten.view %result0_70, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc937) | |
%1434 = torch.aten.mm %1433, %1432 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc938) | |
%1435 = torch.aten.mul.Scalar %46, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc938) | |
%1436 = torch.aten.add.Tensor %1435, %1434, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc938) | |
%1437 = torch.aten.view %1436, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc939) | |
%1438 = torch.aten.view %1437, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc940) | |
%1439 = torch.aten.permute %1438, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc941) | |
%1440 = torch.aten.clone %1439, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc942) | |
%1441 = torch.aten.view %1440, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc943) | |
%1442 = torch.aten.transpose.int %1431, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc944) | |
%1443 = torch.aten.bmm %1421, %1442 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc945) | |
%1444 = torch.aten.div.Scalar %1443, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc946) | |
%1445 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc947) | |
%1446 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc948) | |
%1447 = torch.aten.transpose.int %51, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc949) | |
%1448 = torch.aten.view %1446, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc950) | |
%1449 = torch.aten.mm %1448, %1447 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc951) | |
%1450 = torch.aten.mul.Scalar %50, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc951) | |
%1451 = torch.aten.add.Tensor %1450, %1449, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc951) | |
%1452 = torch.aten.view %1451, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc952) | |
%1453 = torch.aten.view %1452, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc953) | |
%1454 = torch.aten.permute %1453, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc954) | |
%1455 = torch.aten.clone %1454, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc955) | |
%1456 = torch.aten.view %1455, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc956) | |
%1457 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc957) | |
%1458 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc957) | |
%1459 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc957) | |
%1460 = torch.aten.view %1456, %1457 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc957) | |
%1461 = torch.aten.broadcast_to %1460, %1458 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc957) | |
%1462 = torch.aten.view %1461, %1459 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc957) | |
%1463 = torch.aten.transpose.int %49, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc958) | |
%1464 = torch.aten.view %1446, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc959) | |
%1465 = torch.aten.mm %1464, %1463 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc960) | |
%1466 = torch.aten.mul.Scalar %48, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc960) | |
%1467 = torch.aten.add.Tensor %1466, %1465, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc960) | |
%1468 = torch.aten.view %1467, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc961) | |
%1469 = torch.aten.view %1468, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc962) | |
%1470 = torch.aten.permute %1469, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc963) | |
%1471 = torch.aten.clone %1470, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc964) | |
%1472 = torch.aten.view %1471, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc965) | |
%1473 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc966) | |
%1474 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc966) | |
%1475 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc966) | |
%1476 = torch.aten.view %1472, %1473 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc966) | |
%1477 = torch.aten.broadcast_to %1476, %1474 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc966) | |
%1478 = torch.aten.view %1477, %1475 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc966) | |
%1479 = torch.aten.transpose.int %1478, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc967) | |
%1480 = torch.aten.bmm %1421, %1479 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc968) | |
%1481 = torch.aten.add.Scalar %1445, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc969)
%1482 = torch.aten.clamp %1481, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc970)
%1483 = torch.aten.squeeze.dim %1482, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc971)
%1484 = torch.aten.broadcast_to %1483, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc972)
%1485 = torch.aten.gather %1480, %int-1, %1484, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc973)
%1486 = torch.aten.div.Scalar %1485, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc974)
%1487 = torch.aten.add.Scalar %1486, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc975)
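// Position-to-content term: the same bucket indices, negated, shifted by +256,
// and clamped, then gathered from the key x pos-query scores below.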
%1488 = torch.aten.neg %1445 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc976)
%1489 = torch.aten.add.Scalar %1488, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc977)
%1490 = torch.aten.clamp %1489, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc978)
%1491 = torch.aten.transpose.int %1462, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc979)
%1492 = torch.aten.bmm %1431, %1491 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc980)
%1493 = torch.aten.squeeze.dim %1490, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc981)
%1494 = torch.aten.broadcast_to %1493, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc982)
%1495 = torch.aten.gather %1492, %int-1, %1494, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc983)
%1496 = torch.aten.transpose.int %1495, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc984)
%1497 = torch.aten.div.Scalar %1496, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc985)
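// Sum the two relative-position terms, then add them to the content scores.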
%1498 = torch.aten.add.Tensor %1487, %1497, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc986)
%1499 = torch.aten.add.Tensor %1444, %1498, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc987)
%1500 = torch.aten.view %1499, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc988)
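// Rebuild the [1,1,128,128] attention mask as i1 (zero scalar broadcast, then a
// copy from the si8 mask) and invert it so masked_fill targets the excluded positions.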
%1501 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc989)
%1502 = torch.aten.to.dtype %1501, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc989)
%1503 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc989)
%1504 = torch.aten.broadcast_to %1502, %1503 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc989)
%1505 = torch.aten.copy %1504, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc989)
%1506 = torch.aten.bitwise_not %1505 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc990)
%1507 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc991)
%1508 = torch.aten.masked_fill.Tensor %1500, %1506, %1507 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc992)
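// Numerically stable softmax over the last dimension: subtract the row max,
// exponentiate, and normalize by the sum; masked positions are re-zeroed afterwards.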
%values_73, %indices_74 = torch.aten.max.dim %1508, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc993)
%1509 = torch.aten.sub.Tensor %1508, %values_73, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc993)
%1510 = torch.aten.exp %1509 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc993)
%1511 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc993)
%1512 = torch.aten.sum.dim_IntList %1510, %1511, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc993)
%1513 = torch.aten.div.Tensor %1510, %1512 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc993)
%1514 = torch.aten.masked_fill.Scalar %1513, %1506, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc994)
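// Apply the attention probabilities to the value tensor, then merge the 12 heads
// back into a [1,128,768] activation.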
%1515 = torch.aten.view %1514, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc995)
%1516 = torch.aten.bmm %1515, %1441 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc996)
%1517 = torch.aten.view %1516, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc997)
%1518 = torch.aten.permute %1517, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc998)
%1519 = torch.aten.clone %1518, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc999)
%1520 = torch.aten.view %1519, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1000)
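// Attention output projection followed by the residual connection and layer norm.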
%1521 = torch.aten.transpose.int %45, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1001)
%1522 = torch.aten.view %1520, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1002)
%1523 = torch.aten.mm %1522, %1521 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1003)
%1524 = torch.aten.mul.Scalar %44, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1003)
%1525 = torch.aten.add.Tensor %1524, %1523, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1003)
%1526 = torch.aten.view %1525, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1004)
%1527 = torch.aten.add.Tensor %1526, %result0_70, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc1005)
%result0_75, %result1_76, %result2_77 = torch.aten.native_layer_norm %1527, %207, %43, %42, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc1006)
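// Feed-forward block: 768 -> 3072 with GELU, back to 768, residual, layer norm.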
%1528 = torch.aten.transpose.int %41, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc1007)
%1529 = torch.aten.view %result0_75, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1008)
%1530 = torch.aten.mm %1529, %1528 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc1009)
%1531 = torch.aten.mul.Scalar %40, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc1009)
%1532 = torch.aten.add.Tensor %1531, %1530, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc1009)
%1533 = torch.aten.view %1532, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc1010)
%1534 = torch.aten.gelu %1533, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc1011)
%1535 = torch.aten.transpose.int %39, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc1012)
%1536 = torch.aten.view %1534, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc1013)
%1537 = torch.aten.mm %1536, %1535 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1014)
%1538 = torch.aten.mul.Scalar %38, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1014)
%1539 = torch.aten.add.Tensor %1538, %1537, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1014)
%1540 = torch.aten.view %1539, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1015)
%1541 = torch.aten.add.Tensor %1540, %result0_75, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc1016)
%result0_78, %result1_79, %result2_80 = torch.aten.native_layer_norm %1541, %207, %37, %36, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc1017)
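// Next encoder layer: query/key/value projections from the normalized output.
// Each projection is reshaped to [1,128,12,64] and permuted to split out the 12
// heads; the same attention/FFN pattern as above repeats below.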
%1542 = torch.aten.transpose.int %35, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1018)
%1543 = torch.aten.view %result0_78, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1019)
%1544 = torch.aten.mm %1543, %1542 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1020)
%1545 = torch.aten.mul.Scalar %34, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1020)
%1546 = torch.aten.add.Tensor %1545, %1544, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1020)
%1547 = torch.aten.view %1546, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1021)
%1548 = torch.aten.view %1547, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1022)
%1549 = torch.aten.permute %1548, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1023)
%1550 = torch.aten.clone %1549, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1024)
%1551 = torch.aten.view %1550, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc1025)
%1552 = torch.aten.transpose.int %33, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1026)
%1553 = torch.aten.view %result0_78, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1027)
%1554 = torch.aten.mm %1553, %1552 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1028)
%1555 = torch.aten.mul.Scalar %32, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1028)
%1556 = torch.aten.add.Tensor %1555, %1554, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1028)
%1557 = torch.aten.view %1556, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1029)
%1558 = torch.aten.view %1557, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1030)
%1559 = torch.aten.permute %1558, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1031)
%1560 = torch.aten.clone %1559, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1032)
%1561 = torch.aten.view %1560, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc1033)
%1562 = torch.aten.transpose.int %31, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1034)
%1563 = torch.aten.view %result0_78, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1035)
%1564 = torch.aten.mm %1563, %1562 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1036)
%1565 = torch.aten.mul.Scalar %30, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1036)
%1566 = torch.aten.add.Tensor %1565, %1564, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1036)
%1567 = torch.aten.view %1566, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1037)
%1568 = torch.aten.view %1567, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1038)
%1569 = torch.aten.permute %1568, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1039)
%1570 = torch.aten.clone %1569, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1040)
%1571 = torch.aten.view %1570, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc1041)
%1572 = torch.aten.transpose.int %1561, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc1042)
%1573 = torch.aten.bmm %1551, %1572 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc1043)
%1574 = torch.aten.div.Scalar %1573, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc1044)
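// Relative-position terms for this layer: the shared [512,768] position
// embeddings are projected through this layer's key/query weights before the gathers.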
%1575 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1045)
%1576 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc1046)
%1577 = torch.aten.transpose.int %35, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1047)
%1578 = torch.aten.view %1576, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc1048)
%1579 = torch.aten.mm %1578, %1577 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc1049)
%1580 = torch.aten.mul.Scalar %34, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1049)
%1581 = torch.aten.add.Tensor %1580, %1579, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc1049)
%1582 = torch.aten.view %1581, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc1050)
%1583 = torch.aten.view %1582, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc1051)
%1584 = torch.aten.permute %1583, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc1052)
%1585 = torch.aten.clone %1584, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc1053)
%1586 = torch.aten.view %1585, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc1054)
%1587 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1055)
%1588 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1055)
%1589 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1055)
%1590 = torch.aten.view %1586, %1587 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc1055)
%1591 = torch.aten.broadcast_to %1590, %1588 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc1055)
%1592 = torch.aten.view %1591, %1589 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc1055)
%1593 = torch.aten.transpose.int %33, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1056)
%1594 = torch.aten.view %1576, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc1057)
%1595 = torch.aten.mm %1594, %1593 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc1058)
%1596 = torch.aten.mul.Scalar %32, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1058)
%1597 = torch.aten.add.Tensor %1596, %1595, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc1058)
%1598 = torch.aten.view %1597, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc1059)
%1599 = torch.aten.view %1598, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc1060)
%1600 = torch.aten.permute %1599, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc1061)
%1601 = torch.aten.clone %1600, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc1062)
%1602 = torch.aten.view %1601, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc1063)
%1603 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1064)
%1604 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1064)
%1605 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1064)
%1606 = torch.aten.view %1602, %1603 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc1064)
%1607 = torch.aten.broadcast_to %1606, %1604 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc1064)
%1608 = torch.aten.view %1607, %1605 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc1064)
%1609 = torch.aten.transpose.int %1608, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc1065)
%1610 = torch.aten.bmm %1551, %1609 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc1066)
%1611 = torch.aten.add.Scalar %1575, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1067)
%1612 = torch.aten.clamp %1611, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1068)
%1613 = torch.aten.squeeze.dim %1612, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc1069)
%1614 = torch.aten.broadcast_to %1613, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc1070)
%1615 = torch.aten.gather %1610, %int-1, %1614, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc1071)
%1616 = torch.aten.div.Scalar %1615, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc1072)
%1617 = torch.aten.add.Scalar %1616, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc1073)
%1618 = torch.aten.neg %1575 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1074)
%1619 = torch.aten.add.Scalar %1618, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1075)
%1620 = torch.aten.clamp %1619, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1076)
%1621 = torch.aten.transpose.int %1592, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc1077)
%1622 = torch.aten.bmm %1561, %1621 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc1078)
%1623 = torch.aten.squeeze.dim %1620, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc1079)
%1624 = torch.aten.broadcast_to %1623, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc1080)
%1625 = torch.aten.gather %1622, %int-1, %1624, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc1081)
%1626 = torch.aten.transpose.int %1625, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc1082)
%1627 = torch.aten.div.Scalar %1626, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc1083)
%1628 = torch.aten.add.Tensor %1617, %1627, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc1084)
%1629 = torch.aten.add.Tensor %1574, %1628, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc1085)
%1630 = torch.aten.view %1629, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1086)
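// Attention mask and stable softmax for this layer (same expansion as above).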
%1631 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc1087)
%1632 = torch.aten.to.dtype %1631, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc1087)
%1633 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1087)
%1634 = torch.aten.broadcast_to %1632, %1633 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc1087)
%1635 = torch.aten.copy %1634, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc1087)
%1636 = torch.aten.bitwise_not %1635 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc1088)
%1637 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc1089)
%1638 = torch.aten.masked_fill.Tensor %1630, %1636, %1637 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1090)
%values_81, %indices_82 = torch.aten.max.dim %1638, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc1091)
%1639 = torch.aten.sub.Tensor %1638, %values_81, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1091)
%1640 = torch.aten.exp %1639 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1091)
%1641 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc1091)
%1642 = torch.aten.sum.dim_IntList %1640, %1641, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc1091)
%1643 = torch.aten.div.Tensor %1640, %1642 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1091)
%1644 = torch.aten.masked_fill.Scalar %1643, %1636, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1092)
%1645 = torch.aten.view %1644, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc1093)
%1646 = torch.aten.bmm %1645, %1571 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc1094)
%1647 = torch.aten.view %1646, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1095)
%1648 = torch.aten.permute %1647, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1096)
%1649 = torch.aten.clone %1648, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1097)
%1650 = torch.aten.view %1649, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1098)
%1651 = torch.aten.transpose.int %29, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1099)
%1652 = torch.aten.view %1650, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1100)
%1653 = torch.aten.mm %1652, %1651 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1101)
%1654 = torch.aten.mul.Scalar %28, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1101)
%1655 = torch.aten.add.Tensor %1654, %1653, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1101)
%1656 = torch.aten.view %1655, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1102)
%1657 = torch.aten.add.Tensor %1656, %result0_78, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc1103)
%result0_83, %result1_84, %result2_85 = torch.aten.native_layer_norm %1657, %207, %27, %26, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc1104)
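// Feed-forward block for this layer (same 768 -> 3072 -> 768 pattern).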
%1658 = torch.aten.transpose.int %25, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc1105)
%1659 = torch.aten.view %result0_83, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1106)
%1660 = torch.aten.mm %1659, %1658 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc1107)
%1661 = torch.aten.mul.Scalar %24, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc1107)
%1662 = torch.aten.add.Tensor %1661, %1660, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc1107)
%1663 = torch.aten.view %1662, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc1108)
%1664 = torch.aten.gelu %1663, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc1109)
%1665 = torch.aten.transpose.int %23, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc1110)
%1666 = torch.aten.view %1664, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc1111)
%1667 = torch.aten.mm %1666, %1665 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1112)
%1668 = torch.aten.mul.Scalar %22, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1112)
%1669 = torch.aten.add.Tensor %1668, %1667, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1112)
%1670 = torch.aten.view %1669, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1113)
%1671 = torch.aten.add.Tensor %1670, %result0_83, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc1114)
%result0_86, %result1_87, %result2_88 = torch.aten.native_layer_norm %1671, %207, %21, %20, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc1115)
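// Final encoder layer: identical attention and feed-forward structure.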
%1672 = torch.aten.transpose.int %19, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1116)
%1673 = torch.aten.view %result0_86, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1117)
%1674 = torch.aten.mm %1673, %1672 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1118)
%1675 = torch.aten.mul.Scalar %18, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1118)
%1676 = torch.aten.add.Tensor %1675, %1674, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1118)
%1677 = torch.aten.view %1676, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1119)
%1678 = torch.aten.view %1677, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1120)
%1679 = torch.aten.permute %1678, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1121)
%1680 = torch.aten.clone %1679, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1122)
%1681 = torch.aten.view %1680, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc1123)
%1682 = torch.aten.transpose.int %17, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1124)
%1683 = torch.aten.view %result0_86, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1125)
%1684 = torch.aten.mm %1683, %1682 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1126)
%1685 = torch.aten.mul.Scalar %16, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1126)
%1686 = torch.aten.add.Tensor %1685, %1684, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1126)
%1687 = torch.aten.view %1686, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1127)
%1688 = torch.aten.view %1687, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1128)
%1689 = torch.aten.permute %1688, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1129)
%1690 = torch.aten.clone %1689, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1130)
%1691 = torch.aten.view %1690, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc1131)
%1692 = torch.aten.transpose.int %15, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1132)
%1693 = torch.aten.view %result0_86, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1133)
%1694 = torch.aten.mm %1693, %1692 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1134)
%1695 = torch.aten.mul.Scalar %14, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1134)
%1696 = torch.aten.add.Tensor %1695, %1694, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1134)
%1697 = torch.aten.view %1696, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1135)
%1698 = torch.aten.view %1697, %234 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1136)
%1699 = torch.aten.permute %1698, %236 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1137)
%1700 = torch.aten.clone %1699, %int0 : !torch.vtensor<[1,12,128,64],f32>, !torch.int -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1138)
%1701 = torch.aten.view %1700, %239 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[12,128,64],f32> loc(#loc1139)
%1702 = torch.aten.transpose.int %1691, %int-1, %int-2 : !torch.vtensor<[12,128,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,128],f32> loc(#loc1140)
%1703 = torch.aten.bmm %1681, %1702 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,128],f32> -> !torch.vtensor<[12,128,128],f32> loc(#loc1141)
%1704 = torch.aten.div.Scalar %1703, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc1142)
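// Relative-position terms, mask, and softmax for the final layer: the same
// projection/gather expansion as in the earlier layers.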
%1705 = torch.aten.unsqueeze %225, %int1 : !torch.vtensor<[1,128,128],si64>, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1143)
%1706 = torch.aten.unsqueeze %result0_0, %int0 : !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[1,512,768],f32> loc(#loc1144)
%1707 = torch.aten.transpose.int %19, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1145)
%1708 = torch.aten.view %1706, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc1146)
%1709 = torch.aten.mm %1708, %1707 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc1147)
%1710 = torch.aten.mul.Scalar %18, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1147)
%1711 = torch.aten.add.Tensor %1710, %1709, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc1147)
%1712 = torch.aten.view %1711, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc1148)
%1713 = torch.aten.view %1712, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc1149)
%1714 = torch.aten.permute %1713, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc1150)
%1715 = torch.aten.clone %1714, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc1151)
%1716 = torch.aten.view %1715, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc1152)
%1717 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1153)
%1718 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1153)
%1719 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1153)
%1720 = torch.aten.view %1716, %1717 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc1153)
%1721 = torch.aten.broadcast_to %1720, %1718 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc1153)
%1722 = torch.aten.view %1721, %1719 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc1153)
%1723 = torch.aten.transpose.int %17, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1154)
%1724 = torch.aten.view %1706, %267 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[512,768],f32> loc(#loc1155)
%1725 = torch.aten.mm %1724, %1723 : !torch.vtensor<[512,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[512,768],f32> loc(#loc1156)
%1726 = torch.aten.mul.Scalar %16, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1156)
%1727 = torch.aten.add.Tensor %1726, %1725, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[512,768],f32>, !torch.int -> !torch.vtensor<[512,768],f32> loc(#loc1156)
%1728 = torch.aten.view %1727, %272 : !torch.vtensor<[512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,768],f32> loc(#loc1157)
%1729 = torch.aten.view %1728, %274 : !torch.vtensor<[1,512,768],f32>, !torch.list<int> -> !torch.vtensor<[1,512,12,64],f32> loc(#loc1158)
%1730 = torch.aten.permute %1729, %236 : !torch.vtensor<[1,512,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,512,64],f32> loc(#loc1159)
%1731 = torch.aten.clone %1730, %int0 : !torch.vtensor<[1,12,512,64],f32>, !torch.int -> !torch.vtensor<[1,12,512,64],f32> loc(#loc1160)
%1732 = torch.aten.view %1731, %278 : !torch.vtensor<[1,12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc1161)
%1733 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1162)
%1734 = torch.prim.ListConstruct %int1, %int12, %int1, %int512, %int1, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1162)
%1735 = torch.prim.ListConstruct %int12, %int512, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1162)
%1736 = torch.aten.view %1732, %1733 : !torch.vtensor<[12,512,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc1162)
%1737 = torch.aten.broadcast_to %1736, %1734 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,1,512,1,64],f32> loc(#loc1162)
%1738 = torch.aten.view %1737, %1735 : !torch.vtensor<[1,12,1,512,1,64],f32>, !torch.list<int> -> !torch.vtensor<[12,512,64],f32> loc(#loc1162)
%1739 = torch.aten.transpose.int %1738, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc1163)
%1740 = torch.aten.bmm %1681, %1739 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc1164)
%1741 = torch.aten.add.Scalar %1705, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1165)
%1742 = torch.aten.clamp %1741, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1166)
%1743 = torch.aten.squeeze.dim %1742, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc1167)
%1744 = torch.aten.broadcast_to %1743, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc1168)
%1745 = torch.aten.gather %1740, %int-1, %1744, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc1169)
%1746 = torch.aten.div.Scalar %1745, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc1170)
%1747 = torch.aten.add.Scalar %1746, %int0, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc1171)
%1748 = torch.aten.neg %1705 : !torch.vtensor<[1,1,128,128],si64> -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1172)
%1749 = torch.aten.add.Scalar %1748, %int256, %int1 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1173)
%1750 = torch.aten.clamp %1749, %int0, %int511 : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],si64> loc(#loc1174)
%1751 = torch.aten.transpose.int %1722, %int-1, %int-2 : !torch.vtensor<[12,512,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,64,512],f32> loc(#loc1175)
%1752 = torch.aten.bmm %1691, %1751 : !torch.vtensor<[12,128,64],f32>, !torch.vtensor<[12,64,512],f32> -> !torch.vtensor<[12,128,512],f32> loc(#loc1176)
%1753 = torch.aten.squeeze.dim %1750, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.int -> !torch.vtensor<[1,128,128],si64> loc(#loc1177)
%1754 = torch.aten.broadcast_to %1753, %307 : !torch.vtensor<[1,128,128],si64>, !torch.list<int> -> !torch.vtensor<[12,128,128],si64> loc(#loc1178)
%1755 = torch.aten.gather %1752, %int-1, %1754, %false : !torch.vtensor<[12,128,512],f32>, !torch.int, !torch.vtensor<[12,128,128],si64>, !torch.bool -> !torch.vtensor<[12,128,128],f32> loc(#loc1179)
%1756 = torch.aten.transpose.int %1755, %int-1, %int-2 : !torch.vtensor<[12,128,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc1180)
%1757 = torch.aten.div.Scalar %1756, %float1.385640e01 : !torch.vtensor<[12,128,128],f32>, !torch.float -> !torch.vtensor<[12,128,128],f32> loc(#loc1181)
%1758 = torch.aten.add.Tensor %1747, %1757, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc1182)
%1759 = torch.aten.add.Tensor %1704, %1758, %int1 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,128],f32>, !torch.int -> !torch.vtensor<[12,128,128],f32> loc(#loc1183)
%1760 = torch.aten.view %1759, %324 : !torch.vtensor<[12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1184)
%1761 = torch.prim.NumToTensor.Scalar %int0 : !torch.int -> !torch.vtensor<[],si64> loc(#loc1185)
%1762 = torch.aten.to.dtype %1761, %int11, %false, %false, %none : !torch.vtensor<[],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],i1> loc(#loc1185)
%1763 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc1185)
%1764 = torch.aten.broadcast_to %1762, %1763 : !torch.vtensor<[],i1>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc1185)
%1765 = torch.aten.copy %1764, %219, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],si8>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> loc(#loc1185)
%1766 = torch.aten.bitwise_not %1765 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> loc(#loc1186)
%1767 = torch.aten.clone %190, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> loc(#loc1187)
%1768 = torch.aten.masked_fill.Tensor %1760, %1766, %1767 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1188)
%values_89, %indices_90 = torch.aten.max.dim %1768, %int-1, %true : !torch.vtensor<[1,12,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,12,128,1],f32>, !torch.vtensor<[1,12,128,1],si64> loc(#loc1189)
%1769 = torch.aten.sub.Tensor %1768, %values_89, %float1.000000e00 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32>, !torch.float -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1189)
%1770 = torch.aten.exp %1769 : !torch.vtensor<[1,12,128,128],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1189)
%1771 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc1189)
%1772 = torch.aten.sum.dim_IntList %1770, %1771, %true, %none : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,12,128,1],f32> loc(#loc1189)
%1773 = torch.aten.div.Tensor %1770, %1772 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,12,128,1],f32> -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1189)
%1774 = torch.aten.masked_fill.Scalar %1773, %1766, %int0 : !torch.vtensor<[1,12,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.int -> !torch.vtensor<[1,12,128,128],f32> loc(#loc1190)
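// Context computation and output projection for the final layer, followed by
// the residual, layer norm, and feed-forward block.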
%1775 = torch.aten.view %1774, %340 : !torch.vtensor<[1,12,128,128],f32>, !torch.list<int> -> !torch.vtensor<[12,128,128],f32> loc(#loc1191)
%1776 = torch.aten.bmm %1775, %1701 : !torch.vtensor<[12,128,128],f32>, !torch.vtensor<[12,128,64],f32> -> !torch.vtensor<[12,128,64],f32> loc(#loc1192)
%1777 = torch.aten.view %1776, %343 : !torch.vtensor<[12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,12,128,64],f32> loc(#loc1193)
%1778 = torch.aten.permute %1777, %236 : !torch.vtensor<[1,12,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1194)
%1779 = torch.aten.clone %1778, %int0 : !torch.vtensor<[1,128,12,64],f32>, !torch.int -> !torch.vtensor<[1,128,12,64],f32> loc(#loc1195)
%1780 = torch.aten.view %1779, %347 : !torch.vtensor<[1,128,12,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1196)
%1781 = torch.aten.transpose.int %13, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1197)
%1782 = torch.aten.view %1780, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1198)
%1783 = torch.aten.mm %1782, %1781 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1199)
%1784 = torch.aten.mul.Scalar %12, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1199)
%1785 = torch.aten.add.Tensor %1784, %1783, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1199)
%1786 = torch.aten.view %1785, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1200)
%1787 = torch.aten.add.Tensor %1786, %result0_86, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc1201)
%result0_91, %result1_92, %result2_93 = torch.aten.native_layer_norm %1787, %207, %11, %10, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc1202)
%1788 = torch.aten.transpose.int %9, %int0, %int1 : !torch.vtensor<[3072,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,3072],f32> loc(#loc1203)
%1789 = torch.aten.view %result0_91, %227 : !torch.vtensor<[1,128,768],f32>, !torch.list<int> -> !torch.vtensor<[128,768],f32> loc(#loc1204)
%1790 = torch.aten.mm %1789, %1788 : !torch.vtensor<[128,768],f32>, !torch.vtensor<[768,3072],f32> -> !torch.vtensor<[128,3072],f32> loc(#loc1205)
%1791 = torch.aten.mul.Scalar %8, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> loc(#loc1205)
%1792 = torch.aten.add.Tensor %1791, %1790, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> loc(#loc1205)
%1793 = torch.aten.view %1792, %361 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> loc(#loc1206)
%1794 = torch.aten.gelu %1793, %str : !torch.vtensor<[1,128,3072],f32>, !torch.str -> !torch.vtensor<[1,128,3072],f32> loc(#loc1207)
%1795 = torch.aten.transpose.int %7, %int0, %int1 : !torch.vtensor<[768,3072],f32>, !torch.int, !torch.int -> !torch.vtensor<[3072,768],f32> loc(#loc1208)
%1796 = torch.aten.view %1794, %365 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[128,3072],f32> loc(#loc1209)
%1797 = torch.aten.mm %1796, %1795 : !torch.vtensor<[128,3072],f32>, !torch.vtensor<[3072,768],f32> -> !torch.vtensor<[128,768],f32> loc(#loc1210)
%1798 = torch.aten.mul.Scalar %6, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1210)
%1799 = torch.aten.add.Tensor %1798, %1797, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[128,768],f32>, !torch.int -> !torch.vtensor<[128,768],f32> loc(#loc1210)
%1800 = torch.aten.view %1799, %232 : !torch.vtensor<[128,768],f32>, !torch.list<int> -> !torch.vtensor<[1,128,768],f32> loc(#loc1211)
%1801 = torch.aten.add.Tensor %1800, %result0_91, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,768],f32>, !torch.int -> !torch.vtensor<[1,128,768],f32> loc(#loc1212)
%result0_94, %result1_95, %result2_96 = torch.aten.native_layer_norm %1801, %207, %5, %4, %float9.999990e-08 : !torch.vtensor<[1,128,768],f32>, !torch.list<int>, !torch.vtensor<[768],f32>, !torch.vtensor<[768],f32>, !torch.float -> !torch.vtensor<[1,128,768],f32>, !torch.vtensor<[1,128,1],f32>, !torch.vtensor<[1,128,1],f32> loc(#loc1213)
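// Classification head: slice out the first token ([CLS]-style pooling), run it
// through a 768 -> 768 dense layer with GELU, then project to the two output logits.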
%1802 = torch.aten.slice.Tensor %result0_94, %int1, %int0, %int1, %int1 : !torch.vtensor<[1,128,768],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,768],f32> loc(#loc1214)
%1803 = torch.aten.squeeze.dim %1802, %int1 : !torch.vtensor<[1,1,768],f32>, !torch.int -> !torch.vtensor<[1,768],f32> loc(#loc1214)
%1804 = torch.aten.transpose.int %3, %int0, %int1 : !torch.vtensor<[768,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,768],f32> loc(#loc1215)
%1805 = torch.aten.mm %1803, %1804 : !torch.vtensor<[1,768],f32>, !torch.vtensor<[768,768],f32> -> !torch.vtensor<[1,768],f32> loc(#loc1216)
%1806 = torch.aten.mul.Scalar %2, %int1 : !torch.vtensor<[768],f32>, !torch.int -> !torch.vtensor<[768],f32> loc(#loc1216)
%1807 = torch.aten.add.Tensor %1806, %1805, %int1 : !torch.vtensor<[768],f32>, !torch.vtensor<[1,768],f32>, !torch.int -> !torch.vtensor<[1,768],f32> loc(#loc1216)
%1808 = torch.aten.gelu %1807, %str : !torch.vtensor<[1,768],f32>, !torch.str -> !torch.vtensor<[1,768],f32> loc(#loc1217)
%1809 = torch.aten.transpose.int %1, %int0, %int1 : !torch.vtensor<[2,768],f32>, !torch.int, !torch.int -> !torch.vtensor<[768,2],f32> loc(#loc1218)
%1810 = torch.aten.mm %1808, %1809 : !torch.vtensor<[1,768],f32>, !torch.vtensor<[768,2],f32> -> !torch.vtensor<[1,2],f32> loc(#loc1219)
%1811 = torch.aten.mul.Scalar %0, %int1 : !torch.vtensor<[2],f32>, !torch.int -> !torch.vtensor<[2],f32> loc(#loc1219)
%1812 = torch.aten.add.Tensor %1811, %1810, %int1 : !torch.vtensor<[2],f32>, !torch.vtensor<[1,2],f32>, !torch.int -> !torch.vtensor<[1,2],f32> loc(#loc1219)
return %1812 : !torch.vtensor<[1,2],f32> loc(#loc)
} loc(#loc)
} loc(#loc)
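// Source-location table emitted by the importer; "<eval_with_key>.2" names the
// torch.fx-generated module the ops were traced from.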
#loc1 = loc("<eval_with_key>.2":8:54)
#loc2 = loc("<eval_with_key>.2":5:32)
#loc3 = loc("<eval_with_key>.2":124:15)
#loc4 = loc("<eval_with_key>.2":119:59)
#loc5 = loc("<eval_with_key>.2":5:83)
#loc6 = loc("<eval_with_key>.2":5:35)
#loc7 = loc("<eval_with_key>.2":45:50)
#loc8 = loc("<eval_with_key>.2":77:48)
#loc9 = loc("<eval_with_key>.2":48:50)
#loc10 = loc("<eval_with_key>.2":23:50)
#loc11 = loc("<eval_with_key>.2":24:52)
#loc12 = loc("<eval_with_key>.2":15:69)
#loc13 = loc("<eval_with_key>.2":15:111)
#loc14 = loc("<eval_with_key>.2":18:34)
#loc15 = loc("<eval_with_key>.2":46:55)
#loc16 = loc("<eval_with_key>.2":71:34)
#loc17 = loc("<eval_with_key>.2":99:42)
#loc18 = loc("<eval_with_key>.2":100:41)
#loc19 = loc("<eval_with_key>.2":151:52)
#loc20 = loc("<eval_with_key>.2":5:11)
#loc21 = loc("<eval_with_key>.2":11:16)
#loc22 = loc("<eval_with_key>.2":15:24)
#loc23 = loc("<eval_with_key>.2":19:16)
#loc24 = loc("<eval_with_key>.2":20:10)
#loc25 = loc("<eval_with_key>.2":21:18)
#loc26 = loc("<eval_with_key>.2":22:18)
#loc27 = loc("<eval_with_key>.2":23:14)
#loc28 = loc("<eval_with_key>.2":24:18)
#loc29 = loc("<eval_with_key>.2":25:12)
#loc30 = loc("<eval_with_key>.2":26:15)
#loc31 = loc("<eval_with_key>.2":28:22)
#loc32 = loc("<eval_with_key>.2":29:17)
#loc33 = loc("<eval_with_key>.2":31:18)
#loc34 = loc("<eval_with_key>.2":35:26)
#loc35 = loc("<eval_with_key>.2":40:8)
#loc36 = loc("-":4226:13)
#loc37 = loc("-":6086:10)
#loc38 = loc("<eval_with_key>.2":41:11)
#loc39 = loc("<eval_with_key>.2":43:12)
#loc40 = loc("<eval_with_key>.2":44:13)
#loc41 = loc("<eval_with_key>.2":45:13)
#loc42 = loc("<eval_with_key>.2":46:14)
#loc43 = loc("<eval_with_key>.2":47:12)
#loc44 = loc("<eval_with_key>.2":48:13)
#loc45 = loc("<eval_with_key>.2":50:10)
#loc46 = loc("<eval_with_key>.2":51:13)
#loc47 = loc("<eval_with_key>.2":53:14)
#loc48 = loc("<eval_with_key>.2":54:13)
#loc49 = loc("<eval_with_key>.2":55:13)
#loc50 = loc("<eval_with_key>.2":56:16)
#loc51 = loc("<eval_with_key>.2":57:14)
#loc52 = loc("<eval_with_key>.2":58:13)
#loc53 = loc("<eval_with_key>.2":60:10)
#loc54 = loc("<eval_with_key>.2":61:13)
#loc55 = loc("<eval_with_key>.2":63:14)
#loc56 = loc("<eval_with_key>.2":64:13)
#loc57 = loc("<eval_with_key>.2":65:14)
#loc58 = loc("<eval_with_key>.2":66:16)
#loc59 = loc("<eval_with_key>.2":67:14)
#loc60 = loc("<eval_with_key>.2":68:14)
#loc61 = loc("<eval_with_key>.2":69:16)
#loc62 = loc("<eval_with_key>.2":70:10)
#loc63 = loc("<eval_with_key>.2":71:10)
#loc64 = loc("<eval_with_key>.2":72:18)
#loc65 = loc("<eval_with_key>.2":74:18)
#loc66 = loc("<eval_with_key>.2":76:10)
#loc67 = loc("<eval_with_key>.2":77:14)
#loc68 = loc("<eval_with_key>.2":79:14)
#loc69 = loc("<eval_with_key>.2":80:14)
#loc70 = loc("<eval_with_key>.2":81:14)
#loc71 = loc("<eval_with_key>.2":82:16)
#loc72 = loc("<eval_with_key>.2":83:14)
#loc73 = loc("<eval_with_key>.2":84:14)
#loc74 = loc("<eval_with_key>.2":85:13)
#loc75 = loc("<eval_with_key>.2":87:10)
#loc76 = loc("<eval_with_key>.2":88:14)
#loc77 = loc("<eval_with_key>.2":90:14)
#loc78 = loc("<eval_with_key>.2":91:14)
#loc79 = loc("<eval_with_key>.2":92:14)
#loc80 = loc("<eval_with_key>.2":93:16)
#loc81 = loc("<eval_with_key>.2":94:14)
#loc82 = loc("<eval_with_key>.2":95:14)
#loc83 = loc("<eval_with_key>.2":96:15)
#loc84 = loc("<eval_with_key>.2":97:18)
#loc85 = loc("<eval_with_key>.2":98:12)
#loc86 = loc("<eval_with_key>.2":99:10)
#loc87 = loc("<eval_with_key>.2":100:12)
#loc88 = loc("<eval_with_key>.2":101:16)
#loc89 = loc("-":4394:15)
#loc90 = loc("-":6074:10)
#loc91 = loc("<eval_with_key>.2":102:13)
#loc92 = loc("<eval_with_key>.2":103:13)
#loc93 = loc("<eval_with_key>.2":104:12)
#loc94 = loc("<eval_with_key>.2":105:12)
#loc95 = loc("<eval_with_key>.2":106:10)
#loc96 = loc("<eval_with_key>.2":107:12)
#loc97 = loc("<eval_with_key>.2":108:14)
#loc98 = loc("<eval_with_key>.2":109:18)
#loc99 = loc("<eval_with_key>.2":110:12)
#loc100 = loc("<eval_with_key>.2":111:16)
#loc101 = loc("<eval_with_key>.2":112:15)
#loc102 = loc("<eval_with_key>.2":113:15)
#loc103 = loc("<eval_with_key>.2":114:18)
#loc104 = loc("<eval_with_key>.2":115:12)
#loc105 = loc("<eval_with_key>.2":116:11)
#loc106 = loc("<eval_with_key>.2":117:12)
#loc107 = loc("<eval_with_key>.2":118:14)
#loc108 = loc("<eval_with_key>.2":119:17)
#loc109 = loc("<eval_with_key>.2":120:18)
#loc110 = loc("<eval_with_key>.2":122:24)
#loc111 = loc("<eval_with_key>.2":123:18)
#loc112 = loc("<eval_with_key>.2":125:19)
#loc113 = loc("<eval_with_key>.2":127:14)
#loc114 = loc("<eval_with_key>.2":128:12)
#loc115 = loc("<eval_with_key>.2":129:14)
#loc116 = loc("<eval_with_key>.2":130:16)
#loc117 = loc("<eval_with_key>.2":131:14)
#loc118 = loc("<eval_with_key>.2":132:14)
#loc119 = loc("<eval_with_key>.2":134:10)
#loc120 = loc("<eval_with_key>.2":135:14)
#loc121 = loc("<eval_with_key>.2":137:14)
#loc122 = loc("<eval_with_key>.2":138:14)
#loc123 = loc("<eval_with_key>.2":139:12)
#loc124 = loc("<eval_with_key>.2":142:26)
#loc125 = loc("<eval_with_key>.2":147:10)
#loc126 = loc("<eval_with_key>.2":148:14)
#loc127 = loc("<eval_with_key>.2":150:14)
#loc128 = loc("<eval_with_key>.2":151:14)
#loc129 = loc("<eval_with_key>.2":152:11)
#loc130 = loc("<eval_with_key>.2":154:10)
#loc131 = loc("<eval_with_key>.2":155:14)
#loc132 = loc("<eval_with_key>.2":157:14)
#loc133 = loc("<eval_with_key>.2":158:14)
#loc134 = loc("<eval_with_key>.2":159:12)
#loc135 = loc("<eval_with_key>.2":162:26)
#loc136 = loc("<eval_with_key>.2":167:10)
#loc137 = loc("<eval_with_key>.2":168:14)
#loc138 = loc("<eval_with_key>.2":170:14)
#loc139 = loc("<eval_with_key>.2":171:14)
#loc140 = loc("<eval_with_key>.2":172:14)
#loc141 = loc("<eval_with_key>.2":173:16)
#loc142 = loc("<eval_with_key>.2":174:14)
#loc143 = loc("<eval_with_key>.2":175:14)
#loc144 = loc("<eval_with_key>.2":177:10)
#loc145 = loc("<eval_with_key>.2":178:14)
#loc146 = loc("<eval_with_key>.2":180:14)
#loc147 = loc("<eval_with_key>.2":181:14)
#loc148 = loc("<eval_with_key>.2":182:14)
#loc149 = loc("<eval_with_key>.2":183:16)
#loc150 = loc("<eval_with_key>.2":184:14)
#loc151 = loc("<eval_with_key>.2":185:14)
#loc152 = loc("<eval_with_key>.2":187:11)
#loc153 = loc("<eval_with_key>.2":188:14)
#loc154 = loc("<eval_with_key>.2":190:15)
#loc155 = loc("<eval_with_key>.2":191:14)
#loc156 = loc("<eval_with_key>.2":192:14)
#loc157 = loc("<eval_with_key>.2":193:16)
#loc158 = loc("<eval_with_key>.2":194:14)
#loc159 = loc("<eval_with_key>.2":195:14)
#loc160 = loc("<eval_with_key>.2":196:18)
#loc161 = loc("<eval_with_key>.2":197:12)
#loc162 = loc("<eval_with_key>.2":198:12)
#loc163 = loc("<eval_with_key>.2":199:18)
#loc164 = loc("<eval_with_key>.2":201:18)
#loc165 = loc("<eval_with_key>.2":203:11)
#loc166 = loc("<eval_with_key>.2":204:14)
#loc167 = loc("<eval_with_key>.2":206:15)
#loc168 = loc("<eval_with_key>.2":207:14)
#loc169 = loc("<eval_with_key>.2":208:14)
#loc170 = loc("<eval_with_key>.2":209:16)
#loc171 = loc("<eval_with_key>.2":210:14)
#loc172 = loc("<eval_with_key>.2":211:14)
#loc173 = loc("<eval_with_key>.2":212:15)
#loc174 = loc("<eval_with_key>.2":214:11) | |
#loc175 = loc("<eval_with_key>.2":215:14) | |
#loc176 = loc("<eval_with_key>.2":217:15) | |
#loc177 = loc("<eval_with_key>.2":218:14) | |
#loc178 = loc("<eval_with_key>.2":219:14) | |
#loc179 = loc("<eval_with_key>.2":220:17) | |
#loc180 = loc("<eval_with_key>.2":221:15) | |
#loc181 = loc("<eval_with_key>.2":222:14) | |
#loc182 = loc("<eval_with_key>.2":223:15) | |
#loc183 = loc("<eval_with_key>.2":224:18) | |
#loc184 = loc("<eval_with_key>.2":225:12) | |
#loc185 = loc("<eval_with_key>.2":226:12) | |
#loc186 = loc("<eval_with_key>.2":227:14) | |
#loc187 = loc("<eval_with_key>.2":228:16) | |
#loc188 = loc("<eval_with_key>.2":229:15) | |
#loc189 = loc("<eval_with_key>.2":230:15) | |
#loc190 = loc("<eval_with_key>.2":231:12) | |
#loc191 = loc("<eval_with_key>.2":232:12) | |
#loc192 = loc("<eval_with_key>.2":233:12) | |
#loc193 = loc("<eval_with_key>.2":234:12) | |
#loc194 = loc("<eval_with_key>.2":235:14) | |
#loc195 = loc("<eval_with_key>.2":236:18) | |
#loc196 = loc("<eval_with_key>.2":237:12) | |
#loc197 = loc("<eval_with_key>.2":238:16) | |
#loc198 = loc("<eval_with_key>.2":239:15) | |
#loc199 = loc("<eval_with_key>.2":240:15) | |
#loc200 = loc("<eval_with_key>.2":241:18) | |
#loc201 = loc("<eval_with_key>.2":242:12) | |
#loc202 = loc("<eval_with_key>.2":243:13) | |
#loc203 = loc("<eval_with_key>.2":244:12) | |
#loc204 = loc("<eval_with_key>.2":245:14) | |
#loc205 = loc("<eval_with_key>.2":246:17) | |
#loc206 = loc("<eval_with_key>.2":247:20) | |
#loc207 = loc("<eval_with_key>.2":249:24) | |
#loc208 = loc("<eval_with_key>.2":250:20) | |
#loc209 = loc("<eval_with_key>.2":251:17) | |
#loc210 = loc("<eval_with_key>.2":252:21) | |
#loc211 = loc("<eval_with_key>.2":254:14) | |
#loc212 = loc("<eval_with_key>.2":255:12) | |
#loc213 = loc("<eval_with_key>.2":256:14) | |
#loc214 = loc("<eval_with_key>.2":257:17) | |
#loc215 = loc("<eval_with_key>.2":258:15) | |
#loc216 = loc("<eval_with_key>.2":259:14) | |
#loc217 = loc("<eval_with_key>.2":261:11) | |
#loc218 = loc("<eval_with_key>.2":262:14) | |
#loc219 = loc("<eval_with_key>.2":264:15) | |
#loc220 = loc("<eval_with_key>.2":265:14) | |
#loc221 = loc("<eval_with_key>.2":266:13) | |
#loc222 = loc("<eval_with_key>.2":269:26) | |
#loc223 = loc("<eval_with_key>.2":274:11) | |
#loc224 = loc("<eval_with_key>.2":275:14) | |
#loc225 = loc("<eval_with_key>.2":277:15) | |
#loc226 = loc("<eval_with_key>.2":278:14) | |
#loc227 = loc("<eval_with_key>.2":279:13) | |
#loc228 = loc("<eval_with_key>.2":281:11) | |
#loc229 = loc("<eval_with_key>.2":282:14) | |
#loc230 = loc("<eval_with_key>.2":284:15) | |
#loc231 = loc("<eval_with_key>.2":285:14) | |
#loc232 = loc("<eval_with_key>.2":286:13) | |
#loc233 = loc("<eval_with_key>.2":289:26) | |
#loc234 = loc("<eval_with_key>.2":294:11) | |
#loc235 = loc("<eval_with_key>.2":295:14) | |
#loc236 = loc("<eval_with_key>.2":297:15) | |
#loc237 = loc("<eval_with_key>.2":298:14) | |
#loc238 = loc("<eval_with_key>.2":299:14) | |
#loc239 = loc("<eval_with_key>.2":300:17) | |
#loc240 = loc("<eval_with_key>.2":301:15) | |
#loc241 = loc("<eval_with_key>.2":302:14) | |
#loc242 = loc("<eval_with_key>.2":304:11) | |
#loc243 = loc("<eval_with_key>.2":305:14) | |
#loc244 = loc("<eval_with_key>.2":307:15) | |
#loc245 = loc("<eval_with_key>.2":308:14) | |
#loc246 = loc("<eval_with_key>.2":309:14) | |
#loc247 = loc("<eval_with_key>.2":310:17) | |
#loc248 = loc("<eval_with_key>.2":311:15) | |
#loc249 = loc("<eval_with_key>.2":312:14) | |
#loc250 = loc("<eval_with_key>.2":314:11) | |
#loc251 = loc("<eval_with_key>.2":315:14) | |
#loc252 = loc("<eval_with_key>.2":317:15) | |
#loc253 = loc("<eval_with_key>.2":318:14) | |
#loc254 = loc("<eval_with_key>.2":319:14) | |
#loc255 = loc("<eval_with_key>.2":320:17) | |
#loc256 = loc("<eval_with_key>.2":321:15) | |
#loc257 = loc("<eval_with_key>.2":322:14) | |
#loc258 = loc("<eval_with_key>.2":323:18) | |
#loc259 = loc("<eval_with_key>.2":324:12) | |
#loc260 = loc("<eval_with_key>.2":325:12) | |
#loc261 = loc("<eval_with_key>.2":326:18) | |
#loc262 = loc("<eval_with_key>.2":328:19) | |
#loc263 = loc("<eval_with_key>.2":330:11) | |
#loc264 = loc("<eval_with_key>.2":331:14) | |
#loc265 = loc("<eval_with_key>.2":333:15) | |
#loc266 = loc("<eval_with_key>.2":334:14) | |
#loc267 = loc("<eval_with_key>.2":335:14) | |
#loc268 = loc("<eval_with_key>.2":336:17) | |
#loc269 = loc("<eval_with_key>.2":337:15) | |
#loc270 = loc("<eval_with_key>.2":338:14) | |
#loc271 = loc("<eval_with_key>.2":339:15) | |
#loc272 = loc("<eval_with_key>.2":341:11) | |
#loc273 = loc("<eval_with_key>.2":342:14) | |
#loc274 = loc("<eval_with_key>.2":344:15) | |
#loc275 = loc("<eval_with_key>.2":345:14) | |
#loc276 = loc("<eval_with_key>.2":346:14) | |
#loc277 = loc("<eval_with_key>.2":347:17) | |
#loc278 = loc("<eval_with_key>.2":348:15) | |
#loc279 = loc("<eval_with_key>.2":349:14) | |
#loc280 = loc("<eval_with_key>.2":350:15) | |
#loc281 = loc("<eval_with_key>.2":351:18) | |
#loc282 = loc("<eval_with_key>.2":352:12) | |
#loc283 = loc("<eval_with_key>.2":353:13) | |
#loc284 = loc("<eval_with_key>.2":354:14) | |
#loc285 = loc("<eval_with_key>.2":355:16) | |
#loc286 = loc("<eval_with_key>.2":356:15) | |
#loc287 = loc("<eval_with_key>.2":357:15) | |
#loc288 = loc("<eval_with_key>.2":358:12) | |
#loc289 = loc("<eval_with_key>.2":359:13) | |
#loc290 = loc("<eval_with_key>.2":360:12) | |
#loc291 = loc("<eval_with_key>.2":361:13) | |
#loc292 = loc("<eval_with_key>.2":362:14) | |
#loc293 = loc("<eval_with_key>.2":363:19) | |
#loc294 = loc("<eval_with_key>.2":364:13) | |
#loc295 = loc("<eval_with_key>.2":365:16) | |
#loc296 = loc("<eval_with_key>.2":366:15) | |
#loc297 = loc("<eval_with_key>.2":367:15) | |
#loc298 = loc("<eval_with_key>.2":368:19) | |
#loc299 = loc("<eval_with_key>.2":369:12) | |
#loc300 = loc("<eval_with_key>.2":370:13) | |
#loc301 = loc("<eval_with_key>.2":371:13) | |
#loc302 = loc("<eval_with_key>.2":372:14) | |
#loc303 = loc("<eval_with_key>.2":373:17) | |
#loc304 = loc("<eval_with_key>.2":374:20) | |
#loc305 = loc("<eval_with_key>.2":376:24) | |
#loc306 = loc("<eval_with_key>.2":377:20) | |
#loc307 = loc("<eval_with_key>.2":378:17) | |
#loc308 = loc("<eval_with_key>.2":379:21) | |
#loc309 = loc("<eval_with_key>.2":381:14) | |
#loc310 = loc("<eval_with_key>.2":382:13) | |
#loc311 = loc("<eval_with_key>.2":383:14) | |
#loc312 = loc("<eval_with_key>.2":384:17) | |
#loc313 = loc("<eval_with_key>.2":385:15) | |
#loc314 = loc("<eval_with_key>.2":386:14) | |
#loc315 = loc("<eval_with_key>.2":388:11) | |
#loc316 = loc("<eval_with_key>.2":389:14) | |
#loc317 = loc("<eval_with_key>.2":391:15) | |
#loc318 = loc("<eval_with_key>.2":392:14) | |
#loc319 = loc("<eval_with_key>.2":393:13) | |
#loc320 = loc("<eval_with_key>.2":396:26) | |
#loc321 = loc("<eval_with_key>.2":401:11) | |
#loc322 = loc("<eval_with_key>.2":402:14) | |
#loc323 = loc("<eval_with_key>.2":404:15) | |
#loc324 = loc("<eval_with_key>.2":405:14) | |
#loc325 = loc("<eval_with_key>.2":406:13) | |
#loc326 = loc("<eval_with_key>.2":408:11) | |
#loc327 = loc("<eval_with_key>.2":409:14) | |
#loc328 = loc("<eval_with_key>.2":411:15) | |
#loc329 = loc("<eval_with_key>.2":412:14) | |
#loc330 = loc("<eval_with_key>.2":413:13) | |
#loc331 = loc("<eval_with_key>.2":416:26) | |
#loc332 = loc("<eval_with_key>.2":421:11) | |
#loc333 = loc("<eval_with_key>.2":422:14) | |
#loc334 = loc("<eval_with_key>.2":424:15) | |
#loc335 = loc("<eval_with_key>.2":425:14) | |
#loc336 = loc("<eval_with_key>.2":426:14) | |
#loc337 = loc("<eval_with_key>.2":427:17) | |
#loc338 = loc("<eval_with_key>.2":428:15) | |
#loc339 = loc("<eval_with_key>.2":429:14) | |
#loc340 = loc("<eval_with_key>.2":431:11) | |
#loc341 = loc("<eval_with_key>.2":432:14) | |
#loc342 = loc("<eval_with_key>.2":434:15) | |
#loc343 = loc("<eval_with_key>.2":435:14) | |
#loc344 = loc("<eval_with_key>.2":436:14) | |
#loc345 = loc("<eval_with_key>.2":437:17) | |
#loc346 = loc("<eval_with_key>.2":438:15) | |
#loc347 = loc("<eval_with_key>.2":439:14) | |
#loc348 = loc("<eval_with_key>.2":441:11) | |
#loc349 = loc("<eval_with_key>.2":442:14) | |
#loc350 = loc("<eval_with_key>.2":444:15) | |
#loc351 = loc("<eval_with_key>.2":445:14) | |
#loc352 = loc("<eval_with_key>.2":446:15) | |
#loc353 = loc("<eval_with_key>.2":447:17) | |
#loc354 = loc("<eval_with_key>.2":448:15) | |
#loc355 = loc("<eval_with_key>.2":449:15) | |
#loc356 = loc("<eval_with_key>.2":450:19) | |
#loc357 = loc("<eval_with_key>.2":451:13) | |
#loc358 = loc("<eval_with_key>.2":452:12) | |
#loc359 = loc("<eval_with_key>.2":453:19) | |
#loc360 = loc("<eval_with_key>.2":455:19) | |
#loc361 = loc("<eval_with_key>.2":457:11) | |
#loc362 = loc("<eval_with_key>.2":458:15) | |
#loc363 = loc("<eval_with_key>.2":460:15) | |
#loc364 = loc("<eval_with_key>.2":461:15) | |
#loc365 = loc("<eval_with_key>.2":462:15) | |
#loc366 = loc("<eval_with_key>.2":463:17) | |
#loc367 = loc("<eval_with_key>.2":464:15) | |
#loc368 = loc("<eval_with_key>.2":465:15) | |
#loc369 = loc("<eval_with_key>.2":466:15) | |
#loc370 = loc("<eval_with_key>.2":468:11) | |
#loc371 = loc("<eval_with_key>.2":469:15) | |
#loc372 = loc("<eval_with_key>.2":471:15) | |
#loc373 = loc("<eval_with_key>.2":472:15) | |
#loc374 = loc("<eval_with_key>.2":473:15) | |
#loc375 = loc("<eval_with_key>.2":474:17) | |
#loc376 = loc("<eval_with_key>.2":475:15) | |
#loc377 = loc("<eval_with_key>.2":476:15) | |
#loc378 = loc("<eval_with_key>.2":477:15) | |
#loc379 = loc("<eval_with_key>.2":478:19) | |
#loc380 = loc("<eval_with_key>.2":479:13) | |
#loc381 = loc("<eval_with_key>.2":480:13) | |
#loc382 = loc("<eval_with_key>.2":481:14) | |
#loc383 = loc("<eval_with_key>.2":482:16) | |
#loc384 = loc("<eval_with_key>.2":483:15) | |
#loc385 = loc("<eval_with_key>.2":484:15) | |
#loc386 = loc("<eval_with_key>.2":485:13) | |
#loc387 = loc("<eval_with_key>.2":486:13) | |
#loc388 = loc("<eval_with_key>.2":487:12) | |
#loc389 = loc("<eval_with_key>.2":488:13) | |
#loc390 = loc("<eval_with_key>.2":489:14) | |
#loc391 = loc("<eval_with_key>.2":490:19) | |
#loc392 = loc("<eval_with_key>.2":491:13) | |
#loc393 = loc("<eval_with_key>.2":492:16) | |
#loc394 = loc("<eval_with_key>.2":493:15) | |
#loc395 = loc("<eval_with_key>.2":494:15) | |
#loc396 = loc("<eval_with_key>.2":495:19) | |
#loc397 = loc("<eval_with_key>.2":496:13) | |
#loc398 = loc("<eval_with_key>.2":497:13) | |
#loc399 = loc("<eval_with_key>.2":498:13) | |
#loc400 = loc("<eval_with_key>.2":499:15) | |
#loc401 = loc("<eval_with_key>.2":500:17) | |
#loc402 = loc("<eval_with_key>.2":501:20) | |
#loc403 = loc("<eval_with_key>.2":503:24) | |
#loc404 = loc("<eval_with_key>.2":504:20) | |
#loc405 = loc("<eval_with_key>.2":505:17) | |
#loc406 = loc("<eval_with_key>.2":506:21) | |
#loc407 = loc("<eval_with_key>.2":508:15) | |
#loc408 = loc("<eval_with_key>.2":509:13) | |
#loc409 = loc("<eval_with_key>.2":510:15) | |
#loc410 = loc("<eval_with_key>.2":511:17) | |
#loc411 = loc("<eval_with_key>.2":512:15) | |
#loc412 = loc("<eval_with_key>.2":513:15) | |
#loc413 = loc("<eval_with_key>.2":515:11) | |
#loc414 = loc("<eval_with_key>.2":516:15) | |
#loc415 = loc("<eval_with_key>.2":518:15) | |
#loc416 = loc("<eval_with_key>.2":519:15) | |
#loc417 = loc("<eval_with_key>.2":520:13) | |
#loc418 = loc("<eval_with_key>.2":523:26) | |
#loc419 = loc("<eval_with_key>.2":528:11) | |
#loc420 = loc("<eval_with_key>.2":529:15) | |
#loc421 = loc("<eval_with_key>.2":531:15) | |
#loc422 = loc("<eval_with_key>.2":532:15) | |
#loc423 = loc("<eval_with_key>.2":533:13) | |
#loc424 = loc("<eval_with_key>.2":535:11) | |
#loc425 = loc("<eval_with_key>.2":536:15) | |
#loc426 = loc("<eval_with_key>.2":538:15) | |
#loc427 = loc("<eval_with_key>.2":539:15) | |
#loc428 = loc("<eval_with_key>.2":540:13) | |
#loc429 = loc("<eval_with_key>.2":543:26) | |
#loc430 = loc("<eval_with_key>.2":548:11) | |
#loc431 = loc("<eval_with_key>.2":549:15) | |
#loc432 = loc("<eval_with_key>.2":551:15) | |
#loc433 = loc("<eval_with_key>.2":552:15) | |
#loc434 = loc("<eval_with_key>.2":553:15) | |
#loc435 = loc("<eval_with_key>.2":554:17) | |
#loc436 = loc("<eval_with_key>.2":555:15) | |
#loc437 = loc("<eval_with_key>.2":556:15) | |
#loc438 = loc("<eval_with_key>.2":558:11) | |
#loc439 = loc("<eval_with_key>.2":559:15) | |
#loc440 = loc("<eval_with_key>.2":561:15) | |
#loc441 = loc("<eval_with_key>.2":562:15) | |
#loc442 = loc("<eval_with_key>.2":563:15) | |
#loc443 = loc("<eval_with_key>.2":564:17) | |
#loc444 = loc("<eval_with_key>.2":565:15) | |
#loc445 = loc("<eval_with_key>.2":566:15) | |
#loc446 = loc("<eval_with_key>.2":568:11) | |
#loc447 = loc("<eval_with_key>.2":569:15) | |
#loc448 = loc("<eval_with_key>.2":571:15) | |
#loc449 = loc("<eval_with_key>.2":572:15) | |
#loc450 = loc("<eval_with_key>.2":573:15) | |
#loc451 = loc("<eval_with_key>.2":574:17) | |
#loc452 = loc("<eval_with_key>.2":575:15) | |
#loc453 = loc("<eval_with_key>.2":576:15) | |
#loc454 = loc("<eval_with_key>.2":577:19) | |
#loc455 = loc("<eval_with_key>.2":578:13) | |
#loc456 = loc("<eval_with_key>.2":579:13) | |
#loc457 = loc("<eval_with_key>.2":580:19) | |
#loc458 = loc("<eval_with_key>.2":582:19) | |
#loc459 = loc("<eval_with_key>.2":584:11) | |
#loc460 = loc("<eval_with_key>.2":585:15) | |
#loc461 = loc("<eval_with_key>.2":587:15) | |
#loc462 = loc("<eval_with_key>.2":588:15) | |
#loc463 = loc("<eval_with_key>.2":589:15) | |
#loc464 = loc("<eval_with_key>.2":590:17) | |
#loc465 = loc("<eval_with_key>.2":591:15) | |
#loc466 = loc("<eval_with_key>.2":592:15) | |
#loc467 = loc("<eval_with_key>.2":593:15) | |
#loc468 = loc("<eval_with_key>.2":595:11) | |
#loc469 = loc("<eval_with_key>.2":596:15) | |
#loc470 = loc("<eval_with_key>.2":598:15) | |
#loc471 = loc("<eval_with_key>.2":599:15) | |
#loc472 = loc("<eval_with_key>.2":600:15) | |
#loc473 = loc("<eval_with_key>.2":601:17) | |
#loc474 = loc("<eval_with_key>.2":602:15) | |
#loc475 = loc("<eval_with_key>.2":603:15) | |
#loc476 = loc("<eval_with_key>.2":604:15) | |
#loc477 = loc("<eval_with_key>.2":605:19) | |
#loc478 = loc("<eval_with_key>.2":606:13) | |
#loc479 = loc("<eval_with_key>.2":607:13) | |
#loc480 = loc("<eval_with_key>.2":608:14) | |
#loc481 = loc("<eval_with_key>.2":609:16) | |
#loc482 = loc("<eval_with_key>.2":610:15) | |
#loc483 = loc("<eval_with_key>.2":611:15) | |
#loc484 = loc("<eval_with_key>.2":612:13) | |
#loc485 = loc("<eval_with_key>.2":613:13) | |
#loc486 = loc("<eval_with_key>.2":614:12) | |
#loc487 = loc("<eval_with_key>.2":615:13) | |
#loc488 = loc("<eval_with_key>.2":616:14) | |
#loc489 = loc("<eval_with_key>.2":617:19) | |
#loc490 = loc("<eval_with_key>.2":618:13) | |
#loc491 = loc("<eval_with_key>.2":619:17) | |
#loc492 = loc("<eval_with_key>.2":620:15) | |
#loc493 = loc("<eval_with_key>.2":621:15) | |
#loc494 = loc("<eval_with_key>.2":622:19) | |
#loc495 = loc("<eval_with_key>.2":623:13) | |
#loc496 = loc("<eval_with_key>.2":624:13) | |
#loc497 = loc("<eval_with_key>.2":625:13) | |
#loc498 = loc("<eval_with_key>.2":626:15) | |
#loc499 = loc("<eval_with_key>.2":627:17) | |
#loc500 = loc("<eval_with_key>.2":628:20) | |
#loc501 = loc("<eval_with_key>.2":630:24) | |
#loc502 = loc("<eval_with_key>.2":631:20) | |
#loc503 = loc("<eval_with_key>.2":632:17) | |
#loc504 = loc("<eval_with_key>.2":633:21) | |
#loc505 = loc("<eval_with_key>.2":635:15) | |
#loc506 = loc("<eval_with_key>.2":636:13) | |
#loc507 = loc("<eval_with_key>.2":637:15) | |
#loc508 = loc("<eval_with_key>.2":638:17) | |
#loc509 = loc("<eval_with_key>.2":639:15) | |
#loc510 = loc("<eval_with_key>.2":640:15) | |
#loc511 = loc("<eval_with_key>.2":642:11) | |
#loc512 = loc("<eval_with_key>.2":643:15) | |
#loc513 = loc("<eval_with_key>.2":645:15) | |
#loc514 = loc("<eval_with_key>.2":646:15) | |
#loc515 = loc("<eval_with_key>.2":647:13) | |
#loc516 = loc("<eval_with_key>.2":650:27) | |
#loc517 = loc("<eval_with_key>.2":655:11) | |
#loc518 = loc("<eval_with_key>.2":656:15) | |
#loc519 = loc("<eval_with_key>.2":658:15) | |
#loc520 = loc("<eval_with_key>.2":659:15) | |
#loc521 = loc("<eval_with_key>.2":660:13) | |
#loc522 = loc("<eval_with_key>.2":662:11) | |
#loc523 = loc("<eval_with_key>.2":663:15) | |
#loc524 = loc("<eval_with_key>.2":665:15) | |
#loc525 = loc("<eval_with_key>.2":666:15) | |
#loc526 = loc("<eval_with_key>.2":667:13) | |
#loc527 = loc("<eval_with_key>.2":670:27) | |
#loc528 = loc("<eval_with_key>.2":675:11) | |
#loc529 = loc("<eval_with_key>.2":676:15) | |
#loc530 = loc("<eval_with_key>.2":678:15) | |
#loc531 = loc("<eval_with_key>.2":679:15) | |
#loc532 = loc("<eval_with_key>.2":680:15) | |
#loc533 = loc("<eval_with_key>.2":681:17) | |
#loc534 = loc("<eval_with_key>.2":682:15) | |
#loc535 = loc("<eval_with_key>.2":683:15) | |
#loc536 = loc("<eval_with_key>.2":685:11) | |
#loc537 = loc("<eval_with_key>.2":686:15) | |
#loc538 = loc("<eval_with_key>.2":688:15) | |
#loc539 = loc("<eval_with_key>.2":689:15) | |
#loc540 = loc("<eval_with_key>.2":690:15) | |
#loc541 = loc("<eval_with_key>.2":691:17) | |
#loc542 = loc("<eval_with_key>.2":692:15) | |
#loc543 = loc("<eval_with_key>.2":693:15) | |
#loc544 = loc("<eval_with_key>.2":695:11) | |
#loc545 = loc("<eval_with_key>.2":696:15) | |
#loc546 = loc("<eval_with_key>.2":698:15) | |
#loc547 = loc("<eval_with_key>.2":699:15) | |
#loc548 = loc("<eval_with_key>.2":700:15) | |
#loc549 = loc("<eval_with_key>.2":701:17) | |
#loc550 = loc("<eval_with_key>.2":702:15) | |
#loc551 = loc("<eval_with_key>.2":703:15) | |
#loc552 = loc("<eval_with_key>.2":704:19) | |
#loc553 = loc("<eval_with_key>.2":705:13) | |
#loc554 = loc("<eval_with_key>.2":706:13) | |
#loc555 = loc("<eval_with_key>.2":707:19) | |
#loc556 = loc("<eval_with_key>.2":709:19) | |
#loc557 = loc("<eval_with_key>.2":711:11) | |
#loc558 = loc("<eval_with_key>.2":712:15) | |
#loc559 = loc("<eval_with_key>.2":714:15) | |
#loc560 = loc("<eval_with_key>.2":715:15) | |
#loc561 = loc("<eval_with_key>.2":716:15) | |
#loc562 = loc("<eval_with_key>.2":717:17) | |
#loc563 = loc("<eval_with_key>.2":718:15) | |
#loc564 = loc("<eval_with_key>.2":719:15) | |
#loc565 = loc("<eval_with_key>.2":720:16) | |
#loc566 = loc("<eval_with_key>.2":722:11) | |
#loc567 = loc("<eval_with_key>.2":723:15) | |
#loc568 = loc("<eval_with_key>.2":725:15) | |
#loc569 = loc("<eval_with_key>.2":726:15) | |
#loc570 = loc("<eval_with_key>.2":727:15) | |
#loc571 = loc("<eval_with_key>.2":728:17) | |
#loc572 = loc("<eval_with_key>.2":729:15) | |
#loc573 = loc("<eval_with_key>.2":730:15) | |
#loc574 = loc("<eval_with_key>.2":731:16) | |
#loc575 = loc("<eval_with_key>.2":732:19) | |
#loc576 = loc("<eval_with_key>.2":733:13) | |
#loc577 = loc("<eval_with_key>.2":734:13) | |
#loc578 = loc("<eval_with_key>.2":735:15) | |
#loc579 = loc("<eval_with_key>.2":736:17) | |
#loc580 = loc("<eval_with_key>.2":737:16) | |
#loc581 = loc("<eval_with_key>.2":738:16) | |
#loc582 = loc("<eval_with_key>.2":739:13) | |
#loc583 = loc("<eval_with_key>.2":740:13) | |
#loc584 = loc("<eval_with_key>.2":741:12) | |
#loc585 = loc("<eval_with_key>.2":742:13) | |
#loc586 = loc("<eval_with_key>.2":743:15) | |
#loc587 = loc("<eval_with_key>.2":744:19) | |
#loc588 = loc("<eval_with_key>.2":745:13) | |
#loc589 = loc("<eval_with_key>.2":746:17) | |
#loc590 = loc("<eval_with_key>.2":747:16) | |
#loc591 = loc("<eval_with_key>.2":748:16) | |
#loc592 = loc("<eval_with_key>.2":749:19) | |
#loc593 = loc("<eval_with_key>.2":750:13) | |
#loc594 = loc("<eval_with_key>.2":751:13) | |
#loc595 = loc("<eval_with_key>.2":752:13) | |
#loc596 = loc("<eval_with_key>.2":753:15) | |
#loc597 = loc("<eval_with_key>.2":754:17) | |
#loc598 = loc("<eval_with_key>.2":755:20) | |
#loc599 = loc("<eval_with_key>.2":757:24) | |
#loc600 = loc("<eval_with_key>.2":758:20) | |
#loc601 = loc("<eval_with_key>.2":759:17) | |
#loc602 = loc("<eval_with_key>.2":760:21) | |
#loc603 = loc("<eval_with_key>.2":762:15) | |
#loc604 = loc("<eval_with_key>.2":763:13) | |
#loc605 = loc("<eval_with_key>.2":764:15) | |
#loc606 = loc("<eval_with_key>.2":765:17) | |
#loc607 = loc("<eval_with_key>.2":766:15) | |
#loc608 = loc("<eval_with_key>.2":767:15) | |
#loc609 = loc("<eval_with_key>.2":769:11) | |
#loc610 = loc("<eval_with_key>.2":770:15) | |
#loc611 = loc("<eval_with_key>.2":772:15) | |
#loc612 = loc("<eval_with_key>.2":773:15) | |
#loc613 = loc("<eval_with_key>.2":774:13) | |
#loc614 = loc("<eval_with_key>.2":777:27) | |
#loc615 = loc("<eval_with_key>.2":782:11) | |
#loc616 = loc("<eval_with_key>.2":783:15) | |
#loc617 = loc("<eval_with_key>.2":785:15) | |
#loc618 = loc("<eval_with_key>.2":786:15) | |
#loc619 = loc("<eval_with_key>.2":787:13) | |
#loc620 = loc("<eval_with_key>.2":789:11) | |
#loc621 = loc("<eval_with_key>.2":790:15) | |
#loc622 = loc("<eval_with_key>.2":792:15) | |
#loc623 = loc("<eval_with_key>.2":793:15) | |
#loc624 = loc("<eval_with_key>.2":794:13) | |
#loc625 = loc("<eval_with_key>.2":797:27) | |
#loc626 = loc("<eval_with_key>.2":802:11) | |
#loc627 = loc("<eval_with_key>.2":803:15) | |
#loc628 = loc("<eval_with_key>.2":805:15) | |
#loc629 = loc("<eval_with_key>.2":806:15) | |
#loc630 = loc("<eval_with_key>.2":807:15) | |
#loc631 = loc("<eval_with_key>.2":808:17) | |
#loc632 = loc("<eval_with_key>.2":809:15) | |
#loc633 = loc("<eval_with_key>.2":810:15) | |
#loc634 = loc("<eval_with_key>.2":812:11) | |
#loc635 = loc("<eval_with_key>.2":813:15) | |
#loc636 = loc("<eval_with_key>.2":815:15) | |
#loc637 = loc("<eval_with_key>.2":816:15) | |
#loc638 = loc("<eval_with_key>.2":817:15) | |
#loc639 = loc("<eval_with_key>.2":818:17) | |
#loc640 = loc("<eval_with_key>.2":819:15) | |
#loc641 = loc("<eval_with_key>.2":820:15) | |
#loc642 = loc("<eval_with_key>.2":822:11) | |
#loc643 = loc("<eval_with_key>.2":823:15) | |
#loc644 = loc("<eval_with_key>.2":825:15) | |
#loc645 = loc("<eval_with_key>.2":826:15) | |
#loc646 = loc("<eval_with_key>.2":827:15) | |
#loc647 = loc("<eval_with_key>.2":828:17) | |
#loc648 = loc("<eval_with_key>.2":829:15) | |
#loc649 = loc("<eval_with_key>.2":830:15) | |
#loc650 = loc("<eval_with_key>.2":831:19) | |
#loc651 = loc("<eval_with_key>.2":832:13) | |
#loc652 = loc("<eval_with_key>.2":833:13) | |
#loc653 = loc("<eval_with_key>.2":834:19) | |
#loc654 = loc("<eval_with_key>.2":836:19) | |
#loc655 = loc("<eval_with_key>.2":838:11) | |
#loc656 = loc("<eval_with_key>.2":839:15) | |
#loc657 = loc("<eval_with_key>.2":841:15) | |
#loc658 = loc("<eval_with_key>.2":842:15) | |
#loc659 = loc("<eval_with_key>.2":843:15) | |
#loc660 = loc("<eval_with_key>.2":844:17) | |
#loc661 = loc("<eval_with_key>.2":845:15) | |
#loc662 = loc("<eval_with_key>.2":846:15) | |
#loc663 = loc("<eval_with_key>.2":847:16) | |
#loc664 = loc("<eval_with_key>.2":849:11) | |
#loc665 = loc("<eval_with_key>.2":850:15) | |
#loc666 = loc("<eval_with_key>.2":852:15) | |
#loc667 = loc("<eval_with_key>.2":853:15) | |
#loc668 = loc("<eval_with_key>.2":854:15) | |
#loc669 = loc("<eval_with_key>.2":855:17) | |
#loc670 = loc("<eval_with_key>.2":856:15) | |
#loc671 = loc("<eval_with_key>.2":857:15) | |
#loc672 = loc("<eval_with_key>.2":858:16) | |
#loc673 = loc("<eval_with_key>.2":859:19) | |
#loc674 = loc("<eval_with_key>.2":860:13) | |
#loc675 = loc("<eval_with_key>.2":861:13) | |
#loc676 = loc("<eval_with_key>.2":862:15) | |
#loc677 = loc("<eval_with_key>.2":863:17) | |
#loc678 = loc("<eval_with_key>.2":864:16) | |
#loc679 = loc("<eval_with_key>.2":865:16) | |
#loc680 = loc("<eval_with_key>.2":866:13) | |
#loc681 = loc("<eval_with_key>.2":867:13) | |
#loc682 = loc("<eval_with_key>.2":868:12) | |
#loc683 = loc("<eval_with_key>.2":869:13) | |
#loc684 = loc("<eval_with_key>.2":870:15) | |
#loc685 = loc("<eval_with_key>.2":871:19) | |
#loc686 = loc("<eval_with_key>.2":872:13) | |
#loc687 = loc("<eval_with_key>.2":873:17) | |
#loc688 = loc("<eval_with_key>.2":874:16) | |
#loc689 = loc("<eval_with_key>.2":875:16) | |
#loc690 = loc("<eval_with_key>.2":876:19) | |
#loc691 = loc("<eval_with_key>.2":877:13) | |
#loc692 = loc("<eval_with_key>.2":878:13) | |
#loc693 = loc("<eval_with_key>.2":879:13) | |
#loc694 = loc("<eval_with_key>.2":880:15) | |
#loc695 = loc("<eval_with_key>.2":881:17) | |
#loc696 = loc("<eval_with_key>.2":882:20) | |
#loc697 = loc("<eval_with_key>.2":884:24) | |
#loc698 = loc("<eval_with_key>.2":885:20) | |
#loc699 = loc("<eval_with_key>.2":886:17) | |
#loc700 = loc("<eval_with_key>.2":887:21) | |
#loc701 = loc("<eval_with_key>.2":889:15) | |
#loc702 = loc("<eval_with_key>.2":890:13) | |
#loc703 = loc("<eval_with_key>.2":891:15) | |
#loc704 = loc("<eval_with_key>.2":892:17) | |
#loc705 = loc("<eval_with_key>.2":893:15) | |
#loc706 = loc("<eval_with_key>.2":894:15) | |
#loc707 = loc("<eval_with_key>.2":896:11) | |
#loc708 = loc("<eval_with_key>.2":897:15) | |
#loc709 = loc("<eval_with_key>.2":899:15) | |
#loc710 = loc("<eval_with_key>.2":900:15) | |
#loc711 = loc("<eval_with_key>.2":901:13) | |
#loc712 = loc("<eval_with_key>.2":904:27) | |
#loc713 = loc("<eval_with_key>.2":909:11) | |
#loc714 = loc("<eval_with_key>.2":910:15) | |
#loc715 = loc("<eval_with_key>.2":912:15) | |
#loc716 = loc("<eval_with_key>.2":913:15) | |
#loc717 = loc("<eval_with_key>.2":914:13) | |
#loc718 = loc("<eval_with_key>.2":916:11) | |
#loc719 = loc("<eval_with_key>.2":917:15) | |
#loc720 = loc("<eval_with_key>.2":919:15) | |
#loc721 = loc("<eval_with_key>.2":920:15) | |
#loc722 = loc("<eval_with_key>.2":921:13) | |
#loc723 = loc("<eval_with_key>.2":924:27) | |
#loc724 = loc("<eval_with_key>.2":929:11) | |
#loc725 = loc("<eval_with_key>.2":930:15) | |
#loc726 = loc("<eval_with_key>.2":932:15) | |
#loc727 = loc("<eval_with_key>.2":933:15) | |
#loc728 = loc("<eval_with_key>.2":934:15) | |
#loc729 = loc("<eval_with_key>.2":935:17) | |
#loc730 = loc("<eval_with_key>.2":936:15) | |
#loc731 = loc("<eval_with_key>.2":937:15) | |
#loc732 = loc("<eval_with_key>.2":939:11) | |
#loc733 = loc("<eval_with_key>.2":940:15) | |
#loc734 = loc("<eval_with_key>.2":942:15) | |
#loc735 = loc("<eval_with_key>.2":943:15) | |
#loc736 = loc("<eval_with_key>.2":944:15) | |
#loc737 = loc("<eval_with_key>.2":945:17) | |
#loc738 = loc("<eval_with_key>.2":946:15) | |
#loc739 = loc("<eval_with_key>.2":947:15) | |
#loc740 = loc("<eval_with_key>.2":949:11) | |
#loc741 = loc("<eval_with_key>.2":950:15) | |
#loc742 = loc("<eval_with_key>.2":952:15) | |
#loc743 = loc("<eval_with_key>.2":953:15) | |
#loc744 = loc("<eval_with_key>.2":954:15) | |
#loc745 = loc("<eval_with_key>.2":955:17) | |
#loc746 = loc("<eval_with_key>.2":956:15) | |
#loc747 = loc("<eval_with_key>.2":957:15) | |
#loc748 = loc("<eval_with_key>.2":958:19) | |
#loc749 = loc("<eval_with_key>.2":959:13) | |
#loc750 = loc("<eval_with_key>.2":960:13) | |
#loc751 = loc("<eval_with_key>.2":961:19) | |
#loc752 = loc("<eval_with_key>.2":963:19) | |
#loc753 = loc("<eval_with_key>.2":965:11) | |
#loc754 = loc("<eval_with_key>.2":966:15) | |
#loc755 = loc("<eval_with_key>.2":968:15) | |
#loc756 = loc("<eval_with_key>.2":969:15) | |
#loc757 = loc("<eval_with_key>.2":970:15) | |
#loc758 = loc("<eval_with_key>.2":971:17) | |
#loc759 = loc("<eval_with_key>.2":972:15) | |
#loc760 = loc("<eval_with_key>.2":973:15) | |
#loc761 = loc("<eval_with_key>.2":974:16) | |
#loc762 = loc("<eval_with_key>.2":976:11) | |
#loc763 = loc("<eval_with_key>.2":977:15) | |
#loc764 = loc("<eval_with_key>.2":979:15) | |
#loc765 = loc("<eval_with_key>.2":980:15) | |
#loc766 = loc("<eval_with_key>.2":981:15) | |
#loc767 = loc("<eval_with_key>.2":982:17) | |
#loc768 = loc("<eval_with_key>.2":983:15) | |
#loc769 = loc("<eval_with_key>.2":984:15) | |
#loc770 = loc("<eval_with_key>.2":985:16) | |
#loc771 = loc("<eval_with_key>.2":986:19) | |
#loc772 = loc("<eval_with_key>.2":987:13) | |
#loc773 = loc("<eval_with_key>.2":988:13) | |
#loc774 = loc("<eval_with_key>.2":989:15) | |
#loc775 = loc("<eval_with_key>.2":990:17) | |
#loc776 = loc("<eval_with_key>.2":991:16) | |
#loc777 = loc("<eval_with_key>.2":992:16) | |
#loc778 = loc("<eval_with_key>.2":993:13) | |
#loc779 = loc("<eval_with_key>.2":994:13) | |
#loc780 = loc("<eval_with_key>.2":995:12) | |
#loc781 = loc("<eval_with_key>.2":996:13) | |
#loc782 = loc("<eval_with_key>.2":997:15) | |
#loc783 = loc("<eval_with_key>.2":998:19) | |
#loc784 = loc("<eval_with_key>.2":999:13) | |
#loc785 = loc("<eval_with_key>.2":1000:17) | |
#loc786 = loc("<eval_with_key>.2":1001:16) | |
#loc787 = loc("<eval_with_key>.2":1002:16) | |
#loc788 = loc("<eval_with_key>.2":1003:19) | |
#loc789 = loc("<eval_with_key>.2":1004:13) | |
#loc790 = loc("<eval_with_key>.2":1005:13) | |
#loc791 = loc("<eval_with_key>.2":1006:13) | |
#loc792 = loc("<eval_with_key>.2":1007:15) | |
#loc793 = loc("<eval_with_key>.2":1008:17) | |
#loc794 = loc("<eval_with_key>.2":1009:20) | |
#loc795 = loc("<eval_with_key>.2":1011:24) | |
#loc796 = loc("<eval_with_key>.2":1012:20) | |
#loc797 = loc("<eval_with_key>.2":1013:17) | |
#loc798 = loc("<eval_with_key>.2":1014:21) | |
#loc799 = loc("<eval_with_key>.2":1016:15) | |
#loc800 = loc("<eval_with_key>.2":1017:13) | |
#loc801 = loc("<eval_with_key>.2":1018:15) | |
#loc802 = loc("<eval_with_key>.2":1019:17) | |
#loc803 = loc("<eval_with_key>.2":1020:15) | |
#loc804 = loc("<eval_with_key>.2":1021:15) | |
#loc805 = loc("<eval_with_key>.2":1023:11) | |
#loc806 = loc("<eval_with_key>.2":1024:15) | |
#loc807 = loc("<eval_with_key>.2":1026:15) | |
#loc808 = loc("<eval_with_key>.2":1027:15) | |
#loc809 = loc("<eval_with_key>.2":1028:13) | |
#loc810 = loc("<eval_with_key>.2":1031:27) | |
#loc811 = loc("<eval_with_key>.2":1036:11) | |
#loc812 = loc("<eval_with_key>.2":1037:15) | |
#loc813 = loc("<eval_with_key>.2":1039:15) | |
#loc814 = loc("<eval_with_key>.2":1040:15) | |
#loc815 = loc("<eval_with_key>.2":1041:13) | |
#loc816 = loc("<eval_with_key>.2":1043:11) | |
#loc817 = loc("<eval_with_key>.2":1044:15) | |
#loc818 = loc("<eval_with_key>.2":1046:15) | |
#loc819 = loc("<eval_with_key>.2":1047:15) | |
#loc820 = loc("<eval_with_key>.2":1048:13) | |
#loc821 = loc("<eval_with_key>.2":1051:27) | |
#loc822 = loc("<eval_with_key>.2":1056:11) | |
#loc823 = loc("<eval_with_key>.2":1057:15) | |
#loc824 = loc("<eval_with_key>.2":1059:15) | |
#loc825 = loc("<eval_with_key>.2":1060:15) | |
#loc826 = loc("<eval_with_key>.2":1061:15) | |
#loc827 = loc("<eval_with_key>.2":1062:17) | |
#loc828 = loc("<eval_with_key>.2":1063:15) | |
#loc829 = loc("<eval_with_key>.2":1064:15) | |
#loc830 = loc("<eval_with_key>.2":1066:11) | |
#loc831 = loc("<eval_with_key>.2":1067:15) | |
#loc832 = loc("<eval_with_key>.2":1069:15) | |
#loc833 = loc("<eval_with_key>.2":1070:15) | |
#loc834 = loc("<eval_with_key>.2":1071:15) | |
#loc835 = loc("<eval_with_key>.2":1072:17) | |
#loc836 = loc("<eval_with_key>.2":1073:15) | |
#loc837 = loc("<eval_with_key>.2":1074:15) | |
#loc838 = loc("<eval_with_key>.2":1076:11) | |
#loc839 = loc("<eval_with_key>.2":1077:15) | |
#loc840 = loc("<eval_with_key>.2":1079:15) | |
#loc841 = loc("<eval_with_key>.2":1080:15) | |
#loc842 = loc("<eval_with_key>.2":1081:15) | |
#loc843 = loc("<eval_with_key>.2":1082:17) | |
#loc844 = loc("<eval_with_key>.2":1083:15) | |
#loc845 = loc("<eval_with_key>.2":1084:15) | |
#loc846 = loc("<eval_with_key>.2":1085:19) | |
#loc847 = loc("<eval_with_key>.2":1086:13) | |
#loc848 = loc("<eval_with_key>.2":1087:13) | |
#loc849 = loc("<eval_with_key>.2":1088:19) | |
#loc850 = loc("<eval_with_key>.2":1090:19) | |
#loc851 = loc("<eval_with_key>.2":1092:11) | |
#loc852 = loc("<eval_with_key>.2":1093:15) | |
#loc853 = loc("<eval_with_key>.2":1095:15) | |
#loc854 = loc("<eval_with_key>.2":1096:15) | |
#loc855 = loc("<eval_with_key>.2":1097:15) | |
#loc856 = loc("<eval_with_key>.2":1098:17) | |
#loc857 = loc("<eval_with_key>.2":1099:15) | |
#loc858 = loc("<eval_with_key>.2":1100:15) | |
#loc859 = loc("<eval_with_key>.2":1101:16) | |
#loc860 = loc("<eval_with_key>.2":1103:11) | |
#loc861 = loc("<eval_with_key>.2":1104:15) | |
#loc862 = loc("<eval_with_key>.2":1106:15) | |
#loc863 = loc("<eval_with_key>.2":1107:15) | |
#loc864 = loc("<eval_with_key>.2":1108:15) | |
#loc865 = loc("<eval_with_key>.2":1109:17) | |
#loc866 = loc("<eval_with_key>.2":1110:15) | |
#loc867 = loc("<eval_with_key>.2":1111:15) | |
#loc868 = loc("<eval_with_key>.2":1112:16) | |
#loc869 = loc("<eval_with_key>.2":1113:19) | |
#loc870 = loc("<eval_with_key>.2":1114:13) | |
#loc871 = loc("<eval_with_key>.2":1115:13) | |
#loc872 = loc("<eval_with_key>.2":1116:15) | |
#loc873 = loc("<eval_with_key>.2":1117:17) | |
#loc874 = loc("<eval_with_key>.2":1118:16) | |
#loc875 = loc("<eval_with_key>.2":1119:16) | |
#loc876 = loc("<eval_with_key>.2":1120:13) | |
#loc877 = loc("<eval_with_key>.2":1121:13) | |
#loc878 = loc("<eval_with_key>.2":1122:12) | |
#loc879 = loc("<eval_with_key>.2":1123:13) | |
#loc880 = loc("<eval_with_key>.2":1124:15) | |
#loc881 = loc("<eval_with_key>.2":1125:19) | |
#loc882 = loc("<eval_with_key>.2":1126:13) | |
#loc883 = loc("<eval_with_key>.2":1127:17) | |
#loc884 = loc("<eval_with_key>.2":1128:16) | |
#loc885 = loc("<eval_with_key>.2":1129:16) | |
#loc886 = loc("<eval_with_key>.2":1130:19) | |
#loc887 = loc("<eval_with_key>.2":1131:13) | |
#loc888 = loc("<eval_with_key>.2":1132:13) | |
#loc889 = loc("<eval_with_key>.2":1133:13) | |
#loc890 = loc("<eval_with_key>.2":1134:15) | |
#loc891 = loc("<eval_with_key>.2":1135:18) | |
#loc892 = loc("<eval_with_key>.2":1136:20) | |
#loc893 = loc("<eval_with_key>.2":1138:24) | |
#loc894 = loc("<eval_with_key>.2":1139:20) | |
#loc895 = loc("<eval_with_key>.2":1140:17) | |
#loc896 = loc("<eval_with_key>.2":1141:21) | |
#loc897 = loc("<eval_with_key>.2":1143:15) | |
#loc898 = loc("<eval_with_key>.2":1144:13) | |
#loc899 = loc("<eval_with_key>.2":1145:15) | |
#loc900 = loc("<eval_with_key>.2":1146:17) | |
#loc901 = loc("<eval_with_key>.2":1147:15) | |
#loc902 = loc("<eval_with_key>.2":1148:15) | |
#loc903 = loc("<eval_with_key>.2":1150:11) | |
#loc904 = loc("<eval_with_key>.2":1151:15) | |
#loc905 = loc("<eval_with_key>.2":1153:15) | |
#loc906 = loc("<eval_with_key>.2":1154:15) | |
#loc907 = loc("<eval_with_key>.2":1155:13) | |
#loc908 = loc("<eval_with_key>.2":1158:27) | |
#loc909 = loc("<eval_with_key>.2":1163:11) | |
#loc910 = loc("<eval_with_key>.2":1164:15) | |
#loc911 = loc("<eval_with_key>.2":1166:15) | |
#loc912 = loc("<eval_with_key>.2":1167:15) | |
#loc913 = loc("<eval_with_key>.2":1168:13) | |
#loc914 = loc("<eval_with_key>.2":1170:11) | |
#loc915 = loc("<eval_with_key>.2":1171:15) | |
#loc916 = loc("<eval_with_key>.2":1173:15) | |
#loc917 = loc("<eval_with_key>.2":1174:15) | |
#loc918 = loc("<eval_with_key>.2":1175:13) | |
#loc919 = loc("<eval_with_key>.2":1178:27) | |
#loc920 = loc("<eval_with_key>.2":1183:11) | |
#loc921 = loc("<eval_with_key>.2":1184:15) | |
#loc922 = loc("<eval_with_key>.2":1186:15) | |
#loc923 = loc("<eval_with_key>.2":1187:15) | |
#loc924 = loc("<eval_with_key>.2":1188:15) | |
#loc925 = loc("<eval_with_key>.2":1189:17) | |
#loc926 = loc("<eval_with_key>.2":1190:15) | |
#loc927 = loc("<eval_with_key>.2":1191:15) | |
#loc928 = loc("<eval_with_key>.2":1193:11) | |
#loc929 = loc("<eval_with_key>.2":1194:15) | |
#loc930 = loc("<eval_with_key>.2":1196:15) | |
#loc931 = loc("<eval_with_key>.2":1197:15) | |
#loc932 = loc("<eval_with_key>.2":1198:15) | |
#loc933 = loc("<eval_with_key>.2":1199:17) | |
#loc934 = loc("<eval_with_key>.2":1200:15) | |
#loc935 = loc("<eval_with_key>.2":1201:15) | |
#loc936 = loc("<eval_with_key>.2":1203:11) | |
#loc937 = loc("<eval_with_key>.2":1204:15) | |
#loc938 = loc("<eval_with_key>.2":1206:15) | |
#loc939 = loc("<eval_with_key>.2":1207:15) | |
#loc940 = loc("<eval_with_key>.2":1208:15) | |
#loc941 = loc("<eval_with_key>.2":1209:17) | |
#loc942 = loc("<eval_with_key>.2":1210:15) | |
#loc943 = loc("<eval_with_key>.2":1211:15) | |
#loc944 = loc("<eval_with_key>.2":1212:19) | |
#loc945 = loc("<eval_with_key>.2":1213:13) | |
#loc946 = loc("<eval_with_key>.2":1214:13) | |
#loc947 = loc("<eval_with_key>.2":1215:19) | |
#loc948 = loc("<eval_with_key>.2":1217:19) | |
#loc949 = loc("<eval_with_key>.2":1219:11) | |
#loc950 = loc("<eval_with_key>.2":1220:15) | |
#loc951 = loc("<eval_with_key>.2":1222:15) | |
#loc952 = loc("<eval_with_key>.2":1223:15) | |
#loc953 = loc("<eval_with_key>.2":1224:15) | |
#loc954 = loc("<eval_with_key>.2":1225:17) | |
#loc955 = loc("<eval_with_key>.2":1226:15) | |
#loc956 = loc("<eval_with_key>.2":1227:15) | |
#loc957 = loc("<eval_with_key>.2":1228:16) | |
#loc958 = loc("<eval_with_key>.2":1230:11) | |
#loc959 = loc("<eval_with_key>.2":1231:15) | |
#loc960 = loc("<eval_with_key>.2":1233:15) | |
#loc961 = loc("<eval_with_key>.2":1234:15) | |
#loc962 = loc("<eval_with_key>.2":1235:15) | |
#loc963 = loc("<eval_with_key>.2":1236:17) | |
#loc964 = loc("<eval_with_key>.2":1237:15) | |
#loc965 = loc("<eval_with_key>.2":1238:15) | |
#loc966 = loc("<eval_with_key>.2":1239:16) | |
#loc967 = loc("<eval_with_key>.2":1240:19) | |
#loc968 = loc("<eval_with_key>.2":1241:13) | |
#loc969 = loc("<eval_with_key>.2":1242:13) | |
#loc970 = loc("<eval_with_key>.2":1243:15) | |
#loc971 = loc("<eval_with_key>.2":1244:17) | |
#loc972 = loc("<eval_with_key>.2":1245:16) | |
#loc973 = loc("<eval_with_key>.2":1246:16) | |
#loc974 = loc("<eval_with_key>.2":1247:13) | |
#loc975 = loc("<eval_with_key>.2":1248:13) | |
#loc976 = loc("<eval_with_key>.2":1249:12) | |
#loc977 = loc("<eval_with_key>.2":1250:13) | |
#loc978 = loc("<eval_with_key>.2":1251:15) | |
#loc979 = loc("<eval_with_key>.2":1252:19) | |
#loc980 = loc("<eval_with_key>.2":1253:13) | |
#loc981 = loc("<eval_with_key>.2":1254:17) | |
#loc982 = loc("<eval_with_key>.2":1255:16) | |
#loc983 = loc("<eval_with_key>.2":1256:16) | |
#loc984 = loc("<eval_with_key>.2":1257:19) | |
#loc985 = loc("<eval_with_key>.2":1258:13) | |
#loc986 = loc("<eval_with_key>.2":1259:13) | |
#loc987 = loc("<eval_with_key>.2":1260:13) | |
#loc988 = loc("<eval_with_key>.2":1261:15) | |
#loc989 = loc("<eval_with_key>.2":1262:18) | |
#loc990 = loc("<eval_with_key>.2":1263:20) | |
#loc991 = loc("<eval_with_key>.2":1265:25) | |
#loc992 = loc("<eval_with_key>.2":1266:20) | |
#loc993 = loc("<eval_with_key>.2":1267:17) | |
#loc994 = loc("<eval_with_key>.2":1268:21) | |
#loc995 = loc("<eval_with_key>.2":1270:15) | |
#loc996 = loc("<eval_with_key>.2":1271:13) | |
#loc997 = loc("<eval_with_key>.2":1272:15) | |
#loc998 = loc("<eval_with_key>.2":1273:17) | |
#loc999 = loc("<eval_with_key>.2":1274:15) | |
#loc1000 = loc("<eval_with_key>.2":1275:15) | |
#loc1001 = loc("<eval_with_key>.2":1277:11) | |
#loc1002 = loc("<eval_with_key>.2":1278:15) | |
#loc1003 = loc("<eval_with_key>.2":1280:15) | |
#loc1004 = loc("<eval_with_key>.2":1281:15) | |
#loc1005 = loc("<eval_with_key>.2":1282:13) | |
#loc1006 = loc("<eval_with_key>.2":1285:27) | |
#loc1007 = loc("<eval_with_key>.2":1290:11) | |
#loc1008 = loc("<eval_with_key>.2":1291:15) | |
#loc1009 = loc("<eval_with_key>.2":1293:15) | |
#loc1010 = loc("<eval_with_key>.2":1294:15) | |
#loc1011 = loc("<eval_with_key>.2":1295:13) | |
#loc1012 = loc("<eval_with_key>.2":1297:11) | |
#loc1013 = loc("<eval_with_key>.2":1298:15) | |
#loc1014 = loc("<eval_with_key>.2":1300:15) | |
#loc1015 = loc("<eval_with_key>.2":1301:15) | |
#loc1016 = loc("<eval_with_key>.2":1302:13) | |
#loc1017 = loc("<eval_with_key>.2":1305:27) | |
#loc1018 = loc("<eval_with_key>.2":1310:11) | |
#loc1019 = loc("<eval_with_key>.2":1311:15) | |
#loc1020 = loc("<eval_with_key>.2":1313:15) | |
#loc1021 = loc("<eval_with_key>.2":1314:15) | |
#loc1022 = loc("<eval_with_key>.2":1315:15) | |
#loc1023 = loc("<eval_with_key>.2":1316:17) | |
#loc1024 = loc("<eval_with_key>.2":1317:15) | |
#loc1025 = loc("<eval_with_key>.2":1318:15) | |
#loc1026 = loc("<eval_with_key>.2":1320:11) | |
#loc1027 = loc("<eval_with_key>.2":1321:15) | |
#loc1028 = loc("<eval_with_key>.2":1323:15) | |
#loc1029 = loc("<eval_with_key>.2":1324:15) | |
#loc1030 = loc("<eval_with_key>.2":1325:15) | |
#loc1031 = loc("<eval_with_key>.2":1326:17) | |
#loc1032 = loc("<eval_with_key>.2":1327:15) | |
#loc1033 = loc("<eval_with_key>.2":1328:15) | |
#loc1034 = loc("<eval_with_key>.2":1330:11) | |
#loc1035 = loc("<eval_with_key>.2":1331:15) | |
#loc1036 = loc("<eval_with_key>.2":1333:15) | |
#loc1037 = loc("<eval_with_key>.2":1334:15) | |
#loc1038 = loc("<eval_with_key>.2":1335:15) | |
#loc1039 = loc("<eval_with_key>.2":1336:17) | |
#loc1040 = loc("<eval_with_key>.2":1337:15) | |
#loc1041 = loc("<eval_with_key>.2":1338:15) | |
#loc1042 = loc("<eval_with_key>.2":1339:19) | |
#loc1043 = loc("<eval_with_key>.2":1340:13) | |
#loc1044 = loc("<eval_with_key>.2":1341:13) | |
#loc1045 = loc("<eval_with_key>.2":1342:19) | |
#loc1046 = loc("<eval_with_key>.2":1344:19) | |
#loc1047 = loc("<eval_with_key>.2":1346:11) | |
#loc1048 = loc("<eval_with_key>.2":1347:15) | |
#loc1049 = loc("<eval_with_key>.2":1349:15) | |
#loc1050 = loc("<eval_with_key>.2":1350:15) | |
#loc1051 = loc("<eval_with_key>.2":1351:15) | |
#loc1052 = loc("<eval_with_key>.2":1352:17) | |
#loc1053 = loc("<eval_with_key>.2":1353:15) | |
#loc1054 = loc("<eval_with_key>.2":1354:15) | |
#loc1055 = loc("<eval_with_key>.2":1355:16) | |
#loc1056 = loc("<eval_with_key>.2":1357:11) | |
#loc1057 = loc("<eval_with_key>.2":1358:15) | |
#loc1058 = loc("<eval_with_key>.2":1360:15) | |
#loc1059 = loc("<eval_with_key>.2":1361:15) | |
#loc1060 = loc("<eval_with_key>.2":1362:15) | |
#loc1061 = loc("<eval_with_key>.2":1363:17) | |
#loc1062 = loc("<eval_with_key>.2":1364:15) | |
#loc1063 = loc("<eval_with_key>.2":1365:15) | |
#loc1064 = loc("<eval_with_key>.2":1366:16) | |
#loc1065 = loc("<eval_with_key>.2":1367:19) | |
#loc1066 = loc("<eval_with_key>.2":1368:13) | |
#loc1067 = loc("<eval_with_key>.2":1369:13) | |
#loc1068 = loc("<eval_with_key>.2":1370:15) | |
#loc1069 = loc("<eval_with_key>.2":1371:17) | |
#loc1070 = loc("<eval_with_key>.2":1372:16) | |
#loc1071 = loc("<eval_with_key>.2":1373:16) | |
#loc1072 = loc("<eval_with_key>.2":1374:13) | |
#loc1073 = loc("<eval_with_key>.2":1375:13) | |
#loc1074 = loc("<eval_with_key>.2":1376:13) | |
#loc1075 = loc("<eval_with_key>.2":1377:13) | |
#loc1076 = loc("<eval_with_key>.2":1378:15) | |
#loc1077 = loc("<eval_with_key>.2":1379:19) | |
#loc1078 = loc("<eval_with_key>.2":1380:13) | |
#loc1079 = loc("<eval_with_key>.2":1381:17) | |
#loc1080 = loc("<eval_with_key>.2":1382:16) | |
#loc1081 = loc("<eval_with_key>.2":1383:16) | |
#loc1082 = loc("<eval_with_key>.2":1384:19) | |
#loc1083 = loc("<eval_with_key>.2":1385:13) | |
#loc1084 = loc("<eval_with_key>.2":1386:14) | |
#loc1085 = loc("<eval_with_key>.2":1387:13) | |
#loc1086 = loc("<eval_with_key>.2":1388:15) | |
#loc1087 = loc("<eval_with_key>.2":1389:18) | |
#loc1088 = loc("<eval_with_key>.2":1390:21) | |
#loc1089 = loc("<eval_with_key>.2":1392:25) | |
#loc1090 = loc("<eval_with_key>.2":1393:21) | |
#loc1091 = loc("<eval_with_key>.2":1394:18) | |
#loc1092 = loc("<eval_with_key>.2":1395:22) | |
#loc1093 = loc("<eval_with_key>.2":1397:15) | |
#loc1094 = loc("<eval_with_key>.2":1398:13) | |
#loc1095 = loc("<eval_with_key>.2":1399:15) | |
#loc1096 = loc("<eval_with_key>.2":1400:17) | |
#loc1097 = loc("<eval_with_key>.2":1401:15) | |
#loc1098 = loc("<eval_with_key>.2":1402:15) | |
#loc1099 = loc("<eval_with_key>.2":1404:11) | |
#loc1100 = loc("<eval_with_key>.2":1405:15) | |
#loc1101 = loc("<eval_with_key>.2":1407:15) | |
#loc1102 = loc("<eval_with_key>.2":1408:15) | |
#loc1103 = loc("<eval_with_key>.2":1409:13) | |
#loc1104 = loc("<eval_with_key>.2":1412:27) | |
#loc1105 = loc("<eval_with_key>.2":1417:11) | |
#loc1106 = loc("<eval_with_key>.2":1418:15) | |
#loc1107 = loc("<eval_with_key>.2":1420:15) | |
#loc1108 = loc("<eval_with_key>.2":1421:15) | |
#loc1109 = loc("<eval_with_key>.2":1422:14) | |
#loc1110 = loc("<eval_with_key>.2":1424:11) | |
#loc1111 = loc("<eval_with_key>.2":1425:15) | |
#loc1112 = loc("<eval_with_key>.2":1427:15) | |
#loc1113 = loc("<eval_with_key>.2":1428:15) | |
#loc1114 = loc("<eval_with_key>.2":1429:13) | |
#loc1115 = loc("<eval_with_key>.2":1432:27) | |
#loc1116 = loc("<eval_with_key>.2":1437:11) | |
#loc1117 = loc("<eval_with_key>.2":1438:15) | |
#loc1118 = loc("<eval_with_key>.2":1440:15) | |
#loc1119 = loc("<eval_with_key>.2":1441:15) | |
#loc1120 = loc("<eval_with_key>.2":1442:15) | |
#loc1121 = loc("<eval_with_key>.2":1443:17) | |
#loc1122 = loc("<eval_with_key>.2":1444:15) | |
#loc1123 = loc("<eval_with_key>.2":1445:15) | |
#loc1124 = loc("<eval_with_key>.2":1447:11) | |
#loc1125 = loc("<eval_with_key>.2":1448:15) | |
#loc1126 = loc("<eval_with_key>.2":1450:15) | |
#loc1127 = loc("<eval_with_key>.2":1451:15) | |
#loc1128 = loc("<eval_with_key>.2":1452:15) | |
#loc1129 = loc("<eval_with_key>.2":1453:17) | |
#loc1130 = loc("<eval_with_key>.2":1454:15) | |
#loc1131 = loc("<eval_with_key>.2":1455:15) | |
#loc1132 = loc("<eval_with_key>.2":1457:11) | |
#loc1133 = loc("<eval_with_key>.2":1458:15) | |
#loc1134 = loc("<eval_with_key>.2":1460:15) | |
#loc1135 = loc("<eval_with_key>.2":1461:15) | |
#loc1136 = loc("<eval_with_key>.2":1462:15) | |
#loc1137 = loc("<eval_with_key>.2":1463:17) | |
#loc1138 = loc("<eval_with_key>.2":1464:15) | |
#loc1139 = loc("<eval_with_key>.2":1465:15) | |
#loc1140 = loc("<eval_with_key>.2":1466:19) | |
#loc1141 = loc("<eval_with_key>.2":1467:13) | |
#loc1142 = loc("<eval_with_key>.2":1468:13) | |
#loc1143 = loc("<eval_with_key>.2":1469:19) | |
#loc1144 = loc("<eval_with_key>.2":1471:19) | |
#loc1145 = loc("<eval_with_key>.2":1473:11) | |
#loc1146 = loc("<eval_with_key>.2":1474:15) | |
#loc1147 = loc("<eval_with_key>.2":1476:15) | |
#loc1148 = loc("<eval_with_key>.2":1477:15) | |
#loc1149 = loc("<eval_with_key>.2":1478:15) | |
#loc1150 = loc("<eval_with_key>.2":1479:17) | |
#loc1151 = loc("<eval_with_key>.2":1480:15) | |
#loc1152 = loc("<eval_with_key>.2":1481:15) | |
#loc1153 = loc("<eval_with_key>.2":1482:16) | |
#loc1154 = loc("<eval_with_key>.2":1484:11) | |
#loc1155 = loc("<eval_with_key>.2":1485:15) | |
#loc1156 = loc("<eval_with_key>.2":1487:15) | |
#loc1157 = loc("<eval_with_key>.2":1488:15) | |
#loc1158 = loc("<eval_with_key>.2":1489:15) | |
#loc1159 = loc("<eval_with_key>.2":1490:17) | |
#loc1160 = loc("<eval_with_key>.2":1491:15) | |
#loc1161 = loc("<eval_with_key>.2":1492:15) | |
#loc1162 = loc("<eval_with_key>.2":1493:16) | |
#loc1163 = loc("<eval_with_key>.2":1494:19) | |
#loc1164 = loc("<eval_with_key>.2":1495:13) | |
#loc1165 = loc("<eval_with_key>.2":1496:13) | |
#loc1166 = loc("<eval_with_key>.2":1497:15) | |
#loc1167 = loc("<eval_with_key>.2":1498:17) | |
#loc1168 = loc("<eval_with_key>.2":1499:16) | |
#loc1169 = loc("<eval_with_key>.2":1500:16) | |
#loc1170 = loc("<eval_with_key>.2":1501:13) | |
#loc1171 = loc("<eval_with_key>.2":1502:13) | |
#loc1172 = loc("<eval_with_key>.2":1503:13) | |
#loc1173 = loc("<eval_with_key>.2":1504:13) | |
#loc1174 = loc("<eval_with_key>.2":1505:15) | |
#loc1175 = loc("<eval_with_key>.2":1506:19) | |
#loc1176 = loc("<eval_with_key>.2":1507:13) | |
#loc1177 = loc("<eval_with_key>.2":1508:17) | |
#loc1178 = loc("<eval_with_key>.2":1509:16) | |
#loc1179 = loc("<eval_with_key>.2":1510:16) | |
#loc1180 = loc("<eval_with_key>.2":1511:19) | |
#loc1181 = loc("<eval_with_key>.2":1512:13) | |
#loc1182 = loc("<eval_with_key>.2":1513:14) | |
#loc1183 = loc("<eval_with_key>.2":1514:13) | |
#loc1184 = loc("<eval_with_key>.2":1515:15) | |
#loc1185 = loc("<eval_with_key>.2":1516:18) | |
#loc1186 = loc("<eval_with_key>.2":1517:21) | |
#loc1187 = loc("<eval_with_key>.2":1519:25) | |
#loc1188 = loc("<eval_with_key>.2":1520:21) | |
#loc1189 = loc("<eval_with_key>.2":1521:18) | |
#loc1190 = loc("<eval_with_key>.2":1522:22) | |
#loc1191 = loc("<eval_with_key>.2":1524:15) | |
#loc1192 = loc("<eval_with_key>.2":1525:13) | |
#loc1193 = loc("<eval_with_key>.2":1526:15) | |
#loc1194 = loc("<eval_with_key>.2":1527:17) | |
#loc1195 = loc("<eval_with_key>.2":1528:15) | |
#loc1196 = loc("<eval_with_key>.2":1529:15) | |
#loc1197 = loc("<eval_with_key>.2":1531:11) | |
#loc1198 = loc("<eval_with_key>.2":1532:15) | |
#loc1199 = loc("<eval_with_key>.2":1534:15) | |
#loc1200 = loc("<eval_with_key>.2":1535:15) | |
#loc1201 = loc("<eval_with_key>.2":1536:13) | |
#loc1202 = loc("<eval_with_key>.2":1539:27) | |
#loc1203 = loc("<eval_with_key>.2":1544:11) | |
#loc1204 = loc("<eval_with_key>.2":1545:15) | |
#loc1205 = loc("<eval_with_key>.2":1547:15) | |
#loc1206 = loc("<eval_with_key>.2":1548:15) | |
#loc1207 = loc("<eval_with_key>.2":1549:14) | |
#loc1208 = loc("<eval_with_key>.2":1551:11) | |
#loc1209 = loc("<eval_with_key>.2":1552:15) | |
#loc1210 = loc("<eval_with_key>.2":1554:15) | |
#loc1211 = loc("<eval_with_key>.2":1555:15) | |
#loc1212 = loc("<eval_with_key>.2":1556:13) | |
#loc1213 = loc("<eval_with_key>.2":1559:27) | |
#loc1214 = loc("<eval_with_key>.2":1564:13) | |
#loc1215 = loc("<eval_with_key>.2":1566:11) | |
#loc1216 = loc("<eval_with_key>.2":1568:15) | |
#loc1217 = loc("<eval_with_key>.2":1569:14) | |
#loc1218 = loc("<eval_with_key>.2":1571:11) | |
#loc1219 = loc("<eval_with_key>.2":1573:15) | |
#loc1220 = loc(callsite(#loc36 at #loc37)) | |
#loc1221 = loc(callsite(#loc89 at #loc90)) | |
#loc1222 = loc(callsite(#loc1220 at #loc38)) | |
#loc1223 = loc(callsite(#loc1220 at #loc40)) | |
#loc1224 = loc(callsite(#loc1220 at #loc67)) | |
#loc1225 = loc(callsite(#loc1220 at #loc69)) | |
#loc1226 = loc(callsite(#loc1221 at #loc91)) | |
#loc1227 = loc(callsite(#loc1220 at #loc128)) | |
#loc1228 = loc(callsite(#loc1220 at #loc131)) |