This file has been truncated.
#loc = loc(unknown)
module attributes {torch.debug_module_name = "Transformer"} {
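// Embedding.forward: a plain token-embedding lookup. The aten.embedding call
// below corresponds to embedding(weight, tokens, padding_idx=-1,
// scale_grad_by_freq=false, sparse=false), mapping a [1,1] si64 token id to a
// [1,1,512] f32 activation.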
func.func private @__torch__.torch.nn.modules.sparse.Embedding.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.sparse.Embedding"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1],si64> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.sparse.Embedding"> -> !torch.tensor loc(#loc)
%int-1 = torch.constant.int -1 loc(#loc1)
%false = torch.constant.bool false loc(#loc1)
%false_0 = torch.constant.bool false loc(#loc1)
%187 = torch.aten.embedding %186, %185, %int-1, %false, %false_0 : !torch.tensor, !torch.tensor<[1,1],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.tensor<[1,1,512],f32> loc(#loc1)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc1)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
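// RMSNorm.forward: root-mean-square layer norm. A sketch of roughly
// equivalent PyTorch, with eps = 1e-5 read off the literal below:
//   h = x.to(torch.float32)
//   out = h * torch.rsqrt(h.pow(2).mean(-1, keepdim=True) + 1e-5)
//   return out.type_as(h) * weight, h
// Note the function returns both the normalized tensor and the f32-cast
// input; callers below reuse the second element as the residual branch.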
func.func private @__torch__.model.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
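// The functions below are bias-free linear projections, all lowered to
// aten.linear(x, weight, None). Judging by the GetAttr lines in
// TransformerBlock.forward further down: the unmangled Linear and
// ___torch_mangle_0/1/2 are the 512 -> 512 attention projections
// (wq, wk, wv, wo respectively); ___torch_mangle_3 and ___torch_mangle_5 are
// the 512 -> 1536 feed-forward up-projections (w1, w3); and
// ___torch_mangle_4 is the 1536 -> 512 down-projection (w2). Interleaved
// among them, ___torch_mangle_6.RMSNorm repeats the RMSNorm body above for
// the ffn_norm submodule.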
func.func private @__torch__.torch.nn.modules.linear.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_0.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_0.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_0.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_1.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_1.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_1.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_2.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_2.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_2.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_6.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_6.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_6.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_3.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_3.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_3.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_5.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_5.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_5.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_4.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_4.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_4.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
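// TransformerBlock.forward: one decoder layer, apparently traced for a
// single-token step at start_pos = 1 (the slice bounds below are baked in).
// The flow is:
//   1. attention_norm (RMSNorm), then wq/wk/wv project the [1,1,512] input
//      and each result is reshaped to [1,1,8,64] (8 heads, head_dim 64);
//   2. rotary position embedding: the last dim is reinterpreted as complex
//      pairs (aten.view_as_complex), multiplied by the precomputed
//      frequencies in %arg2, and flattened back (aten.view_as_real);
//   3. the rotated k and the v are copied into slices of two elided
//      [32,1024,8,64] literals -- presumably cache_k/cache_v buffers with
//      batch capacity 32 and sequence capacity 1024;
//   4. scaled dot-product attention: scores = q @ k^T / 8.0 (= sqrt(64)),
//      softmax over the 2 cached positions, then @ v and the wo projection;
//   5. residual add, ffn_norm, a SwiGLU feed-forward
//      (w2(silu(w1(x)) * w3(x))), and a second residual add.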
func.func private @__torch__.model.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc)
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.FeedForward"> loc(#loc)
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_4.Linear"> loc(#loc)
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.FeedForward"> loc(#loc)
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_5.Linear"> loc(#loc)
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.FeedForward"> loc(#loc)
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_3.Linear"> loc(#loc)
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_6.RMSNorm"> loc(#loc)
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.Attention"> loc(#loc)
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_2.Linear"> loc(#loc)
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.Attention"> loc(#loc)
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_1.Linear"> loc(#loc)
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.Attention"> loc(#loc)
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_0.Linear"> loc(#loc)
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.Attention"> loc(#loc)
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.Linear"> loc(#loc)
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.RMSNorm"> loc(#loc)
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%int0 = torch.constant.int 0 loc(#loc6)
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1 = torch.constant.int 1 loc(#loc6)
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_0.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_1.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int8 = torch.constant.int 8 loc(#loc7)
%int64 = torch.constant.int 64 loc(#loc7)
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7)
%int8_0 = torch.constant.int 8 loc(#loc8)
%int64_1 = torch.constant.int 64 loc(#loc8)
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8)
%int8_2 = torch.constant.int 8 loc(#loc9)
%int64_3 = torch.constant.int 64 loc(#loc9)
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9)
%int6 = torch.constant.int 6 loc(#loc10)
%false = torch.constant.bool false loc(#loc10)
%false_4 = torch.constant.bool false loc(#loc10)
%none_5 = torch.constant.none loc(#loc)
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10)
%int0_6 = torch.constant.int 0 loc(#loc10)
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_7 = torch.constant.int 1 loc(#loc10)
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2 = torch.constant.int 2 loc(#loc10)
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1 = torch.constant.int -1 loc(#loc10)
%int2_8 = torch.constant.int 2 loc(#loc10)
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10)
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10)
%int6_9 = torch.constant.int 6 loc(#loc11)
%false_10 = torch.constant.bool false loc(#loc11)
%false_11 = torch.constant.bool false loc(#loc11)
%none_12 = torch.constant.none loc(#loc)
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11)
%int0_13 = torch.constant.int 0 loc(#loc11)
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_14 = torch.constant.int 1 loc(#loc11)
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2_15 = torch.constant.int 2 loc(#loc11)
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1_16 = torch.constant.int -1 loc(#loc11)
%int2_17 = torch.constant.int 2 loc(#loc11)
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11)
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11)
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12)
%int1_18 = torch.constant.int 1 loc(#loc13)
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int3 = torch.constant.int 3 loc(#loc13)
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_19 = torch.constant.int 1 loc(#loc14)
%int1_20 = torch.constant.int 1 loc(#loc14)
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14)
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15)
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15)
%int3_21 = torch.constant.int 3 loc(#loc15)
%int-1_22 = torch.constant.int -1 loc(#loc15)
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15)
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16)
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16)
%int3_23 = torch.constant.int 3 loc(#loc16)
%int-1_24 = torch.constant.int -1 loc(#loc16)
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16)
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%cpu = torch.constant.device "cpu" loc(#loc18)
%int6_25 = torch.constant.int 6 loc(#loc18)
%false_26 = torch.constant.bool false loc(#loc18)
%false_27 = torch.constant.bool false loc(#loc18)
%none_28 = torch.constant.none loc(#loc)
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
%cpu_29 = torch.constant.device "cpu" loc(#loc19)
%int6_30 = torch.constant.int 6 loc(#loc19)
%false_31 = torch.constant.bool false loc(#loc19)
%false_32 = torch.constant.bool false loc(#loc19)
%none_33 = torch.constant.none loc(#loc)
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20)
%int1_34 = torch.constant.int 1 loc(#loc20)
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20)
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_35 = torch.constant.int 0 loc(#loc20)
%int0_36 = torch.constant.int 0 loc(#loc20)
%int1_37 = torch.constant.int 1 loc(#loc20)
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20)
%int1_38 = torch.constant.int 1 loc(#loc20)
%int1_39 = torch.constant.int 1 loc(#loc20)
%int1_40 = torch.constant.int 1 loc(#loc20)
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%false_41 = torch.constant.bool false loc(#loc)
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21)
%int1_42 = torch.constant.int 1 loc(#loc21)
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21)
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_43 = torch.constant.int 0 loc(#loc21)
%int0_44 = torch.constant.int 0 loc(#loc21)
%int1_45 = torch.constant.int 1 loc(#loc21)
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21)
%int1_46 = torch.constant.int 1 loc(#loc21)
%int1_47 = torch.constant.int 1 loc(#loc21)
%int1_48 = torch.constant.int 1 loc(#loc21)
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
%false_49 = torch.constant.bool false loc(#loc)
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22)
%int1_50 = torch.constant.int 1 loc(#loc22)
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22)
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_51 = torch.constant.int 0 loc(#loc22)
%int0_52 = torch.constant.int 0 loc(#loc22)
%int1_53 = torch.constant.int 1 loc(#loc22)
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22)
%int1_54 = torch.constant.int 1 loc(#loc22)
%int0_55 = torch.constant.int 0 loc(#loc22)
%int1_56 = torch.constant.int 1 loc(#loc22)
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22)
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23)
%int1_57 = torch.constant.int 1 loc(#loc23)
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23)
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_58 = torch.constant.int 0 loc(#loc23)
%int0_59 = torch.constant.int 0 loc(#loc23)
%int1_60 = torch.constant.int 1 loc(#loc23)
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23)
%int1_61 = torch.constant.int 1 loc(#loc23)
%int0_62 = torch.constant.int 0 loc(#loc23)
%int1_63 = torch.constant.int 1 loc(#loc23)
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23)
%int1_64 = torch.constant.int 1 loc(#loc24)
%int2_65 = torch.constant.int 2 loc(#loc24)
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24)
%int1_66 = torch.constant.int 1 loc(#loc25)
%int2_67 = torch.constant.int 2 loc(#loc25)
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25)
%int1_68 = torch.constant.int 1 loc(#loc26)
%int2_69 = torch.constant.int 2 loc(#loc26)
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26)
%int2_70 = torch.constant.int 2 loc(#loc27)
%int3_71 = torch.constant.int 3 loc(#loc27)
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27)
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27)
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
%int6_72 = torch.constant.int 6 loc(#loc28)
%false_73 = torch.constant.bool false loc(#loc28)
%false_74 = torch.constant.bool false loc(#loc28)
%none_75 = torch.constant.none loc(#loc)
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%int-1_76 = torch.constant.int -1 loc(#loc29)
%none_77 = torch.constant.none loc(#loc)
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29)
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30)
%int1_78 = torch.constant.int 1 loc(#loc31)
%int2_79 = torch.constant.int 2 loc(#loc31)
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31)
%int0_80 = torch.constant.int 0 loc(#loc32)
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32)
%int-1_81 = torch.constant.int -1 loc(#loc32)
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32)
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32)
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_2.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int1_82 = torch.constant.int 1 loc(#loc33)
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33)
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33)
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_6.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_3.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34)
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_5.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35)
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35)
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_4.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int1_83 = torch.constant.int 1 loc(#loc36)
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36)
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36)
return %333 : !torch.tensor loc(#loc)
} loc(#loc)
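// From here on the dump re-emits the same RMSNorm/Linear/TransformerBlock
// bodies under fresh mangled names (___torch_mangle_7 and up), apparently one
// set per remaining decoder layer; only the module names differ. The gist is
// truncated partway through the second TransformerBlock body.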
func.func private @__torch__.model.___torch_mangle_17.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_17.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_17.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_7.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_7.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_7.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_8.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_8.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_8.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_9.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_9.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_9.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_10.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_10.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_10.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_18.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_18.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_18.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_13.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_13.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_13.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_15.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_15.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_15.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_14.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_14.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_14.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_19.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc)
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_16.FeedForward"> loc(#loc)
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_16.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_14.Linear"> loc(#loc)
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_16.FeedForward"> loc(#loc)
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_16.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_15.Linear"> loc(#loc)
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_16.FeedForward"> loc(#loc)
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_16.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_13.Linear"> loc(#loc)
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_18.RMSNorm"> loc(#loc)
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_12.Attention"> loc(#loc)
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_12.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_10.Linear"> loc(#loc)
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_12.Attention"> loc(#loc)
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_12.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_9.Linear"> loc(#loc)
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_12.Attention"> loc(#loc)
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_12.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_8.Linear"> loc(#loc)
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_12.Attention"> loc(#loc)
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_12.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_7.Linear"> loc(#loc)
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_17.RMSNorm"> loc(#loc)
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_17.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%int0 = torch.constant.int 0 loc(#loc6)
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1 = torch.constant.int 1 loc(#loc6)
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_7.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_8.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_9.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int8 = torch.constant.int 8 loc(#loc7)
%int64 = torch.constant.int 64 loc(#loc7)
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7)
%int8_0 = torch.constant.int 8 loc(#loc8)
%int64_1 = torch.constant.int 64 loc(#loc8)
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8)
%int8_2 = torch.constant.int 8 loc(#loc9)
%int64_3 = torch.constant.int 64 loc(#loc9)
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9)
%int6 = torch.constant.int 6 loc(#loc10)
%false = torch.constant.bool false loc(#loc10)
%false_4 = torch.constant.bool false loc(#loc10)
%none_5 = torch.constant.none loc(#loc)
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10)
%int0_6 = torch.constant.int 0 loc(#loc10)
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_7 = torch.constant.int 1 loc(#loc10)
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2 = torch.constant.int 2 loc(#loc10)
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1 = torch.constant.int -1 loc(#loc10)
%int2_8 = torch.constant.int 2 loc(#loc10)
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10)
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10)
%int6_9 = torch.constant.int 6 loc(#loc11)
%false_10 = torch.constant.bool false loc(#loc11)
%false_11 = torch.constant.bool false loc(#loc11)
%none_12 = torch.constant.none loc(#loc)
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11)
%int0_13 = torch.constant.int 0 loc(#loc11)
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_14 = torch.constant.int 1 loc(#loc11)
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2_15 = torch.constant.int 2 loc(#loc11)
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1_16 = torch.constant.int -1 loc(#loc11)
%int2_17 = torch.constant.int 2 loc(#loc11)
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
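// %186 ([1,32,2]) is apparently the precomputed rotary frequency table (freqs_cis | |
// in the reference Llama code); it is likewise viewed as complex and broadcast- | |
// reshaped to [1,1,1,32] so one frequency vector multiplies every batch and head. | |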
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
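// The complex multiplies below rotate each (real, imag) pair of Q and K by the | |
// frequencies; aten.view_as_real plus flatten(3, -1) restores the [1,1,8,64] | |
// layout, and type_as casts back to the original activation dtype. | |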
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
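// The two elided [32,1024,8,64] constants are most plausibly the attention layer's | |
// key/value caches, traced into the graph as constants of shape | |
// [max_batch=32, max_seq_len=1024, n_heads=8, head_dim=64] and pinned to cpu. | |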
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
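// Both caches have now been updated in place: the rotated K (%276) and the V | |
// (%233) were copy_'d into the dim-1 slice [1, 2) of each cache, i.e. start_pos = 1 | |
// and seqlen = 1 were baked in when this graph was traced. | |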
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
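// Keys (%297) and values (%302) now cover cache positions [0, 2); the transposes | |
// below move heads to dim 1 ([bsz, n_heads, seq, head_dim]) for batched matmul. | |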
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
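// Scaled dot-product attention: scores = Q @ K^T / sqrt(head_dim=64), i.e. the | |
// division by 8.0 below, over the two cached positions. | |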
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
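// Softmax over the cached positions is taken in f32; type_as then restores the | |
// query dtype before the value matmul. | |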
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
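// The weighted values ([1,8,1,64]) are transposed back, made contiguous, and | |
// flattened to [bsz, seqlen, 512] for the output projection wo (%195). | |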
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_10.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
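// First residual connection: %205#1 is the f32-cast block input returned by | |
// attention_norm, so the add below computes h = x + attention(norm(x)). | |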
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_18.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
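// Feed-forward (SwiGLU): out = w2(silu(w1(x)) * w3(x)) with hidden width 1536, | |
// followed by the second residual add against the f32 input from ffn_norm. | |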
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_13.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_15.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_14.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
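// RMSNorm.forward: computed in f32 as y = weight * x * rsqrt(mean(x^2, dim=-1) + 1e-5); | |
// it returns the tuple (y, x_f32) so callers can reuse the f32 input as the | |
// residual branch. | |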
func.func private @__torch__.model.___torch_mangle_30.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_30.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_30.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
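// The mangled Linear.forward variants below are all bias-free (aten.linear with a | |
// none bias); TorchScript emits one copy per module instance (wq/wk/wv/wo and the | |
// feed-forward w1/w2/w3). | |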
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_20.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_20.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_20.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_21.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_21.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_21.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_22.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_22.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_22.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_23.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_23.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_23.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_31.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_31.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_31.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_26.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_26.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_26.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_28.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_28.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_28.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_27.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_27.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_27.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
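// TransformerBlock.forward for the next layer (mangle_32): structurally identical | |
// to the block above -- attention_norm, wq/wk/wv projections, rotary embedding, | |
// KV-cache update, scaled dot-product attention, wo, then ffn_norm and the SwiGLU | |
// feed-forward, each wrapped in a residual add. | |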
func.func private @__torch__.model.___torch_mangle_32.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_29.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_29.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_27.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_29.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_29.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_28.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_29.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_29.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_26.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_31.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_25.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_25.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_23.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_25.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_25.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_22.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_25.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_25.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_21.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_25.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_25.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_20.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_30.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_30.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_20.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_21.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_22.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_23.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_31.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_26.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_28.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_27.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
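// The remaining functions are the next layer's copies of the same RMSNorm and | |
// bias-free Linear forwards (mangle_43, mangle_33, mangle_34, mangle_35, ...). | |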
func.func private @__torch__.model.___torch_mangle_43.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_43.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_43.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
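// The mangled Linear.forward specializations below are all bias-free
// projections: aten.linear with a none bias. A sketch, assuming standard
// torch semantics:
//
//   y = torch.nn.functional.linear(x, self.weight, bias=None)   # y = x @ W.T
//
// mangle_33..36 are the 512 -> 512 attention projections (wq, wk, wv, wo),
// as the GetAttr chains in TransformerBlock.forward below show.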
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_33.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_33.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_33.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_34.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_34.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_34.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_35.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_35.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_35.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_36.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_36.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_36.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_44.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_44.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_44.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
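// The next three Linear specializations are the feed-forward projections of
// one block: w1 (mangle_39) and w3 (mangle_41) map 512 -> 1536, while w2
// (mangle_40) maps 1536 -> 512, matching the SwiGLU pattern
// w2(silu(w1(x)) * w3(x)) used in TransformerBlock.forward below.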
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_39.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_39.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_39.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_41.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_41.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_41.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_40.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_40.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_40.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
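// TransformerBlock.forward below is one pre-norm decoder layer traced for a
// single-token input [1,1,512]; the Attention body is inlined rather than
// called. A condensed PyTorch sketch of the data flow (module and variable
// names are assumptions based on the attribute names visible in the IR):
//
//   h_norm, h_f32 = self.attention_norm(x)        # RMSNorm returns (out, x.float())
//   h = h_f32 + attention(h_norm, freqs_cis)      # residual, attention inlined
//   o_norm, o_f32 = self.ffn_norm(h)
//   out = o_f32 + self.feed_forward.w2(
//       F.silu(self.feed_forward.w1(o_norm)) * self.feed_forward.w3(o_norm))
//   return out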
func.func private @__torch__.model.___torch_mangle_45.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc)
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_42.FeedForward"> loc(#loc)
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_42.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_40.Linear"> loc(#loc)
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_42.FeedForward"> loc(#loc)
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_42.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_41.Linear"> loc(#loc)
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_42.FeedForward"> loc(#loc)
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_42.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_39.Linear"> loc(#loc)
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_44.RMSNorm"> loc(#loc)
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_38.Attention"> loc(#loc)
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_38.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_36.Linear"> loc(#loc)
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_38.Attention"> loc(#loc)
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_38.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_35.Linear"> loc(#loc)
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_38.Attention"> loc(#loc)
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_38.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_34.Linear"> loc(#loc)
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_38.Attention"> loc(#loc)
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_38.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_33.Linear"> loc(#loc)
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_43.RMSNorm"> loc(#loc)
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_43.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%int0 = torch.constant.int 0 loc(#loc6)
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1 = torch.constant.int 1 loc(#loc6)
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_33.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_34.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_35.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int8 = torch.constant.int 8 loc(#loc7)
%int64 = torch.constant.int 64 loc(#loc7)
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7)
%int8_0 = torch.constant.int 8 loc(#loc8)
%int64_1 = torch.constant.int 64 loc(#loc8)
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8)
%int8_2 = torch.constant.int 8 loc(#loc9)
%int64_3 = torch.constant.int 64 loc(#loc9)
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9)
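// Rotary position embedding: the head dimension (64) of xq and xk is reshaped
// into 32 (real, imag) pairs, viewed as complex, multiplied elementwise by the
// precomputed frequencies %186 (freqs_cis, broadcast to [1,1,1,32]), and
// flattened back to real. Roughly, a sketch assuming the usual llama-style
// apply_rotary_emb:
//
//   xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
//   xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
//   f   = torch.view_as_complex(freqs_cis).view(1, seqlen, 1, -1)
//   xq  = torch.view_as_real(xq_ * f).flatten(3).type_as(xq)
//   xk  = torch.view_as_real(xk_ * f).flatten(3).type_as(xk)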
%int6 = torch.constant.int 6 loc(#loc10)
%false = torch.constant.bool false loc(#loc10)
%false_4 = torch.constant.bool false loc(#loc10)
%none_5 = torch.constant.none loc(#loc)
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10)
%int0_6 = torch.constant.int 0 loc(#loc10)
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_7 = torch.constant.int 1 loc(#loc10)
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2 = torch.constant.int 2 loc(#loc10)
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1 = torch.constant.int -1 loc(#loc10)
%int2_8 = torch.constant.int 2 loc(#loc10)
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10)
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10)
%int6_9 = torch.constant.int 6 loc(#loc11)
%false_10 = torch.constant.bool false loc(#loc11)
%false_11 = torch.constant.bool false loc(#loc11)
%none_12 = torch.constant.none loc(#loc)
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11)
%int0_13 = torch.constant.int 0 loc(#loc11)
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_14 = torch.constant.int 1 loc(#loc11)
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2_15 = torch.constant.int 2 loc(#loc11)
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1_16 = torch.constant.int -1 loc(#loc11)
%int2_17 = torch.constant.int 2 loc(#loc11)
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11)
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11)
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12)
%int1_18 = torch.constant.int 1 loc(#loc13)
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int3 = torch.constant.int 3 loc(#loc13)
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_19 = torch.constant.int 1 loc(#loc14)
%int1_20 = torch.constant.int 1 loc(#loc14)
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14)
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15)
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15)
%int3_21 = torch.constant.int 3 loc(#loc15)
%int-1_22 = torch.constant.int -1 loc(#loc15)
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15)
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16)
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16)
%int3_23 = torch.constant.int 3 loc(#loc16)
%int-1_24 = torch.constant.int -1 loc(#loc16)
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16)
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%cpu = torch.constant.device "cpu" loc(#loc18)
%int6_25 = torch.constant.int 6 loc(#loc18)
%false_26 = torch.constant.bool false loc(#loc18)
%false_27 = torch.constant.bool false loc(#loc18)
%none_28 = torch.constant.none loc(#loc)
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
%cpu_29 = torch.constant.device "cpu" loc(#loc19)
%int6_30 = torch.constant.int 6 loc(#loc19)
%false_31 = torch.constant.bool false loc(#loc19)
%false_32 = torch.constant.bool false loc(#loc19)
%none_33 = torch.constant.none loc(#loc)
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
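// KV cache update: %278 and %280 are the key/value caches, materialized as
// elided [32,1024,8,64] constants (max_batch = 32, max_seq_len = 1024). The
// new key/value for this position are copied in and the prefix is sliced back
// out; in this trace start_pos = 1 and seqlen = 1 are baked in, so the writes
// land at [:, 1:2] and the reads cover [:, 0:2]. Roughly (names assumed from
// the usual llama-style Attention):
//
//   self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
//   self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
//   keys   = self.cache_k[:bsz, : start_pos + seqlen]
//   values = self.cache_v[:bsz, : start_pos + seqlen]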
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20)
%int1_34 = torch.constant.int 1 loc(#loc20)
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20)
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_35 = torch.constant.int 0 loc(#loc20)
%int0_36 = torch.constant.int 0 loc(#loc20)
%int1_37 = torch.constant.int 1 loc(#loc20)
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20)
%int1_38 = torch.constant.int 1 loc(#loc20)
%int1_39 = torch.constant.int 1 loc(#loc20)
%int1_40 = torch.constant.int 1 loc(#loc20)
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%false_41 = torch.constant.bool false loc(#loc)
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21)
%int1_42 = torch.constant.int 1 loc(#loc21)
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21)
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_43 = torch.constant.int 0 loc(#loc21)
%int0_44 = torch.constant.int 0 loc(#loc21)
%int1_45 = torch.constant.int 1 loc(#loc21)
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21)
%int1_46 = torch.constant.int 1 loc(#loc21)
%int1_47 = torch.constant.int 1 loc(#loc21)
%int1_48 = torch.constant.int 1 loc(#loc21)
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
%false_49 = torch.constant.bool false loc(#loc)
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22)
%int1_50 = torch.constant.int 1 loc(#loc22)
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22)
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_51 = torch.constant.int 0 loc(#loc22)
%int0_52 = torch.constant.int 0 loc(#loc22)
%int1_53 = torch.constant.int 1 loc(#loc22)
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22)
%int1_54 = torch.constant.int 1 loc(#loc22)
%int0_55 = torch.constant.int 0 loc(#loc22)
%int1_56 = torch.constant.int 1 loc(#loc22)
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22)
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23)
%int1_57 = torch.constant.int 1 loc(#loc23)
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23)
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_58 = torch.constant.int 0 loc(#loc23)
%int0_59 = torch.constant.int 0 loc(#loc23)
%int1_60 = torch.constant.int 1 loc(#loc23)
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23)
%int1_61 = torch.constant.int 1 loc(#loc23)
%int0_62 = torch.constant.int 0 loc(#loc23)
%int1_63 = torch.constant.int 1 loc(#loc23)
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23)
%int1_64 = torch.constant.int 1 loc(#loc24)
%int2_65 = torch.constant.int 2 loc(#loc24)
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24)
%int1_66 = torch.constant.int 1 loc(#loc25)
%int2_67 = torch.constant.int 2 loc(#loc25)
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25)
%int1_68 = torch.constant.int 1 loc(#loc26)
%int2_69 = torch.constant.int 2 loc(#loc26)
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26)
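// Scaled dot-product attention over the cached sequence: with head_dim = 64
// the scores are divided by sqrt(64) = 8 (the dense<8.0> literal below),
// softmax'd over the key axis, then applied to the values; no causal mask is
// needed in this trace since the single query token may attend to every
// cached position. Roughly:
//
//   scores = (xq @ keys.transpose(2, 3)) / math.sqrt(64)    # [1,8,1,2]
//   scores = F.softmax(scores.float(), dim=-1).type_as(xq)
//   out    = (scores @ values).transpose(1, 2).reshape(bsz, seqlen, -1)
//   return self.wo(out)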
%int2_70 = torch.constant.int 2 loc(#loc27)
%int3_71 = torch.constant.int 3 loc(#loc27)
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27)
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27)
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
%int6_72 = torch.constant.int 6 loc(#loc28)
%false_73 = torch.constant.bool false loc(#loc28)
%false_74 = torch.constant.bool false loc(#loc28)
%none_75 = torch.constant.none loc(#loc)
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%int-1_76 = torch.constant.int -1 loc(#loc29)
%none_77 = torch.constant.none loc(#loc)
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29)
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30)
%int1_78 = torch.constant.int 1 loc(#loc31)
%int2_79 = torch.constant.int 2 loc(#loc31)
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31)
%int0_80 = torch.constant.int 0 loc(#loc32)
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32)
%int-1_81 = torch.constant.int -1 loc(#loc32)
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32)
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32)
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_36.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int1_82 = torch.constant.int 1 loc(#loc33)
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33)
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33)
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_44.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_39.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34)
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_41.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35)
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35)
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_40.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int1_83 = torch.constant.int 1 loc(#loc36)
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36)
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36)
return %333 : !torch.tensor loc(#loc)
} loc(#loc)
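// The functions that follow (___torch_mangle_56/57.RMSNorm, the
// ___torch_mangle_46..49 and _52..54 Linear projections, and the
// ___torch_mangle_58.TransformerBlock) are the same specializations again,
// re-mangled for the next decoder layer; only the attribute values
// (weights, caches) differ.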
func.func private @__torch__.model.___torch_mangle_56.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_56.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_56.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_46.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_46.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_46.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_47.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_47.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_47.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_48.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_48.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_48.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_49.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_49.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_49.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_57.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_57.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_57.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_52.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_52.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_52.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_54.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_54.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_54.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_53.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_53.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_53.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_58.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc)
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_55.FeedForward"> loc(#loc)
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_55.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_53.Linear"> loc(#loc)
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_55.FeedForward"> loc(#loc)
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_55.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_54.Linear"> loc(#loc)
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_55.FeedForward"> loc(#loc)
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_55.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_52.Linear"> loc(#loc)
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_57.RMSNorm"> loc(#loc)
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_51.Attention"> loc(#loc)
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_51.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_49.Linear"> loc(#loc)
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_51.Attention"> loc(#loc)
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_51.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_48.Linear"> loc(#loc)
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_51.Attention"> loc(#loc)
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_51.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_47.Linear"> loc(#loc)
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_51.Attention"> loc(#loc)
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_51.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_46.Linear"> loc(#loc)
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_56.RMSNorm"> loc(#loc)
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_56.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%int0 = torch.constant.int 0 loc(#loc6)
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1 = torch.constant.int 1 loc(#loc6)
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_46.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_47.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_48.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int8 = torch.constant.int 8 loc(#loc7)
%int64 = torch.constant.int 64 loc(#loc7)
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7)
%int8_0 = torch.constant.int 8 loc(#loc8)
%int64_1 = torch.constant.int 64 loc(#loc8)
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8)
%int8_2 = torch.constant.int 8 loc(#loc9)
%int64_3 = torch.constant.int 64 loc(#loc9)
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9)
%int6 = torch.constant.int 6 loc(#loc10)
%false = torch.constant.bool false loc(#loc10)
%false_4 = torch.constant.bool false loc(#loc10)
%none_5 = torch.constant.none loc(#loc)
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10)
%int0_6 = torch.constant.int 0 loc(#loc10)
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_7 = torch.constant.int 1 loc(#loc10)
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2 = torch.constant.int 2 loc(#loc10)
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1 = torch.constant.int -1 loc(#loc10)
%int2_8 = torch.constant.int 2 loc(#loc10)
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10)
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10)
%int6_9 = torch.constant.int 6 loc(#loc11)
%false_10 = torch.constant.bool false loc(#loc11)
%false_11 = torch.constant.bool false loc(#loc11)
%none_12 = torch.constant.none loc(#loc)
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11)
%int0_13 = torch.constant.int 0 loc(#loc11)
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_14 = torch.constant.int 1 loc(#loc11)
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2_15 = torch.constant.int 2 loc(#loc11)
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1_16 = torch.constant.int -1 loc(#loc11)
%int2_17 = torch.constant.int 2 loc(#loc11)
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11)
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11)
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12)
%int1_18 = torch.constant.int 1 loc(#loc13)
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int3 = torch.constant.int 3 loc(#loc13)
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_19 = torch.constant.int 1 loc(#loc14)
%int1_20 = torch.constant.int 1 loc(#loc14)
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14)
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15)
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15)
%int3_21 = torch.constant.int 3 loc(#loc15)
%int-1_22 = torch.constant.int -1 loc(#loc15)
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15)
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16)
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
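// KV cache update. cache_k / cache_v are elided [32,1024,8,64] dense resources
// moved to "cpu"; the write slices [0:bsz, 1:seqlen+1] are overwritten in place
// via copy_ with the rotated k (%276) and raw v (%233), and the read slices
// [0:bsz, 0:seqlen+1] ([1,2,8,64]) become the keys/values for attention. The
// start position appears to have been traced as the constant 1.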
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
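// Scaled dot-product attention over the two cached positions:
//   scores = q @ k^T / sqrt(head_dim), head_dim = 64, hence the 8.0 literal;
//   out    = softmax(scores, dim=-1) @ v  ->  [1,8,1,64],
// then transposed back and flattened to [1,1,512].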
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
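// Output projection and first residual: h = x_fp32 + wo(attn_out), where x_fp32
// is the norm's fp32 passthrough input (%205#1).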
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_49.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
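// ffn_norm plus SwiGLU feed-forward and second residual:
//   out = h_fp32 + w2(silu(w1(m)) * w3(m)), hidden dim 1536,
// with h_fp32 = %323#1, the fp32 passthrough of ffn_norm.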
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_57.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_52.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_54.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_53.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
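// Per-instance RMSNorm copy (TorchScript mangles one class per module instance);
// this one is the attention_norm of the ___torch_mangle_71 block below. It computes
//   y = x * rsqrt(mean(x^2, dim=-1, keepdim=True) + 1e-5) * weight
// in fp32 and returns (y, x_fp32); callers use x_fp32 for the residual adds.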
func.func private @__torch__.model.___torch_mangle_69.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_69.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_69.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
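// ___torch_mangle_59..62 are the wq/wk/wv/wo projections of the block below
// (see its GetAttrs): identical bias-free 512 -> 512 linears.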
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_59.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_59.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_59.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_60.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_60.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_60.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_61.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_61.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_61.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_62.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_62.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_62.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
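// ffn_norm copy for the same block; the body is identical to ___torch_mangle_69 above.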
func.func private @__torch__.model.___torch_mangle_70.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_70.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_70.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
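// FeedForward projections for the block below: w1 (mangle_65, 512 -> 1536),
// w3 (mangle_67, 512 -> 1536) and w2 (mangle_66, 1536 -> 512), all bias-free.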
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_65.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_65.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_65.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_67.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_67.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_67.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_66.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_66.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_66.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
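// TransformerBlock.forward. A minimal PyTorch-style sketch of what this traced
// body computes (names taken from the GetAttrs below; not part of the IR):
//   n, x32 = attention_norm(x)                 -- RMSNorm returns (normed, fp32 input)
//   h      = x32 + wo(attention(n, freqs_cis)) -- QKV, rotary, KV cache, SDPA
//   m, h32 = ffn_norm(h)
//   out    = h32 + w2(silu(w1(m)) * w3(m))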
func.func private @__torch__.model.___torch_mangle_71.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_68.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_68.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_66.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_68.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_68.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_67.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_68.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_68.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_65.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_70.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_64.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_64.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_62.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_64.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_64.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_61.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_64.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_64.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_60.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_64.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_64.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_59.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_69.RMSNorm"> loc(#loc) | |
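// The remainder of this body repeats the attention/FFN sequence of the previous
// TransformerBlock verbatim; only the mangled callee names differ, so the phase
// comments above apply unchanged.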
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_69.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
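// %208..%215 and %218..%221 are redundant re-reads of bsz (dim 0) and seqlen
// (dim 1) from the same scalars %207/%217; the tracer emits one copy per use site.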
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_59.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_60.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_61.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_62.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_70.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_65.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_67.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_66.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
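// ___torch_mangle_82/83 below are presumably the attention_norm/ffn_norm pair of
// the next block, following the mangle_69/70 pattern; the bodies are identical.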
func.func private @__torch__.model.___torch_mangle_82.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_82.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_82.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_72.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_72.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_72.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_73.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_73.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_73.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_74.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_74.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_74.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_75.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_75.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_75.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
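// mangle_83.RMSNorm is the block's ffn_norm; the computation is identical to | |
// the RMSNorm above, returning (normalized * weight, input_as_f32). | |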
func.func private @__torch__.model.___torch_mangle_83.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_83.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_83.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
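// FeedForward projections (mangle_81.FeedForward): w1 = mangle_78 (512 -> 1536), | |
// w3 = mangle_80 (512 -> 1536), w2 = mangle_79 (1536 -> 512); combined later in | |
// the block as w2(silu(w1(h)) * w3(h)). | |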
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_78.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_78.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_78.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_80.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_80.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_80.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_79.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_79.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_79.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
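// TransformerBlock.forward (mangle_84): %arg1 is the [1,1,512] hidden state, | |
// %arg2 the [1,32,2] rotary-frequency tensor (treated below as 32 complex | |
// pairs). Execution order: attention_norm -> wq/wk/wv -> rotary embedding -> | |
// KV-cache update -> scaled dot-product attention -> wo -> residual add -> | |
// ffn_norm -> SwiGLU feed-forward -> residual add. | |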
func.func private @__torch__.model.___torch_mangle_84.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_81.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_81.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_79.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_81.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_81.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_80.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_81.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_81.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_78.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_83.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_77.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_77.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_75.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_77.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_77.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_74.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_77.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_77.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_73.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_77.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_77.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_72.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_82.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_82.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
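// The NumToTensor/Int.Tensor runs below are duplicated scalar reads left by | |
// the tracer: %208..%215 all hold the batch size (dim 0) and %218..%221 the | |
// sequence length (dim 1) of the normalized input. | |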
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_72.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_73.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_74.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
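// Rotary position embedding: q and k are cast to f32, reshaped to | |
// [1,1,8,32,2], reinterpreted as complex [1,1,8,32], multiplied by the | |
// broadcast frequencies, then viewed back as real and flattened to [1,1,8,64]. | |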
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
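// KV cache: cache_k / cache_v are elided [32,1024,8,64] f32 literals moved to | |
// cpu. Rotated keys (%276) and values (%233) are copy_'d into rows | |
// [0:bsz, 1:seqlen+1] (the constant start index 1 suggests start_pos was 1 | |
// when the model was traced), and keys/values are then read back as | |
// [0:bsz, 0:seqlen+1], i.e. [1,2,8,64] here. | |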
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
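// Scaled dot-product attention: scores = q @ k^T / sqrt(head_dim) with | |
// head_dim = 64 (hence the 8.0 divisor), softmax over the last dim in f32, | |
// then @ v; the [1,8,1,64] result is transposed back and flattened to | |
// [1,1,512] for the wo projection. | |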
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_75.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_83.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
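// SwiGLU feed-forward: w2(silu(w1(h)) * w3(h)), added to the post-attention | |
// residual stream (%323#1) to produce the block output. | |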
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_78.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_80.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_79.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
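// The functions below repeat the same per-layer pattern for the next block: | |
// two RMSNorms (mangle_95 attention_norm, mangle_96 ffn_norm), attention | |
// projections wq/wk/wv/wo (mangle_85..88), FeedForward w1/w3/w2 | |
// (mangle_91/93/92), and the block driver mangle_97. | |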
func.func private @__torch__.model.___torch_mangle_95.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_95.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_95.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_85.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_85.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_85.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_86.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_86.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_86.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_87.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_87.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_87.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_88.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_88.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_88.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_96.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_96.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_96.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_91.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_91.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_91.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_93.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_93.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_93.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_92.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_92.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_92.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
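// TransformerBlock.forward (mangle_97): same structure as mangle_84 above; | |
// only the per-layer module instances (weights) differ. | |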
func.func private @__torch__.model.___torch_mangle_97.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_94.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_94.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_92.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_94.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_94.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_93.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_94.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_94.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_91.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_96.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_90.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_90.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_88.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_90.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_90.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_87.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_90.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_90.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_86.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_90.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_90.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_85.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_95.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_95.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_85.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_86.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_87.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
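// Apply the rotation: complex-multiply by the broadcast factors, view the result as real
// [1,1,8,32,2], and flatten the last two dims back to the 64-wide head layout, first for
// the queries (%269..%271) and then for the keys (%272..%274).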
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
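// The two elided 32x1024x8x64 constants below appear to be this layer's KV caches
// (cache_k / cache_v, shaped [max_batch, max_seq_len, n_heads, head_dim]), pinned to CPU.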
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
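// Key-cache update: slice cache[:bsz, start_pos : start_pos + seqlen] and copy the rotated
// keys in-place; the slice start of 1 suggests the trace was captured with start_pos = 1.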
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
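// Value-cache update: the same in-place slice-and-copy, writing the raw value projection.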
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
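// Read back every cached position up to start_pos + seqlen, yielding [1,2,8,64] keys and
// values for the attention below.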
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
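// Scaled dot-product attention: move heads ahead of the sequence dim, then
// scores = q @ k^T / sqrt(head_dim), with sqrt(64) folded to the literal 8.0.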
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
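// Softmax over the key dimension, computed in float32 and cast back with type_as.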
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
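// Attention output: weights @ values, transpose the heads back, make the result contiguous,
// and flatten to the [1,1,512] model width.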
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
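// Output projection (the attention's wo Linear, here the mangled module in %195) followed by
// the residual add with the block input, which the attention RMSNorm returned as the second
// tuple element.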
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_88.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
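// Feed-forward half of the block: ffn_norm, then a SwiGLU MLP, w2(silu(w1(x)) * w3(x)) with
// hidden width 1536, and the second residual add.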
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_96.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_91.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_93.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_92.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
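// RMSNorm clone used as the model's final norm: y = weight * x / sqrt(mean(x^2, dim=-1) + 1e-5),
// computed in float32. Unlike the per-block norms it returns a single tensor, not a tuple.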
func.func private @__torch__.model.___torch_mangle_98.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_98.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_98.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.tensor_static_info_cast %196 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc4) | |
return %197 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
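// Output head: projects the last token's 512-dim hidden state through the "output" Linear;
// the output width observed in this trace is 1.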
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_99.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_99.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_99.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
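// Top-level Transformer.forward: fetch the output head, the final norm, the eight
// TransformerBlock entries of "layers" (indices 7..0), and tok_embeddings, then run the
// embedding, the layer stack, the final norm, and the output projection in sequence.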
func.func private @__torch__.model.Transformer.forward(%arg0: !torch.nn.Module<"__torch__.model.Transformer"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1],si64> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[],si64> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["output"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_99.Linear"> loc(#loc) | |
%188 = torch.prim.GetAttr %arg0["norm"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_98.RMSNorm"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["7"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["6"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> loc(#loc) | |
%194 = torch.prim.GetAttr %193["5"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock"> loc(#loc) | |
%195 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> loc(#loc) | |
%196 = torch.prim.GetAttr %195["4"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock"> loc(#loc) | |
%197 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> loc(#loc) | |
%198 = torch.prim.GetAttr %197["3"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock"> loc(#loc) | |
%199 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> loc(#loc) | |
%200 = torch.prim.GetAttr %199["2"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock"> loc(#loc) | |
%201 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> loc(#loc) | |
%202 = torch.prim.GetAttr %201["1"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock"> loc(#loc) | |
%203 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> loc(#loc) | |
%204 = torch.prim.GetAttr %203["0"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.ModuleList"> -> !torch.nn.Module<"__torch__.model.TransformerBlock"> loc(#loc) | |
%205 = torch.prim.GetAttr %arg0["tok_embeddings"] : !torch.nn.Module<"__torch__.model.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.sparse.Embedding"> loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc37) | |
%206 = torch.aten.size.int %185, %int1 : !torch.tensor<[1,1],si64>, !torch.int -> !torch.int loc(#loc37) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1],si64> to !torch.tensor loc(#loc) | |
%209 = torch.prim.CallMethod %205["forward"] (%208) : !torch.nn.Module<"__torch__.torch.nn.modules.sparse.Embedding">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
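// The elided 2048x32x2 constant appears to be the precomputed freqs_cis table; the slice
// below takes rows [1 : seqlen + 1], i.e. the rotation factors for the current positions,
// and the same [1,32,2] slice is passed to every layer.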
%210 = torch.tensor.literal(dense_resource<__elided__> : tensor<2048x32x2xf32>) : !torch.tensor<[2048,32,2],f32> loc(#loc38) | |
%int6 = torch.constant.int 6 loc(#loc38) | |
%int0 = torch.constant.int 0 loc(#loc38) | |
%cpu = torch.constant.device "cpu" loc(#loc38) | |
%none_0 = torch.constant.none loc(#loc) | |
%false = torch.constant.bool false loc(#loc38) | |
%false_1 = torch.constant.bool false loc(#loc38) | |
%none_2 = torch.constant.none loc(#loc) | |
%211 = torch.aten.to.dtype_layout %210, %int6, %int0, %cpu, %none_0, %false, %false_1, %none_2 : !torch.tensor<[2048,32,2],f32>, !torch.int, !torch.int, !torch.Device, !torch.none, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[2048,32,2],f32> loc(#loc38) | |
%212 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc39) | |
%int1_3 = torch.constant.int 1 loc(#loc39) | |
%213 = torch.aten.add.Tensor %207, %212, %int1_3 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc39) | |
%214 = torch.aten.Int.Tensor %213 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_4 = torch.constant.int 0 loc(#loc39) | |
%int1_5 = torch.constant.int 1 loc(#loc39) | |
%int1_6 = torch.constant.int 1 loc(#loc39) | |
%215 = torch.aten.slice.Tensor %211, %int0_4, %int1_5, %214, %int1_6 : !torch.tensor<[2048,32,2],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,32,2],f32> loc(#loc39) | |
%216 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%217 = torch.prim.CallMethod %204["forward"] (%209, %216) : !torch.nn.Module<"__torch__.model.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%218 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%219 = torch.prim.CallMethod %202["forward"] (%217, %218) : !torch.nn.Module<"__torch__.model.___torch_mangle_19.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%220 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%221 = torch.prim.CallMethod %200["forward"] (%219, %220) : !torch.nn.Module<"__torch__.model.___torch_mangle_32.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%222 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%223 = torch.prim.CallMethod %198["forward"] (%221, %222) : !torch.nn.Module<"__torch__.model.___torch_mangle_45.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%225 = torch.prim.CallMethod %196["forward"] (%223, %224) : !torch.nn.Module<"__torch__.model.___torch_mangle_58.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%227 = torch.prim.CallMethod %194["forward"] (%225, %226) : !torch.nn.Module<"__torch__.model.___torch_mangle_71.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%228 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%229 = torch.prim.CallMethod %192["forward"] (%227, %228) : !torch.nn.Module<"__torch__.model.___torch_mangle_84.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%230 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%231 = torch.prim.CallMethod %190["forward"] (%229, %230) : !torch.nn.Module<"__torch__.model.___torch_mangle_97.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
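// After the last block: final RMSNorm, select the last position h[:, -1, :], apply the
// output head, and cast the result to float32.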
%232 = torch.prim.CallMethod %188["forward"] (%231) : !torch.nn.Module<"__torch__.model.___torch_mangle_98.RMSNorm">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int0_7 = torch.constant.int 0 loc(#loc40) | |
%int0_8 = torch.constant.int 0 loc(#loc40) | |
%int9223372036854775807 = torch.constant.int 9223372036854775807 loc(#loc40) | |
%int1_9 = torch.constant.int 1 loc(#loc40) | |
%233 = torch.aten.slice.Tensor %232, %int0_7, %int0_8, %int9223372036854775807, %int1_9 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc40) | |
%int1_10 = torch.constant.int 1 loc(#loc40) | |
%int-1 = torch.constant.int -1 loc(#loc40) | |
%234 = torch.aten.select.int %233, %int1_10, %int-1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.int -> !torch.tensor<[1,512],f32> loc(#loc40) | |
%int1_11 = torch.constant.int 1 loc(#loc40) | |
%int0_12 = torch.constant.int 0 loc(#loc40) | |
%int9223372036854775807_13 = torch.constant.int 9223372036854775807 loc(#loc40) | |
%int1_14 = torch.constant.int 1 loc(#loc40) | |
%235 = torch.aten.slice.Tensor %234, %int1_11, %int0_12, %int9223372036854775807_13, %int1_14 : !torch.tensor<[1,512],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,512],f32> loc(#loc40) | |
%236 = torch.tensor_static_info_cast %235 : !torch.tensor<[1,512],f32> to !torch.tensor loc(#loc40) | |
%237 = torch.prim.CallMethod %187["forward"] (%236) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_99.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int6_15 = torch.constant.int 6 loc(#loc41) | |
%false_16 = torch.constant.bool false loc(#loc41) | |
%false_17 = torch.constant.bool false loc(#loc41) | |
%none_18 = torch.constant.none loc(#loc) | |
%238 = torch.aten.to.dtype %237, %int6_15, %false_16, %false_17, %none_18 : !torch.tensor, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1],f32> loc(#loc41) | |
%239 = torch.tensor_static_info_cast %238 : !torch.tensor<[1,1],f32> to !torch.tensor loc(#loc41) | |
return %239 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
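// The functions below are TorchScript-mangled per-layer clones; their bodies repeat the
// Embedding, RMSNorm, and Linear definitions already seen, differing only in shapes and
// mangled module names.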
func.func private @__torch__.torch.nn.modules.sparse.___torch_mangle_101.Embedding.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.sparse.___torch_mangle_101.Embedding"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1],si64> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.sparse.___torch_mangle_101.Embedding"> -> !torch.tensor loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc1) | |
%false = torch.constant.bool false loc(#loc1) | |
%false_0 = torch.constant.bool false loc(#loc1) | |
%187 = torch.aten.embedding %186, %185, %int-1, %false, %false_0 : !torch.tensor, !torch.tensor<[1,1],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.tensor<[1,1,512],f32> loc(#loc1) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc1) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_112.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_112.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_112.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_102.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_102.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_102.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_103.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_103.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_103.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_104.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_104.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_104.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_105.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_105.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_105.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_113.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_113.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_113.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_108.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_108.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_108.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_110.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_110.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_110.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_109.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_109.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_109.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
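// TransformerBlock clone (mangle_114): gather the feed_forward submodules (w1/w2/w3),
// ffn_norm, the attention projections (wq/wk/wv/wo), and attention_norm, then repeat the
// block body.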
func.func private @__torch__.model.___torch_mangle_114.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_111.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_111.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_109.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_111.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_111.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_110.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_111.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_111.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_108.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_113.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_107.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_107.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_105.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_107.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_107.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_104.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_107.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_107.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_103.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_107.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_107.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_102.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_112.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_112.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
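// The repeated NumToTensor/Int.Tensor pairs below are redundant scalar round-trips emitted
// by the tracer; they all carry dim 0 (batch) and dim 1 (seqlen) of the normed input.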
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_102.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_103.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_104.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
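// Reshape the Q, K, and V projections to [bsz, seqlen, n_heads = 8, head_dim = 64].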
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
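// From here on the layer repeats the rotary embedding, KV-cache update, and attention
// sequence exactly as annotated in the block above.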
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_105.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_113.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_108.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_110.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_109.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
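// RMSNorm.forward, one TorchScript-mangled clone per use site. Every clone
// lowers the same computation; a rough PyTorch equivalent (names from
// model.RMSNorm, eps taken from the 1.0e-05 literal in the body):
//   x32 = x.to(torch.float32)                                 # dtype 6 == f32
//   y   = x32 * torch.rsqrt(x32.pow(2).mean(-1, keepdim=True) + 1e-5)
//   return y.type_as(x32) * self.weight, x32
// The f32 input is returned alongside the normalized output so the caller
// can reuse it for the residual add.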
func.func private @__torch__.model.___torch_mangle_125.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_125.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_125.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
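// The four Linear.forward clones below are this layer's bias-free attention
// projections: wq (mangle_115), wk (mangle_116), wv (mangle_117) and
// wo (mangle_118), each 512 -> 512 (see the GetAttr wiring in
// ___torch_mangle_127.TransformerBlock.forward further down).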
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_115.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_115.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_115.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_116.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_116.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_116.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_117.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_117.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_117.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_118.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_118.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_118.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_126.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_126.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_126.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
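// Feed-forward projections for this layer: w1 (mangle_121, 512 -> 1536),
// w3 (mangle_123, 512 -> 1536) and w2 (mangle_122, 1536 -> 512), combined
// below as w2(silu(w1(x)) * w3(x)).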
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_121.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_121.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_121.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_123.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_123.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_123.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_122.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_122.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_122.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
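// TransformerBlock.forward for one layer. Overall dataflow, reading ahead
// (the residual adds use the f32 inputs returned by the two RMSNorms):
//   h   = x32 + wo(attention(rotary(wq(n), wk(n)), wv(n)))  with n = attention_norm(x)
//   out = h32 + w2(silu(w1(m)) * w3(m))                     with m = ffn_norm(h)
// The GetAttr chains immediately below only fetch the per-layer submodules.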
func.func private @__torch__.model.___torch_mangle_127.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_124.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_124.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_122.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_124.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_124.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_123.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_124.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_124.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_121.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_126.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_120.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_120.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_118.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_120.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_120.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_117.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_120.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_120.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_116.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_120.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_120.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_115.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_125.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_125.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
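// %205#0 is the normalized hidden state, %205#1 the f32 input kept for the
// residual add at the end of the attention sub-block. The aten.size.int /
// NumToTensor / Int.Tensor runs below read bsz (dim 0) and seqlen (dim 1);
// the repeated Int.Tensor copies are TorchScript-import redundancy and all
// hold the same scalar.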
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_115.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_116.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_117.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
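// Project to q/k/v and reshape each from [1,1,512] to [1,1,8,64]:
// n_heads = 8, head_dim = 64.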
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
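// Rotary position embedding: q and k are cast to f32 and viewed as 32
// complex pairs per head ([1,1,8,32,2] -> [1,1,8,32] complex). %186 is the
// freqs_cis slice for this step; it is reshaped to [1,1,1,32] and multiplied
// in below. (The importer prints complex<f64> element types here even though
// the real/imaginary parts are f32.)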
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
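// KV cache: the elided dense_resource literals are the preallocated
// cache_k / cache_v buffers, shape [32, 1024, 8, 64] (presumably
// max_batch_size x max_seq_len x n_heads x head_dim), pinned to CPU in f32.
// The slice/copy_ pairs below store the rotated key (%276) and the value
// (%233) at the traced position (start_pos = 1, seqlen = 1), then re-slice
// positions [0, start_pos + seqlen) to get the [1,2,8,64] keys/values
// consumed by attention.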
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
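// Scaled dot-product attention over the 2 cached positions: transpose to
// [bsz, heads, seq, head_dim], then scores = (q @ k^T) / 8.0
// (8 = sqrt(head_dim) = sqrt(64)), softmax in f32, output = scores @ v
// -> [1,8,1,64]. No causal mask is materialized here since the query
// length is 1.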
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
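// Merge heads: make the [1,1,8,64] result contiguous, flatten to [1,1,512],
// apply the output projection wo, and add the residual (%205#1, the f32
// input of attention_norm).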
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_118.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_126.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
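// SwiGLU feed-forward on the ffn_norm output: w2(silu(w1(m)) * w3(m)),
// followed by the second residual add against %323#1 (the f32 input of
// ffn_norm).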
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_121.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_123.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_122.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
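// From here on the file repeats the same per-layer material for the next
// TransformerBlock (mangles 128-140): identical RMSNorm and Linear bodies,
// differing only in the mangled class names and the weights they read.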
func.func private @__torch__.model.___torch_mangle_138.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_138.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_138.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_128.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_128.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_128.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_129.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_129.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_129.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_130.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_130.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_130.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_131.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_131.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_131.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_139.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_139.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_139.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_134.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_134.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_134.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_136.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_136.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_136.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_135.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_135.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_135.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
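// Annotation: TransformerBlock.forward below follows the usual pre-norm
// decoder layout readable from the ops: attention_norm -> wq/wk/wv projections
// -> rotary embedding -> KV-cache update -> scaled dot-product attention ->
// wo -> residual add, then ffn_norm -> SwiGLU feed-forward -> residual add.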
func.func private @__torch__.model.___torch_mangle_140.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc)
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_137.FeedForward"> loc(#loc)
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_137.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_135.Linear"> loc(#loc)
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_137.FeedForward"> loc(#loc)
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_137.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_136.Linear"> loc(#loc)
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_137.FeedForward"> loc(#loc)
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_137.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_134.Linear"> loc(#loc)
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_139.RMSNorm"> loc(#loc)
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_133.Attention"> loc(#loc)
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_133.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_131.Linear"> loc(#loc)
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_133.Attention"> loc(#loc)
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_133.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_130.Linear"> loc(#loc)
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_133.Attention"> loc(#loc)
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_133.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_129.Linear"> loc(#loc)
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_133.Attention"> loc(#loc)
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_133.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_128.Linear"> loc(#loc)
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_138.RMSNorm"> loc(#loc)
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_138.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%int0 = torch.constant.int 0 loc(#loc6)
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1 = torch.constant.int 1 loc(#loc6)
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_128.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_129.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_130.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
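// Annotation: %223/%225/%227 are the q/k/v projections of the normalized
// input; the views below reshape each from [1,1,512] to
// [bsz=1, seqlen=1, n_heads=8, head_dim=64].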
%int8 = torch.constant.int 8 loc(#loc7)
%int64 = torch.constant.int 64 loc(#loc7)
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7)
%int8_0 = torch.constant.int 8 loc(#loc8)
%int64_1 = torch.constant.int 64 loc(#loc8)
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8)
%int8_2 = torch.constant.int 8 loc(#loc9)
%int64_3 = torch.constant.int 64 loc(#loc9)
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9)
%int6 = torch.constant.int 6 loc(#loc10)
%false = torch.constant.bool false loc(#loc10)
%false_4 = torch.constant.bool false loc(#loc10)
%none_5 = torch.constant.none loc(#loc)
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10)
%int0_6 = torch.constant.int 0 loc(#loc10)
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_7 = torch.constant.int 1 loc(#loc10)
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2 = torch.constant.int 2 loc(#loc10)
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1 = torch.constant.int -1 loc(#loc10)
%int2_8 = torch.constant.int 2 loc(#loc10)
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10)
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10)
%int6_9 = torch.constant.int 6 loc(#loc11)
%false_10 = torch.constant.bool false loc(#loc11)
%false_11 = torch.constant.bool false loc(#loc11)
%none_12 = torch.constant.none loc(#loc)
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11)
%int0_13 = torch.constant.int 0 loc(#loc11)
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_14 = torch.constant.int 1 loc(#loc11)
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2_15 = torch.constant.int 2 loc(#loc11)
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1_16 = torch.constant.int -1 loc(#loc11)
%int2_17 = torch.constant.int 2 loc(#loc11)
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11)
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11)
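// Annotation: q and k have been regrouped into (real, imag) pairs and
// reinterpreted as complex tensors. %186 appears to hold the precomputed
// rotary frequencies (freqs_cis); it is likewise viewed as complex, reshaped
// to [1,1,1,32], and broadcast-multiplied into q and k below, which is the
// standard rotary-embedding formulation.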
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12)
%int1_18 = torch.constant.int 1 loc(#loc13)
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int3 = torch.constant.int 3 loc(#loc13)
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_19 = torch.constant.int 1 loc(#loc14)
%int1_20 = torch.constant.int 1 loc(#loc14)
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14)
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15)
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15)
%int3_21 = torch.constant.int 3 loc(#loc15)
%int-1_22 = torch.constant.int -1 loc(#loc15)
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15)
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16)
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16)
%int3_23 = torch.constant.int 3 loc(#loc16)
%int-1_24 = torch.constant.int -1 loc(#loc16)
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16)
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
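// Annotation (interpretation): the two elided 32x1024x8x64 literals below are
// most plausibly the attention key/value caches (max batch 32, max sequence
// length 1024, 8 heads, head_dim 64) that TorchScript captured as constants.
// The slice/copy_ pairs write the current position's k and v into the caches,
// and the wider slices afterwards read the cached prefix back out.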
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%cpu = torch.constant.device "cpu" loc(#loc18)
%int6_25 = torch.constant.int 6 loc(#loc18)
%false_26 = torch.constant.bool false loc(#loc18)
%false_27 = torch.constant.bool false loc(#loc18)
%none_28 = torch.constant.none loc(#loc)
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
%cpu_29 = torch.constant.device "cpu" loc(#loc19)
%int6_30 = torch.constant.int 6 loc(#loc19)
%false_31 = torch.constant.bool false loc(#loc19)
%false_32 = torch.constant.bool false loc(#loc19)
%none_33 = torch.constant.none loc(#loc)
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20)
%int1_34 = torch.constant.int 1 loc(#loc20)
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20)
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_35 = torch.constant.int 0 loc(#loc20)
%int0_36 = torch.constant.int 0 loc(#loc20)
%int1_37 = torch.constant.int 1 loc(#loc20)
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20)
%int1_38 = torch.constant.int 1 loc(#loc20)
%int1_39 = torch.constant.int 1 loc(#loc20)
%int1_40 = torch.constant.int 1 loc(#loc20)
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%false_41 = torch.constant.bool false loc(#loc)
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21)
%int1_42 = torch.constant.int 1 loc(#loc21)
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21)
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_43 = torch.constant.int 0 loc(#loc21)
%int0_44 = torch.constant.int 0 loc(#loc21)
%int1_45 = torch.constant.int 1 loc(#loc21)
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21)
%int1_46 = torch.constant.int 1 loc(#loc21)
%int1_47 = torch.constant.int 1 loc(#loc21)
%int1_48 = torch.constant.int 1 loc(#loc21)
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
%false_49 = torch.constant.bool false loc(#loc)
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22)
%int1_50 = torch.constant.int 1 loc(#loc22)
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22)
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_51 = torch.constant.int 0 loc(#loc22)
%int0_52 = torch.constant.int 0 loc(#loc22)
%int1_53 = torch.constant.int 1 loc(#loc22)
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22)
%int1_54 = torch.constant.int 1 loc(#loc22)
%int0_55 = torch.constant.int 0 loc(#loc22)
%int1_56 = torch.constant.int 1 loc(#loc22)
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22)
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23)
%int1_57 = torch.constant.int 1 loc(#loc23)
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23)
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_58 = torch.constant.int 0 loc(#loc23)
%int0_59 = torch.constant.int 0 loc(#loc23)
%int1_60 = torch.constant.int 1 loc(#loc23)
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23)
%int1_61 = torch.constant.int 1 loc(#loc23)
%int0_62 = torch.constant.int 0 loc(#loc23)
%int1_63 = torch.constant.int 1 loc(#loc23)
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23)
%int1_64 = torch.constant.int 1 loc(#loc24)
%int2_65 = torch.constant.int 2 loc(#loc24)
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24)
%int1_66 = torch.constant.int 1 loc(#loc25)
%int2_67 = torch.constant.int 2 loc(#loc25)
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25)
%int1_68 = torch.constant.int 1 loc(#loc26)
%int2_69 = torch.constant.int 2 loc(#loc26)
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26)
%int2_70 = torch.constant.int 2 loc(#loc27)
%int3_71 = torch.constant.int 3 loc(#loc27)
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27)
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27)
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
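// Annotation: attention scores = matmul(q, k^T) divided by 8.0, i.e. scaled
// by 1/sqrt(head_dim) with head_dim = 64; the scores are cast to f32 and
// softmaxed along the last dimension below, then applied to the values.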
%int6_72 = torch.constant.int 6 loc(#loc28)
%false_73 = torch.constant.bool false loc(#loc28)
%false_74 = torch.constant.bool false loc(#loc28)
%none_75 = torch.constant.none loc(#loc)
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%int-1_76 = torch.constant.int -1 loc(#loc29)
%none_77 = torch.constant.none loc(#loc)
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29)
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30)
%int1_78 = torch.constant.int 1 loc(#loc31)
%int2_79 = torch.constant.int 2 loc(#loc31)
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31)
%int0_80 = torch.constant.int 0 loc(#loc32)
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32)
%int-1_81 = torch.constant.int -1 loc(#loc32)
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32)
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32)
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_131.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int1_82 = torch.constant.int 1 loc(#loc33)
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33)
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33)
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_139.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
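// Annotation: feed-forward with the SwiGLU pattern, w2(silu(w1(x)) * w3(x)),
// where x is the ffn_norm output %323#0; the result is added to the residual
// stream %323#1 to produce the block's output.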
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_134.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34)
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_136.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35)
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35)
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_135.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int1_83 = torch.constant.int 1 loc(#loc36)
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36)
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36)
return %333 : !torch.tensor loc(#loc)
} loc(#loc)
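// Annotation: RMSNorm.forward below computes
// x * rsqrt(mean(x^2, dim=-1, keepdim=True) + 1e-5), casts back with type_as,
// and scales by the learned weight. It returns a tuple of (normalized output,
// float-cast input); callers use the second element as the residual branch.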
func.func private @__torch__.model.___torch_mangle_151.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_151.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_151.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
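// Annotation: the next four mangled Linear.forward functions appear to be the
// attention projections (wq/wk/wv/wo) for the following block: bias-free
// 512 -> 512 matmuls.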
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_141.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_141.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_141.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_142.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_142.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_142.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_143.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_143.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_143.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_144.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_144.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_144.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
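// Annotation: same RMSNorm pattern as above, specialized for this block's
// ffn_norm.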
func.func private @__torch__.model.___torch_mangle_152.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_152.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_152.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
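// Annotation: FeedForward projections for this block, mirroring the earlier
// mangled copies: w1 (mangle_147) and w3 (mangle_149) map 512 -> 1536, and
// w2 (mangle_148) maps 1536 -> 512.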
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_147.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_147.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_147.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_149.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_149.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_149.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_148.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_148.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_148.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
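// Annotation: the TransformerBlock.forward below repeats the same
// attention + rotary embedding + KV-cache + SwiGLU structure as the previous
// block, specialized to this block's mangled submodules.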
func.func private @__torch__.model.___torch_mangle_153.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_150.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_150.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_148.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_150.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_150.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_149.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_150.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_150.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_147.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_152.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_146.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_146.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_144.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_146.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_146.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_143.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_146.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_146.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_142.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_146.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_146.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_141.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_151.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_151.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_141.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_142.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_143.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
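// KV-cache read-back: %297 and %302 slice the caches over dim-1 positions | |
// [0 : seqlen+1), i.e. keys = cache_k[:bsz, :start_pos+seqlen] (and likewise values), | |
// which with start_pos = 1 gives the [1,2,8,64] tensors used for attention below. | |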
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
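// Scaled dot-product scores: q @ k^T divided by the literal 8.0 = sqrt(head_dim) for | |
// head_dim = 64. In PyTorch terms (a sketch, not the exported code): | |
//   scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(head_dim) | |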
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
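// Softmax is evaluated in f32 (to.dtype with dtype code 6 = float32) along the last | |
// dim, then type_as casts the result back to the query dtype -- a no-op here since | |
// everything is already f32, but it preserves the source's float(...).softmax pattern. | |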
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
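// Merge heads: transpose to [bsz, seqlen, n_heads, head_dim], make it contiguous, and | |
// view back to [1,1,512] (8 heads x 64 dims) before the wo output projection call. | |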
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_144.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_152.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_147.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_149.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_148.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
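// End of one TransformerBlock.forward body: each RMSNorm call returns the pair | |
// (normed, float-cast input), and the float-cast input feeds the residual add. The | |
// functions that follow are per-layer mangled clones (___torch_mangle_NNN) of the | |
// same RMSNorm/Linear bodies, identical apart from the suffix. A minimal PyTorch | |
// sketch of the block wiring (names follow the Llama-style attribute names visible | |
// in the GetAttr ops; treat it as a sketch, not the exported code): | |
//   import torch.nn.functional as F | |
//   def block_forward(x, freqs_cis): | |
//       normed, xf = attention_norm(x)          # RMSNorm returns a pair here | |
//       h = xf + attention(normed, freqs_cis)   # first residual (aten.add.Tensor) | |
//       normed2, hf = ffn_norm(h) | |
//       return hf + w2(F.silu(w1(normed2)) * w3(normed2))   # second residual | |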
func.func private @__torch__.model.___torch_mangle_164.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_164.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_164.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
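// The RMSNorm.forward above computes y = weight * (x_f32 * rsqrt(mean(x_f32^2, -1) + | |
// 1e-5)) and returns the pair (y, x_f32); the second element lets the caller reuse | |
// the float-cast input for the residual add. Equivalent PyTorch (a sketch): | |
//   import torch | |
//   def rmsnorm_forward(x, weight, eps=1e-5): | |
//       xf = x.float() | |
//       y = xf * torch.rsqrt(xf.pow(2).mean(-1, keepdim=True) + eps) | |
//       return weight * y.type_as(xf), xf | |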
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_154.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_154.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_154.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_155.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_155.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_155.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_156.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_156.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_156.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_157.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_157.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_157.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
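// The four Linear clones above (mangle_154..157) are bound below as the attention | |
// projections wq, wk, wv, wo: each is a bias-free 512 -> 512 aten.linear (bias = none). | |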
func.func private @__torch__.model.___torch_mangle_165.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_165.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_165.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_160.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_160.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_160.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_162.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_162.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_162.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_161.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_161.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_161.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
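// The three Linear clones above are the FeedForward projections: w1 (mangle_160) and | |
// w3 (mangle_162) map 512 -> 1536, w2 (mangle_161) maps 1536 -> 512; they are wired | |
// below as the SwiGLU form w2(silu(w1(x)) * w3(x)). | |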
func.func private @__torch__.model.___torch_mangle_166.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_163.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_163.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_161.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_163.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_163.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_162.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_163.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_163.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_160.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_165.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_159.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_159.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_157.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_159.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_159.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_156.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_159.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_159.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_155.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_159.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_159.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_154.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_164.RMSNorm"> loc(#loc) | |
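// The GetAttr chain above unpacks the block's submodules (feed_forward.w1/w2/w3, | |
// ffn_norm, attention.wq/wk/wv/wo, attention_norm) into SSA values ahead of the calls. | |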
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_164.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
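// Shape bookkeeping: bsz (dim 0) and seqlen (dim 1) are each read once, round-tripped | |
// through NumToTensor.Scalar/Int.Tensor, and duplicated (%208..%215, %218..%221); the | |
// tracer appears to materialise a fresh scalar for every later use of the same size. | |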
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_154.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_155.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_156.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
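// QKV head split: the three views reshape the wq/wk/wv outputs from [1,1,512] to | |
// [bsz, seqlen, 8, 64], i.e. x.view(bsz, seqlen, n_heads, head_dim). | |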
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
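// Rotary embedding: xq/xk are reshaped to [..., 32, 2], reinterpreted as complex, | |
// multiplied by freqs_cis broadcast to [1,1,1,32], turned back into reals, and | |
// flattened to [..., 64]; type_as restores the original dtype. (The printer shows | |
// complex<f64> element types although the inputs are f32; that is how this export | |
// rendered aten.view_as_complex and is left as-is.) PyTorch sketch of the transform | |
// (helper name assumed): | |
//   def apply_rotary_emb(x, freqs_cis):  # x: [b,s,h,d], freqs_cis: [s, d//2] complex | |
//       xc = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2)) | |
//       out = torch.view_as_real(xc * freqs_cis.view(1, xc.size(1), 1, xc.size(3))) | |
//       return out.flatten(3).type_as(x) | |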
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
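// cache_k / cache_v: the two dense_resource<__elided__> literals are the attention | |
// key/value caches captured as constants of shape [32,1024,8,64], read here as | |
// [max_batch, max_seq_len, n_heads, head_dim], then placed on the cpu device. The | |
// resource payloads are elided in this dump, so the buffer contents are not shown. | |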
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_157.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_165.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_160.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_162.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_161.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_177.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_177.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_177.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_167.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_167.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_167.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_168.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_168.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_168.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_169.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_169.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_169.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_170.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_170.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_170.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_178.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_178.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_178.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_173.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_173.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_173.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_175.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_175.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_175.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_174.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_174.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_174.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_179.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc)
  %187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_176.FeedForward"> loc(#loc)
  %188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_176.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_174.Linear"> loc(#loc)
  %189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_176.FeedForward"> loc(#loc)
  %190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_176.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_175.Linear"> loc(#loc)
  %191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_176.FeedForward"> loc(#loc)
  %192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_176.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_173.Linear"> loc(#loc)
  %193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_178.RMSNorm"> loc(#loc)
  %194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_172.Attention"> loc(#loc)
  %195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_172.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_170.Linear"> loc(#loc)
  %196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_172.Attention"> loc(#loc)
  %197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_172.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_169.Linear"> loc(#loc)
  %198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_172.Attention"> loc(#loc)
  %199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_172.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_168.Linear"> loc(#loc)
  %200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_172.Attention"> loc(#loc)
  %201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_172.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_167.Linear"> loc(#loc)
  %202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_177.RMSNorm"> loc(#loc)
  %203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_177.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
  %205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
  %int0 = torch.constant.int 0 loc(#loc6)
  %206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
  %207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int1 = torch.constant.int 1 loc(#loc6)
  %216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
  %217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_167.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_168.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_169.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
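  // q/k/v projections are done; the IR next reshapes each to 8 heads x 64
  // dims and applies rotary position embeddings by viewing the head dim as
  // 32 complex pairs. Hedged Python sketch of the q branch (names assumed):
  //
  //   xq  = xq.view(bsz, seqlen, 8, 64)
  //   xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
  //   xq  = torch.view_as_real(xq_ * freqs_cis.view(1, seqlen, 1, 32)).flatten(3)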
  %int8 = torch.constant.int 8 loc(#loc7)
  %int64 = torch.constant.int 64 loc(#loc7)
  %228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7)
  %int8_0 = torch.constant.int 8 loc(#loc8)
  %int64_1 = torch.constant.int 64 loc(#loc8)
  %230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8)
  %int8_2 = torch.constant.int 8 loc(#loc9)
  %int64_3 = torch.constant.int 64 loc(#loc9)
  %232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9)
  %int6 = torch.constant.int 6 loc(#loc10)
  %false = torch.constant.bool false loc(#loc10)
  %false_4 = torch.constant.bool false loc(#loc10)
  %none_5 = torch.constant.none loc(#loc)
  %234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10)
  %int0_6 = torch.constant.int 0 loc(#loc10)
  %235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
  %236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int1_7 = torch.constant.int 1 loc(#loc10)
  %238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
  %239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int2 = torch.constant.int 2 loc(#loc10)
  %241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
  %242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int-1 = torch.constant.int -1 loc(#loc10)
  %int2_8 = torch.constant.int 2 loc(#loc10)
  %244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10)
  %246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10)
  %int6_9 = torch.constant.int 6 loc(#loc11)
  %false_10 = torch.constant.bool false loc(#loc11)
  %false_11 = torch.constant.bool false loc(#loc11)
  %none_12 = torch.constant.none loc(#loc)
  %247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11)
  %int0_13 = torch.constant.int 0 loc(#loc11)
  %248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
  %249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int1_14 = torch.constant.int 1 loc(#loc11)
  %251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
  %252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int2_15 = torch.constant.int 2 loc(#loc11)
  %254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
  %255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int-1_16 = torch.constant.int -1 loc(#loc11)
  %int2_17 = torch.constant.int 2 loc(#loc11)
  %257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11)
  %259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11)
  %260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12)
  %int1_18 = torch.constant.int 1 loc(#loc13)
  %261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
  %262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int3 = torch.constant.int 3 loc(#loc13)
  %264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
  %265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int1_19 = torch.constant.int 1 loc(#loc14)
  %int1_20 = torch.constant.int 1 loc(#loc14)
  %267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14)
  %269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15)
  %270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15)
  %int3_21 = torch.constant.int 3 loc(#loc15)
  %int-1_22 = torch.constant.int -1 loc(#loc15)
  %271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15)
  %272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16)
  %273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16)
  %int3_23 = torch.constant.int 3 loc(#loc16)
  %int-1_24 = torch.constant.int -1 loc(#loc16)
  %274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16)
  %275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
  %276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
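  // The two elided 32x1024x8x64 dense_resource literals below appear to be
  // this layer's key/value caches (max batch 32, max seq len 1024), baked in
  // by tracing. The writes slice [:bsz, start_pos:start_pos+seqlen] and
  // update it in place; this trace used start_pos = 1, seqlen = 1, so the
  // later reads cover positions [0, 2). Roughly:
  //
  //   cache_k[:bsz, start_pos:start_pos+seqlen] = xk
  //   keys = cache_k[:bsz, :start_pos+seqlen]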
  %277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
  %cpu = torch.constant.device "cpu" loc(#loc18)
  %int6_25 = torch.constant.int 6 loc(#loc18)
  %false_26 = torch.constant.bool false loc(#loc18)
  %false_27 = torch.constant.bool false loc(#loc18)
  %none_28 = torch.constant.none loc(#loc)
  %278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
  %279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
  %cpu_29 = torch.constant.device "cpu" loc(#loc19)
  %int6_30 = torch.constant.int 6 loc(#loc19)
  %false_31 = torch.constant.bool false loc(#loc19)
  %false_32 = torch.constant.bool false loc(#loc19)
  %none_33 = torch.constant.none loc(#loc)
  %280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
  %281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20)
  %int1_34 = torch.constant.int 1 loc(#loc20)
  %282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20)
  %283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int0_35 = torch.constant.int 0 loc(#loc20)
  %int0_36 = torch.constant.int 0 loc(#loc20)
  %int1_37 = torch.constant.int 1 loc(#loc20)
  %284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20)
  %int1_38 = torch.constant.int 1 loc(#loc20)
  %int1_39 = torch.constant.int 1 loc(#loc20)
  %int1_40 = torch.constant.int 1 loc(#loc20)
  %285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
  %false_41 = torch.constant.bool false loc(#loc)
  %286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
  %287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21)
  %int1_42 = torch.constant.int 1 loc(#loc21)
  %288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21)
  %289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int0_43 = torch.constant.int 0 loc(#loc21)
  %int0_44 = torch.constant.int 0 loc(#loc21)
  %int1_45 = torch.constant.int 1 loc(#loc21)
  %290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21)
  %int1_46 = torch.constant.int 1 loc(#loc21)
  %int1_47 = torch.constant.int 1 loc(#loc21)
  %int1_48 = torch.constant.int 1 loc(#loc21)
  %291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
  %false_49 = torch.constant.bool false loc(#loc)
  %292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
  %293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22)
  %int1_50 = torch.constant.int 1 loc(#loc22)
  %294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22)
  %295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int0_51 = torch.constant.int 0 loc(#loc22)
  %int0_52 = torch.constant.int 0 loc(#loc22)
  %int1_53 = torch.constant.int 1 loc(#loc22)
  %296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22)
  %int1_54 = torch.constant.int 1 loc(#loc22)
  %int0_55 = torch.constant.int 0 loc(#loc22)
  %int1_56 = torch.constant.int 1 loc(#loc22)
  %297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22)
  %298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23)
  %int1_57 = torch.constant.int 1 loc(#loc23)
  %299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23)
  %300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int0_58 = torch.constant.int 0 loc(#loc23)
  %int0_59 = torch.constant.int 0 loc(#loc23)
  %int1_60 = torch.constant.int 1 loc(#loc23)
  %301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23)
  %int1_61 = torch.constant.int 1 loc(#loc23)
  %int0_62 = torch.constant.int 0 loc(#loc23)
  %int1_63 = torch.constant.int 1 loc(#loc23)
  %302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23)
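  // Scaled dot-product attention over the two cached positions; the
  // dense<8.000000e+00> divisor below is sqrt(head_dim) = sqrt(64). Sketch:
  //
  //   scores = (xq @ keys.transpose(2, 3)) / math.sqrt(64)
  //   out = torch.softmax(scores.float(), dim=-1).type_as(xq) @ values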
  %int1_64 = torch.constant.int 1 loc(#loc24)
  %int2_65 = torch.constant.int 2 loc(#loc24)
  %303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24)
  %int1_66 = torch.constant.int 1 loc(#loc25)
  %int2_67 = torch.constant.int 2 loc(#loc25)
  %304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25)
  %int1_68 = torch.constant.int 1 loc(#loc26)
  %int2_69 = torch.constant.int 2 loc(#loc26)
  %305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26)
  %int2_70 = torch.constant.int 2 loc(#loc27)
  %int3_71 = torch.constant.int 3 loc(#loc27)
  %306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27)
  %307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
  %308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27)
  %309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
  %int6_72 = torch.constant.int 6 loc(#loc28)
  %false_73 = torch.constant.bool false loc(#loc28)
  %false_74 = torch.constant.bool false loc(#loc28)
  %none_75 = torch.constant.none loc(#loc)
  %310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
  %int-1_76 = torch.constant.int -1 loc(#loc29)
  %none_77 = torch.constant.none loc(#loc)
  %311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29)
  %312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
  %313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30)
  %int1_78 = torch.constant.int 1 loc(#loc31)
  %int2_79 = torch.constant.int 2 loc(#loc31)
  %314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31)
  %int0_80 = torch.constant.int 0 loc(#loc32)
  %315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32)
  %int-1_81 = torch.constant.int -1 loc(#loc32)
  %316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32)
  %318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32)
  %319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_170.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %int1_82 = torch.constant.int 1 loc(#loc33)
  %320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33)
  %321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33)
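  // ffn_norm followed by a SwiGLU feed-forward with hidden width 1536,
  // i.e. w2(silu(w1(x)) * w3(x)); sketch:
  //
  //   nh, hf = self.ffn_norm(h)
  //   out = hf + self.w2(torch.nn.functional.silu(self.w1(nh)) * self.w3(nh))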
  %322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_178.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
  %323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
  %324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_173.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34)
  %327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_175.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35)
  %330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35)
  %331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_174.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %int1_83 = torch.constant.int 1 loc(#loc36)
  %332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36)
  %333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36)
  return %333 : !torch.tensor loc(#loc)
} loc(#loc)
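// RMSNorm.forward computes, in float32, y = weight * x / sqrt(mean(x^2,
// dim=-1, keepdim=True) + 1e-5) and returns the tuple (y, x.float()); callers
// use the second element as the residual branch. Hedged Python sketch:
//
//   xf = x.float()
//   y = self.weight * (xf * torch.rsqrt(xf.pow(2).mean(-1, keepdim=True) + 1e-5))
//   return y, xf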
func.func private @__torch__.model.___torch_mangle_190.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_190.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_190.RMSNorm"> -> !torch.tensor loc(#loc)
  %int6 = torch.constant.int 6 loc(#loc2)
  %false = torch.constant.bool false loc(#loc2)
  %false_0 = torch.constant.bool false loc(#loc2)
  %none_1 = torch.constant.none loc(#loc)
  %187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
  %int2 = torch.constant.int 2 loc(#loc3)
  %188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
  %int-1 = torch.constant.int -1 loc(#loc3)
  %189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
  %true_2 = torch.constant.bool true loc(#loc3)
  %none_3 = torch.constant.none loc(#loc)
  %190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
  %191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
  %int1 = torch.constant.int 1 loc(#loc3)
  %192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
  %193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
  %194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
  %195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
  %196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
  %197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
  %198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
  return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
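// The next four 512->512 bias-free Linears (mangle_180..183) are bound below
// (in mangle_192.TransformerBlock) as the next layer's attention projections
// wq, wk, wv and wo.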
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_180.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_180.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_180.Linear"> -> !torch.tensor loc(#loc)
  %none_0 = torch.constant.none loc(#loc)
  %187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
  %188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
  return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_181.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_181.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_181.Linear"> -> !torch.tensor loc(#loc)
  %none_0 = torch.constant.none loc(#loc)
  %187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
  %188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
  return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_182.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_182.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_182.Linear"> -> !torch.tensor loc(#loc)
  %none_0 = torch.constant.none loc(#loc)
  %187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
  %188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
  return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_183.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_183.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_183.Linear"> -> !torch.tensor loc(#loc)
  %none_0 = torch.constant.none loc(#loc)
  %187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
  %188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
  return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_191.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_191.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_191.RMSNorm"> -> !torch.tensor loc(#loc)
  %int6 = torch.constant.int 6 loc(#loc2)
  %false = torch.constant.bool false loc(#loc2)
  %false_0 = torch.constant.bool false loc(#loc2)
  %none_1 = torch.constant.none loc(#loc)
  %187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
  %int2 = torch.constant.int 2 loc(#loc3)
  %188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
  %int-1 = torch.constant.int -1 loc(#loc3)
  %189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
  %true_2 = torch.constant.bool true loc(#loc3)
  %none_3 = torch.constant.none loc(#loc)
  %190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
  %191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
  %int1 = torch.constant.int 1 loc(#loc3)
  %192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
  %193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
  %194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
  %195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
  %196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
  %197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
  %198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
  return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
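// mangle_186 and mangle_188 (512->1536) and mangle_187 (1536->512) below are
// the next layer's FeedForward w1, w3 and w2 for the SwiGLU sketched above.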
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_186.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_186.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_186.Linear"> -> !torch.tensor loc(#loc)
  %none_0 = torch.constant.none loc(#loc)
  %187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
  %188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
  return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_188.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_188.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_188.Linear"> -> !torch.tensor loc(#loc)
  %none_0 = torch.constant.none loc(#loc)
  %187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
  %188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
  return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_187.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_187.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc)
  %186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_187.Linear"> -> !torch.tensor loc(#loc)
  %none_0 = torch.constant.none loc(#loc)
  %187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
  %188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
  return %188 : !torch.tensor loc(#loc)
} loc(#loc)
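// The TransformerBlock below (mangle_192) is structurally identical to
// mangle_179 above; TorchScript emits one mangled copy of the block and its
// submodules per layer instance, so this pattern repeats for each layer.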
func.func private @__torch__.model.___torch_mangle_192.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor {
  %185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
  %186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc)
  %187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_189.FeedForward"> loc(#loc)
  %188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_189.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_187.Linear"> loc(#loc)
  %189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_189.FeedForward"> loc(#loc)
  %190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_189.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_188.Linear"> loc(#loc)
  %191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_189.FeedForward"> loc(#loc)
  %192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_189.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_186.Linear"> loc(#loc)
  %193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_191.RMSNorm"> loc(#loc)
  %194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_185.Attention"> loc(#loc)
  %195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_185.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_183.Linear"> loc(#loc)
  %196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_185.Attention"> loc(#loc)
  %197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_185.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_182.Linear"> loc(#loc)
  %198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_185.Attention"> loc(#loc)
  %199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_185.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_181.Linear"> loc(#loc)
  %200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_185.Attention"> loc(#loc)
  %201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_185.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_180.Linear"> loc(#loc)
  %202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_190.RMSNorm"> loc(#loc)
  %203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_190.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
  %205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
  %int0 = torch.constant.int 0 loc(#loc6)
  %206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
  %207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int1 = torch.constant.int 1 loc(#loc6)
  %216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
  %217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_180.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_181.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_182.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %int8 = torch.constant.int 8 loc(#loc7)
  %int64 = torch.constant.int 64 loc(#loc7)
  %228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7)
  %int8_0 = torch.constant.int 8 loc(#loc8)
  %int64_1 = torch.constant.int 64 loc(#loc8)
  %230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8)
  %int8_2 = torch.constant.int 8 loc(#loc9)
  %int64_3 = torch.constant.int 64 loc(#loc9)
  %232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9)
  %int6 = torch.constant.int 6 loc(#loc10)
  %false = torch.constant.bool false loc(#loc10)
  %false_4 = torch.constant.bool false loc(#loc10)
  %none_5 = torch.constant.none loc(#loc)
  %234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10)
  %int0_6 = torch.constant.int 0 loc(#loc10)
  %235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
  %236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int1_7 = torch.constant.int 1 loc(#loc10)
  %238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
  %239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int2 = torch.constant.int 2 loc(#loc10)
  %241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
  %242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int-1 = torch.constant.int -1 loc(#loc10)
  %int2_8 = torch.constant.int 2 loc(#loc10)
  %244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10)
  %246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10)
  %int6_9 = torch.constant.int 6 loc(#loc11)
  %false_10 = torch.constant.bool false loc(#loc11)
  %false_11 = torch.constant.bool false loc(#loc11)
  %none_12 = torch.constant.none loc(#loc)
  %247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11)
  %int0_13 = torch.constant.int 0 loc(#loc11)
  %248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
  %249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int1_14 = torch.constant.int 1 loc(#loc11)
  %251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
  %252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int2_15 = torch.constant.int 2 loc(#loc11)
  %254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
  %255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int-1_16 = torch.constant.int -1 loc(#loc11)
  %int2_17 = torch.constant.int 2 loc(#loc11)
  %257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11)
  %259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11)
  %260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12)
  %int1_18 = torch.constant.int 1 loc(#loc13)
  %261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
  %262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int3 = torch.constant.int 3 loc(#loc13)
  %264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
  %265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
  %266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int1_19 = torch.constant.int 1 loc(#loc14)
  %int1_20 = torch.constant.int 1 loc(#loc14)
  %267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14)
  %269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15)
  %270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15)
  %int3_21 = torch.constant.int 3 loc(#loc15)
  %int-1_22 = torch.constant.int -1 loc(#loc15)
  %271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15)
  %272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16)
  %273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16)
  %int3_23 = torch.constant.int 3 loc(#loc16)
  %int-1_24 = torch.constant.int -1 loc(#loc16)
  %274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16)
  %275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
  %276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
  %277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
  %cpu = torch.constant.device "cpu" loc(#loc18)
  %int6_25 = torch.constant.int 6 loc(#loc18)
  %false_26 = torch.constant.bool false loc(#loc18)
  %false_27 = torch.constant.bool false loc(#loc18)
  %none_28 = torch.constant.none loc(#loc)
  %278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
  %279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
  %cpu_29 = torch.constant.device "cpu" loc(#loc19)
  %int6_30 = torch.constant.int 6 loc(#loc19)
  %false_31 = torch.constant.bool false loc(#loc19)
  %false_32 = torch.constant.bool false loc(#loc19)
  %none_33 = torch.constant.none loc(#loc)
  %280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
  %281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20)
  %int1_34 = torch.constant.int 1 loc(#loc20)
  %282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20)
  %283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int0_35 = torch.constant.int 0 loc(#loc20)
  %int0_36 = torch.constant.int 0 loc(#loc20)
  %int1_37 = torch.constant.int 1 loc(#loc20)
  %284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20)
  %int1_38 = torch.constant.int 1 loc(#loc20)
  %int1_39 = torch.constant.int 1 loc(#loc20)
  %int1_40 = torch.constant.int 1 loc(#loc20)
  %285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
  %false_41 = torch.constant.bool false loc(#loc)
  %286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
  %287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21)
  %int1_42 = torch.constant.int 1 loc(#loc21)
  %288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21)
  %289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int0_43 = torch.constant.int 0 loc(#loc21)
  %int0_44 = torch.constant.int 0 loc(#loc21)
  %int1_45 = torch.constant.int 1 loc(#loc21)
  %290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21)
  %int1_46 = torch.constant.int 1 loc(#loc21)
  %int1_47 = torch.constant.int 1 loc(#loc21)
  %int1_48 = torch.constant.int 1 loc(#loc21)
  %291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
  %false_49 = torch.constant.bool false loc(#loc)
  %292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
  %293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22)
  %int1_50 = torch.constant.int 1 loc(#loc22)
  %294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22)
  %295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int0_51 = torch.constant.int 0 loc(#loc22)
  %int0_52 = torch.constant.int 0 loc(#loc22)
  %int1_53 = torch.constant.int 1 loc(#loc22)
  %296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22)
  %int1_54 = torch.constant.int 1 loc(#loc22)
  %int0_55 = torch.constant.int 0 loc(#loc22)
  %int1_56 = torch.constant.int 1 loc(#loc22)
  %297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22)
  %298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23)
  %int1_57 = torch.constant.int 1 loc(#loc23)
  %299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23)
  %300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
  %int0_58 = torch.constant.int 0 loc(#loc23)
  %int0_59 = torch.constant.int 0 loc(#loc23)
  %int1_60 = torch.constant.int 1 loc(#loc23)
  %301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23)
  %int1_61 = torch.constant.int 1 loc(#loc23)
  %int0_62 = torch.constant.int 0 loc(#loc23)
  %int1_63 = torch.constant.int 1 loc(#loc23)
  %302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23)
  %int1_64 = torch.constant.int 1 loc(#loc24)
  %int2_65 = torch.constant.int 2 loc(#loc24)
  %303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24)
  %int1_66 = torch.constant.int 1 loc(#loc25)
  %int2_67 = torch.constant.int 2 loc(#loc25)
  %304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25)
  %int1_68 = torch.constant.int 1 loc(#loc26)
  %int2_69 = torch.constant.int 2 loc(#loc26)
  %305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26)
  %int2_70 = torch.constant.int 2 loc(#loc27)
  %int3_71 = torch.constant.int 3 loc(#loc27)
  %306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27)
  %307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
  %308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27)
  %309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
  %int6_72 = torch.constant.int 6 loc(#loc28)
  %false_73 = torch.constant.bool false loc(#loc28)
  %false_74 = torch.constant.bool false loc(#loc28)
  %none_75 = torch.constant.none loc(#loc)
  %310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
  %int-1_76 = torch.constant.int -1 loc(#loc29)
  %none_77 = torch.constant.none loc(#loc)
  %311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29)
  %312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
  %313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30)
  %int1_78 = torch.constant.int 1 loc(#loc31)
  %int2_79 = torch.constant.int 2 loc(#loc31)
  %314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31)
  %int0_80 = torch.constant.int 0 loc(#loc32)
  %315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32)
  %int-1_81 = torch.constant.int -1 loc(#loc32)
  %316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
  %317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32)
  %318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32)
  %319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_183.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %int1_82 = torch.constant.int 1 loc(#loc33)
  %320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33)
  %321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33)
  %322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_191.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
  %323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
  %324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_186.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34)
  %327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
  %328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_188.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
  %329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35)
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_187.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
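// The function above ends a fused attention + feed-forward trace for one
// TransformerBlock. A minimal PyTorch sketch of the attention core it encodes
// (scores = q @ k^T / sqrt(64) = q @ k^T / 8, f32 softmax, then @ v); no causal
// mask appears because this trace is a single-token decode step. Names are
// illustrative, not from the dump:
//
//     import math
//     import torch
//
//     def attention_core(xq, keys, values):
//         # xq: (bs, n_heads, q_len, 64); keys/values: (bs, n_heads, kv_len, 64)
//         scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(xq.size(-1))
//         scores = torch.softmax(scores.float(), dim=-1).type_as(xq)   # f32 softmax, cast back
//         out = torch.matmul(scores, values)                           # (bs, n_heads, q_len, 64)
//         bs, _, q_len, _ = xq.shape
//         return out.transpose(1, 2).contiguous().view(bs, q_len, -1)  # (bs, q_len, 512)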
func.func private @__torch__.model.___torch_mangle_203.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_203.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_203.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
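// The RMSNorm functions in this dump return a (output, fp32_input) tuple: %196 is
// weight * normalize(x) and %187 is the input upcast to f32, which the caller
// reuses for the residual add. A minimal PyTorch sketch, assuming eps = 1e-5 from
// the literal above:
//
//     import torch
//
//     def rms_norm_forward(x, weight, eps=1e-5):
//         h = x.float()                                            # aten.to.dtype -> f32
//         normed = h * torch.rsqrt(h.pow(2).mean(-1, keepdim=True) + eps)
//         return normed.type_as(h) * weight, h                     # (scaled output, f32 input)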
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_193.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_193.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_193.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_194.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_194.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_194.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_195.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_195.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_195.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_196.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_196.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_196.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
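// mangle_193..196 are this layer's wq/wk/wv/wo projections; each lowers to
// aten.linear with a %none bias. Equivalent one-liner in PyTorch:
//
//     import torch.nn.functional as F
//
//     def linear_no_bias(x, weight):
//         return F.linear(x, weight, None)   # x @ weight.T, no bias term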
func.func private @__torch__.model.___torch_mangle_204.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_204.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_204.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_199.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_199.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_199.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_201.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_201.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_201.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_200.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_200.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_200.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
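// mangle_199/201/200 are the feed-forward projections w1 (512 -> 1536),
// w3 (512 -> 1536), and w2 (1536 -> 512); the TransformerBlock below combines
// them as a SwiGLU MLP. A minimal sketch with the weight matrices passed
// explicitly:
//
//     import torch.nn.functional as F
//
//     def feed_forward(x, w1, w2, w3):
//         # silu(x @ w1.T) * (x @ w3.T), projected back down through w2
//         return F.linear(F.silu(F.linear(x, w1)) * F.linear(x, w3), w2)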
func.func private @__torch__.model.___torch_mangle_205.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_202.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_202.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_200.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_202.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_202.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_201.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_202.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_202.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_199.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_204.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_198.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_198.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_196.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_198.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_198.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_195.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_198.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_198.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_194.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_198.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_198.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_193.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_203.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_203.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_193.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_194.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_195.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_196.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_204.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_199.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_201.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_200.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
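// The reshape / view_as_complex / mul / view_as_real / flatten sequence inside the
// block above (around %244..%276) is rotary position embedding applied to q and k.
// A minimal PyTorch sketch; `freqs_cis` is an illustrative name for the complex
// view of the elided (1, 32, 2) frequency slice:
//
//     import torch
//
//     def apply_rotary(x, freqs_cis):
//         # x: (bs, seqlen, n_heads, 64); freqs_cis: complex, (seqlen, 32)
//         xc = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
//         fc = freqs_cis.view(1, xc.size(1), 1, xc.size(-1))   # broadcast over bs, heads
//         return torch.view_as_real(xc * fc).flatten(3).type_as(x)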
func.func private @__torch__.model.___torch_mangle_207.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_207.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_207.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.tensor_static_info_cast %196 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc4) | |
return %197 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_208.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_208.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_208.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_210.Transformer.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1],si64> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[],si64> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["output"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_208.Linear"> loc(#loc) | |
%188 = torch.prim.GetAttr %arg0["norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_207.RMSNorm"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["7"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["6"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> loc(#loc) | |
%194 = torch.prim.GetAttr %193["5"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock"> loc(#loc) | |
%195 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> loc(#loc) | |
%196 = torch.prim.GetAttr %195["4"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock"> loc(#loc) | |
%197 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> loc(#loc) | |
%198 = torch.prim.GetAttr %197["3"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock"> loc(#loc) | |
%199 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> loc(#loc) | |
%200 = torch.prim.GetAttr %199["2"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock"> loc(#loc) | |
%201 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> loc(#loc) | |
%202 = torch.prim.GetAttr %201["1"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock"> loc(#loc) | |
%203 = torch.prim.GetAttr %arg0["layers"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> loc(#loc) | |
%204 = torch.prim.GetAttr %203["0"] : !torch.nn.Module<"__torch__.torch.nn.modules.container.___torch_mangle_206.ModuleList"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock"> loc(#loc) | |
%205 = torch.prim.GetAttr %arg0["tok_embeddings"] : !torch.nn.Module<"__torch__.model.___torch_mangle_210.Transformer"> -> !torch.nn.Module<"__torch__.torch.nn.modules.sparse.___torch_mangle_101.Embedding"> loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc37) | |
%206 = torch.aten.size.int %185, %int1 : !torch.tensor<[1,1],si64>, !torch.int -> !torch.int loc(#loc37) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1],si64> to !torch.tensor loc(#loc) | |
%209 = torch.prim.CallMethod %205["forward"] (%208) : !torch.nn.Module<"__torch__.torch.nn.modules.sparse.___torch_mangle_101.Embedding">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%210 = torch.tensor.literal(dense_resource<__elided__> : tensor<2048x32x2xf32>) : !torch.tensor<[2048,32,2],f32> loc(#loc38) | |
%int6 = torch.constant.int 6 loc(#loc38) | |
%int0 = torch.constant.int 0 loc(#loc38) | |
%cpu = torch.constant.device "cpu" loc(#loc38) | |
%none_0 = torch.constant.none loc(#loc) | |
%false = torch.constant.bool false loc(#loc38) | |
%false_1 = torch.constant.bool false loc(#loc38) | |
%none_2 = torch.constant.none loc(#loc) | |
%211 = torch.aten.to.dtype_layout %210, %int6, %int0, %cpu, %none_0, %false, %false_1, %none_2 : !torch.tensor<[2048,32,2],f32>, !torch.int, !torch.int, !torch.Device, !torch.none, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[2048,32,2],f32> loc(#loc38) | |
%212 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc39) | |
%int1_3 = torch.constant.int 1 loc(#loc39) | |
%213 = torch.aten.add.Tensor %207, %212, %int1_3 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc39) | |
%214 = torch.aten.Int.Tensor %213 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_4 = torch.constant.int 0 loc(#loc39) | |
%int1_5 = torch.constant.int 1 loc(#loc39) | |
%int1_6 = torch.constant.int 1 loc(#loc39) | |
%215 = torch.aten.slice.Tensor %211, %int0_4, %int1_5, %214, %int1_6 : !torch.tensor<[2048,32,2],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,32,2],f32> loc(#loc39) | |
%216 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%217 = torch.prim.CallMethod %204["forward"] (%209, %216) : !torch.nn.Module<"__torch__.model.___torch_mangle_114.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%218 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%219 = torch.prim.CallMethod %202["forward"] (%217, %218) : !torch.nn.Module<"__torch__.model.___torch_mangle_127.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%220 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%221 = torch.prim.CallMethod %200["forward"] (%219, %220) : !torch.nn.Module<"__torch__.model.___torch_mangle_140.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%222 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%223 = torch.prim.CallMethod %198["forward"] (%221, %222) : !torch.nn.Module<"__torch__.model.___torch_mangle_153.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%225 = torch.prim.CallMethod %196["forward"] (%223, %224) : !torch.nn.Module<"__torch__.model.___torch_mangle_166.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%227 = torch.prim.CallMethod %194["forward"] (%225, %226) : !torch.nn.Module<"__torch__.model.___torch_mangle_179.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%228 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%229 = torch.prim.CallMethod %192["forward"] (%227, %228) : !torch.nn.Module<"__torch__.model.___torch_mangle_192.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%230 = torch.tensor_static_info_cast %215 : !torch.tensor<[1,32,2],f32> to !torch.tensor loc(#loc39) | |
%231 = torch.prim.CallMethod %190["forward"] (%229, %230) : !torch.nn.Module<"__torch__.model.___torch_mangle_205.TransformerBlock">, (!torch.tensor, !torch.tensor) -> !torch.tensor loc(#loc) | |
%232 = torch.prim.CallMethod %188["forward"] (%231) : !torch.nn.Module<"__torch__.model.___torch_mangle_207.RMSNorm">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int0_7 = torch.constant.int 0 loc(#loc40) | |
%int0_8 = torch.constant.int 0 loc(#loc40) | |
%int9223372036854775807 = torch.constant.int 9223372036854775807 loc(#loc40) | |
%int1_9 = torch.constant.int 1 loc(#loc40) | |
%233 = torch.aten.slice.Tensor %232, %int0_7, %int0_8, %int9223372036854775807, %int1_9 : !torch.tensor, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc40) | |
%int1_10 = torch.constant.int 1 loc(#loc40) | |
%int-1 = torch.constant.int -1 loc(#loc40) | |
%234 = torch.aten.select.int %233, %int1_10, %int-1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.int -> !torch.tensor<[1,512],f32> loc(#loc40) | |
%int1_11 = torch.constant.int 1 loc(#loc40) | |
%int0_12 = torch.constant.int 0 loc(#loc40) | |
%int9223372036854775807_13 = torch.constant.int 9223372036854775807 loc(#loc40) | |
%int1_14 = torch.constant.int 1 loc(#loc40) | |
%235 = torch.aten.slice.Tensor %234, %int1_11, %int0_12, %int9223372036854775807_13, %int1_14 : !torch.tensor<[1,512],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,512],f32> loc(#loc40) | |
%236 = torch.tensor_static_info_cast %235 : !torch.tensor<[1,512],f32> to !torch.tensor loc(#loc40) | |
%237 = torch.prim.CallMethod %187["forward"] (%236) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_208.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int6_15 = torch.constant.int 6 loc(#loc41) | |
%false_16 = torch.constant.bool false loc(#loc41) | |
%false_17 = torch.constant.bool false loc(#loc41) | |
%none_18 = torch.constant.none loc(#loc) | |
%238 = torch.aten.to.dtype %237, %int6_15, %false_16, %false_17, %none_18 : !torch.tensor, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1],f32> loc(#loc41) | |
%239 = torch.tensor_static_info_cast %238 : !torch.tensor<[1,1],f32> to !torch.tensor loc(#loc41) | |
return %239 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
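// Top-level driver: embed the token, slice the precomputed (2048, 32, 2) frequency
// table at the current position, run the eight TransformerBlocks, apply the final
// RMSNorm, and project only the last position through the output head. A hedged
// PyTorch sketch; `freqs_cis` and `start_pos` are illustrative names, while
// tok_embeddings/layers/norm/output match the attributes read above:
//
//     import torch
//
//     @torch.no_grad()
//     def transformer_forward(model, tokens, start_pos):
//         _, seqlen = tokens.shape                       # (1, 1) in this trace
//         h = model.tok_embeddings(tokens)
//         freqs_cis = model.freqs_cis[start_pos : start_pos + seqlen]
//         for layer in model.layers:                     # layers "0" .. "7"
//             h = layer(h, freqs_cis)
//         h = model.norm(h)
//         return model.output(h[:, -1, :]).float()       # last position only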
func.func private @__torch__.torch.nn.modules.sparse.___torch_mangle_211.Embedding.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.sparse.___torch_mangle_211.Embedding"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1],si64> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.sparse.___torch_mangle_211.Embedding"> -> !torch.tensor loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc1) | |
%false = torch.constant.bool false loc(#loc1) | |
%false_0 = torch.constant.bool false loc(#loc1) | |
%187 = torch.aten.embedding %186, %185, %int-1, %false, %false_0 : !torch.tensor, !torch.tensor<[1,1],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.tensor<[1,1,512],f32> loc(#loc1) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc1) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
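// The lookup above is plain aten.embedding with padding_idx = -1 (the "no
// padding" sentinel) and no gradient scaling. PyTorch equivalent:
//
//     import torch.nn.functional as F
//
//     def embedding_forward(weight, tokens):
//         return F.embedding(tokens, weight)   # rows of weight indexed by tokens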
func.func private @__torch__.model.___torch_mangle_222.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_222.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_222.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
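// The next four Linear.forward specializations (mangle_212..215) are the bias-free
// (the bias operand is torch.constant.none) 512 -> 512 attention projections wq, wk,
// wv, and wo used by the TransformerBlock below, followed by the ffn_norm RMSNorm.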
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_212.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_212.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_212.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_213.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_213.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_213.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_214.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_214.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_214.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_215.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_215.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_215.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_223.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_223.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_223.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
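// FeedForward projections: mangle_218 (w1) and mangle_220 (w3) map 512 -> 1536, and
// mangle_219 (w2) maps 1536 -> 512; all three are bias-free.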
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_218.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_218.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_218.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_220.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_220.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_220.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_219.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_219.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_219.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
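// TransformerBlock.forward: one full decoder layer. The prim.GetAttr prologue pulls
// out the submodules; the body then runs attention_norm -> wq/wk/wv -> rotary
// embedding -> KV-cache update -> scaled dot-product attention -> wo -> residual add
// -> ffn_norm -> SwiGLU feed-forward, w2(silu(w1(x)) * w3(x)) -> residual add.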
func.func private @__torch__.model.___torch_mangle_224.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc)
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_221.FeedForward"> loc(#loc)
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_221.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_219.Linear"> loc(#loc)
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_221.FeedForward"> loc(#loc)
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_221.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_220.Linear"> loc(#loc)
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_221.FeedForward"> loc(#loc)
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_221.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_218.Linear"> loc(#loc)
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_223.RMSNorm"> loc(#loc)
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_217.Attention"> loc(#loc)
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_217.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_215.Linear"> loc(#loc)
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_217.Attention"> loc(#loc)
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_217.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_214.Linear"> loc(#loc)
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_217.Attention"> loc(#loc)
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_217.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_213.Linear"> loc(#loc)
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_217.Attention"> loc(#loc)
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_217.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_212.Linear"> loc(#loc)
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_224.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_222.RMSNorm"> loc(#loc)
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_222.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%int0 = torch.constant.int 0 loc(#loc6)
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1 = torch.constant.int 1 loc(#loc6)
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_212.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_213.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_214.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
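// Reshape the q/k/v projections to [1,1,8,64] (8 heads, head_dim 64), then view the
// last dimension of q and k as 32 complex pairs (aten.view_as_complex) so the rotary
// position embedding can be applied as a complex multiply with the %arg2 frequencies.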
%int8 = torch.constant.int 8 loc(#loc7)
%int64 = torch.constant.int 64 loc(#loc7)
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7)
%int8_0 = torch.constant.int 8 loc(#loc8)
%int64_1 = torch.constant.int 64 loc(#loc8)
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8)
%int8_2 = torch.constant.int 8 loc(#loc9)
%int64_3 = torch.constant.int 64 loc(#loc9)
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9)
%int6 = torch.constant.int 6 loc(#loc10)
%false = torch.constant.bool false loc(#loc10)
%false_4 = torch.constant.bool false loc(#loc10)
%none_5 = torch.constant.none loc(#loc)
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10)
%int0_6 = torch.constant.int 0 loc(#loc10)
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_7 = torch.constant.int 1 loc(#loc10)
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2 = torch.constant.int 2 loc(#loc10)
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1 = torch.constant.int -1 loc(#loc10)
%int2_8 = torch.constant.int 2 loc(#loc10)
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10)
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10)
%int6_9 = torch.constant.int 6 loc(#loc11)
%false_10 = torch.constant.bool false loc(#loc11)
%false_11 = torch.constant.bool false loc(#loc11)
%none_12 = torch.constant.none loc(#loc)
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11)
%int0_13 = torch.constant.int 0 loc(#loc11)
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_14 = torch.constant.int 1 loc(#loc11)
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2_15 = torch.constant.int 2 loc(#loc11)
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1_16 = torch.constant.int -1 loc(#loc11)
%int2_17 = torch.constant.int 2 loc(#loc11)
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11)
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11)
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12)
%int1_18 = torch.constant.int 1 loc(#loc13)
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int3 = torch.constant.int 3 loc(#loc13)
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_19 = torch.constant.int 1 loc(#loc14)
%int1_20 = torch.constant.int 1 loc(#loc14)
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14)
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15)
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15)
%int3_21 = torch.constant.int 3 loc(#loc15)
%int-1_22 = torch.constant.int -1 loc(#loc15)
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15)
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16)
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16)
%int3_23 = torch.constant.int 3 loc(#loc16)
%int-1_24 = torch.constant.int -1 loc(#loc16)
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16)
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
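// KV-cache update: the two elided [32,1024,8,64] literals appear to serve as this
// layer's key and value caches. The rotated key (%276) and the value (%233) are
// written into the current position with slice + copy_, and the slices covering all
// cached positions (shape [1,2,8,64] at this step) are read back for attention.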
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%cpu = torch.constant.device "cpu" loc(#loc18)
%int6_25 = torch.constant.int 6 loc(#loc18)
%false_26 = torch.constant.bool false loc(#loc18)
%false_27 = torch.constant.bool false loc(#loc18)
%none_28 = torch.constant.none loc(#loc)
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
%cpu_29 = torch.constant.device "cpu" loc(#loc19)
%int6_30 = torch.constant.int 6 loc(#loc19)
%false_31 = torch.constant.bool false loc(#loc19)
%false_32 = torch.constant.bool false loc(#loc19)
%none_33 = torch.constant.none loc(#loc)
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20)
%int1_34 = torch.constant.int 1 loc(#loc20)
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20)
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_35 = torch.constant.int 0 loc(#loc20)
%int0_36 = torch.constant.int 0 loc(#loc20)
%int1_37 = torch.constant.int 1 loc(#loc20)
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20)
%int1_38 = torch.constant.int 1 loc(#loc20)
%int1_39 = torch.constant.int 1 loc(#loc20)
%int1_40 = torch.constant.int 1 loc(#loc20)
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%false_41 = torch.constant.bool false loc(#loc)
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21)
%int1_42 = torch.constant.int 1 loc(#loc21)
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21)
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_43 = torch.constant.int 0 loc(#loc21)
%int0_44 = torch.constant.int 0 loc(#loc21)
%int1_45 = torch.constant.int 1 loc(#loc21)
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21)
%int1_46 = torch.constant.int 1 loc(#loc21)
%int1_47 = torch.constant.int 1 loc(#loc21)
%int1_48 = torch.constant.int 1 loc(#loc21)
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
%false_49 = torch.constant.bool false loc(#loc)
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22)
%int1_50 = torch.constant.int 1 loc(#loc22)
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22)
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_51 = torch.constant.int 0 loc(#loc22)
%int0_52 = torch.constant.int 0 loc(#loc22)
%int1_53 = torch.constant.int 1 loc(#loc22)
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22)
%int1_54 = torch.constant.int 1 loc(#loc22)
%int0_55 = torch.constant.int 0 loc(#loc22)
%int1_56 = torch.constant.int 1 loc(#loc22)
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22)
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23)
%int1_57 = torch.constant.int 1 loc(#loc23)
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23)
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_58 = torch.constant.int 0 loc(#loc23)
%int0_59 = torch.constant.int 0 loc(#loc23)
%int1_60 = torch.constant.int 1 loc(#loc23)
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23)
%int1_61 = torch.constant.int 1 loc(#loc23)
%int0_62 = torch.constant.int 0 loc(#loc23)
%int1_63 = torch.constant.int 1 loc(#loc23)
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23)
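// Scaled dot-product attention: scores = (q @ k^T) / 8.0, where 8 = sqrt(head_dim 64),
// then softmax over the cached positions and scores @ v. No mask op appears in this
// trace, consistent with a single new token attending to the whole cache.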
%int1_64 = torch.constant.int 1 loc(#loc24)
%int2_65 = torch.constant.int 2 loc(#loc24)
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24)
%int1_66 = torch.constant.int 1 loc(#loc25)
%int2_67 = torch.constant.int 2 loc(#loc25)
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25)
%int1_68 = torch.constant.int 1 loc(#loc26)
%int2_69 = torch.constant.int 2 loc(#loc26)
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26)
%int2_70 = torch.constant.int 2 loc(#loc27)
%int3_71 = torch.constant.int 3 loc(#loc27)
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27)
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27)
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
%int6_72 = torch.constant.int 6 loc(#loc28)
%false_73 = torch.constant.bool false loc(#loc28)
%false_74 = torch.constant.bool false loc(#loc28)
%none_75 = torch.constant.none loc(#loc)
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%int-1_76 = torch.constant.int -1 loc(#loc29)
%none_77 = torch.constant.none loc(#loc)
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29)
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30)
%int1_78 = torch.constant.int 1 loc(#loc31)
%int2_79 = torch.constant.int 2 loc(#loc31)
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31)
%int0_80 = torch.constant.int 0 loc(#loc32)
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32)
%int-1_81 = torch.constant.int -1 loc(#loc32)
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32)
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32)
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_215.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
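// Residual add against the pre-norm activations (%205#1), then the SwiGLU
// feed-forward: ffn_norm -> w2(silu(w1(x)) * w3(x)) -> second residual add,
// returning the [1,1,512] layer output.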
%int1_82 = torch.constant.int 1 loc(#loc33)
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33)
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33)
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_223.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_218.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34)
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_220.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35)
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35)
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_219.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int1_83 = torch.constant.int 1 loc(#loc36)
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36)
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36)
return %333 : !torch.tensor loc(#loc)
} loc(#loc)
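// The definitions below repeat the same per-layer specializations for the next layer
// under new mangled names (RMSNorm mangle_235/236, attention Linears mangle_225..228,
// FeedForward Linears mangle_231/233/232, TransformerBlock mangle_237); their bodies
// match the ones above line for line apart from those names.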
func.func private @__torch__.model.___torch_mangle_235.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_235.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_235.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_225.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_225.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_225.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_226.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_226.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_226.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_227.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_227.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_227.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_228.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_228.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_228.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_236.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_236.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_236.RMSNorm"> -> !torch.tensor loc(#loc)
%int6 = torch.constant.int 6 loc(#loc2)
%false = torch.constant.bool false loc(#loc2)
%false_0 = torch.constant.bool false loc(#loc2)
%none_1 = torch.constant.none loc(#loc)
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%int2 = torch.constant.int 2 loc(#loc3)
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%int-1 = torch.constant.int -1 loc(#loc3)
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc)
%true_2 = torch.constant.bool true loc(#loc3)
%none_3 = torch.constant.none loc(#loc)
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3)
%int1 = torch.constant.int 1 loc(#loc3)
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3)
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3)
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2)
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4)
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc)
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc)
return %198 : !torch.tuple<tensor, tensor> loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_231.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_231.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_231.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_233.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_233.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_233.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_232.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_232.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc)
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_232.Linear"> -> !torch.tensor loc(#loc)
%none_0 = torch.constant.none loc(#loc)
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5)
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5)
return %188 : !torch.tensor loc(#loc)
} loc(#loc)
func.func private @__torch__.model.___torch_mangle_237.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor {
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc)
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc)
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_234.FeedForward"> loc(#loc)
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_234.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_232.Linear"> loc(#loc)
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_234.FeedForward"> loc(#loc)
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_234.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_233.Linear"> loc(#loc)
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_234.FeedForward"> loc(#loc)
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_234.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_231.Linear"> loc(#loc)
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_236.RMSNorm"> loc(#loc)
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_230.Attention"> loc(#loc)
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_230.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_228.Linear"> loc(#loc)
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_230.Attention"> loc(#loc)
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_230.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_227.Linear"> loc(#loc)
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_230.Attention"> loc(#loc)
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_230.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_226.Linear"> loc(#loc)
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_230.Attention"> loc(#loc)
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_230.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_225.Linear"> loc(#loc)
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_237.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_235.RMSNorm"> loc(#loc)
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_235.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
%int0 = torch.constant.int 0 loc(#loc6)
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1 = torch.constant.int 1 loc(#loc6)
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6)
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_225.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_226.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_227.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int8 = torch.constant.int 8 loc(#loc7)
%int64 = torch.constant.int 64 loc(#loc7)
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7)
%int8_0 = torch.constant.int 8 loc(#loc8)
%int64_1 = torch.constant.int 64 loc(#loc8)
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8)
%int8_2 = torch.constant.int 8 loc(#loc9)
%int64_3 = torch.constant.int 64 loc(#loc9)
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9)
%int6 = torch.constant.int 6 loc(#loc10)
%false = torch.constant.bool false loc(#loc10)
%false_4 = torch.constant.bool false loc(#loc10)
%none_5 = torch.constant.none loc(#loc)
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10)
%int0_6 = torch.constant.int 0 loc(#loc10)
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_7 = torch.constant.int 1 loc(#loc10)
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2 = torch.constant.int 2 loc(#loc10)
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10)
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1 = torch.constant.int -1 loc(#loc10)
%int2_8 = torch.constant.int 2 loc(#loc10)
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10)
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10)
%int6_9 = torch.constant.int 6 loc(#loc11)
%false_10 = torch.constant.bool false loc(#loc11)
%false_11 = torch.constant.bool false loc(#loc11)
%none_12 = torch.constant.none loc(#loc)
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11)
%int0_13 = torch.constant.int 0 loc(#loc11)
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int1_14 = torch.constant.int 1 loc(#loc11)
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int2_15 = torch.constant.int 2 loc(#loc11)
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11)
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int-1_16 = torch.constant.int -1 loc(#loc11)
%int2_17 = torch.constant.int 2 loc(#loc11)
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11)
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11)
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12)
%int1_18 = torch.constant.int 1 loc(#loc13)
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13)
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc)
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int3 = torch.constant.int 3 loc(#loc13)
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
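// %234..%276 lower a rotary position embedding: q and k are cast to f32, | |
// reshaped to [..., 32, 2], reinterpreted as complex numbers, multiplied by | |
// the precomputed frequencies (%186 -> %260, broadcast as [1,1,1,32]), and | |
// flattened back to [1,1,8,64]. A plausible Python source, assuming the | |
// standard LLaMA apply_rotary_emb (function and variable names are guesses): | |
// | |
//   def apply_rotary_emb(xq, xk, freqs_cis): | |
//       xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) | |
//       xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) | |
//       freqs_cis = freqs_cis.view(1, xq_.shape[1], 1, xq_.shape[-1]) | |
//       xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) | |
//       xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) | |
//       return xq_out.type_as(xq), xk_out.type_as(xk) | |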
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
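// %277/%279 are presumably the attention's cache_k/cache_v buffers | |
// ([max_batch = 32, max_seq_len = 1024, 8, 64]), inlined by the tracer as | |
// elided dense resources and moved to the cpu device. | |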
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
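// The two slice + copy_ pairs above write the current position into the | |
// caches, consistent with | |
//   cache_k[:bsz, start_pos : start_pos + seqlen] = xk | |
//   cache_v[:bsz, start_pos : start_pos + seqlen] = xv | |
// with seqlen = 1 and (apparently) start_pos = 1 baked in by tracing. | |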
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
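// %297/%302 then read the caches back for positions [0 : start_pos + seqlen] | |
// (length 2 here), i.e. keys = cache_k[:bsz, :2], values = cache_v[:bsz, :2]. | |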
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
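// Attention scores: scores = (xq @ keys.transpose(2, 3)) / sqrt(head_dim); | |
// the dense<8.0> divisor is sqrt(64). | |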
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
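// Softmax over the key dimension, weighted sum with the values, then the | |
// heads are transposed back and merged: [1,8,1,64] -> [1,1,8,64] -> [1,1,512]. | |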
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_228.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
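// First residual: h = x + wo(attention_out). Note the RMSNorm call returns a | |
// (normalized, float-cast input) tuple, so %205#1 is the pre-norm activation. | |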
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_236.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_231.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_233.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_232.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
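// Feed-forward branch with the second residual, matching a SwiGLU MLP: | |
//   out = h + w2(silu(w1(ffn_norm(h))) * w3(ffn_norm(h))), hidden dim 1536. | |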
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
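// The RMSNorm.forward copies below (mangle_248, _249, _261, ...) all lower | |
// the same computation with eps = 1e-5. A plausible Python source, assuming | |
// a LLaMA-style RMSNorm modified to also return its float-cast input (names | |
// are guesses): | |
// | |
//   def forward(self, x): | |
//       x_f = x.float() | |
//       norm = x_f * torch.rsqrt(x_f.pow(2).mean(-1, keepdim=True) + 1e-5) | |
//       return self.weight * norm.type_as(x_f), x_f | |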
func.func private @__torch__.model.___torch_mangle_248.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_248.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_248.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
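// mangle_238..mangle_241 below are the bias-free wq/wk/wv/wo projections | |
// (512 -> 512) used by the next TransformerBlock. | |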
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_238.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_238.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_238.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_239.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_239.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_239.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_240.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_240.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_240.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_241.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_241.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_241.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_249.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_249.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_249.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
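// mangle_244/mangle_246 (512 -> 1536) and mangle_245 (1536 -> 512) below are | |
// the feed-forward w1/w3 and w2 projections. | |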
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_244.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_244.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_244.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_246.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_246.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_246.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_245.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_245.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_245.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
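// mangle_250.TransformerBlock.forward wires the pieces above together; its | |
// body is op-for-op identical to the previous block, only the submodule | |
// mangles (and thus the weights) differ. A plausible Python source, assuming | |
// the usual pre-norm LLaMA block (names are guesses): | |
// | |
//   def forward(self, x, freqs_cis): | |
//       h = x + self.attention(self.attention_norm(x), freqs_cis) | |
//       return h + self.feed_forward(self.ffn_norm(h)) | |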
func.func private @__torch__.model.___torch_mangle_250.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_247.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_247.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_245.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_247.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_247.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_246.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_247.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_247.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_244.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_249.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_243.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_243.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_241.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_243.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_243.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_240.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_243.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_243.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_239.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_243.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_243.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_238.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_250.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_248.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_248.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_238.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_239.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_240.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_241.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_249.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_244.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_246.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_245.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
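// From here the per-layer pattern repeats: an RMSNorm pair, four attention | |
// projections, three feed-forward projections, and a TransformerBlock that | |
// composes them, once per remaining layer. | |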
func.func private @__torch__.model.___torch_mangle_261.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_261.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_261.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
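  // Bias-free attention projections for this block: wq, wk, wv (mangle_251..253) and the output projection wo (mangle_254), all 512 -> 512. | |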
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_251.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_251.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_251.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_252.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_252.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_252.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_253.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_253.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_253.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_254.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_254.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_254.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
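  // The same RMSNorm computation, instantiated separately as this block's ffn_norm. | |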
func.func private @__torch__.model.___torch_mangle_262.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_262.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_262.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
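  // Feed-forward projections: w1 (512 -> 1536, mangle_257), w3 (512 -> 1536, mangle_259), and w2 (1536 -> 512, mangle_258), combined below as w2(silu(w1(x)) * w3(x)). | |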
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_257.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_257.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_257.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_259.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_259.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_259.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_258.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_258.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_258.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
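  // TransformerBlock.forward: pre-norm attention with rotary position embeddings and an in-place KV-cache update, then a pre-norm SwiGLU feed-forward; each sub-layer adds a residual connection. | |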
func.func private @__torch__.model.___torch_mangle_263.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_260.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_260.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_258.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_260.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_260.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_259.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_260.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_260.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_257.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_262.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_256.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_256.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_254.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_256.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_256.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_253.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_256.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_256.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_252.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_256.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_256.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_251.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_263.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_261.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_261.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_251.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_252.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_253.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
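    // Split the 512-wide q/k/v projections into heads: [bsz, seqlen, n_heads=8, head_dim=64]. | |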
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
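    // Rotary embeddings: regroup the head dim as [..., 32, 2], reinterpret as complex, multiply by the broadcast freqs_cis argument (%186 -> %268), and flatten back to real. (torch-mlir emits complex<f64> result types here even though the operands are f32.) | |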
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
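    // KV-cache update: the elided [32,1024,8,64] literals are presumably this layer's cache_k/cache_v buffers (max_batch 32, max_seq_len 1024). The current k/v are copy_'d into the row at the decode position (start_pos appears baked in as 1 in this trace), then keys/values [0 : start_pos+seqlen] are sliced back out. | |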
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
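    // Scaled dot-product attention: scores = (q @ k^T) / 8.0 (sqrt of head_dim 64), softmax over the key axis, then scores @ v. | |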
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
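    // Attention output: transpose heads back, flatten to [1,1,512], project through wo, and add the first residual (the attention_norm pass-through %205#1). | |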
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_254.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
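    // Feed-forward sub-layer: ffn_norm, then SwiGLU -- w2(silu(w1(x)) * w3(x)) -- with its own residual below. | |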
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_262.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_257.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_259.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_258.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
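  // The per-layer pattern repeats for the next TransformerBlock (mangle_276): two RMSNorms, four attention projections, and three feed-forward projections. | |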
func.func private @__torch__.model.___torch_mangle_274.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_274.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_274.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_264.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_264.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_264.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_265.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_265.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_265.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_266.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_266.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_266.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_267.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_267.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_267.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_275.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_275.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_275.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_270.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_270.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_270.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_272.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_272.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_272.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_271.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_271.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_271.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
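  // TransformerBlock.forward for the next layer; structurally identical to mangle_263 above. | |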
func.func private @__torch__.model.___torch_mangle_276.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_273.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_273.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_271.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_273.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_273.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_272.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_273.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_273.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_270.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_275.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_269.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_269.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_267.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_269.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_269.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_266.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_269.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_269.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_265.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_269.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_269.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_264.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_276.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_274.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_274.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_264.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_265.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_266.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
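    // As in the previous block: split q/k/v into 8 heads of 64 and apply rotary embeddings via the complex multiply. | |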
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
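// The [1,32,2] second argument (presumably the precomputed llama-style freqs_cis) is viewed | |
// as complex as well and reshaped to [1,1,1,32] so it broadcasts over the batch and head | |
// dimensions; multiplying and flattening back through aten.view_as_real applies the rotation. | |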
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
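// KV-cache update: the two elided [32,1024,8,64] constants below are plausibly the cache_k / | |
// cache_v buffers ([max_batch=32, max_seq_len=1024, n_heads=8, head_dim=64]) moved to CPU. | |
// The rotated key and the value are copied in at sequence position 1 (the trace fixes | |
// start_pos = 1, seqlen = 1), and positions [0:2] are then sliced back out as the attended | |
// keys and values. | |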
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
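// Scaled dot-product attention: q, k, v are transposed to [batch, heads, seq, head_dim]; | |
// scores = (q @ k^T) / 8.0 (8 = sqrt(head_dim) = sqrt(64)), softmax taken in f32, then | |
// scores @ v. | |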
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
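// Output projection and first residual: wo(attention output) is added to %205#1, the f32-cast | |
// block input returned as the second element of the attention_norm tuple. | |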
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_267.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_275.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
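// SwiGLU feed-forward with hidden width 1536: w2(silu(w1(x)) * w3(x)), followed by the | |
// second residual add. | |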
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_270.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_272.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_271.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
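// RMSNorm forward (mangled copy _287; bound below as attention_norm of the next | |
// TransformerBlock). In f32 it computes y = weight * x / sqrt(mean(x^2, dim=-1) + 1e-5) | |
// and returns the tuple (y, x_f32); callers feed the second element into the residual adds. | |
// A minimal PyTorch sketch of the same computation (names assumed, not from the source): | |
//   xf = x.float() | |
//   y = self.weight * (xf * torch.rsqrt(xf.pow(2).mean(-1, keepdim=True) + 1e-5)) | |
//   return y, xf | |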
func.func private @__torch__.model.___torch_mangle_287.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_287.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_287.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
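// The next four functions are identical bias-free nn.Linear forwards (512 -> 512), i.e. | |
// torch.aten.linear(x, weight, none); the call sites below bind them as wq, wk, wv and wo. | |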
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_277.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_277.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_277.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_278.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_278.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_278.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_279.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_279.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_279.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_280.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_280.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_280.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
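// ffn_norm for the same block: the RMSNorm body above is repeated verbatim; TorchScript | |
// emits one separately mangled copy per module instance. | |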
func.func private @__torch__.model.___torch_mangle_288.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_288.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_288.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
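// FeedForward projections for the block: w1 and w3 map 512 -> 1536 and w2 maps 1536 -> 512, | |
// matching the SwiGLU computation w2(silu(w1(x)) * w3(x)) in the block body below. | |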
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_283.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_283.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_283.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_285.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_285.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_285.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_284.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_284.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_284.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
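// TransformerBlock.forward binding the _277.._288 submodules above. Op for op it repeats the | |
// block traced earlier: attention_norm -> wq/wk/wv -> rotary embedding -> KV-cache update -> | |
// scaled dot-product attention -> wo + residual -> ffn_norm -> SwiGLU FFN + residual. | |
// A rough PyTorch sketch of the traced per-token step (llama-style; names assumed): | |
//   xq, xk = apply_rotary_emb(wq(x), wk(x), freqs_cis)            # [1,1,8,64] each | |
//   cache_k[:1, 1:2], cache_v[:1, 1:2] = xk, wv(x).view(1, 1, 8, 64) | |
//   k, v = cache_k[:1, :2].transpose(1, 2), cache_v[:1, :2].transpose(1, 2) | |
//   scores = (xq.transpose(1, 2) @ k.transpose(2, 3)) / math.sqrt(64) | |
//   out = wo((scores.softmax(-1) @ v).transpose(1, 2).reshape(1, 1, 512)) | |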
func.func private @__torch__.model.___torch_mangle_289.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_286.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_286.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_284.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_286.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_286.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_285.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_286.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_286.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_283.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_288.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_282.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_282.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_280.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_282.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_282.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_279.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_282.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_282.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_278.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_282.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_282.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_277.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_289.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_287.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_287.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_277.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_278.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_279.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_280.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_288.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_283.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_285.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_284.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
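// RMSNorm clone _300: by the ordering pattern above, presumably attention_norm of the | |
// following TransformerBlock. | |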
func.func private @__torch__.model.___torch_mangle_300.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_300.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_300.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
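// Linear clones _290.._293: presumably the wq/wk/wv/wo projections (512 -> 512) of the | |
// following block. | |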
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_290.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_290.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_290.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_291.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_291.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_291.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_292.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_292.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_292.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_293.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_293.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_293.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
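// RMSNorm.forward in closed form (eps = 1e-5; %int6 is the torch dtype | |
// code for float32): | |
//   out = x_f32 * rsqrt(mean(x_f32^2, dim=-1, keepdim=True) + eps) * weight | |
// It returns the tuple (out, x_f32); callers keep the f32 copy of the | |
// input as the residual branch of the block. | |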
func.func private @__torch__.model.___torch_mangle_301.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_301.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_301.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
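// FeedForward projections: w1 (mangle_296) and w3 (mangle_298) expand | |
// 512 -> 1536, w2 (mangle_297) contracts 1536 -> 512. The TransformerBlock | |
// combines them as w2(silu(w1(x)) * w3(x)), i.e. a SwiGLU MLP. | |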
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_296.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_296.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_296.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_298.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_298.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_298.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_297.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_297.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_297.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
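// TransformerBlock.forward, summarized: | |
//   h   = x + wo(attention(attention_norm(x), freqs_cis)) | |
//   out = h + feed_forward(ffn_norm(h)) | |
// where %arg2 carries the rotary frequencies freqs_cis of shape [1,32,2] | |
// and each residual add reuses the f32 input returned by the preceding | |
// RMSNorm. | |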
func.func private @__torch__.model.___torch_mangle_302.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_299.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_299.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_297.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_299.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_299.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_298.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_299.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_299.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_296.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_301.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_295.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_295.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_293.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_295.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_295.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_292.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_295.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_295.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_291.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_295.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_295.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_290.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_302.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_300.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_300.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
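// %206..%215 and %216..%221 all materialize the same two sizes, | |
// bsz = size(x, 0) and seqlen = size(x, 1) (both 1 here); the tracer | |
// round-trips each use site through NumToTensor/Int.Tensor rather than | |
// sharing one SSA value. | |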
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_290.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_291.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_292.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
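// q, k, v reshaped to (bsz, seqlen, n_heads = 8, head_dim = 64). The ops | |
// through %276 apply rotary embeddings: view each 64-wide head as 32 | |
// complex pairs, multiply by freqs_cis, view_as_real, and flatten back. | |
// The printed element type complex<f64> alongside f32 operands looks like | |
// a dtype-inference quirk of the "aten.view_as_complex" fallback, not an | |
// actual promotion to double. | |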
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15) | |
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15) | |
%int3_21 = torch.constant.int 3 loc(#loc15) | |
%int-1_22 = torch.constant.int -1 loc(#loc15) | |
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15) | |
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16) | |
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16) | |
%int3_23 = torch.constant.int 3 loc(#loc16) | |
%int-1_24 = torch.constant.int -1 loc(#loc16) | |
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16) | |
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17) | |
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%cpu = torch.constant.device "cpu" loc(#loc18) | |
%int6_25 = torch.constant.int 6 loc(#loc18) | |
%false_26 = torch.constant.bool false loc(#loc18) | |
%false_27 = torch.constant.bool false loc(#loc18) | |
%none_28 = torch.constant.none loc(#loc) | |
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18) | |
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%cpu_29 = torch.constant.device "cpu" loc(#loc19) | |
%int6_30 = torch.constant.int 6 loc(#loc19) | |
%false_31 = torch.constant.bool false loc(#loc19) | |
%false_32 = torch.constant.bool false loc(#loc19) | |
%none_33 = torch.constant.none loc(#loc) | |
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19) | |
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20) | |
%int1_34 = torch.constant.int 1 loc(#loc20) | |
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20) | |
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_35 = torch.constant.int 0 loc(#loc20) | |
%int0_36 = torch.constant.int 0 loc(#loc20) | |
%int1_37 = torch.constant.int 1 loc(#loc20) | |
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20) | |
%int1_38 = torch.constant.int 1 loc(#loc20) | |
%int1_39 = torch.constant.int 1 loc(#loc20) | |
%int1_40 = torch.constant.int 1 loc(#loc20) | |
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%false_41 = torch.constant.bool false loc(#loc) | |
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20) | |
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21) | |
%int1_42 = torch.constant.int 1 loc(#loc21) | |
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21) | |
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_43 = torch.constant.int 0 loc(#loc21) | |
%int0_44 = torch.constant.int 0 loc(#loc21) | |
%int1_45 = torch.constant.int 1 loc(#loc21) | |
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21) | |
%int1_46 = torch.constant.int 1 loc(#loc21) | |
%int1_47 = torch.constant.int 1 loc(#loc21) | |
%int1_48 = torch.constant.int 1 loc(#loc21) | |
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%false_49 = torch.constant.bool false loc(#loc) | |
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21) | |
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22) | |
%int1_50 = torch.constant.int 1 loc(#loc22) | |
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22) | |
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_51 = torch.constant.int 0 loc(#loc22) | |
%int0_52 = torch.constant.int 0 loc(#loc22) | |
%int1_53 = torch.constant.int 1 loc(#loc22) | |
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22) | |
%int1_54 = torch.constant.int 1 loc(#loc22) | |
%int0_55 = torch.constant.int 0 loc(#loc22) | |
%int1_56 = torch.constant.int 1 loc(#loc22) | |
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22) | |
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23) | |
%int1_57 = torch.constant.int 1 loc(#loc23) | |
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23) | |
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int0_58 = torch.constant.int 0 loc(#loc23) | |
%int0_59 = torch.constant.int 0 loc(#loc23) | |
%int1_60 = torch.constant.int 1 loc(#loc23) | |
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23) | |
%int1_61 = torch.constant.int 1 loc(#loc23) | |
%int0_62 = torch.constant.int 0 loc(#loc23) | |
%int1_63 = torch.constant.int 1 loc(#loc23) | |
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23) | |
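// Scaled dot-product attention over the 2 positions now in the cache: | |
//   scores = (q @ k^T) / sqrt(head_dim) = (q @ k^T) / 8.0   (%307..%309) | |
//   out    = softmax(scores, dim=-1) @ v                    (%310..%313) | |
// No causal mask is materialized in this listing: with seqlen = 1 the | |
// single query may attend to every cached position. | |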
%int1_64 = torch.constant.int 1 loc(#loc24) | |
%int2_65 = torch.constant.int 2 loc(#loc24) | |
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24) | |
%int1_66 = torch.constant.int 1 loc(#loc25) | |
%int2_67 = torch.constant.int 2 loc(#loc25) | |
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25) | |
%int1_68 = torch.constant.int 1 loc(#loc26) | |
%int2_69 = torch.constant.int 2 loc(#loc26) | |
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26) | |
%int2_70 = torch.constant.int 2 loc(#loc27) | |
%int3_71 = torch.constant.int 3 loc(#loc27) | |
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27) | |
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27) | |
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27) | |
%int6_72 = torch.constant.int 6 loc(#loc28) | |
%false_73 = torch.constant.bool false loc(#loc28) | |
%false_74 = torch.constant.bool false loc(#loc28) | |
%none_75 = torch.constant.none loc(#loc) | |
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%int-1_76 = torch.constant.int -1 loc(#loc29) | |
%none_77 = torch.constant.none loc(#loc) | |
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29) | |
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28) | |
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30) | |
%int1_78 = torch.constant.int 1 loc(#loc31) | |
%int2_79 = torch.constant.int 2 loc(#loc31) | |
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31) | |
%int0_80 = torch.constant.int 0 loc(#loc32) | |
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32) | |
%int-1_81 = torch.constant.int -1 loc(#loc32) | |
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32) | |
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32) | |
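// Heads are merged back to [1,1,512] (contiguous + view) and projected | |
// through wo; the add that follows is the residual x + attn_out, taking | |
// x from %205#1, the f32 copy saved by attention_norm. | |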
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_293.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_82 = torch.constant.int 1 loc(#loc33) | |
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33) | |
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33) | |
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_301.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%325 = torch.prim.CallMethod %192["forward"] (%324) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_296.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%326 = torch.aten.silu %325 : !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc34) | |
%327 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%328 = torch.prim.CallMethod %190["forward"] (%327) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_298.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%329 = torch.aten.mul.Tensor %326, %328 : !torch.tensor<[1,1,1536],f32>, !torch.tensor -> !torch.tensor<[1,1,1536],f32> loc(#loc35) | |
%330 = torch.tensor_static_info_cast %329 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc35) | |
%331 = torch.prim.CallMethod %188["forward"] (%330) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_297.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int1_83 = torch.constant.int 1 loc(#loc36) | |
%332 = torch.aten.add.Tensor %323#1, %331, %int1_83 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc36) | |
%333 = torch.tensor_static_info_cast %332 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc36) | |
return %333 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
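// Everything below repeats the identical per-layer pattern for the next | |
// TransformerBlock (___torch_mangle_313/314 norms, mangle_303..306 and | |
// mangle_309..311 projections, mangle_315 block): same shapes, same ops, | |
// fresh mangled names. | |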
func.func private @__torch__.model.___torch_mangle_313.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_313.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_313.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_303.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_303.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_303.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_304.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_304.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_304.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_305.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_305.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_305.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_306.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_306.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_306.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_314.RMSNorm.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_314.RMSNorm"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tuple<tensor, tensor> { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.model.___torch_mangle_314.RMSNorm"> -> !torch.tensor loc(#loc) | |
%int6 = torch.constant.int 6 loc(#loc2) | |
%false = torch.constant.bool false loc(#loc2) | |
%false_0 = torch.constant.bool false loc(#loc2) | |
%none_1 = torch.constant.none loc(#loc) | |
%187 = torch.aten.to.dtype %185, %int6, %false, %false_0, %none_1 : !torch.tensor<[1,1,512],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%int2 = torch.constant.int 2 loc(#loc3) | |
%188 = torch.aten.pow.Tensor_Scalar %187, %int2 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%int-1 = torch.constant.int -1 loc(#loc3) | |
%189 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc) | |
%true_2 = torch.constant.bool true loc(#loc3) | |
%none_3 = torch.constant.none loc(#loc) | |
%190 = torch.aten.mean.dim %188, %189, %true_2, %none_3 : !torch.tensor<[1,1,512],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%191 = torch.tensor.literal(dense<1.000000e-05> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc3) | |
%int1 = torch.constant.int 1 loc(#loc3) | |
%192 = torch.aten.add.Tensor %190, %191, %int1 : !torch.tensor<[1,1,1],f32>, !torch.tensor<[],f64>, !torch.int -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%193 = torch.aten.rsqrt %192 : !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,1],f32> loc(#loc3) | |
%194 = torch.aten.mul.Tensor %187, %193 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,1],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc3) | |
%195 = torch.aten.type_as %194, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tensor<[1,1,512],f32> loc(#loc2) | |
%196 = torch.aten.mul.Tensor %195, %186 : !torch.tensor<[1,1,512],f32>, !torch.tensor -> !torch.tensor<[1,1,512],f32> loc(#loc4) | |
%197 = torch.prim.TupleConstruct %196, %187 : !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> -> !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> loc(#loc) | |
%198 = torch.derefine %197 : !torch.tuple<tensor<[1,1,512],f32>, tensor<[1,1,512],f32>> to !torch.tuple<tensor, tensor> loc(#loc) | |
return %198 : !torch.tuple<tensor, tensor> loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_309.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_309.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_309.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_311.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_311.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_311.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,1536],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,1536],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.torch.nn.modules.linear.___torch_mangle_310.Linear.forward(%arg0: !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_310.Linear"> loc(unknown), %arg1: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,1536],f32> loc(#loc) | |
%186 = torch.prim.GetAttr %arg0["weight"] : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_310.Linear"> -> !torch.tensor loc(#loc) | |
%none_0 = torch.constant.none loc(#loc) | |
%187 = torch.aten.linear %185, %186, %none_0 : !torch.tensor<[1,1,1536],f32>, !torch.tensor, !torch.none -> !torch.tensor<[1,1,512],f32> loc(#loc5) | |
%188 = torch.tensor_static_info_cast %187 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc5) | |
return %188 : !torch.tensor loc(#loc) | |
} loc(#loc) | |
func.func private @__torch__.model.___torch_mangle_315.TransformerBlock.forward(%arg0: !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> loc(unknown), %arg1: !torch.tensor loc(unknown), %arg2: !torch.tensor loc(unknown)) -> !torch.tensor { | |
%185 = torch.tensor_static_info_cast %arg1 : !torch.tensor to !torch.tensor<[1,1,512],f32> loc(#loc) | |
%186 = torch.tensor_static_info_cast %arg2 : !torch.tensor to !torch.tensor<[1,32,2],f32> loc(#loc) | |
%187 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_312.FeedForward"> loc(#loc) | |
%188 = torch.prim.GetAttr %187["w2"] : !torch.nn.Module<"__torch__.model.___torch_mangle_312.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_310.Linear"> loc(#loc) | |
%189 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_312.FeedForward"> loc(#loc) | |
%190 = torch.prim.GetAttr %189["w3"] : !torch.nn.Module<"__torch__.model.___torch_mangle_312.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_311.Linear"> loc(#loc) | |
%191 = torch.prim.GetAttr %arg0["feed_forward"] : !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_312.FeedForward"> loc(#loc) | |
%192 = torch.prim.GetAttr %191["w1"] : !torch.nn.Module<"__torch__.model.___torch_mangle_312.FeedForward"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_309.Linear"> loc(#loc) | |
%193 = torch.prim.GetAttr %arg0["ffn_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_314.RMSNorm"> loc(#loc) | |
%194 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_308.Attention"> loc(#loc) | |
%195 = torch.prim.GetAttr %194["wo"] : !torch.nn.Module<"__torch__.model.___torch_mangle_308.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_306.Linear"> loc(#loc) | |
%196 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_308.Attention"> loc(#loc) | |
%197 = torch.prim.GetAttr %196["wv"] : !torch.nn.Module<"__torch__.model.___torch_mangle_308.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_305.Linear"> loc(#loc) | |
%198 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_308.Attention"> loc(#loc) | |
%199 = torch.prim.GetAttr %198["wk"] : !torch.nn.Module<"__torch__.model.___torch_mangle_308.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_304.Linear"> loc(#loc) | |
%200 = torch.prim.GetAttr %arg0["attention"] : !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_308.Attention"> loc(#loc) | |
%201 = torch.prim.GetAttr %200["wq"] : !torch.nn.Module<"__torch__.model.___torch_mangle_308.Attention"> -> !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_303.Linear"> loc(#loc) | |
%202 = torch.prim.GetAttr %arg0["attention_norm"] : !torch.nn.Module<"__torch__.model.___torch_mangle_315.TransformerBlock"> -> !torch.nn.Module<"__torch__.model.___torch_mangle_313.RMSNorm"> loc(#loc) | |
%203 = torch.tensor_static_info_cast %185 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%204 = torch.prim.CallMethod %202["forward"] (%203) : !torch.nn.Module<"__torch__.model.___torch_mangle_313.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc) | |
%205:2 = torch.prim.TupleUnpack %204 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc) | |
%int0 = torch.constant.int 0 loc(#loc6) | |
%206 = torch.aten.size.int %205#0, %int0 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%207 = torch.prim.NumToTensor.Scalar %206 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%208 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%209 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%210 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%211 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%212 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%213 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%214 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%215 = torch.aten.Int.Tensor %207 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1 = torch.constant.int 1 loc(#loc6) | |
%216 = torch.aten.size.int %205#0, %int1 : !torch.tensor<[1,1,512],f32>, !torch.int -> !torch.int loc(#loc6) | |
%217 = torch.prim.NumToTensor.Scalar %216 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%218 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%219 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%220 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%221 = torch.aten.Int.Tensor %217 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%222 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%223 = torch.prim.CallMethod %201["forward"] (%222) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_303.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%224 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%225 = torch.prim.CallMethod %199["forward"] (%224) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_304.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%226 = torch.tensor_static_info_cast %205#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc) | |
%227 = torch.prim.CallMethod %197["forward"] (%226) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_305.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc) | |
%int8 = torch.constant.int 8 loc(#loc7) | |
%int64 = torch.constant.int 64 loc(#loc7) | |
%228 = torch.prim.ListConstruct %215, %221, %int8, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%229 = torch.aten.view %223, %228 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc7) | |
%int8_0 = torch.constant.int 8 loc(#loc8) | |
%int64_1 = torch.constant.int 64 loc(#loc8) | |
%230 = torch.prim.ListConstruct %214, %220, %int8_0, %int64_1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%231 = torch.aten.view %225, %230 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc8) | |
%int8_2 = torch.constant.int 8 loc(#loc9) | |
%int64_3 = torch.constant.int 64 loc(#loc9) | |
%232 = torch.prim.ListConstruct %213, %219, %int8_2, %int64_3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%233 = torch.aten.view %227, %232 : !torch.tensor, !torch.list<int> -> !torch.tensor<[1,1,8,64],f32> loc(#loc9) | |
%int6 = torch.constant.int 6 loc(#loc10) | |
%false = torch.constant.bool false loc(#loc10) | |
%false_4 = torch.constant.bool false loc(#loc10) | |
%none_5 = torch.constant.none loc(#loc) | |
%234 = torch.aten.to.dtype %229, %int6, %false, %false_4, %none_5 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc10) | |
%int0_6 = torch.constant.int 0 loc(#loc10) | |
%235 = torch.aten.size.int %234, %int0_6 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%236 = torch.prim.NumToTensor.Scalar %235 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%237 = torch.aten.Int.Tensor %236 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_7 = torch.constant.int 1 loc(#loc10) | |
%238 = torch.aten.size.int %234, %int1_7 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%239 = torch.prim.NumToTensor.Scalar %238 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%240 = torch.aten.Int.Tensor %239 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2 = torch.constant.int 2 loc(#loc10) | |
%241 = torch.aten.size.int %234, %int2 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc10) | |
%242 = torch.prim.NumToTensor.Scalar %241 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%243 = torch.aten.Int.Tensor %242 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1 = torch.constant.int -1 loc(#loc10) | |
%int2_8 = torch.constant.int 2 loc(#loc10) | |
%244 = torch.prim.ListConstruct %237, %240, %243, %int-1, %int2_8 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%245 = torch.aten.reshape %234, %244 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc10) | |
%246 = torch.operator "aten.view_as_complex"(%245) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc10) | |
%int6_9 = torch.constant.int 6 loc(#loc11) | |
%false_10 = torch.constant.bool false loc(#loc11) | |
%false_11 = torch.constant.bool false loc(#loc11) | |
%none_12 = torch.constant.none loc(#loc) | |
%247 = torch.aten.to.dtype %231, %int6_9, %false_10, %false_11, %none_12 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,1,8,64],f32> loc(#loc11) | |
%int0_13 = torch.constant.int 0 loc(#loc11) | |
%248 = torch.aten.size.int %247, %int0_13 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%249 = torch.prim.NumToTensor.Scalar %248 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%250 = torch.aten.Int.Tensor %249 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_14 = torch.constant.int 1 loc(#loc11) | |
%251 = torch.aten.size.int %247, %int1_14 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%252 = torch.prim.NumToTensor.Scalar %251 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%253 = torch.aten.Int.Tensor %252 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int2_15 = torch.constant.int 2 loc(#loc11) | |
%254 = torch.aten.size.int %247, %int2_15 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.int loc(#loc11) | |
%255 = torch.prim.NumToTensor.Scalar %254 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%256 = torch.aten.Int.Tensor %255 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int-1_16 = torch.constant.int -1 loc(#loc11) | |
%int2_17 = torch.constant.int 2 loc(#loc11) | |
%257 = torch.prim.ListConstruct %250, %253, %256, %int-1_16, %int2_17 : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%258 = torch.aten.reshape %247, %257 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc11) | |
%259 = torch.operator "aten.view_as_complex"(%258) : (!torch.tensor<[1,1,8,32,2],f32>) -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc11) | |
%260 = torch.operator "aten.view_as_complex"(%186) : (!torch.tensor<[1,32,2],f32>) -> !torch.tensor<[1,32],complex<f64>> loc(#loc12) | |
%int1_18 = torch.constant.int 1 loc(#loc13) | |
%261 = torch.aten.size.int %246, %int1_18 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%262 = torch.prim.NumToTensor.Scalar %261 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%263 = torch.aten.Int.Tensor %262 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int3 = torch.constant.int 3 loc(#loc13) | |
%264 = torch.aten.size.int %246, %int3 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.int -> !torch.int loc(#loc13) | |
%265 = torch.prim.NumToTensor.Scalar %264 : !torch.int -> !torch.tensor<[],si64> loc(#loc) | |
%266 = torch.aten.Int.Tensor %265 : !torch.tensor<[],si64> -> !torch.int loc(#loc) | |
%int1_19 = torch.constant.int 1 loc(#loc14) | |
%int1_20 = torch.constant.int 1 loc(#loc14) | |
%267 = torch.prim.ListConstruct %int1_19, %263, %int1_20, %266 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc) | |
%268 = torch.aten.view %260, %267 : !torch.tensor<[1,32],complex<f64>>, !torch.list<int> -> !torch.tensor<[1,1,1,32],complex<f64>> loc(#loc14) | |
%269 = torch.aten.mul.Tensor %246, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc15)
%270 = torch.operator "aten.view_as_real"(%269) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc15)
%int3_21 = torch.constant.int 3 loc(#loc15)
%int-1_22 = torch.constant.int -1 loc(#loc15)
%271 = torch.aten.flatten.using_ints %270, %int3_21, %int-1_22 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc15)
%272 = torch.aten.mul.Tensor %259, %268 : !torch.tensor<[1,1,8,32],complex<f64>>, !torch.tensor<[1,1,1,32],complex<f64>> -> !torch.tensor<[1,1,8,32],complex<f64>> loc(#loc16)
%273 = torch.operator "aten.view_as_real"(%272) : (!torch.tensor<[1,1,8,32],complex<f64>>) -> !torch.tensor<[1,1,8,32,2],f32> loc(#loc16)
%int3_23 = torch.constant.int 3 loc(#loc16)
%int-1_24 = torch.constant.int -1 loc(#loc16)
%274 = torch.aten.flatten.using_ints %273, %int3_23, %int-1_24 : !torch.tensor<[1,1,8,32,2],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc16)
%275 = torch.aten.type_as %271, %234 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
%276 = torch.aten.type_as %274, %247 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32> -> !torch.tensor<[1,1,8,64],f32> loc(#loc17)
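// type_as casts both rotated tensors back to the original dtype. The two elided
// 32x1024x8x64 literals that follow appear to be the attention KV cache
// (cache_k / cache_v, shaped [max_batch, max_seq_len, n_heads, head_dim]),
// materialized on the CPU.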
%277 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%cpu = torch.constant.device "cpu" loc(#loc18)
%int6_25 = torch.constant.int 6 loc(#loc18)
%false_26 = torch.constant.bool false loc(#loc18)
%false_27 = torch.constant.bool false loc(#loc18)
%none_28 = torch.constant.none loc(#loc)
%278 = torch.aten.to.device %277, %cpu, %int6_25, %false_26, %false_27, %none_28 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc18)
%279 = torch.tensor.literal(dense_resource<__elided__> : tensor<32x1024x8x64xf32>) : !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
%cpu_29 = torch.constant.device "cpu" loc(#loc19)
%int6_30 = torch.constant.int 6 loc(#loc19)
%false_31 = torch.constant.bool false loc(#loc19)
%false_32 = torch.constant.bool false loc(#loc19)
%none_33 = torch.constant.none loc(#loc)
%280 = torch.aten.to.device %279, %cpu_29, %int6_30, %false_31, %false_32, %none_33 : !torch.tensor<[32,1024,8,64],f32>, !torch.Device, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[32,1024,8,64],f32> loc(#loc19)
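// Cache write: %217 appears to hold start_pos (traced here as 1), so %282 computes
// start_pos + seqlen with seqlen = 1. Each slice/slice/copy_ triple below performs,
// roughly, cache[:bsz, start_pos : start_pos + seqlen] = new_k_or_v in place; note the
// rotated key %276 goes into one cache and the unrotated value %233 into the other.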
%281 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc20)
%int1_34 = torch.constant.int 1 loc(#loc20)
%282 = torch.aten.add.Tensor %217, %281, %int1_34 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc20)
%283 = torch.aten.Int.Tensor %282 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_35 = torch.constant.int 0 loc(#loc20)
%int0_36 = torch.constant.int 0 loc(#loc20)
%int1_37 = torch.constant.int 1 loc(#loc20)
%284 = torch.aten.slice.Tensor %278, %int0_35, %int0_36, %212, %int1_37 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc20)
%int1_38 = torch.constant.int 1 loc(#loc20)
%int1_39 = torch.constant.int 1 loc(#loc20)
%int1_40 = torch.constant.int 1 loc(#loc20)
%285 = torch.aten.slice.Tensor %284, %int1_38, %int1_39, %283, %int1_40 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%false_41 = torch.constant.bool false loc(#loc)
%286 = torch.aten.copy_ %285, %276, %false_41 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc20)
%287 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc21)
%int1_42 = torch.constant.int 1 loc(#loc21)
%288 = torch.aten.add.Tensor %217, %287, %int1_42 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc21)
%289 = torch.aten.Int.Tensor %288 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_43 = torch.constant.int 0 loc(#loc21)
%int0_44 = torch.constant.int 0 loc(#loc21)
%int1_45 = torch.constant.int 1 loc(#loc21)
%290 = torch.aten.slice.Tensor %280, %int0_43, %int0_44, %211, %int1_45 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc21)
%int1_46 = torch.constant.int 1 loc(#loc21)
%int1_47 = torch.constant.int 1 loc(#loc21)
%int1_48 = torch.constant.int 1 loc(#loc21)
%291 = torch.aten.slice.Tensor %290, %int1_46, %int1_47, %289, %int1_48 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
%false_49 = torch.constant.bool false loc(#loc)
%292 = torch.aten.copy_ %291, %233, %false_49 : !torch.tensor<[1,1,8,64],f32>, !torch.tensor<[1,1,8,64],f32>, !torch.bool -> !torch.tensor<[1,1,8,64],f32> loc(#loc21)
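// Cache read-back: keys and values for attention are the prefix
// cache[:bsz, : start_pos + seqlen], which is why the slices below come out with
// sequence length 2 (the previously cached position plus the one just written).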
%293 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc22)
%int1_50 = torch.constant.int 1 loc(#loc22)
%294 = torch.aten.add.Tensor %217, %293, %int1_50 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc22)
%295 = torch.aten.Int.Tensor %294 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_51 = torch.constant.int 0 loc(#loc22)
%int0_52 = torch.constant.int 0 loc(#loc22)
%int1_53 = torch.constant.int 1 loc(#loc22)
%296 = torch.aten.slice.Tensor %278, %int0_51, %int0_52, %210, %int1_53 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc22)
%int1_54 = torch.constant.int 1 loc(#loc22)
%int0_55 = torch.constant.int 0 loc(#loc22)
%int1_56 = torch.constant.int 1 loc(#loc22)
%297 = torch.aten.slice.Tensor %296, %int1_54, %int0_55, %295, %int1_56 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc22)
%298 = torch.tensor.literal(dense<1> : tensor<si64>) : !torch.tensor<[],si64> loc(#loc23)
%int1_57 = torch.constant.int 1 loc(#loc23)
%299 = torch.aten.add.Tensor %217, %298, %int1_57 : !torch.tensor<[],si64>, !torch.tensor<[],si64>, !torch.int -> !torch.tensor<[],si64> loc(#loc23)
%300 = torch.aten.Int.Tensor %299 : !torch.tensor<[],si64> -> !torch.int loc(#loc)
%int0_58 = torch.constant.int 0 loc(#loc23)
%int0_59 = torch.constant.int 0 loc(#loc23)
%int1_60 = torch.constant.int 1 loc(#loc23)
%301 = torch.aten.slice.Tensor %280, %int0_58, %int0_59, %209, %int1_60 : !torch.tensor<[32,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,1024,8,64],f32> loc(#loc23)
%int1_61 = torch.constant.int 1 loc(#loc23)
%int0_62 = torch.constant.int 0 loc(#loc23)
%int1_63 = torch.constant.int 1 loc(#loc23)
%302 = torch.aten.slice.Tensor %301, %int1_61, %int0_62, %300, %int1_63 : !torch.tensor<[1,1024,8,64],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.tensor<[1,2,8,64],f32> loc(#loc23)
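// Layout change for batched attention: transpose query, keys, and values from
// (bs, seqlen, n_heads, head_dim) to (bs, n_heads, seqlen, head_dim).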
%int1_64 = torch.constant.int 1 loc(#loc24)
%int2_65 = torch.constant.int 2 loc(#loc24)
%303 = torch.aten.transpose.int %275, %int1_64, %int2_65 : !torch.tensor<[1,1,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,1,64],f32> loc(#loc24)
%int1_66 = torch.constant.int 1 loc(#loc25)
%int2_67 = torch.constant.int 2 loc(#loc25)
%304 = torch.aten.transpose.int %297, %int1_66, %int2_67 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc25)
%int1_68 = torch.constant.int 1 loc(#loc26)
%int2_69 = torch.constant.int 2 loc(#loc26)
%305 = torch.aten.transpose.int %302, %int1_68, %int2_69 : !torch.tensor<[1,2,8,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,2,64],f32> loc(#loc26)
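// Attention scores: q @ k^T scaled by 1/sqrt(head_dim); the 8.0 literal below is
// sqrt(64). Roughly, in PyTorch (a sketch, not taken from the source):
//   scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(head_dim)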
%int2_70 = torch.constant.int 2 loc(#loc27)
%int3_71 = torch.constant.int 3 loc(#loc27)
%306 = torch.aten.transpose.int %304, %int2_70, %int3_71 : !torch.tensor<[1,8,2,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,8,64,2],f32> loc(#loc27)
%307 = torch.aten.matmul %303, %306 : !torch.tensor<[1,8,1,64],f32>, !torch.tensor<[1,8,64,2],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
%308 = torch.tensor.literal(dense<8.000000e+00> : tensor<f64>) : !torch.tensor<[],f64> loc(#loc27)
%309 = torch.aten.div.Tensor %307, %308 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[],f64> -> !torch.tensor<[1,8,1,2],f32> loc(#loc27)
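// The softmax runs in float32 and is cast back with type_as -- the familiar
// scores.float().softmax(dim=-1).type_as(xq) pattern; %313 is then the
// attention-weighted sum over the values.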
%int6_72 = torch.constant.int 6 loc(#loc28)
%false_73 = torch.constant.bool false loc(#loc28)
%false_74 = torch.constant.bool false loc(#loc28)
%none_75 = torch.constant.none loc(#loc)
%310 = torch.aten.to.dtype %309, %int6_72, %false_73, %false_74, %none_75 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%int-1_76 = torch.constant.int -1 loc(#loc29)
%none_77 = torch.constant.none loc(#loc)
%311 = torch.aten.softmax.int %310, %int-1_76, %none_77 : !torch.tensor<[1,8,1,2],f32>, !torch.int, !torch.none -> !torch.tensor<[1,8,1,2],f32> loc(#loc29)
%312 = torch.aten.type_as %311, %303 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,1,64],f32> -> !torch.tensor<[1,8,1,2],f32> loc(#loc28)
%313 = torch.aten.matmul %312, %305 : !torch.tensor<[1,8,1,2],f32>, !torch.tensor<[1,8,2,64],f32> -> !torch.tensor<[1,8,1,64],f32> loc(#loc30)
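// The ops below transpose the heads back, force a contiguous layout, and flatten
// to [1, 1, 512] (n_heads * head_dim = 8 * 64) ahead of the output projection.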
%int1_78 = torch.constant.int 1 loc(#loc31)
%int2_79 = torch.constant.int 2 loc(#loc31)
%314 = torch.aten.transpose.int %313, %int1_78, %int2_79 : !torch.tensor<[1,8,1,64],f32>, !torch.int, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc31)
%int0_80 = torch.constant.int 0 loc(#loc32)
%315 = torch.aten.contiguous %314, %int0_80 : !torch.tensor<[1,1,8,64],f32>, !torch.int -> !torch.tensor<[1,1,8,64],f32> loc(#loc32)
%int-1_81 = torch.constant.int -1 loc(#loc32)
%316 = torch.prim.ListConstruct %208, %218, %int-1_81 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc)
%317 = torch.aten.view %315, %316 : !torch.tensor<[1,1,8,64],f32>, !torch.list<int> -> !torch.tensor<[1,1,512],f32> loc(#loc32)
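// Attention epilogue: the mangled Linear %195 is presumably the output projection
// (wo in the reference code); its result is added residually onto the block input
// %205#1, and the second RMSNorm (%193, presumably ffn_norm) prepares the
// feed-forward branch.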
%318 = torch.tensor_static_info_cast %317 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc32)
%319 = torch.prim.CallMethod %195["forward"] (%318) : !torch.nn.Module<"__torch__.torch.nn.modules.linear.___torch_mangle_306.Linear">, (!torch.tensor) -> !torch.tensor loc(#loc)
%int1_82 = torch.constant.int 1 loc(#loc33)
%320 = torch.aten.add.Tensor %205#1, %319, %int1_82 : !torch.tensor<[1,1,512],f32>, !torch.tensor, !torch.int -> !torch.tensor<[1,1,512],f32> loc(#loc33)
%321 = torch.tensor_static_info_cast %320 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc33)
%322 = torch.prim.CallMethod %193["forward"] (%321) : !torch.nn.Module<"__torch__.model.___torch_mangle_314.RMSNorm">, (!torch.tensor) -> !torch.tuple<tensor, tensor> loc(#loc)
%323:2 = torch.prim.TupleUnpack %322 : !torch.tuple<tensor, tensor> -> !torch.tensor<[1,1,512],f32>, !torch.tensor<[1,1,512],f32> loc(#loc)
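// %192 is presumably the FeedForward submodule (w2(silu(w1(x)) * w3(x)) in the
// LLaMA-style reference); its call begins in the final, truncated statement below.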
%324 = torch.tensor_static_info_cast %323#0 : !torch.tensor<[1,1,512],f32> to !torch.tensor loc(#loc)
%325 = torch.prim.CallMethod %192