#loc0 = loc(unknown)
module attributes {torch.debug_module_name = "_lambda"} {
  func.func @forward(%arg0: !torch.vtensor<[1,4,64,64],f32> loc(unknown)) -> !torch.vtensor<[1,3,512,512],f32> {
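    // Annotation (inferred from the shapes, not part of the original dump): a
    // [1,4,64,64] latent is decoded to a [1,3,512,512] image, consistent with a
    // Stable Diffusion VAE decoder traced through torch-mlir. %int6 and %int7
    // below are the torch ScalarType codes for f32 and f64 respectively.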
    %int1048576 = torch.constant.int 1048576 loc(#loc1)
    %int262144 = torch.constant.int 262144 loc(#loc2)
    %int4 = torch.constant.int 4 loc(#loc3)
    %int2097152 = torch.constant.int 2097152 loc(#loc4)
    %int8 = torch.constant.int 8 loc(#loc5)
    %int524288 = torch.constant.int 524288 loc(#loc6)
    %int65536 = torch.constant.int 65536 loc(#loc7)
    %int16 = torch.constant.int 16 loc(#loc8)
    %int16384 = torch.constant.int 16384 loc(#loc9)
    %int4096 = torch.constant.int 4096 loc(#loc10)
    %int6 = torch.constant.int 6 loc(#loc11)
    %int7 = torch.constant.int 7 loc(#loc11)
    %0 = torch.vtensor.literal(dense<[0.00514700357, -0.0276389793, -0.0438199453]> : tensor<3xf32>) : !torch.vtensor<[3],f32> loc(#loc0)
    %1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3x128x3x3xf32>) : !torch.vtensor<[3,128,3,3],f32> loc(#loc0)
    %2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32> loc(#loc0)
    %6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32> loc(#loc0)
    %10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32> loc(#loc0)
    %14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32> loc(#loc0)
    %18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x1x1xf32>) : !torch.vtensor<[128,256,1,1],f32> loc(#loc0)
    %22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32> loc(#loc0)
    %24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32> loc(#loc0)
    %27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x3x3xf32>) : !torch.vtensor<[128,256,3,3],f32> loc(#loc0)
    %28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32> loc(#loc0)
    %32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32> loc(#loc0)
    %34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32> loc(#loc0)
    %38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32> loc(#loc0)
    %42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32> loc(#loc0)
    %46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x1x1xf32>) : !torch.vtensor<[256,512,1,1],f32> loc(#loc0)
    %50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32> loc(#loc0)
    %52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32> loc(#loc0)
    %55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x3xf32>) : !torch.vtensor<[256,512,3,3],f32> loc(#loc0)
    %56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc0)
    %120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc0)
    %122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc0)
    %124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512xf32>) : !torch.vtensor<[512,512],f32> loc(#loc0)
    %126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %131 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %132 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %133 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32> loc(#loc0)
    %134 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %135 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %136 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32> loc(#loc0)
    %137 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x4x3x3xf32>) : !torch.vtensor<[512,4,3,3],f32> loc(#loc0)
    %138 = torch.vtensor.literal(dense<[-0.0257531833, -0.101349898, -0.213674963, 0.187002152]> : tensor<4xf32>) : !torch.vtensor<[4],f32> loc(#loc0)
    %139 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4x4x1x1xf32>) : !torch.vtensor<[4,4,1,1],f32> loc(#loc0)
    %int256 = torch.constant.int 256 loc(#loc12)
    %int128 = torch.constant.int 128 loc(#loc13)
    %float2.000000e00 = torch.constant.float 2.000000e+00 loc(#loc14)
    %float1.000000e00 = torch.constant.float 1.000000e+00 loc(#loc15)
    %float2.102240e-01 = torch.constant.float 0.21022410381342865 loc(#loc16)
    %int64 = torch.constant.int 64 loc(#loc17)
    %int512 = torch.constant.int 512 loc(#loc18)
    %float9.999990e-07 = torch.constant.float 9.9999999999999995E-7 loc(#loc19)
    %int3 = torch.constant.int 3 loc(#loc20)
    %int2 = torch.constant.int 2 loc(#loc21)
    %int32 = torch.constant.int 32 loc(#loc22)
    %int0 = torch.constant.int 0 loc(#loc23)
    %int1 = torch.constant.int 1 loc(#loc24)
    %false = torch.constant.bool false loc(#loc25)
    %true = torch.constant.bool true loc(#loc26)
    %none = torch.constant.none loc(#loc0)
    %int-1 = torch.constant.int -1 loc(#loc27)
    %int-2 = torch.constant.int -2 loc(#loc28)
    %140 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
    %141 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
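    // The next two convolutions: a 1x1 conv on the 4-channel latent, then a 3x3
    // conv expanding to 512 channels. From the weight shapes these would be the
    // VAE's post_quant_conv and the decoder's conv_in (names are an inference,
    // not present in the dump).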
    %142 = torch.aten.convolution %arg0, %139, %138, %140, %141, %140, %false, %141, %int1 : !torch.vtensor<[1,4,64,64],f32>, !torch.vtensor<[4,4,1,1],f32>, !torch.vtensor<[4],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,4,64,64],f32> loc(#loc29)
    %143 = torch.aten.convolution %142, %137, %136, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,4,64,64],f32>, !torch.vtensor<[512,4,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc30)
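    // GroupNorm(num_groups=32, num_channels=512, eps~=1e-6) expanded inline:
    // reshape to [1,32,16,4096], compute mean and variance in f64 (dividing by
    // 65536 = 16*4096 elements per group), convert back to f32, then
    // rsqrt(var + eps) and the per-channel affine scale/shift. The same op
    // sequence repeats before every convolution below.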
    %144 = torch.prim.ListConstruct %int1, %int32, %int16, %int4096 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc31)
    %145 = torch.aten.view %143, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc32)
    %146 = torch.prim.ListConstruct %int2, %int3 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
    %147 = torch.aten.to.dtype %145, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc11)
    %148 = torch.aten.sum.dim_IntList %147, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc11)
    %149 = torch.aten.div.Scalar %148, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc11)
    %150 = torch.aten.sub.Tensor %147, %149, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc11)
    %151 = torch.aten.mul.Tensor %150, %150 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc11)
    %152 = torch.aten.sum.dim_IntList %151, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc11)
    %153 = torch.aten.div.Scalar %152, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc11)
    %154 = torch.aten.to.dtype %153, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc11)
    %155 = torch.aten.sum.dim_IntList %145, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc33)
    %156 = torch.aten.div.Scalar %155, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc33)
    %157 = torch.aten.add.Scalar %154, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc34)
    %158 = torch.aten.rsqrt %157 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc35)
    %159 = torch.aten.sub.Tensor %145, %156, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc36)
    %160 = torch.aten.mul.Tensor %159, %158 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc37)
    %161 = torch.prim.ListConstruct %int1, %int512, %int64, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc38)
    %162 = torch.aten.view %160, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc39)
    %163 = torch.aten.unsqueeze %135, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc40)
    %164 = torch.aten.unsqueeze %163, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc41)
    %165 = torch.aten.mul.Tensor %162, %164 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc42)
    %166 = torch.aten.unsqueeze %134, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc43)
    %167 = torch.aten.unsqueeze %166, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc44)
    %168 = torch.aten.add.Tensor %165, %167, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc45)
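    // x * sigmoid(x): the SiLU (swish) nonlinearity, decomposed into two ops.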
    %169 = torch.aten.sigmoid %168 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc46)
    %170 = torch.aten.mul.Tensor %169, %168 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc46)
    %171 = torch.aten.convolution %170, %133, %132, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc47)
    %172 = torch.aten.view %171, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc48)
    %173 = torch.aten.to.dtype %172, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc49)
    %174 = torch.aten.sum.dim_IntList %173, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc49)
    %175 = torch.aten.div.Scalar %174, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc49)
    %176 = torch.aten.sub.Tensor %173, %175, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc49)
    %177 = torch.aten.mul.Tensor %176, %176 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc49)
    %178 = torch.aten.sum.dim_IntList %177, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc49)
    %179 = torch.aten.div.Scalar %178, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc49)
    %180 = torch.aten.to.dtype %179, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc49)
    %181 = torch.aten.sum.dim_IntList %172, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc50)
    %182 = torch.aten.div.Scalar %181, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc50)
    %183 = torch.aten.add.Scalar %180, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc51)
    %184 = torch.aten.rsqrt %183 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc52)
    %185 = torch.aten.sub.Tensor %172, %182, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc53)
    %186 = torch.aten.mul.Tensor %185, %184 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc54)
    %187 = torch.aten.view %186, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc55)
    %188 = torch.aten.unsqueeze %131, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc56)
    %189 = torch.aten.unsqueeze %188, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc57)
    %190 = torch.aten.mul.Tensor %187, %189 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc58)
    %191 = torch.aten.unsqueeze %130, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc59)
    %192 = torch.aten.unsqueeze %191, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc60)
    %193 = torch.aten.add.Tensor %190, %192, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc61)
    %194 = torch.aten.sigmoid %193 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc62)
    %195 = torch.aten.mul.Tensor %194, %193 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc62)
    %196 = torch.aten.convolution %195, %129, %128, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc63)
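    // ResNet-block residual: the conv stack's output is added back to the block
    // input (%143), then divided by 1 (an output scale factor that is a no-op
    // here; interpretation inferred from the graph structure).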
    %197 = torch.aten.add.Tensor %143, %196, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc64)
    %198 = torch.aten.div.Scalar %197, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc65)
    %199 = torch.aten.view %198, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc66)
    %200 = torch.aten.to.dtype %199, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc67)
    %201 = torch.aten.sum.dim_IntList %200, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc67)
    %202 = torch.aten.div.Scalar %201, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc67)
    %203 = torch.aten.sub.Tensor %200, %202, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc67)
    %204 = torch.aten.mul.Tensor %203, %203 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc67)
    %205 = torch.aten.sum.dim_IntList %204, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc67)
    %206 = torch.aten.div.Scalar %205, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc67)
    %207 = torch.aten.to.dtype %206, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc67)
    %208 = torch.aten.sum.dim_IntList %199, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc68)
    %209 = torch.aten.div.Scalar %208, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc68)
    %210 = torch.aten.add.Scalar %207, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc69)
    %211 = torch.aten.rsqrt %210 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc70)
    %212 = torch.aten.sub.Tensor %199, %209, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc71)
    %213 = torch.aten.mul.Tensor %212, %211 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc72)
    %214 = torch.aten.view %213, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc73)
    %215 = torch.aten.unsqueeze %127, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc74)
    %216 = torch.aten.unsqueeze %215, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc75)
    %217 = torch.aten.mul.Tensor %214, %216 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc76)
    %218 = torch.aten.unsqueeze %126, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc77)
    %219 = torch.aten.unsqueeze %218, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc78)
    %220 = torch.aten.add.Tensor %217, %219, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc79)
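    // Single-head self-attention over the 64x64 spatial grid: flatten to
    // [1,4096,512] tokens, then apply the q/k/v linear projections
    // (%125, %123, %121 with biases %124, %122, %120) via broadcast + bmm.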
    %221 = torch.prim.ListConstruct %int1, %int512, %int4096 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc80)
    %222 = torch.aten.view %220, %221 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,512,4096],f32> loc(#loc81)
    %223 = torch.aten.transpose.int %222, %int1, %int2 : !torch.vtensor<[1,512,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,4096,512],f32> loc(#loc82)
    %224 = torch.aten.transpose.int %125, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc83)
    %225 = torch.prim.ListConstruct %int1, %int4096, %int512 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc84)
    %226 = torch.aten.broadcast_to %223, %225 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc85)
    %227 = torch.aten.view %226, %225 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc86)
    %228 = torch.prim.ListConstruct %int1, %int512, %int512 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc87)
    %229 = torch.aten.broadcast_to %224, %228 : !torch.vtensor<[512,512],f32>, !torch.list<int> -> !torch.vtensor<[1,512,512],f32> loc(#loc88)
    %230 = torch.aten.view %229, %228 : !torch.vtensor<[1,512,512],f32>, !torch.list<int> -> !torch.vtensor<[1,512,512],f32> loc(#loc89)
    %231 = torch.aten.bmm %227, %230 : !torch.vtensor<[1,4096,512],f32>, !torch.vtensor<[1,512,512],f32> -> !torch.vtensor<[1,4096,512],f32> loc(#loc90)
    %232 = torch.aten.view %231, %225 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc91)
    %233 = torch.aten.add.Tensor %232, %124, %int1 : !torch.vtensor<[1,4096,512],f32>, !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[1,4096,512],f32> loc(#loc92)
    %234 = torch.aten.transpose.int %123, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc93)
    %235 = torch.aten.broadcast_to %223, %225 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc94)
    %236 = torch.aten.view %235, %225 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc95)
    %237 = torch.aten.broadcast_to %234, %228 : !torch.vtensor<[512,512],f32>, !torch.list<int> -> !torch.vtensor<[1,512,512],f32> loc(#loc96)
    %238 = torch.aten.view %237, %228 : !torch.vtensor<[1,512,512],f32>, !torch.list<int> -> !torch.vtensor<[1,512,512],f32> loc(#loc97)
    %239 = torch.aten.bmm %236, %238 : !torch.vtensor<[1,4096,512],f32>, !torch.vtensor<[1,512,512],f32> -> !torch.vtensor<[1,4096,512],f32> loc(#loc98)
    %240 = torch.aten.view %239, %225 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc99)
    %241 = torch.aten.add.Tensor %240, %122, %int1 : !torch.vtensor<[1,4096,512],f32>, !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[1,4096,512],f32> loc(#loc100)
    %242 = torch.aten.transpose.int %121, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc101)
    %243 = torch.aten.broadcast_to %223, %225 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc102)
    %244 = torch.aten.view %243, %225 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc103)
    %245 = torch.aten.broadcast_to %242, %228 : !torch.vtensor<[512,512],f32>, !torch.list<int> -> !torch.vtensor<[1,512,512],f32> loc(#loc104)
    %246 = torch.aten.view %245, %228 : !torch.vtensor<[1,512,512],f32>, !torch.list<int> -> !torch.vtensor<[1,512,512],f32> loc(#loc105)
    %247 = torch.aten.bmm %244, %246 : !torch.vtensor<[1,4096,512],f32>, !torch.vtensor<[1,512,512],f32> -> !torch.vtensor<[1,4096,512],f32> loc(#loc106)
    %248 = torch.aten.view %247, %225 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc107)
    %249 = torch.aten.add.Tensor %248, %120, %int1 : !torch.vtensor<[1,4096,512],f32>, !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[1,4096,512],f32> loc(#loc108)
    %250 = torch.prim.ListConstruct %int1, %int4096, %int1, %int-1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
    %251 = torch.aten.view %233, %250 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,1,512],f32> loc(#loc109)
    %252 = torch.prim.ListConstruct %int0, %int2, %int1, %int3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc0)
    %253 = torch.aten.permute %251, %252 : !torch.vtensor<[1,4096,1,512],f32>, !torch.list<int> -> !torch.vtensor<[1,1,4096,512],f32> loc(#loc110)
    %254 = torch.aten.view %241, %250 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,1,512],f32> loc(#loc111)
    %255 = torch.aten.permute %254, %252 : !torch.vtensor<[1,4096,1,512],f32>, !torch.list<int> -> !torch.vtensor<[1,1,4096,512],f32> loc(#loc112)
    %256 = torch.aten.view %249, %250 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,1,512],f32> loc(#loc113)
    %257 = torch.aten.permute %256, %252 : !torch.vtensor<[1,4096,1,512],f32>, !torch.list<int> -> !torch.vtensor<[1,1,4096,512],f32> loc(#loc114)
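    // Pre-scale q and k^T by 0.21022... ~= 512^(-1/4) each, so the score matrix
    // q*k^T is effectively scaled by 1/sqrt(512), the head dimension.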
    %258 = torch.aten.mul.Scalar %253, %float2.102240e-01 : !torch.vtensor<[1,1,4096,512],f32>, !torch.float -> !torch.vtensor<[1,1,4096,512],f32> loc(#loc115)
    %259 = torch.aten.transpose.int %255, %int-1, %int-2 : !torch.vtensor<[1,1,4096,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,1,512,4096],f32> loc(#loc116)
    %260 = torch.aten.mul.Scalar %259, %float2.102240e-01 : !torch.vtensor<[1,1,512,4096],f32>, !torch.float -> !torch.vtensor<[1,1,512,4096],f32> loc(#loc117)
    %261 = torch.prim.ListConstruct %int1, %int1, %int4096, %int512 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc118)
    %262 = torch.aten.broadcast_to %258, %261 : !torch.vtensor<[1,1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,1,4096,512],f32> loc(#loc119)
    %263 = torch.aten.view %262, %225 : !torch.vtensor<[1,1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc120)
    %264 = torch.prim.ListConstruct %int1, %int1, %int512, %int4096 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc121)
    %265 = torch.aten.broadcast_to %260, %264 : !torch.vtensor<[1,1,512,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,1,512,4096],f32> loc(#loc122)
    %266 = torch.aten.view %265, %221 : !torch.vtensor<[1,1,512,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,4096],f32> loc(#loc123)
    %267 = torch.aten.bmm %263, %266 : !torch.vtensor<[1,4096,512],f32>, !torch.vtensor<[1,512,4096],f32> -> !torch.vtensor<[1,4096,4096],f32> loc(#loc124)
    %268 = torch.prim.ListConstruct %int1, %int1, %int4096, %int4096 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc125)
    %269 = torch.aten.view %267, %268 : !torch.vtensor<[1,4096,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,1,4096,4096],f32> loc(#loc126)
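    // Numerically stable softmax over the last dimension: subtract the row-wise
    // max, exponentiate, then normalize by the row sum.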
    %values, %indices = torch.aten.max.dim %269, %int-1, %true : !torch.vtensor<[1,1,4096,4096],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,1,4096,1],f32>, !torch.vtensor<[1,1,4096,1],si64> loc(#loc127)
    %270 = torch.aten.sub.Tensor %269, %values, %float1.000000e00 : !torch.vtensor<[1,1,4096,4096],f32>, !torch.vtensor<[1,1,4096,1],f32>, !torch.float -> !torch.vtensor<[1,1,4096,4096],f32> loc(#loc127)
    %271 = torch.aten.exp %270 : !torch.vtensor<[1,1,4096,4096],f32> -> !torch.vtensor<[1,1,4096,4096],f32> loc(#loc127)
    %272 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> loc(#loc127)
    %273 = torch.aten.sum.dim_IntList %271, %272, %true, %none : !torch.vtensor<[1,1,4096,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,1,4096,1],f32> loc(#loc127)
    %274 = torch.aten.div.Tensor %271, %273 : !torch.vtensor<[1,1,4096,4096],f32>, !torch.vtensor<[1,1,4096,1],f32> -> !torch.vtensor<[1,1,4096,4096],f32> loc(#loc127)
    %275 = torch.aten.broadcast_to %274, %268 : !torch.vtensor<[1,1,4096,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,1,4096,4096],f32> loc(#loc128)
    %276 = torch.prim.ListConstruct %int1, %int4096, %int4096 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc129)
    %277 = torch.aten.view %275, %276 : !torch.vtensor<[1,1,4096,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,4096],f32> loc(#loc130)
    %278 = torch.aten.broadcast_to %257, %261 : !torch.vtensor<[1,1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,1,4096,512],f32> loc(#loc131)
    %279 = torch.aten.view %278, %225 : !torch.vtensor<[1,1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc132)
    %280 = torch.aten.bmm %277, %279 : !torch.vtensor<[1,4096,4096],f32>, !torch.vtensor<[1,4096,512],f32> -> !torch.vtensor<[1,4096,512],f32> loc(#loc133)
    %281 = torch.aten.view %280, %261 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,1,4096,512],f32> loc(#loc134)
    %282 = torch.aten.permute %281, %252 : !torch.vtensor<[1,1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,1,512],f32> loc(#loc135)
    %283 = torch.aten.view %282, %225 : !torch.vtensor<[1,4096,1,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc136)
    %284 = torch.aten.transpose.int %119, %int0, %int1 : !torch.vtensor<[512,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[512,512],f32> loc(#loc137)
    %285 = torch.prim.ListConstruct %int4096, %int512 : (!torch.int, !torch.int) -> !torch.list<int> loc(#loc138)
    %286 = torch.aten.view %283, %285 : !torch.vtensor<[1,4096,512],f32>, !torch.list<int> -> !torch.vtensor<[4096,512],f32> loc(#loc139)
    %287 = torch.aten.mm %286, %284 : !torch.vtensor<[4096,512],f32>, !torch.vtensor<[512,512],f32> -> !torch.vtensor<[4096,512],f32> loc(#loc140)
    %288 = torch.aten.mul.Scalar %118, %int1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512],f32> loc(#loc140)
    %289 = torch.aten.add.Tensor %288, %287, %int1 : !torch.vtensor<[512],f32>, !torch.vtensor<[4096,512],f32>, !torch.int -> !torch.vtensor<[4096,512],f32> loc(#loc140)
    %290 = torch.aten.view %289, %225 : !torch.vtensor<[4096,512],f32>, !torch.list<int> -> !torch.vtensor<[1,4096,512],f32> loc(#loc141)
    %291 = torch.aten.transpose.int %290, %int-1, %int-2 : !torch.vtensor<[1,4096,512],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,512,4096],f32> loc(#loc142)
    %292 = torch.aten.view %291, %161 : !torch.vtensor<[1,512,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc143)
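    // Residual connection: the attention output is added back to the attention
    // block's input (%198), again followed by a no-op divide by 1 (consistent
    // with a rescale_output_factor of 1; naming is an inference).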
%293 = torch.aten.add.Tensor %292, %198, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc144) | |
%294 = torch.aten.div.Scalar %293, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc145) | |
%295 = torch.aten.clone %294, %int0 : !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc146) | |
%296 = torch.aten.view %295, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc147) | |
%297 = torch.aten.to.dtype %296, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc148) | |
%298 = torch.aten.sum.dim_IntList %297, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc148) | |
%299 = torch.aten.div.Scalar %298, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc148) | |
%300 = torch.aten.sub.Tensor %297, %299, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc148) | |
%301 = torch.aten.mul.Tensor %300, %300 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc148) | |
%302 = torch.aten.sum.dim_IntList %301, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc148) | |
%303 = torch.aten.div.Scalar %302, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc148) | |
%304 = torch.aten.to.dtype %303, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc148) | |
%305 = torch.aten.sum.dim_IntList %296, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc149) | |
%306 = torch.aten.div.Scalar %305, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc149) | |
%307 = torch.aten.add.Scalar %304, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc150) | |
%308 = torch.aten.rsqrt %307 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc151) | |
%309 = torch.aten.sub.Tensor %296, %306, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc152) | |
%310 = torch.aten.mul.Tensor %309, %308 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc153) | |
%311 = torch.aten.view %310, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc154) | |
%312 = torch.aten.unsqueeze %117, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc155) | |
%313 = torch.aten.unsqueeze %312, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc156) | |
%314 = torch.aten.mul.Tensor %311, %313 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc157) | |
%315 = torch.aten.unsqueeze %116, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc158) | |
%316 = torch.aten.unsqueeze %315, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc159) | |
%317 = torch.aten.add.Tensor %314, %316, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc160) | |
%318 = torch.aten.sigmoid %317 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc161) | |
%319 = torch.aten.mul.Tensor %318, %317 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc161) | |
%320 = torch.aten.convolution %319, %115, %114, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc162) | |
%321 = torch.aten.view %320, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc163) | |
%322 = torch.aten.to.dtype %321, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc164) | |
%323 = torch.aten.sum.dim_IntList %322, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc164) | |
%324 = torch.aten.div.Scalar %323, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc164) | |
%325 = torch.aten.sub.Tensor %322, %324, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc164) | |
%326 = torch.aten.mul.Tensor %325, %325 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc164) | |
%327 = torch.aten.sum.dim_IntList %326, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc164) | |
%328 = torch.aten.div.Scalar %327, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc164) | |
%329 = torch.aten.to.dtype %328, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc164) | |
%330 = torch.aten.sum.dim_IntList %321, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc165) | |
%331 = torch.aten.div.Scalar %330, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc165) | |
%332 = torch.aten.add.Scalar %329, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc166) | |
%333 = torch.aten.rsqrt %332 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc167) | |
%334 = torch.aten.sub.Tensor %321, %331, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc168) | |
%335 = torch.aten.mul.Tensor %334, %333 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc169) | |
%336 = torch.aten.view %335, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc170) | |
%337 = torch.aten.unsqueeze %113, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc171) | |
%338 = torch.aten.unsqueeze %337, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc172) | |
%339 = torch.aten.mul.Tensor %336, %338 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc173) | |
%340 = torch.aten.unsqueeze %112, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc174) | |
%341 = torch.aten.unsqueeze %340, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc175) | |
%342 = torch.aten.add.Tensor %339, %341, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc176) | |
%343 = torch.aten.sigmoid %342 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc177) | |
%344 = torch.aten.mul.Tensor %343, %342 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc177) | |
%345 = torch.aten.convolution %344, %111, %110, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc178) | |
%346 = torch.aten.add.Tensor %294, %345, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc179) | |
%347 = torch.aten.div.Scalar %346, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc180) | |
%348 = torch.aten.clone %347, %int0 : !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc181) | |
%349 = torch.aten.view %348, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc182) | |
%350 = torch.aten.to.dtype %349, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc183) | |
%351 = torch.aten.sum.dim_IntList %350, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc183) | |
%352 = torch.aten.div.Scalar %351, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc183) | |
%353 = torch.aten.sub.Tensor %350, %352, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc183) | |
%354 = torch.aten.mul.Tensor %353, %353 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc183) | |
%355 = torch.aten.sum.dim_IntList %354, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc183) | |
%356 = torch.aten.div.Scalar %355, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc183) | |
%357 = torch.aten.to.dtype %356, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc183) | |
%358 = torch.aten.sum.dim_IntList %349, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc184) | |
%359 = torch.aten.div.Scalar %358, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc184) | |
%360 = torch.aten.add.Scalar %357, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc185) | |
%361 = torch.aten.rsqrt %360 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc186) | |
%362 = torch.aten.sub.Tensor %349, %359, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc187) | |
%363 = torch.aten.mul.Tensor %362, %361 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc188) | |
%364 = torch.aten.view %363, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc189) | |
%365 = torch.aten.unsqueeze %109, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc190) | |
%366 = torch.aten.unsqueeze %365, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc191) | |
%367 = torch.aten.mul.Tensor %364, %366 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc192) | |
%368 = torch.aten.unsqueeze %108, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc193) | |
%369 = torch.aten.unsqueeze %368, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc194) | |
%370 = torch.aten.add.Tensor %367, %369, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc195) | |
%371 = torch.aten.sigmoid %370 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc196) | |
%372 = torch.aten.mul.Tensor %371, %370 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc196) | |
%373 = torch.aten.convolution %372, %107, %106, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc197)
%374 = torch.aten.view %373, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc198)
%375 = torch.aten.to.dtype %374, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc199)
%376 = torch.aten.sum.dim_IntList %375, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc199)
%377 = torch.aten.div.Scalar %376, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc199)
%378 = torch.aten.sub.Tensor %375, %377, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc199)
%379 = torch.aten.mul.Tensor %378, %378 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc199)
%380 = torch.aten.sum.dim_IntList %379, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc199)
%381 = torch.aten.div.Scalar %380, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc199)
%382 = torch.aten.to.dtype %381, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc199)
%383 = torch.aten.sum.dim_IntList %374, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc200)
%384 = torch.aten.div.Scalar %383, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc200)
%385 = torch.aten.add.Scalar %382, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc201)
%386 = torch.aten.rsqrt %385 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc202)
%387 = torch.aten.sub.Tensor %374, %384, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc203)
%388 = torch.aten.mul.Tensor %387, %386 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc204)
%389 = torch.aten.view %388, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc205)
%390 = torch.aten.unsqueeze %105, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc206)
%391 = torch.aten.unsqueeze %390, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc207)
%392 = torch.aten.mul.Tensor %389, %391 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc208)
%393 = torch.aten.unsqueeze %104, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc209)
%394 = torch.aten.unsqueeze %393, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc210)
%395 = torch.aten.add.Tensor %392, %394, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc211)
%396 = torch.aten.sigmoid %395 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc212)
%397 = torch.aten.mul.Tensor %396, %395 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc212)
%398 = torch.aten.convolution %397, %103, %102, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc213)
%399 = torch.aten.add.Tensor %347, %398, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc214)
%400 = torch.aten.div.Scalar %399, %float1.000000e00 : !torch.vtensor<[1,512,64,64],f32>, !torch.float -> !torch.vtensor<[1,512,64,64],f32> loc(#loc215)
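// Annotation: residual connection closing a norm->SiLU->conv, norm->SiLU->conv block:
// the block input (%347) is added to the conv output (%398) and divided by 1.0 --
// plausibly an output_scale_factor as in diffusers' ResnetBlock2D, an identity here.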
%401 = torch.aten.clone %400, %int0 : !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc216)
%402 = torch.aten.view %401, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc217)
%403 = torch.aten.to.dtype %402, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc218)
%404 = torch.aten.sum.dim_IntList %403, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc218)
%405 = torch.aten.div.Scalar %404, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc218)
%406 = torch.aten.sub.Tensor %403, %405, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc218)
%407 = torch.aten.mul.Tensor %406, %406 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc218)
%408 = torch.aten.sum.dim_IntList %407, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc218)
%409 = torch.aten.div.Scalar %408, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc218)
%410 = torch.aten.to.dtype %409, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc218)
%411 = torch.aten.sum.dim_IntList %402, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc219)
%412 = torch.aten.div.Scalar %411, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc219)
%413 = torch.aten.add.Scalar %410, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc220)
%414 = torch.aten.rsqrt %413 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc221)
%415 = torch.aten.sub.Tensor %402, %412, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc222)
%416 = torch.aten.mul.Tensor %415, %414 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc223)
%417 = torch.aten.view %416, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc224)
%418 = torch.aten.unsqueeze %101, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc225)
%419 = torch.aten.unsqueeze %418, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc226)
%420 = torch.aten.mul.Tensor %417, %419 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc227)
%421 = torch.aten.unsqueeze %100, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc228)
%422 = torch.aten.unsqueeze %421, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc229)
%423 = torch.aten.add.Tensor %420, %422, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc230)
%424 = torch.aten.sigmoid %423 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc231)
%425 = torch.aten.mul.Tensor %424, %423 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc231)
%426 = torch.aten.convolution %425, %99, %98, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc232)
%427 = torch.aten.view %426, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc233)
%428 = torch.aten.to.dtype %427, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc234)
%429 = torch.aten.sum.dim_IntList %428, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc234)
%430 = torch.aten.div.Scalar %429, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc234)
%431 = torch.aten.sub.Tensor %428, %430, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc234)
%432 = torch.aten.mul.Tensor %431, %431 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc234)
%433 = torch.aten.sum.dim_IntList %432, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc234)
%434 = torch.aten.div.Scalar %433, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc234)
%435 = torch.aten.to.dtype %434, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc234)
%436 = torch.aten.sum.dim_IntList %427, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc235)
%437 = torch.aten.div.Scalar %436, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc235)
%438 = torch.aten.add.Scalar %435, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc236)
%439 = torch.aten.rsqrt %438 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc237)
%440 = torch.aten.sub.Tensor %427, %437, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc238)
%441 = torch.aten.mul.Tensor %440, %439 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc239)
%442 = torch.aten.view %441, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc240)
%443 = torch.aten.unsqueeze %97, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc241)
%444 = torch.aten.unsqueeze %443, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc242)
%445 = torch.aten.mul.Tensor %442, %444 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc243)
%446 = torch.aten.unsqueeze %96, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc244)
%447 = torch.aten.unsqueeze %446, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc245)
%448 = torch.aten.add.Tensor %445, %447, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc246)
%449 = torch.aten.sigmoid %448 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc247)
%450 = torch.aten.mul.Tensor %449, %448 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc247)
%451 = torch.aten.convolution %450, %95, %94, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc248)
%452 = torch.aten.add.Tensor %400, %451, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc249)
%453 = torch.aten.div.Scalar %452, %float1.000000e00 : !torch.vtensor<[1,512,64,64],f32>, !torch.float -> !torch.vtensor<[1,512,64,64],f32> loc(#loc250)
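// Annotation: a second residual block with the same shape as the first closes here:
// GroupNorm -> SiLU -> 3x3 conv, twice, then the residual add (%400 + %451) and the
// identity division by 1.0.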
%454 = torch.aten.clone %453, %int0 : !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc251)
%455 = torch.aten.view %454, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc252)
%456 = torch.aten.to.dtype %455, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc253)
%457 = torch.aten.sum.dim_IntList %456, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc253)
%458 = torch.aten.div.Scalar %457, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc253)
%459 = torch.aten.sub.Tensor %456, %458, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc253)
%460 = torch.aten.mul.Tensor %459, %459 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc253)
%461 = torch.aten.sum.dim_IntList %460, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc253)
%462 = torch.aten.div.Scalar %461, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc253)
%463 = torch.aten.to.dtype %462, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc253)
%464 = torch.aten.sum.dim_IntList %455, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc254)
%465 = torch.aten.div.Scalar %464, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc254)
%466 = torch.aten.add.Scalar %463, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc255)
%467 = torch.aten.rsqrt %466 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc256)
%468 = torch.aten.sub.Tensor %455, %465, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc257)
%469 = torch.aten.mul.Tensor %468, %467 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc258)
%470 = torch.aten.view %469, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc259)
%471 = torch.aten.unsqueeze %93, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc260)
%472 = torch.aten.unsqueeze %471, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc261)
%473 = torch.aten.mul.Tensor %470, %472 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc262)
%474 = torch.aten.unsqueeze %92, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc263)
%475 = torch.aten.unsqueeze %474, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc264)
%476 = torch.aten.add.Tensor %473, %475, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc265)
%477 = torch.aten.sigmoid %476 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc266)
%478 = torch.aten.mul.Tensor %477, %476 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc266)
%479 = torch.aten.convolution %478, %91, %90, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc267)
%480 = torch.aten.view %479, %144 : !torch.vtensor<[1,512,64,64],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc268)
%481 = torch.aten.to.dtype %480, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc269)
%482 = torch.aten.sum.dim_IntList %481, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc269)
%483 = torch.aten.div.Scalar %482, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc269)
%484 = torch.aten.sub.Tensor %481, %483, %float1.000000e00 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc269)
%485 = torch.aten.mul.Tensor %484, %484 : !torch.vtensor<[1,32,16,4096],f64>, !torch.vtensor<[1,32,16,4096],f64> -> !torch.vtensor<[1,32,16,4096],f64> loc(#loc269)
%486 = torch.aten.sum.dim_IntList %485, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc269)
%487 = torch.aten.div.Scalar %486, %int65536 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc269)
%488 = torch.aten.to.dtype %487, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc269)
%489 = torch.aten.sum.dim_IntList %480, %146, %true, %none : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc270)
%490 = torch.aten.div.Scalar %489, %int65536 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc270)
%491 = torch.aten.add.Scalar %488, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc271)
%492 = torch.aten.rsqrt %491 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc272)
%493 = torch.aten.sub.Tensor %480, %490, %int1 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc273)
%494 = torch.aten.mul.Tensor %493, %492 : !torch.vtensor<[1,32,16,4096],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,4096],f32> loc(#loc274)
%495 = torch.aten.view %494, %161 : !torch.vtensor<[1,32,16,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc275)
%496 = torch.aten.unsqueeze %89, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc276)
%497 = torch.aten.unsqueeze %496, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc277)
%498 = torch.aten.mul.Tensor %495, %497 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc278)
%499 = torch.aten.unsqueeze %88, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc279)
%500 = torch.aten.unsqueeze %499, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc280)
%501 = torch.aten.add.Tensor %498, %500, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc281)
%502 = torch.aten.sigmoid %501 : !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc282)
%503 = torch.aten.mul.Tensor %502, %501 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32> -> !torch.vtensor<[1,512,64,64],f32> loc(#loc282)
%504 = torch.aten.convolution %503, %87, %86, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc283)
%505 = torch.aten.add.Tensor %453, %504, %int1 : !torch.vtensor<[1,512,64,64],f32>, !torch.vtensor<[1,512,64,64],f32>, !torch.int -> !torch.vtensor<[1,512,64,64],f32> loc(#loc284)
%506 = torch.aten.div.Scalar %505, %float1.000000e00 : !torch.vtensor<[1,512,64,64],f32>, !torch.float -> !torch.vtensor<[1,512,64,64],f32> loc(#loc285)
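// Annotation: third identical residual block at 64x64 closes here (%453 + %504);
// next comes the first 2x upsampling stage.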
%507 = torch.prim.ListConstruct %float2.000000e00, %float2.000000e00 : (!torch.float, !torch.float) -> !torch.list<float> loc(#loc0)
%508 = torch.aten.upsample_nearest2d.vec %506, %none, %507 : !torch.vtensor<[1,512,64,64],f32>, !torch.none, !torch.list<float> -> !torch.vtensor<[1,512,?,?],f32> loc(#loc286)
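// Annotation: nearest-neighbor 2x upsample (scale factors [2.0, 2.0]): 64x64 ->
// 128x128. The result shape is left dynamic ([1,512,?,?]) because the output sizes
// are derived from float scale factors rather than static integers.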
%509 = torch.aten.convolution %508, %85, %84, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,?,?],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,?,?],f32> loc(#loc287)
%510 = torch.prim.ListConstruct %int1, %int32, %int16, %int16384 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc288)
%511 = torch.aten.view %509, %510 : !torch.vtensor<[1,512,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc289)
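// Annotation: GroupNorm reshape at the new resolution: [1,512,128,128] viewed as
// [1,32,16,16384] (128*128 = 16384), so each group now averages over 262144 elements,
// matching the %int262144 divisors below.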
%512 = torch.aten.to.dtype %511, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc290)
%513 = torch.aten.sum.dim_IntList %512, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc290)
%514 = torch.aten.div.Scalar %513, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc290)
%515 = torch.aten.sub.Tensor %512, %514, %float1.000000e00 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc290)
%516 = torch.aten.mul.Tensor %515, %515 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,16,16384],f64> -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc290)
%517 = torch.aten.sum.dim_IntList %516, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc290)
%518 = torch.aten.div.Scalar %517, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc290)
%519 = torch.aten.to.dtype %518, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc290)
%520 = torch.aten.sum.dim_IntList %511, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc291)
%521 = torch.aten.div.Scalar %520, %int262144 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc291)
%522 = torch.aten.add.Scalar %519, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc292)
%523 = torch.aten.rsqrt %522 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc293)
%524 = torch.aten.sub.Tensor %511, %521, %int1 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc294)
%525 = torch.aten.mul.Tensor %524, %523 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc295)
%526 = torch.prim.ListConstruct %int1, %int512, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc296)
%527 = torch.aten.view %525, %526 : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc297)
%528 = torch.aten.unsqueeze %83, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc298)
%529 = torch.aten.unsqueeze %528, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc299)
%530 = torch.aten.mul.Tensor %527, %529 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc300)
%531 = torch.aten.unsqueeze %82, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc301)
%532 = torch.aten.unsqueeze %531, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc302)
%533 = torch.aten.add.Tensor %530, %532, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc303)
%534 = torch.aten.sigmoid %533 : !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc304)
%535 = torch.aten.mul.Tensor %534, %533 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc304)
%536 = torch.aten.convolution %535, %81, %80, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc305)
%537 = torch.aten.view %536, %510 : !torch.vtensor<[1,512,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc306)
%538 = torch.aten.to.dtype %537, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc307)
%539 = torch.aten.sum.dim_IntList %538, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc307)
%540 = torch.aten.div.Scalar %539, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc307)
%541 = torch.aten.sub.Tensor %538, %540, %float1.000000e00 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc307)
%542 = torch.aten.mul.Tensor %541, %541 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,16,16384],f64> -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc307)
%543 = torch.aten.sum.dim_IntList %542, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc307)
%544 = torch.aten.div.Scalar %543, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc307)
%545 = torch.aten.to.dtype %544, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc307)
%546 = torch.aten.sum.dim_IntList %537, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc308)
%547 = torch.aten.div.Scalar %546, %int262144 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc308)
%548 = torch.aten.add.Scalar %545, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc309)
%549 = torch.aten.rsqrt %548 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc310)
%550 = torch.aten.sub.Tensor %537, %547, %int1 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc311)
%551 = torch.aten.mul.Tensor %550, %549 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc312)
%552 = torch.aten.view %551, %526 : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc313)
%553 = torch.aten.unsqueeze %79, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc314)
%554 = torch.aten.unsqueeze %553, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc315)
%555 = torch.aten.mul.Tensor %552, %554 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc316)
%556 = torch.aten.unsqueeze %78, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc317)
%557 = torch.aten.unsqueeze %556, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc318)
%558 = torch.aten.add.Tensor %555, %557, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc319)
%559 = torch.aten.sigmoid %558 : !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc320)
%560 = torch.aten.mul.Tensor %559, %558 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc320)
%561 = torch.aten.convolution %560, %77, %76, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc321)
%562 = torch.aten.add.Tensor %509, %561, %int1 : !torch.vtensor<[1,512,?,?],f32>, !torch.vtensor<[1,512,128,128],f32>, !torch.int -> !torch.vtensor<[1,512,?,?],f32> loc(#loc322)
%563 = torch.aten.div.Scalar %562, %float1.000000e00 : !torch.vtensor<[1,512,?,?],f32>, !torch.float -> !torch.vtensor<[1,512,?,?],f32> loc(#loc323)
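// Annotation: first residual block at 128x128 closes: the post-upsample conv output
// (%509) is the skip, added to %561 and again divided by 1.0. The dynamic [1,512,?,?]
// shape propagates through the skip path from the upsample above.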
%564 = torch.aten.view %563, %510 : !torch.vtensor<[1,512,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc324)
%565 = torch.aten.to.dtype %564, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc325)
%566 = torch.aten.sum.dim_IntList %565, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc325)
%567 = torch.aten.div.Scalar %566, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc325)
%568 = torch.aten.sub.Tensor %565, %567, %float1.000000e00 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc325)
%569 = torch.aten.mul.Tensor %568, %568 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,16,16384],f64> -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc325)
%570 = torch.aten.sum.dim_IntList %569, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc325)
%571 = torch.aten.div.Scalar %570, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc325)
%572 = torch.aten.to.dtype %571, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc325)
%573 = torch.aten.sum.dim_IntList %564, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc326)
%574 = torch.aten.div.Scalar %573, %int262144 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc326)
%575 = torch.aten.add.Scalar %572, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc327)
%576 = torch.aten.rsqrt %575 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc328)
%577 = torch.aten.sub.Tensor %564, %574, %int1 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc329)
%578 = torch.aten.mul.Tensor %577, %576 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc330)
%579 = torch.aten.view %578, %526 : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc331)
%580 = torch.aten.unsqueeze %75, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc332)
%581 = torch.aten.unsqueeze %580, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc333)
%582 = torch.aten.mul.Tensor %579, %581 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc334)
%583 = torch.aten.unsqueeze %74, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc335)
%584 = torch.aten.unsqueeze %583, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc336)
%585 = torch.aten.add.Tensor %582, %584, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc337)
%586 = torch.aten.sigmoid %585 : !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc338)
%587 = torch.aten.mul.Tensor %586, %585 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc338)
%588 = torch.aten.convolution %587, %73, %72, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc339)
%589 = torch.aten.view %588, %510 : !torch.vtensor<[1,512,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc340)
%590 = torch.aten.to.dtype %589, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc341)
%591 = torch.aten.sum.dim_IntList %590, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc341)
%592 = torch.aten.div.Scalar %591, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc341)
%593 = torch.aten.sub.Tensor %590, %592, %float1.000000e00 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc341)
%594 = torch.aten.mul.Tensor %593, %593 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,16,16384],f64> -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc341)
%595 = torch.aten.sum.dim_IntList %594, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc341)
%596 = torch.aten.div.Scalar %595, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc341)
%597 = torch.aten.to.dtype %596, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc341)
%598 = torch.aten.sum.dim_IntList %589, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc342)
%599 = torch.aten.div.Scalar %598, %int262144 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc342)
%600 = torch.aten.add.Scalar %597, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc343)
%601 = torch.aten.rsqrt %600 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc344)
%602 = torch.aten.sub.Tensor %589, %599, %int1 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc345)
%603 = torch.aten.mul.Tensor %602, %601 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc346)
%604 = torch.aten.view %603, %526 : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc347)
%605 = torch.aten.unsqueeze %71, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc348)
%606 = torch.aten.unsqueeze %605, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc349)
%607 = torch.aten.mul.Tensor %604, %606 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc350)
%608 = torch.aten.unsqueeze %70, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc351)
%609 = torch.aten.unsqueeze %608, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc352)
%610 = torch.aten.add.Tensor %607, %609, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc353)
%611 = torch.aten.sigmoid %610 : !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc354)
%612 = torch.aten.mul.Tensor %611, %610 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc354)
%613 = torch.aten.convolution %612, %69, %68, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc355)
%614 = torch.aten.add.Tensor %563, %613, %int1 : !torch.vtensor<[1,512,?,?],f32>, !torch.vtensor<[1,512,128,128],f32>, !torch.int -> !torch.vtensor<[1,512,?,?],f32> loc(#loc356)
%615 = torch.aten.div.Scalar %614, %float1.000000e00 : !torch.vtensor<[1,512,?,?],f32>, !torch.float -> !torch.vtensor<[1,512,?,?],f32> loc(#loc357)
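// Annotation: second 128x128 residual block closes (%563 + %613, divided by 1.0).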
%616 = torch.aten.view %615, %510 : !torch.vtensor<[1,512,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc358)
%617 = torch.aten.to.dtype %616, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc359)
%618 = torch.aten.sum.dim_IntList %617, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc359)
%619 = torch.aten.div.Scalar %618, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc359)
%620 = torch.aten.sub.Tensor %617, %619, %float1.000000e00 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc359)
%621 = torch.aten.mul.Tensor %620, %620 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,16,16384],f64> -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc359)
%622 = torch.aten.sum.dim_IntList %621, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc359)
%623 = torch.aten.div.Scalar %622, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc359)
%624 = torch.aten.to.dtype %623, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc359)
%625 = torch.aten.sum.dim_IntList %616, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc360)
%626 = torch.aten.div.Scalar %625, %int262144 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc360)
%627 = torch.aten.add.Scalar %624, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc361)
%628 = torch.aten.rsqrt %627 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc362)
%629 = torch.aten.sub.Tensor %616, %626, %int1 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc363)
%630 = torch.aten.mul.Tensor %629, %628 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc364)
%631 = torch.aten.view %630, %526 : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc365)
%632 = torch.aten.unsqueeze %67, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc366)
%633 = torch.aten.unsqueeze %632, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc367)
%634 = torch.aten.mul.Tensor %631, %633 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc368)
%635 = torch.aten.unsqueeze %66, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc369)
%636 = torch.aten.unsqueeze %635, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc370)
%637 = torch.aten.add.Tensor %634, %636, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc371)
%638 = torch.aten.sigmoid %637 : !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc372)
%639 = torch.aten.mul.Tensor %638, %637 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc372)
%640 = torch.aten.convolution %639, %65, %64, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc373)
%641 = torch.aten.view %640, %510 : !torch.vtensor<[1,512,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc374)
%642 = torch.aten.to.dtype %641, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc375)
%643 = torch.aten.sum.dim_IntList %642, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc375)
%644 = torch.aten.div.Scalar %643, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc375)
%645 = torch.aten.sub.Tensor %642, %644, %float1.000000e00 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc375)
%646 = torch.aten.mul.Tensor %645, %645 : !torch.vtensor<[1,32,16,16384],f64>, !torch.vtensor<[1,32,16,16384],f64> -> !torch.vtensor<[1,32,16,16384],f64> loc(#loc375)
%647 = torch.aten.sum.dim_IntList %646, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc375)
%648 = torch.aten.div.Scalar %647, %int262144 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc375)
%649 = torch.aten.to.dtype %648, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc375)
%650 = torch.aten.sum.dim_IntList %641, %146, %true, %none : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc376)
%651 = torch.aten.div.Scalar %650, %int262144 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc376)
%652 = torch.aten.add.Scalar %649, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc377)
%653 = torch.aten.rsqrt %652 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc378)
%654 = torch.aten.sub.Tensor %641, %651, %int1 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc379)
%655 = torch.aten.mul.Tensor %654, %653 : !torch.vtensor<[1,32,16,16384],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,16384],f32> loc(#loc380)
%656 = torch.aten.view %655, %526 : !torch.vtensor<[1,32,16,16384],f32>, !torch.list<int> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc381)
%657 = torch.aten.unsqueeze %63, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc382)
%658 = torch.aten.unsqueeze %657, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc383)
%659 = torch.aten.mul.Tensor %656, %658 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc384)
%660 = torch.aten.unsqueeze %62, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc385)
%661 = torch.aten.unsqueeze %660, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc386)
%662 = torch.aten.add.Tensor %659, %661, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc387)
%663 = torch.aten.sigmoid %662 : !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc388)
%664 = torch.aten.mul.Tensor %663, %662 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[1,512,128,128],f32> -> !torch.vtensor<[1,512,128,128],f32> loc(#loc388)
%665 = torch.aten.convolution %664, %61, %60, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,128,128],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,128,128],f32> loc(#loc389)
%666 = torch.aten.add.Tensor %615, %665, %int1 : !torch.vtensor<[1,512,?,?],f32>, !torch.vtensor<[1,512,128,128],f32>, !torch.int -> !torch.vtensor<[1,512,?,?],f32> loc(#loc390)
%667 = torch.aten.div.Scalar %666, %float1.000000e00 : !torch.vtensor<[1,512,?,?],f32>, !torch.float -> !torch.vtensor<[1,512,?,?],f32> loc(#loc391)
%668 = torch.aten.upsample_nearest2d.vec %667, %none, %507 : !torch.vtensor<[1,512,?,?],f32>, !torch.none, !torch.list<float> -> !torch.vtensor<[1,512,?,?],f32> loc(#loc392)
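// Annotation: the third 128x128 residual block closed just above (%615 + %665), and
// the second 2x nearest-neighbor upsample (reusing the [2.0, 2.0] scales in %507)
// takes the features to 256x256.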
%669 = torch.aten.convolution %668, %59, %58, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,?,?],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,?,?],f32> loc(#loc393)
%670 = torch.prim.ListConstruct %int1, %int32, %int16, %int65536 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc394)
%671 = torch.aten.view %669, %670 : !torch.vtensor<[1,512,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,16,65536],f32> loc(#loc395)
%672 = torch.aten.to.dtype %671, %int7, %false, %false, %none : !torch.vtensor<[1,32,16,65536],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,16,65536],f64> loc(#loc1)
%673 = torch.aten.sum.dim_IntList %672, %146, %true, %none : !torch.vtensor<[1,32,16,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc1)
%674 = torch.aten.div.Scalar %673, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc1)
%675 = torch.aten.sub.Tensor %672, %674, %float1.000000e00 : !torch.vtensor<[1,32,16,65536],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,16,65536],f64> loc(#loc1)
%676 = torch.aten.mul.Tensor %675, %675 : !torch.vtensor<[1,32,16,65536],f64>, !torch.vtensor<[1,32,16,65536],f64> -> !torch.vtensor<[1,32,16,65536],f64> loc(#loc1)
%677 = torch.aten.sum.dim_IntList %676, %146, %true, %none : !torch.vtensor<[1,32,16,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc1)
%678 = torch.aten.div.Scalar %677, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc1)
%679 = torch.aten.to.dtype %678, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc1)
%680 = torch.aten.sum.dim_IntList %671, %146, %true, %none : !torch.vtensor<[1,32,16,65536],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc396)
%681 = torch.aten.div.Scalar %680, %int1048576 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc396)
%682 = torch.aten.add.Scalar %679, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc397)
%683 = torch.aten.rsqrt %682 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc398)
%684 = torch.aten.sub.Tensor %671, %681, %int1 : !torch.vtensor<[1,32,16,65536],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,16,65536],f32> loc(#loc399)
%685 = torch.aten.mul.Tensor %684, %683 : !torch.vtensor<[1,32,16,65536],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,16,65536],f32> loc(#loc400)
%686 = torch.prim.ListConstruct %int1, %int512, %int256, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc401)
%687 = torch.aten.view %685, %686 : !torch.vtensor<[1,32,16,65536],f32>, !torch.list<int> -> !torch.vtensor<[1,512,256,256],f32> loc(#loc402)
%688 = torch.aten.unsqueeze %57, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc403)
%689 = torch.aten.unsqueeze %688, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc404)
%690 = torch.aten.mul.Tensor %687, %689 : !torch.vtensor<[1,512,256,256],f32>, !torch.vtensor<[512,1,1],f32> -> !torch.vtensor<[1,512,256,256],f32> loc(#loc405)
%691 = torch.aten.unsqueeze %56, %int-1 : !torch.vtensor<[512],f32>, !torch.int -> !torch.vtensor<[512,1],f32> loc(#loc406)
%692 = torch.aten.unsqueeze %691, %int-1 : !torch.vtensor<[512,1],f32>, !torch.int -> !torch.vtensor<[512,1,1],f32> loc(#loc407)
%693 = torch.aten.add.Tensor %690, %692, %int1 : !torch.vtensor<[1,512,256,256],f32>, !torch.vtensor<[512,1,1],f32>, !torch.int -> !torch.vtensor<[1,512,256,256],f32> loc(#loc408)
%694 = torch.aten.sigmoid %693 : !torch.vtensor<[1,512,256,256],f32> -> !torch.vtensor<[1,512,256,256],f32> loc(#loc409)
%695 = torch.aten.mul.Tensor %694, %693 : !torch.vtensor<[1,512,256,256],f32>, !torch.vtensor<[1,512,256,256],f32> -> !torch.vtensor<[1,512,256,256],f32> loc(#loc409)
%696 = torch.aten.convolution %695, %55, %54, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,512,256,256],f32>, !torch.vtensor<[256,512,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc410)
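// Annotation: channel reduction: this 3x3 convolution maps 512 -> 256 channels
// (weights [256,512,3,3]) at 256x256 resolution.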
%697 = torch.prim.ListConstruct %int1, %int32, %int8, %int65536 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc411)
%698 = torch.aten.view %696, %697 : !torch.vtensor<[1,256,256,256],f32>, !torch.list<int> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc412)
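// Annotation: with 256 channels the GroupNorm view becomes [1,32,8,65536] -- still
// 32 groups, but only 8 channels per group (256/32), so 8*65536 = 524288 elements per
// group, matching the %int524288 divisors below.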
%699 = torch.aten.to.dtype %698, %int7, %false, %false, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc6) | |
%700 = torch.aten.sum.dim_IntList %699, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc6) | |
%701 = torch.aten.div.Scalar %700, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc6) | |
%702 = torch.aten.sub.Tensor %699, %701, %float1.000000e00 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc6) | |
%703 = torch.aten.mul.Tensor %702, %702 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,8,65536],f64> -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc6) | |
%704 = torch.aten.sum.dim_IntList %703, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc6) | |
%705 = torch.aten.div.Scalar %704, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc6) | |
%706 = torch.aten.to.dtype %705, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc6) | |
%707 = torch.aten.sum.dim_IntList %698, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc413) | |
%708 = torch.aten.div.Scalar %707, %int524288 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc413) | |
%709 = torch.aten.add.Scalar %706, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc414) | |
%710 = torch.aten.rsqrt %709 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc415) | |
%711 = torch.aten.sub.Tensor %698, %708, %int1 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc416) | |
%712 = torch.aten.mul.Tensor %711, %710 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc417) | |
%713 = torch.prim.ListConstruct %int1, %int256, %int256, %int256 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc418) | |
%714 = torch.aten.view %712, %713 : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc419) | |
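// Per-channel affine of the normalized tensor: gamma (%53) and beta (%52) are
// unsqueezed to [256,1,1] and broadcast, i.e. roughly
//   y = y * gamma.view(-1, 1, 1) + beta.view(-1, 1, 1)
// followed again by the SiLU pair (sigmoid + mul).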
%715 = torch.aten.unsqueeze %53, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc420) | |
%716 = torch.aten.unsqueeze %715, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc421) | |
%717 = torch.aten.mul.Tensor %714, %716 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc422) | |
%718 = torch.aten.unsqueeze %52, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc423) | |
%719 = torch.aten.unsqueeze %718, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc424) | |
%720 = torch.aten.add.Tensor %717, %719, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc425) | |
%721 = torch.aten.sigmoid %720 : !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc426) | |
%722 = torch.aten.mul.Tensor %721, %720 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc426) | |
%723 = torch.aten.convolution %722, %51, %50, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc427) | |
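// Residual connection of this block: the channel count changes (512 -> 256), so the
// skip path %724 is a 1x1 convolution (%49: [256,512,1,1]) on the block input %669
// instead of an identity. The subsequent div by 1.0 is consistent with a residual
// block that divides by an output scale factor of 1.0, a no-op the tracer kept:
//   out = (conv_shortcut(x_in) + h) / 1.0   # sketch; names hypothetical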
%724 = torch.aten.convolution %669, %49, %48, %140, %141, %140, %false, %141, %int1 : !torch.vtensor<[1,512,?,?],f32>, !torch.vtensor<[256,512,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,?,?],f32> loc(#loc428) | |
%725 = torch.aten.add.Tensor %724, %723, %int1 : !torch.vtensor<[1,256,?,?],f32>, !torch.vtensor<[1,256,256,256],f32>, !torch.int -> !torch.vtensor<[1,256,?,?],f32> loc(#loc429) | |
%726 = torch.aten.div.Scalar %725, %float1.000000e00 : !torch.vtensor<[1,256,?,?],f32>, !torch.float -> !torch.vtensor<[1,256,?,?],f32> loc(#loc430) | |
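// Second residual block at 256 channels: the same GroupNorm -> affine -> SiLU ->
// conv pattern runs twice (norm1/conv1, norm2/conv2), and since the channel count
// no longer changes, the skip is a plain add of the block input %726 (see %777).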
%727 = torch.aten.view %726, %697 : !torch.vtensor<[1,256,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc431) | |
%728 = torch.aten.to.dtype %727, %int7, %false, %false, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc432) | |
%729 = torch.aten.sum.dim_IntList %728, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc432) | |
%730 = torch.aten.div.Scalar %729, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc432) | |
%731 = torch.aten.sub.Tensor %728, %730, %float1.000000e00 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc432) | |
%732 = torch.aten.mul.Tensor %731, %731 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,8,65536],f64> -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc432) | |
%733 = torch.aten.sum.dim_IntList %732, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc432) | |
%734 = torch.aten.div.Scalar %733, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc432) | |
%735 = torch.aten.to.dtype %734, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc432) | |
%736 = torch.aten.sum.dim_IntList %727, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc433) | |
%737 = torch.aten.div.Scalar %736, %int524288 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc433) | |
%738 = torch.aten.add.Scalar %735, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc434) | |
%739 = torch.aten.rsqrt %738 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc435) | |
%740 = torch.aten.sub.Tensor %727, %737, %int1 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc436) | |
%741 = torch.aten.mul.Tensor %740, %739 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc437) | |
%742 = torch.aten.view %741, %713 : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc438) | |
%743 = torch.aten.unsqueeze %47, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc439) | |
%744 = torch.aten.unsqueeze %743, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc440) | |
%745 = torch.aten.mul.Tensor %742, %744 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc441) | |
%746 = torch.aten.unsqueeze %46, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc442) | |
%747 = torch.aten.unsqueeze %746, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc443) | |
%748 = torch.aten.add.Tensor %745, %747, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc444) | |
%749 = torch.aten.sigmoid %748 : !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc445) | |
%750 = torch.aten.mul.Tensor %749, %748 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc445) | |
%751 = torch.aten.convolution %750, %45, %44, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc446) | |
%752 = torch.aten.view %751, %697 : !torch.vtensor<[1,256,256,256],f32>, !torch.list<int> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc447) | |
%753 = torch.aten.to.dtype %752, %int7, %false, %false, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc448) | |
%754 = torch.aten.sum.dim_IntList %753, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc448) | |
%755 = torch.aten.div.Scalar %754, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc448) | |
%756 = torch.aten.sub.Tensor %753, %755, %float1.000000e00 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc448) | |
%757 = torch.aten.mul.Tensor %756, %756 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,8,65536],f64> -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc448) | |
%758 = torch.aten.sum.dim_IntList %757, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc448) | |
%759 = torch.aten.div.Scalar %758, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc448) | |
%760 = torch.aten.to.dtype %759, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc448) | |
%761 = torch.aten.sum.dim_IntList %752, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc449) | |
%762 = torch.aten.div.Scalar %761, %int524288 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc449) | |
%763 = torch.aten.add.Scalar %760, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc450) | |
%764 = torch.aten.rsqrt %763 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc451) | |
%765 = torch.aten.sub.Tensor %752, %762, %int1 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc452) | |
%766 = torch.aten.mul.Tensor %765, %764 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc453) | |
%767 = torch.aten.view %766, %713 : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc454) | |
%768 = torch.aten.unsqueeze %43, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc455) | |
%769 = torch.aten.unsqueeze %768, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc456) | |
%770 = torch.aten.mul.Tensor %767, %769 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc457) | |
%771 = torch.aten.unsqueeze %42, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc458) | |
%772 = torch.aten.unsqueeze %771, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc459) | |
%773 = torch.aten.add.Tensor %770, %772, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc460) | |
%774 = torch.aten.sigmoid %773 : !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc461) | |
%775 = torch.aten.mul.Tensor %774, %773 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc461) | |
%776 = torch.aten.convolution %775, %41, %40, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc462) | |
%777 = torch.aten.add.Tensor %726, %776, %int1 : !torch.vtensor<[1,256,?,?],f32>, !torch.vtensor<[1,256,256,256],f32>, !torch.int -> !torch.vtensor<[1,256,?,?],f32> loc(#loc463) | |
%778 = torch.aten.div.Scalar %777, %float1.000000e00 : !torch.vtensor<[1,256,?,?],f32>, !torch.float -> !torch.vtensor<[1,256,?,?],f32> loc(#loc464) | |
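// Third residual block at 256 channels, structurally identical to the previous one
// (identity skip: %778 is added back in %829).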
%779 = torch.aten.view %778, %697 : !torch.vtensor<[1,256,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc465) | |
%780 = torch.aten.to.dtype %779, %int7, %false, %false, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc466) | |
%781 = torch.aten.sum.dim_IntList %780, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc466) | |
%782 = torch.aten.div.Scalar %781, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc466) | |
%783 = torch.aten.sub.Tensor %780, %782, %float1.000000e00 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc466) | |
%784 = torch.aten.mul.Tensor %783, %783 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,8,65536],f64> -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc466) | |
%785 = torch.aten.sum.dim_IntList %784, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc466) | |
%786 = torch.aten.div.Scalar %785, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc466) | |
%787 = torch.aten.to.dtype %786, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc466) | |
%788 = torch.aten.sum.dim_IntList %779, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc467) | |
%789 = torch.aten.div.Scalar %788, %int524288 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc467) | |
%790 = torch.aten.add.Scalar %787, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc468) | |
%791 = torch.aten.rsqrt %790 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc469) | |
%792 = torch.aten.sub.Tensor %779, %789, %int1 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc470) | |
%793 = torch.aten.mul.Tensor %792, %791 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc471) | |
%794 = torch.aten.view %793, %713 : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc472) | |
%795 = torch.aten.unsqueeze %39, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc473) | |
%796 = torch.aten.unsqueeze %795, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc474) | |
%797 = torch.aten.mul.Tensor %794, %796 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc475) | |
%798 = torch.aten.unsqueeze %38, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc476) | |
%799 = torch.aten.unsqueeze %798, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc477) | |
%800 = torch.aten.add.Tensor %797, %799, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc478) | |
%801 = torch.aten.sigmoid %800 : !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc479) | |
%802 = torch.aten.mul.Tensor %801, %800 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc479) | |
%803 = torch.aten.convolution %802, %37, %36, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc480) | |
%804 = torch.aten.view %803, %697 : !torch.vtensor<[1,256,256,256],f32>, !torch.list<int> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc481) | |
%805 = torch.aten.to.dtype %804, %int7, %false, %false, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc482) | |
%806 = torch.aten.sum.dim_IntList %805, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc482) | |
%807 = torch.aten.div.Scalar %806, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc482) | |
%808 = torch.aten.sub.Tensor %805, %807, %float1.000000e00 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc482) | |
%809 = torch.aten.mul.Tensor %808, %808 : !torch.vtensor<[1,32,8,65536],f64>, !torch.vtensor<[1,32,8,65536],f64> -> !torch.vtensor<[1,32,8,65536],f64> loc(#loc482) | |
%810 = torch.aten.sum.dim_IntList %809, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc482) | |
%811 = torch.aten.div.Scalar %810, %int524288 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc482) | |
%812 = torch.aten.to.dtype %811, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc482) | |
%813 = torch.aten.sum.dim_IntList %804, %146, %true, %none : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc483) | |
%814 = torch.aten.div.Scalar %813, %int524288 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc483) | |
%815 = torch.aten.add.Scalar %812, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc484) | |
%816 = torch.aten.rsqrt %815 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc485) | |
%817 = torch.aten.sub.Tensor %804, %814, %int1 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc486) | |
%818 = torch.aten.mul.Tensor %817, %816 : !torch.vtensor<[1,32,8,65536],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,8,65536],f32> loc(#loc487) | |
%819 = torch.aten.view %818, %713 : !torch.vtensor<[1,32,8,65536],f32>, !torch.list<int> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc488) | |
%820 = torch.aten.unsqueeze %35, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc489) | |
%821 = torch.aten.unsqueeze %820, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc490) | |
%822 = torch.aten.mul.Tensor %819, %821 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc491) | |
%823 = torch.aten.unsqueeze %34, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc492) | |
%824 = torch.aten.unsqueeze %823, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc493) | |
%825 = torch.aten.add.Tensor %822, %824, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,1,1],f32>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc494) | |
%826 = torch.aten.sigmoid %825 : !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc495) | |
%827 = torch.aten.mul.Tensor %826, %825 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[1,256,256,256],f32> -> !torch.vtensor<[1,256,256,256],f32> loc(#loc495) | |
%828 = torch.aten.convolution %827, %33, %32, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,256,256,256],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,256,256],f32> loc(#loc496) | |
%829 = torch.aten.add.Tensor %778, %828, %int1 : !torch.vtensor<[1,256,?,?],f32>, !torch.vtensor<[1,256,256,256],f32>, !torch.int -> !torch.vtensor<[1,256,?,?],f32> loc(#loc497) | |
%830 = torch.aten.div.Scalar %829, %float1.000000e00 : !torch.vtensor<[1,256,?,?],f32>, !torch.float -> !torch.vtensor<[1,256,?,?],f32> loc(#loc498) | |
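// Nearest-neighbor upsampling with runtime scale factors (%507, a
// !torch.list<float> built earlier in the module). Because the scales are runtime
// values, the result types become spatially dynamic ([1,256,?,?]); the later static
// view to [1,256,512,512] pins the output at 512x512, i.e. a 2x upsample, followed
// by the usual 3x3 post-upsample convolution.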
%831 = torch.aten.upsample_nearest2d.vec %830, %none, %507 : !torch.vtensor<[1,256,?,?],f32>, !torch.none, !torch.list<float> -> !torch.vtensor<[1,256,?,?],f32> loc(#loc499) | |
%832 = torch.aten.convolution %831, %31, %30, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,256,?,?],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,?,?],f32> loc(#loc500) | |
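// GroupNorm again, now at 512x512 resolution: the group view is [1,32,8,262144]
// with 262144 = 512*512, so each group averages over 8 * 262144 = 2097152 elements
// (the %int2097152 divisor).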
%833 = torch.prim.ListConstruct %int1, %int32, %int8, %int262144 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc501) | |
%834 = torch.aten.view %832, %833 : !torch.vtensor<[1,256,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,8,262144],f32> loc(#loc502) | |
%835 = torch.aten.to.dtype %834, %int7, %false, %false, %none : !torch.vtensor<[1,32,8,262144],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,8,262144],f64> loc(#loc4) | |
%836 = torch.aten.sum.dim_IntList %835, %146, %true, %none : !torch.vtensor<[1,32,8,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc4) | |
%837 = torch.aten.div.Scalar %836, %int2097152 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc4) | |
%838 = torch.aten.sub.Tensor %835, %837, %float1.000000e00 : !torch.vtensor<[1,32,8,262144],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,8,262144],f64> loc(#loc4) | |
%839 = torch.aten.mul.Tensor %838, %838 : !torch.vtensor<[1,32,8,262144],f64>, !torch.vtensor<[1,32,8,262144],f64> -> !torch.vtensor<[1,32,8,262144],f64> loc(#loc4) | |
%840 = torch.aten.sum.dim_IntList %839, %146, %true, %none : !torch.vtensor<[1,32,8,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc4) | |
%841 = torch.aten.div.Scalar %840, %int2097152 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc4) | |
%842 = torch.aten.to.dtype %841, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc4) | |
%843 = torch.aten.sum.dim_IntList %834, %146, %true, %none : !torch.vtensor<[1,32,8,262144],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc503) | |
%844 = torch.aten.div.Scalar %843, %int2097152 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc503) | |
%845 = torch.aten.add.Scalar %842, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc504) | |
%846 = torch.aten.rsqrt %845 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc505) | |
%847 = torch.aten.sub.Tensor %834, %844, %int1 : !torch.vtensor<[1,32,8,262144],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,8,262144],f32> loc(#loc506) | |
%848 = torch.aten.mul.Tensor %847, %846 : !torch.vtensor<[1,32,8,262144],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,8,262144],f32> loc(#loc507) | |
%849 = torch.prim.ListConstruct %int1, %int256, %int512, %int512 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc508) | |
%850 = torch.aten.view %848, %849 : !torch.vtensor<[1,32,8,262144],f32>, !torch.list<int> -> !torch.vtensor<[1,256,512,512],f32> loc(#loc509) | |
%851 = torch.aten.unsqueeze %29, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc510) | |
%852 = torch.aten.unsqueeze %851, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc511) | |
%853 = torch.aten.mul.Tensor %850, %852 : !torch.vtensor<[1,256,512,512],f32>, !torch.vtensor<[256,1,1],f32> -> !torch.vtensor<[1,256,512,512],f32> loc(#loc512) | |
%854 = torch.aten.unsqueeze %28, %int-1 : !torch.vtensor<[256],f32>, !torch.int -> !torch.vtensor<[256,1],f32> loc(#loc513) | |
%855 = torch.aten.unsqueeze %854, %int-1 : !torch.vtensor<[256,1],f32>, !torch.int -> !torch.vtensor<[256,1,1],f32> loc(#loc514) | |
%856 = torch.aten.add.Tensor %853, %855, %int1 : !torch.vtensor<[1,256,512,512],f32>, !torch.vtensor<[256,1,1],f32>, !torch.int -> !torch.vtensor<[1,256,512,512],f32> loc(#loc515) | |
%857 = torch.aten.sigmoid %856 : !torch.vtensor<[1,256,512,512],f32> -> !torch.vtensor<[1,256,512,512],f32> loc(#loc516) | |
%858 = torch.aten.mul.Tensor %857, %856 : !torch.vtensor<[1,256,512,512],f32>, !torch.vtensor<[1,256,512,512],f32> -> !torch.vtensor<[1,256,512,512],f32> loc(#loc516) | |
%859 = torch.aten.convolution %858, %27, %26, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,256,512,512],f32>, !torch.vtensor<[128,256,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc517) | |
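// Transition to the final 128-channel stage: a 3x3 convolution (%27: [128,256,3,3])
// halves the channel count, and the GroupNorm group view becomes [1,32,4,262144]
// (128 channels / 32 groups = 4), with divisor 1048576 = 4 * 262144.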
%860 = torch.prim.ListConstruct %int1, %int32, %int4, %int262144 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc518) | |
%861 = torch.aten.view %859, %860 : !torch.vtensor<[1,128,512,512],f32>, !torch.list<int> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc519) | |
%862 = torch.aten.to.dtype %861, %int7, %false, %false, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc520) | |
%863 = torch.aten.sum.dim_IntList %862, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc520) | |
%864 = torch.aten.div.Scalar %863, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc520) | |
%865 = torch.aten.sub.Tensor %862, %864, %float1.000000e00 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc520) | |
%866 = torch.aten.mul.Tensor %865, %865 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,4,262144],f64> -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc520) | |
%867 = torch.aten.sum.dim_IntList %866, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc520) | |
%868 = torch.aten.div.Scalar %867, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc520) | |
%869 = torch.aten.to.dtype %868, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc520) | |
%870 = torch.aten.sum.dim_IntList %861, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc521) | |
%871 = torch.aten.div.Scalar %870, %int1048576 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc521) | |
%872 = torch.aten.add.Scalar %869, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc522) | |
%873 = torch.aten.rsqrt %872 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc523) | |
%874 = torch.aten.sub.Tensor %861, %871, %int1 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc524) | |
%875 = torch.aten.mul.Tensor %874, %873 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc525) | |
%876 = torch.prim.ListConstruct %int1, %int128, %int512, %int512 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> loc(#loc526) | |
%877 = torch.aten.view %875, %876 : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc527) | |
%878 = torch.aten.unsqueeze %25, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc528) | |
%879 = torch.aten.unsqueeze %878, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc529) | |
%880 = torch.aten.mul.Tensor %877, %879 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc530) | |
%881 = torch.aten.unsqueeze %24, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc531) | |
%882 = torch.aten.unsqueeze %881, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc532) | |
%883 = torch.aten.add.Tensor %880, %882, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc533) | |
%884 = torch.aten.sigmoid %883 : !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc534) | |
%885 = torch.aten.mul.Tensor %884, %883 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc534) | |
%886 = torch.aten.convolution %885, %23, %22, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc535) | |
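// As in the 512 -> 256 block above, the channel count changes (256 -> 128), so the
// skip path %887 is a 1x1 convolution (%21: [128,256,1,1]) applied to the block
// input %832, added to the conv branch and divided by the same no-op scale of 1.0.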
%887 = torch.aten.convolution %832, %21, %20, %140, %141, %140, %false, %141, %int1 : !torch.vtensor<[1,256,?,?],f32>, !torch.vtensor<[128,256,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,?,?],f32> loc(#loc536) | |
%888 = torch.aten.add.Tensor %887, %886, %int1 : !torch.vtensor<[1,128,?,?],f32>, !torch.vtensor<[1,128,512,512],f32>, !torch.int -> !torch.vtensor<[1,128,?,?],f32> loc(#loc537) | |
%889 = torch.aten.div.Scalar %888, %float1.000000e00 : !torch.vtensor<[1,128,?,?],f32>, !torch.float -> !torch.vtensor<[1,128,?,?],f32> loc(#loc538) | |
%890 = torch.aten.view %889, %860 : !torch.vtensor<[1,128,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc539) | |
%891 = torch.aten.to.dtype %890, %int7, %false, %false, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc540) | |
%892 = torch.aten.sum.dim_IntList %891, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc540) | |
%893 = torch.aten.div.Scalar %892, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc540) | |
%894 = torch.aten.sub.Tensor %891, %893, %float1.000000e00 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc540) | |
%895 = torch.aten.mul.Tensor %894, %894 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,4,262144],f64> -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc540) | |
%896 = torch.aten.sum.dim_IntList %895, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc540) | |
%897 = torch.aten.div.Scalar %896, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc540) | |
%898 = torch.aten.to.dtype %897, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc540) | |
%899 = torch.aten.sum.dim_IntList %890, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc541) | |
%900 = torch.aten.div.Scalar %899, %int1048576 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc541) | |
%901 = torch.aten.add.Scalar %898, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc542) | |
%902 = torch.aten.rsqrt %901 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc543) | |
%903 = torch.aten.sub.Tensor %890, %900, %int1 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc544) | |
%904 = torch.aten.mul.Tensor %903, %902 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc545) | |
%905 = torch.aten.view %904, %876 : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc546) | |
%906 = torch.aten.unsqueeze %19, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc547) | |
%907 = torch.aten.unsqueeze %906, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc548) | |
%908 = torch.aten.mul.Tensor %905, %907 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc549) | |
%909 = torch.aten.unsqueeze %18, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc550) | |
%910 = torch.aten.unsqueeze %909, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc551) | |
%911 = torch.aten.add.Tensor %908, %910, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc552) | |
%912 = torch.aten.sigmoid %911 : !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc553) | |
%913 = torch.aten.mul.Tensor %912, %911 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc553) | |
%914 = torch.aten.convolution %913, %17, %16, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc554) | |
%915 = torch.aten.view %914, %860 : !torch.vtensor<[1,128,512,512],f32>, !torch.list<int> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc555) | |
%916 = torch.aten.to.dtype %915, %int7, %false, %false, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc556) | |
%917 = torch.aten.sum.dim_IntList %916, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc556) | |
%918 = torch.aten.div.Scalar %917, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc556) | |
%919 = torch.aten.sub.Tensor %916, %918, %float1.000000e00 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc556) | |
%920 = torch.aten.mul.Tensor %919, %919 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,4,262144],f64> -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc556) | |
%921 = torch.aten.sum.dim_IntList %920, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc556) | |
%922 = torch.aten.div.Scalar %921, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc556) | |
%923 = torch.aten.to.dtype %922, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc556) | |
%924 = torch.aten.sum.dim_IntList %915, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc557) | |
%925 = torch.aten.div.Scalar %924, %int1048576 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc557) | |
%926 = torch.aten.add.Scalar %923, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc558) | |
%927 = torch.aten.rsqrt %926 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc559) | |
%928 = torch.aten.sub.Tensor %915, %925, %int1 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc560) | |
%929 = torch.aten.mul.Tensor %928, %927 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc561) | |
%930 = torch.aten.view %929, %876 : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc562) | |
%931 = torch.aten.unsqueeze %15, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc563) | |
%932 = torch.aten.unsqueeze %931, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc564) | |
%933 = torch.aten.mul.Tensor %930, %932 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc565) | |
%934 = torch.aten.unsqueeze %14, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc566) | |
%935 = torch.aten.unsqueeze %934, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc567) | |
%936 = torch.aten.add.Tensor %933, %935, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc568) | |
%937 = torch.aten.sigmoid %936 : !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc569) | |
%938 = torch.aten.mul.Tensor %937, %936 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc569) | |
%939 = torch.aten.convolution %938, %13, %12, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc570) | |
%940 = torch.aten.add.Tensor %889, %939, %int1 : !torch.vtensor<[1,128,?,?],f32>, !torch.vtensor<[1,128,512,512],f32>, !torch.int -> !torch.vtensor<[1,128,?,?],f32> loc(#loc571) | |
%941 = torch.aten.div.Scalar %940, %float1.000000e00 : !torch.vtensor<[1,128,?,?],f32>, !torch.float -> !torch.vtensor<[1,128,?,?],f32> loc(#loc572) | |
%942 = torch.aten.view %941, %860 : !torch.vtensor<[1,128,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc573) | |
%943 = torch.aten.to.dtype %942, %int7, %false, %false, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc574) | |
%944 = torch.aten.sum.dim_IntList %943, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc574) | |
%945 = torch.aten.div.Scalar %944, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc574) | |
%946 = torch.aten.sub.Tensor %943, %945, %float1.000000e00 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc574) | |
%947 = torch.aten.mul.Tensor %946, %946 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,4,262144],f64> -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc574) | |
%948 = torch.aten.sum.dim_IntList %947, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc574) | |
%949 = torch.aten.div.Scalar %948, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc574) | |
%950 = torch.aten.to.dtype %949, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc574) | |
%951 = torch.aten.sum.dim_IntList %942, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc575) | |
%952 = torch.aten.div.Scalar %951, %int1048576 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc575) | |
%953 = torch.aten.add.Scalar %950, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc576) | |
%954 = torch.aten.rsqrt %953 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc577) | |
%955 = torch.aten.sub.Tensor %942, %952, %int1 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc578) | |
%956 = torch.aten.mul.Tensor %955, %954 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc579) | |
%957 = torch.aten.view %956, %876 : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc580) | |
%958 = torch.aten.unsqueeze %11, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc581) | |
%959 = torch.aten.unsqueeze %958, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc582) | |
%960 = torch.aten.mul.Tensor %957, %959 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc583) | |
%961 = torch.aten.unsqueeze %10, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc584) | |
%962 = torch.aten.unsqueeze %961, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc585) | |
%963 = torch.aten.add.Tensor %960, %962, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc586) | |
%964 = torch.aten.sigmoid %963 : !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc587) | |
%965 = torch.aten.mul.Tensor %964, %963 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc587) | |
%966 = torch.aten.convolution %965, %9, %8, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc588) | |
%967 = torch.aten.view %966, %860 : !torch.vtensor<[1,128,512,512],f32>, !torch.list<int> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc589) | |
%968 = torch.aten.to.dtype %967, %int7, %false, %false, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc590) | |
%969 = torch.aten.sum.dim_IntList %968, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc590) | |
%970 = torch.aten.div.Scalar %969, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc590) | |
%971 = torch.aten.sub.Tensor %968, %970, %float1.000000e00 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc590) | |
%972 = torch.aten.mul.Tensor %971, %971 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,4,262144],f64> -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc590) | |
%973 = torch.aten.sum.dim_IntList %972, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc590) | |
%974 = torch.aten.div.Scalar %973, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc590) | |
%975 = torch.aten.to.dtype %974, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc590) | |
%976 = torch.aten.sum.dim_IntList %967, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc591) | |
%977 = torch.aten.div.Scalar %976, %int1048576 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc591) | |
%978 = torch.aten.add.Scalar %975, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc592) | |
%979 = torch.aten.rsqrt %978 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc593) | |
%980 = torch.aten.sub.Tensor %967, %977, %int1 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc594) | |
%981 = torch.aten.mul.Tensor %980, %979 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc595) | |
%982 = torch.aten.view %981, %876 : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc596) | |
%983 = torch.aten.unsqueeze %7, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc597) | |
%984 = torch.aten.unsqueeze %983, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc598) | |
%985 = torch.aten.mul.Tensor %982, %984 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc599) | |
%986 = torch.aten.unsqueeze %6, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc600) | |
%987 = torch.aten.unsqueeze %986, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc601) | |
%988 = torch.aten.add.Tensor %985, %987, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc602) | |
%989 = torch.aten.sigmoid %988 : !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc603) | |
%990 = torch.aten.mul.Tensor %989, %988 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc603) | |
%991 = torch.aten.convolution %990, %5, %4, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc604) | |
%992 = torch.aten.add.Tensor %941, %991, %int1 : !torch.vtensor<[1,128,?,?],f32>, !torch.vtensor<[1,128,512,512],f32>, !torch.int -> !torch.vtensor<[1,128,?,?],f32> loc(#loc605) | |
%993 = torch.aten.div.Scalar %992, %float1.000000e00 : !torch.vtensor<[1,128,?,?],f32>, !torch.float -> !torch.vtensor<[1,128,?,?],f32> loc(#loc606) | |
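// Decoder output head: one final GroupNorm + affine + SiLU over the 128-channel
// features, then a 3x3 convolution (%1: [3,128,3,3], bias %0) down to 3 channels,
// producing the [1,3,512,512] image returned below.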
%994 = torch.aten.view %993, %860 : !torch.vtensor<[1,128,?,?],f32>, !torch.list<int> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc607) | |
%995 = torch.aten.to.dtype %994, %int7, %false, %false, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc608) | |
%996 = torch.aten.sum.dim_IntList %995, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc608) | |
%997 = torch.aten.div.Scalar %996, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc608) | |
%998 = torch.aten.sub.Tensor %995, %997, %float1.000000e00 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,1,1],f64>, !torch.float -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc608) | |
%999 = torch.aten.mul.Tensor %998, %998 : !torch.vtensor<[1,32,4,262144],f64>, !torch.vtensor<[1,32,4,262144],f64> -> !torch.vtensor<[1,32,4,262144],f64> loc(#loc608) | |
%1000 = torch.aten.sum.dim_IntList %999, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f64>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f64> loc(#loc608) | |
%1001 = torch.aten.div.Scalar %1000, %int1048576 : !torch.vtensor<[1,32,1,1],f64>, !torch.int -> !torch.vtensor<[1,32,1,1],f64> loc(#loc608) | |
%1002 = torch.aten.to.dtype %1001, %int6, %false, %false, %none : !torch.vtensor<[1,32,1,1],f64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc608) | |
%1003 = torch.aten.sum.dim_IntList %994, %146, %true, %none : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,32,1,1],f32> loc(#loc609) | |
%1004 = torch.aten.div.Scalar %1003, %int1048576 : !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc609) | |
%1005 = torch.aten.add.Scalar %1002, %float9.999990e-07, %int1 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],f32> loc(#loc610) | |
%1006 = torch.aten.rsqrt %1005 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32> loc(#loc611) | |
%1007 = torch.aten.sub.Tensor %994, %1004, %int1 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32>, !torch.int -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc612) | |
%1008 = torch.aten.mul.Tensor %1007, %1006 : !torch.vtensor<[1,32,4,262144],f32>, !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,4,262144],f32> loc(#loc613) | |
%1009 = torch.aten.view %1008, %876 : !torch.vtensor<[1,32,4,262144],f32>, !torch.list<int> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc614) | |
%1010 = torch.aten.unsqueeze %3, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc615) | |
%1011 = torch.aten.unsqueeze %1010, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc616) | |
%1012 = torch.aten.mul.Tensor %1009, %1011 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc617) | |
%1013 = torch.aten.unsqueeze %2, %int-1 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128,1],f32> loc(#loc618) | |
%1014 = torch.aten.unsqueeze %1013, %int-1 : !torch.vtensor<[128,1],f32>, !torch.int -> !torch.vtensor<[128,1,1],f32> loc(#loc619) | |
%1015 = torch.aten.add.Tensor %1012, %1014, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[128,1,1],f32>, !torch.int -> !torch.vtensor<[1,128,512,512],f32> loc(#loc620) | |
%1016 = torch.aten.sigmoid %1015 : !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc621) | |
%1017 = torch.aten.mul.Tensor %1016, %1015 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[1,128,512,512],f32> -> !torch.vtensor<[1,128,512,512],f32> loc(#loc621) | |
%1018 = torch.aten.convolution %1017, %1, %0, %140, %140, %140, %false, %141, %int1 : !torch.vtensor<[1,128,512,512],f32>, !torch.vtensor<[3,128,3,3],f32>, !torch.vtensor<[3],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,3,512,512],f32> loc(#loc622) | |
return %1018 : !torch.vtensor<[1,3,512,512],f32> loc(#loc0) | |
} loc(#loc0) | |
} loc(#loc0) | |
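// Location table: each #locN ties an op above back to a line:col position in
// "<eval_with_key>.4", the in-memory Python source that torch.fx generates when
// tracing the model. The callsite(...) entries additionally record the inlined
// call stack; "-" appears for frames without a recorded file name.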
#loc1 = loc("<eval_with_key>.4":545:13) | |
#loc2 = loc("<eval_with_key>.4":713:61) | |
#loc3 = loc("<eval_with_key>.4":739:58) | |
#loc4 = loc("<eval_with_key>.4":714:13) | |
#loc5 = loc("<eval_with_key>.4":570:58) | |
#loc6 = loc("<eval_with_key>.4":571:13) | |
#loc7 = loc("<eval_with_key>.4":544:62) | |
#loc8 = loc("<eval_with_key>.4":11:54) | |
#loc9 = loc("<eval_with_key>.4":378:62) | |
#loc10 = loc("<eval_with_key>.4":11:58) | |
#loc11 = loc("<eval_with_key>.4":12:10) | |
#loc12 = loc("<eval_with_key>.4":555:51) | |
#loc13 = loc("<eval_with_key>.4":389:51) | |
#loc14 = loc("<eval_with_key>.4":374:73) | |
#loc15 = loc("<eval_with_key>.4":263:39) | |
#loc16 = loc("<eval_with_key>.4":125:40) | |
#loc17 = loc("<eval_with_key>.4":22:47) | |
#loc18 = loc("<eval_with_key>.4":22:42) | |
#loc19 = loc("<eval_with_key>.4":14:34) | |
#loc20 = loc("<eval_with_key>.4":12:39) | |
#loc21 = loc("<eval_with_key>.4":12:36) | |
#loc22 = loc("<eval_with_key>.4":11:50) | |
#loc23 = loc("<eval_with_key>.4":7:98) | |
#loc24 = loc("<eval_with_key>.4":7:90) | |
#loc25 = loc("<eval_with_key>.4":7:113) | |
#loc26 = loc("<eval_with_key>.4":12:69) | |
#loc27 = loc("<eval_with_key>.4":24:59) | |
#loc28 = loc("<eval_with_key>.4":126:58) | |
#loc29 = loc("<eval_with_key>.4":7:18) | |
#loc30 = loc("<eval_with_key>.4":10:20) | |
#loc31 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":11:11)) | |
#loc32 = loc("<eval_with_key>.4":11:11) | |
#loc33 = loc("<eval_with_key>.4":13:11)
#loc34 = loc("<eval_with_key>.4":14:10)
#loc35 = loc("<eval_with_key>.4":15:12)
#loc36 = loc("<eval_with_key>.4":16:10)
#loc37 = loc("<eval_with_key>.4":17:10)
#loc38 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":22:13))
#loc39 = loc("<eval_with_key>.4":22:13)
#loc40 = loc("<eval_with_key>.4":24:16)
#loc41 = loc("<eval_with_key>.4":25:18)
#loc42 = loc("<eval_with_key>.4":26:12)
#loc43 = loc("<eval_with_key>.4":28:18)
#loc44 = loc("<eval_with_key>.4":29:18)
#loc45 = loc("<eval_with_key>.4":30:12)
#loc46 = loc("<eval_with_key>.4":33:11)
#loc47 = loc("<eval_with_key>.4":36:20)
#loc48 = loc("<eval_with_key>.4":37:13)
#loc49 = loc("<eval_with_key>.4":38:12)
#loc50 = loc("<eval_with_key>.4":39:13)
#loc51 = loc("<eval_with_key>.4":40:12)
#loc52 = loc("<eval_with_key>.4":41:14)
#loc53 = loc("<eval_with_key>.4":42:12)
#loc54 = loc("<eval_with_key>.4":43:12)
#loc55 = loc("<eval_with_key>.4":48:13)
#loc56 = loc("<eval_with_key>.4":50:18)
#loc57 = loc("<eval_with_key>.4":51:18)
#loc58 = loc("<eval_with_key>.4":52:12)
#loc59 = loc("<eval_with_key>.4":54:18)
#loc60 = loc("<eval_with_key>.4":55:18)
#loc61 = loc("<eval_with_key>.4":56:12)
#loc62 = loc("<eval_with_key>.4":59:13)
#loc63 = loc("<eval_with_key>.4":62:20)
#loc64 = loc("<eval_with_key>.4":63:12)
#loc65 = loc("<eval_with_key>.4":64:10)
#loc66 = loc("<eval_with_key>.4":65:13)
#loc67 = loc("<eval_with_key>.4":66:12)
#loc68 = loc("<eval_with_key>.4":67:13)
#loc69 = loc("<eval_with_key>.4":68:12)
#loc70 = loc("<eval_with_key>.4":69:14)
#loc71 = loc("<eval_with_key>.4":70:12)
#loc72 = loc("<eval_with_key>.4":71:12)
#loc73 = loc("<eval_with_key>.4":76:13)
#loc74 = loc("<eval_with_key>.4":78:18)
#loc75 = loc("<eval_with_key>.4":79:18)
#loc76 = loc("<eval_with_key>.4":80:12)
#loc77 = loc("<eval_with_key>.4":82:19)
#loc78 = loc("<eval_with_key>.4":83:19)
#loc79 = loc("<eval_with_key>.4":84:12)
#loc80 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":87:13))
#loc81 = loc("<eval_with_key>.4":87:13)
#loc82 = loc("<eval_with_key>.4":88:16)
#loc83 = loc("<eval_with_key>.4":90:8)
#loc84 = loc(callsite(callsite("-":4395:15 at "-":5987:10) at "<eval_with_key>.4":91:13))
#loc85 = loc("<eval_with_key>.4":91:13)
#loc86 = loc("<eval_with_key>.4":92:21)
#loc87 = loc(callsite(callsite("-":4395:15 at "-":5987:10) at "<eval_with_key>.4":93:15))
#loc88 = loc("<eval_with_key>.4":93:15)
#loc89 = loc("<eval_with_key>.4":94:23)
#loc90 = loc("<eval_with_key>.4":95:10)
#loc91 = loc("<eval_with_key>.4":96:19)
#loc92 = loc("<eval_with_key>.4":98:12)
#loc93 = loc("<eval_with_key>.4":100:10)
#loc94 = loc("<eval_with_key>.4":101:15)
#loc95 = loc("<eval_with_key>.4":102:23)
#loc96 = loc("<eval_with_key>.4":103:15)
#loc97 = loc("<eval_with_key>.4":104:23)
#loc98 = loc("<eval_with_key>.4":105:12)
#loc99 = loc("<eval_with_key>.4":106:21)
#loc100 = loc("<eval_with_key>.4":108:12)
#loc101 = loc("<eval_with_key>.4":110:10)
#loc102 = loc("<eval_with_key>.4":111:15)
#loc103 = loc("<eval_with_key>.4":112:23)
#loc104 = loc("<eval_with_key>.4":113:15)
#loc105 = loc("<eval_with_key>.4":114:23)
#loc106 = loc("<eval_with_key>.4":115:12)
#loc107 = loc("<eval_with_key>.4":116:21)
#loc108 = loc("<eval_with_key>.4":118:12)
#loc109 = loc("<eval_with_key>.4":119:13)
#loc110 = loc("<eval_with_key>.4":120:14)
#loc111 = loc("<eval_with_key>.4":121:13)
#loc112 = loc("<eval_with_key>.4":122:16)
#loc113 = loc("<eval_with_key>.4":123:13)
#loc114 = loc("<eval_with_key>.4":124:16)
#loc115 = loc("<eval_with_key>.4":125:12)
#loc116 = loc("<eval_with_key>.4":126:18)
#loc117 = loc("<eval_with_key>.4":127:12)
#loc118 = loc(callsite(callsite("-":4395:15 at "-":5987:10) at "<eval_with_key>.4":128:15))
#loc119 = loc("<eval_with_key>.4":128:15)
#loc120 = loc("<eval_with_key>.4":129:23)
#loc121 = loc(callsite(callsite("-":4395:15 at "-":5987:10) at "<eval_with_key>.4":130:15))
#loc122 = loc("<eval_with_key>.4":130:15)
#loc123 = loc("<eval_with_key>.4":131:23)
#loc124 = loc("<eval_with_key>.4":132:12)
#loc125 = loc(callsite(callsite("-":10:12 at "-":5543:10) at "<eval_with_key>.4":134:15))
#loc126 = loc("<eval_with_key>.4":133:21)
#loc127 = loc("<eval_with_key>.4":134:15)
#loc128 = loc("<eval_with_key>.4":136:15)
#loc129 = loc(callsite(callsite("-":4227:13 at "-":6007:10) at "<eval_with_key>.4":137:23))
#loc130 = loc("<eval_with_key>.4":137:23)
#loc131 = loc("<eval_with_key>.4":138:15)
#loc132 = loc("<eval_with_key>.4":139:23)
#loc133 = loc("<eval_with_key>.4":140:12)
#loc134 = loc("<eval_with_key>.4":141:21)
#loc135 = loc("<eval_with_key>.4":142:16)
#loc136 = loc("<eval_with_key>.4":143:14)
#loc137 = loc("<eval_with_key>.4":145:10)
#loc138 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":146:14))
#loc139 = loc("<eval_with_key>.4":146:14)
#loc140 = loc("<eval_with_key>.4":148:12)
#loc141 = loc("<eval_with_key>.4":149:14)
#loc142 = loc("<eval_with_key>.4":150:18)
#loc143 = loc("<eval_with_key>.4":151:24)
#loc144 = loc("<eval_with_key>.4":152:13)
#loc145 = loc("<eval_with_key>.4":153:12)
#loc146 = loc("<eval_with_key>.4":154:12)
#loc147 = loc("<eval_with_key>.4":155:14)
#loc148 = loc("<eval_with_key>.4":156:12)
#loc149 = loc("<eval_with_key>.4":157:13)
#loc150 = loc("<eval_with_key>.4":158:13)
#loc151 = loc("<eval_with_key>.4":159:14)
#loc152 = loc("<eval_with_key>.4":160:12)
#loc153 = loc("<eval_with_key>.4":161:12)
#loc154 = loc("<eval_with_key>.4":166:14)
#loc155 = loc("<eval_with_key>.4":168:19)
#loc156 = loc("<eval_with_key>.4":169:19)
#loc157 = loc("<eval_with_key>.4":170:12)
#loc158 = loc("<eval_with_key>.4":172:19)
#loc159 = loc("<eval_with_key>.4":173:19)
#loc160 = loc("<eval_with_key>.4":174:13)
#loc161 = loc("<eval_with_key>.4":177:13)
#loc162 = loc("<eval_with_key>.4":180:20)
#loc163 = loc("<eval_with_key>.4":181:14)
#loc164 = loc("<eval_with_key>.4":182:12)
#loc165 = loc("<eval_with_key>.4":183:13)
#loc166 = loc("<eval_with_key>.4":184:13)
#loc167 = loc("<eval_with_key>.4":185:14)
#loc168 = loc("<eval_with_key>.4":186:12)
#loc169 = loc("<eval_with_key>.4":187:13)
#loc170 = loc("<eval_with_key>.4":192:14)
#loc171 = loc("<eval_with_key>.4":194:19)
#loc172 = loc("<eval_with_key>.4":195:19)
#loc173 = loc("<eval_with_key>.4":196:13)
#loc174 = loc("<eval_with_key>.4":198:19)
#loc175 = loc("<eval_with_key>.4":199:19)
#loc176 = loc("<eval_with_key>.4":200:13)
#loc177 = loc("<eval_with_key>.4":203:13)
#loc178 = loc("<eval_with_key>.4":206:20)
#loc179 = loc("<eval_with_key>.4":207:13)
#loc180 = loc("<eval_with_key>.4":208:12)
#loc181 = loc("<eval_with_key>.4":209:14)
#loc182 = loc("<eval_with_key>.4":210:14)
#loc183 = loc("<eval_with_key>.4":211:12)
#loc184 = loc("<eval_with_key>.4":212:13)
#loc185 = loc("<eval_with_key>.4":213:13)
#loc186 = loc("<eval_with_key>.4":214:14)
#loc187 = loc("<eval_with_key>.4":215:12)
#loc188 = loc("<eval_with_key>.4":216:13)
#loc189 = loc("<eval_with_key>.4":221:14)
#loc190 = loc("<eval_with_key>.4":223:19)
#loc191 = loc("<eval_with_key>.4":224:19)
#loc192 = loc("<eval_with_key>.4":225:13)
#loc193 = loc("<eval_with_key>.4":227:19)
#loc194 = loc("<eval_with_key>.4":228:19)
#loc195 = loc("<eval_with_key>.4":229:13)
#loc196 = loc("<eval_with_key>.4":232:13)
#loc197 = loc("<eval_with_key>.4":235:20)
#loc198 = loc("<eval_with_key>.4":236:14)
#loc199 = loc("<eval_with_key>.4":237:12)
#loc200 = loc("<eval_with_key>.4":238:13)
#loc201 = loc("<eval_with_key>.4":239:13)
#loc202 = loc("<eval_with_key>.4":240:14)
#loc203 = loc("<eval_with_key>.4":241:12)
#loc204 = loc("<eval_with_key>.4":242:13)
#loc205 = loc("<eval_with_key>.4":247:14)
#loc206 = loc("<eval_with_key>.4":249:19)
#loc207 = loc("<eval_with_key>.4":250:19)
#loc208 = loc("<eval_with_key>.4":251:13)
#loc209 = loc("<eval_with_key>.4":253:19)
#loc210 = loc("<eval_with_key>.4":254:19)
#loc211 = loc("<eval_with_key>.4":255:13)
#loc212 = loc("<eval_with_key>.4":258:13)
#loc213 = loc("<eval_with_key>.4":261:20)
#loc214 = loc("<eval_with_key>.4":262:13)
#loc215 = loc("<eval_with_key>.4":263:12)
#loc216 = loc("<eval_with_key>.4":264:14)
#loc217 = loc("<eval_with_key>.4":265:14)
#loc218 = loc("<eval_with_key>.4":266:12)
#loc219 = loc("<eval_with_key>.4":267:13)
#loc220 = loc("<eval_with_key>.4":268:13)
#loc221 = loc("<eval_with_key>.4":269:14)
#loc222 = loc("<eval_with_key>.4":270:12)
#loc223 = loc("<eval_with_key>.4":271:13)
#loc224 = loc("<eval_with_key>.4":276:14)
#loc225 = loc("<eval_with_key>.4":278:19)
#loc226 = loc("<eval_with_key>.4":279:19)
#loc227 = loc("<eval_with_key>.4":280:13)
#loc228 = loc("<eval_with_key>.4":282:19)
#loc229 = loc("<eval_with_key>.4":283:19)
#loc230 = loc("<eval_with_key>.4":284:13)
#loc231 = loc("<eval_with_key>.4":287:13)
#loc232 = loc("<eval_with_key>.4":290:20)
#loc233 = loc("<eval_with_key>.4":291:14)
#loc234 = loc("<eval_with_key>.4":292:12)
#loc235 = loc("<eval_with_key>.4":293:13)
#loc236 = loc("<eval_with_key>.4":294:13)
#loc237 = loc("<eval_with_key>.4":295:14)
#loc238 = loc("<eval_with_key>.4":296:12)
#loc239 = loc("<eval_with_key>.4":297:13)
#loc240 = loc("<eval_with_key>.4":302:14)
#loc241 = loc("<eval_with_key>.4":304:19)
#loc242 = loc("<eval_with_key>.4":305:19)
#loc243 = loc("<eval_with_key>.4":306:13)
#loc244 = loc("<eval_with_key>.4":308:19)
#loc245 = loc("<eval_with_key>.4":309:19)
#loc246 = loc("<eval_with_key>.4":310:13)
#loc247 = loc("<eval_with_key>.4":313:13)
#loc248 = loc("<eval_with_key>.4":316:20)
#loc249 = loc("<eval_with_key>.4":317:13)
#loc250 = loc("<eval_with_key>.4":318:12)
#loc251 = loc("<eval_with_key>.4":319:14)
#loc252 = loc("<eval_with_key>.4":320:14)
#loc253 = loc("<eval_with_key>.4":321:12)
#loc254 = loc("<eval_with_key>.4":322:13)
#loc255 = loc("<eval_with_key>.4":323:13)
#loc256 = loc("<eval_with_key>.4":324:14)
#loc257 = loc("<eval_with_key>.4":325:12)
#loc258 = loc("<eval_with_key>.4":326:13)
#loc259 = loc("<eval_with_key>.4":331:14)
#loc260 = loc("<eval_with_key>.4":333:19)
#loc261 = loc("<eval_with_key>.4":334:19)
#loc262 = loc("<eval_with_key>.4":335:13)
#loc263 = loc("<eval_with_key>.4":337:19)
#loc264 = loc("<eval_with_key>.4":338:19)
#loc265 = loc("<eval_with_key>.4":339:13)
#loc266 = loc("<eval_with_key>.4":342:13)
#loc267 = loc("<eval_with_key>.4":345:21)
#loc268 = loc("<eval_with_key>.4":346:14)
#loc269 = loc("<eval_with_key>.4":347:13)
#loc270 = loc("<eval_with_key>.4":348:14)
#loc271 = loc("<eval_with_key>.4":349:13)
#loc272 = loc("<eval_with_key>.4":350:15)
#loc273 = loc("<eval_with_key>.4":351:13)
#loc274 = loc("<eval_with_key>.4":352:13)
#loc275 = loc("<eval_with_key>.4":357:14)
#loc276 = loc("<eval_with_key>.4":359:19)
#loc277 = loc("<eval_with_key>.4":360:19)
#loc278 = loc("<eval_with_key>.4":361:13)
#loc279 = loc("<eval_with_key>.4":363:19)
#loc280 = loc("<eval_with_key>.4":364:19)
#loc281 = loc("<eval_with_key>.4":365:13)
#loc282 = loc("<eval_with_key>.4":368:13)
#loc283 = loc("<eval_with_key>.4":371:21)
#loc284 = loc("<eval_with_key>.4":372:13)
#loc285 = loc("<eval_with_key>.4":373:12)
#loc286 = loc("<eval_with_key>.4":374:25)
#loc287 = loc("<eval_with_key>.4":377:21)
#loc288 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":378:14))
#loc289 = loc("<eval_with_key>.4":378:14)
#loc290 = loc("<eval_with_key>.4":379:13)
#loc291 = loc("<eval_with_key>.4":380:14)
#loc292 = loc("<eval_with_key>.4":381:13)
#loc293 = loc("<eval_with_key>.4":382:15)
#loc294 = loc("<eval_with_key>.4":383:13)
#loc295 = loc("<eval_with_key>.4":384:13)
#loc296 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":389:14))
#loc297 = loc("<eval_with_key>.4":389:14)
#loc298 = loc("<eval_with_key>.4":391:19)
#loc299 = loc("<eval_with_key>.4":392:19)
#loc300 = loc("<eval_with_key>.4":393:13)
#loc301 = loc("<eval_with_key>.4":395:19)
#loc302 = loc("<eval_with_key>.4":396:19)
#loc303 = loc("<eval_with_key>.4":397:13)
#loc304 = loc("<eval_with_key>.4":400:14)
#loc305 = loc("<eval_with_key>.4":403:21)
#loc306 = loc("<eval_with_key>.4":404:14)
#loc307 = loc("<eval_with_key>.4":405:13)
#loc308 = loc("<eval_with_key>.4":406:14)
#loc309 = loc("<eval_with_key>.4":407:13)
#loc310 = loc("<eval_with_key>.4":408:15)
#loc311 = loc("<eval_with_key>.4":409:13)
#loc312 = loc("<eval_with_key>.4":410:13)
#loc313 = loc("<eval_with_key>.4":415:14)
#loc314 = loc("<eval_with_key>.4":417:19)
#loc315 = loc("<eval_with_key>.4":418:19)
#loc316 = loc("<eval_with_key>.4":419:13)
#loc317 = loc("<eval_with_key>.4":421:19)
#loc318 = loc("<eval_with_key>.4":422:19)
#loc319 = loc("<eval_with_key>.4":423:13)
#loc320 = loc("<eval_with_key>.4":426:14)
#loc321 = loc("<eval_with_key>.4":429:21)
#loc322 = loc("<eval_with_key>.4":430:13)
#loc323 = loc("<eval_with_key>.4":431:12)
#loc324 = loc("<eval_with_key>.4":432:14)
#loc325 = loc("<eval_with_key>.4":433:13)
#loc326 = loc("<eval_with_key>.4":434:14)
#loc327 = loc("<eval_with_key>.4":435:13)
#loc328 = loc("<eval_with_key>.4":436:15)
#loc329 = loc("<eval_with_key>.4":437:13)
#loc330 = loc("<eval_with_key>.4":438:13)
#loc331 = loc("<eval_with_key>.4":443:14)
#loc332 = loc("<eval_with_key>.4":445:19)
#loc333 = loc("<eval_with_key>.4":446:19)
#loc334 = loc("<eval_with_key>.4":447:13)
#loc335 = loc("<eval_with_key>.4":449:19)
#loc336 = loc("<eval_with_key>.4":450:19)
#loc337 = loc("<eval_with_key>.4":451:13)
#loc338 = loc("<eval_with_key>.4":454:14)
#loc339 = loc("<eval_with_key>.4":457:21)
#loc340 = loc("<eval_with_key>.4":458:14)
#loc341 = loc("<eval_with_key>.4":459:13)
#loc342 = loc("<eval_with_key>.4":460:14)
#loc343 = loc("<eval_with_key>.4":461:13)
#loc344 = loc("<eval_with_key>.4":462:15)
#loc345 = loc("<eval_with_key>.4":463:13)
#loc346 = loc("<eval_with_key>.4":464:13)
#loc347 = loc("<eval_with_key>.4":469:14)
#loc348 = loc("<eval_with_key>.4":471:19)
#loc349 = loc("<eval_with_key>.4":472:19)
#loc350 = loc("<eval_with_key>.4":473:13)
#loc351 = loc("<eval_with_key>.4":475:19)
#loc352 = loc("<eval_with_key>.4":476:19)
#loc353 = loc("<eval_with_key>.4":477:13)
#loc354 = loc("<eval_with_key>.4":480:14)
#loc355 = loc("<eval_with_key>.4":483:21)
#loc356 = loc("<eval_with_key>.4":484:13)
#loc357 = loc("<eval_with_key>.4":485:12)
#loc358 = loc("<eval_with_key>.4":486:14)
#loc359 = loc("<eval_with_key>.4":487:13)
#loc360 = loc("<eval_with_key>.4":488:14)
#loc361 = loc("<eval_with_key>.4":489:13)
#loc362 = loc("<eval_with_key>.4":490:15)
#loc363 = loc("<eval_with_key>.4":491:13)
#loc364 = loc("<eval_with_key>.4":492:13)
#loc365 = loc("<eval_with_key>.4":497:14)
#loc366 = loc("<eval_with_key>.4":499:19)
#loc367 = loc("<eval_with_key>.4":500:19)
#loc368 = loc("<eval_with_key>.4":501:13)
#loc369 = loc("<eval_with_key>.4":503:19)
#loc370 = loc("<eval_with_key>.4":504:19)
#loc371 = loc("<eval_with_key>.4":505:13)
#loc372 = loc("<eval_with_key>.4":508:14)
#loc373 = loc("<eval_with_key>.4":511:21)
#loc374 = loc("<eval_with_key>.4":512:14)
#loc375 = loc("<eval_with_key>.4":513:13)
#loc376 = loc("<eval_with_key>.4":514:14)
#loc377 = loc("<eval_with_key>.4":515:13)
#loc378 = loc("<eval_with_key>.4":516:15)
#loc379 = loc("<eval_with_key>.4":517:13)
#loc380 = loc("<eval_with_key>.4":518:13)
#loc381 = loc("<eval_with_key>.4":523:14)
#loc382 = loc("<eval_with_key>.4":525:19)
#loc383 = loc("<eval_with_key>.4":526:19)
#loc384 = loc("<eval_with_key>.4":527:13)
#loc385 = loc("<eval_with_key>.4":529:19)
#loc386 = loc("<eval_with_key>.4":530:19)
#loc387 = loc("<eval_with_key>.4":531:13)
#loc388 = loc("<eval_with_key>.4":534:14)
#loc389 = loc("<eval_with_key>.4":537:21)
#loc390 = loc("<eval_with_key>.4":538:13)
#loc391 = loc("<eval_with_key>.4":539:12)
#loc392 = loc("<eval_with_key>.4":540:27)
#loc393 = loc("<eval_with_key>.4":543:21)
#loc394 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":544:14))
#loc395 = loc("<eval_with_key>.4":544:14)
#loc396 = loc("<eval_with_key>.4":546:14)
#loc397 = loc("<eval_with_key>.4":547:13)
#loc398 = loc("<eval_with_key>.4":548:15)
#loc399 = loc("<eval_with_key>.4":549:13)
#loc400 = loc("<eval_with_key>.4":550:13)
#loc401 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":555:14))
#loc402 = loc("<eval_with_key>.4":555:14)
#loc403 = loc("<eval_with_key>.4":557:19)
#loc404 = loc("<eval_with_key>.4":558:19)
#loc405 = loc("<eval_with_key>.4":559:13)
#loc406 = loc("<eval_with_key>.4":561:19)
#loc407 = loc("<eval_with_key>.4":562:19)
#loc408 = loc("<eval_with_key>.4":563:13)
#loc409 = loc("<eval_with_key>.4":566:14)
#loc410 = loc("<eval_with_key>.4":569:21)
#loc411 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":570:14))
#loc412 = loc("<eval_with_key>.4":570:14)
#loc413 = loc("<eval_with_key>.4":572:14)
#loc414 = loc("<eval_with_key>.4":573:13)
#loc415 = loc("<eval_with_key>.4":574:15)
#loc416 = loc("<eval_with_key>.4":575:13)
#loc417 = loc("<eval_with_key>.4":576:13)
#loc418 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":581:14))
#loc419 = loc("<eval_with_key>.4":581:14)
#loc420 = loc("<eval_with_key>.4":583:19)
#loc421 = loc("<eval_with_key>.4":584:19)
#loc422 = loc("<eval_with_key>.4":585:13)
#loc423 = loc("<eval_with_key>.4":587:19)
#loc424 = loc("<eval_with_key>.4":588:19)
#loc425 = loc("<eval_with_key>.4":589:13)
#loc426 = loc("<eval_with_key>.4":592:14)
#loc427 = loc("<eval_with_key>.4":595:21)
#loc428 = loc("<eval_with_key>.4":598:21)
#loc429 = loc("<eval_with_key>.4":599:13)
#loc430 = loc("<eval_with_key>.4":600:12)
#loc431 = loc("<eval_with_key>.4":601:14)
#loc432 = loc("<eval_with_key>.4":602:13)
#loc433 = loc("<eval_with_key>.4":603:14)
#loc434 = loc("<eval_with_key>.4":604:13)
#loc435 = loc("<eval_with_key>.4":605:15)
#loc436 = loc("<eval_with_key>.4":606:13)
#loc437 = loc("<eval_with_key>.4":607:13)
#loc438 = loc("<eval_with_key>.4":612:14)
#loc439 = loc("<eval_with_key>.4":614:19)
#loc440 = loc("<eval_with_key>.4":615:19)
#loc441 = loc("<eval_with_key>.4":616:13)
#loc442 = loc("<eval_with_key>.4":618:19)
#loc443 = loc("<eval_with_key>.4":619:19)
#loc444 = loc("<eval_with_key>.4":620:13)
#loc445 = loc("<eval_with_key>.4":623:14)
#loc446 = loc("<eval_with_key>.4":626:21)
#loc447 = loc("<eval_with_key>.4":627:14)
#loc448 = loc("<eval_with_key>.4":628:13)
#loc449 = loc("<eval_with_key>.4":629:14)
#loc450 = loc("<eval_with_key>.4":630:13)
#loc451 = loc("<eval_with_key>.4":631:15)
#loc452 = loc("<eval_with_key>.4":632:13)
#loc453 = loc("<eval_with_key>.4":633:13)
#loc454 = loc("<eval_with_key>.4":638:14)
#loc455 = loc("<eval_with_key>.4":640:19)
#loc456 = loc("<eval_with_key>.4":641:19)
#loc457 = loc("<eval_with_key>.4":642:13)
#loc458 = loc("<eval_with_key>.4":644:19)
#loc459 = loc("<eval_with_key>.4":645:19)
#loc460 = loc("<eval_with_key>.4":646:13)
#loc461 = loc("<eval_with_key>.4":649:14)
#loc462 = loc("<eval_with_key>.4":652:21)
#loc463 = loc("<eval_with_key>.4":653:13)
#loc464 = loc("<eval_with_key>.4":654:13)
#loc465 = loc("<eval_with_key>.4":655:14)
#loc466 = loc("<eval_with_key>.4":656:13)
#loc467 = loc("<eval_with_key>.4":657:14)
#loc468 = loc("<eval_with_key>.4":658:13)
#loc469 = loc("<eval_with_key>.4":659:15)
#loc470 = loc("<eval_with_key>.4":660:13)
#loc471 = loc("<eval_with_key>.4":661:13)
#loc472 = loc("<eval_with_key>.4":666:14)
#loc473 = loc("<eval_with_key>.4":668:19)
#loc474 = loc("<eval_with_key>.4":669:19)
#loc475 = loc("<eval_with_key>.4":670:13)
#loc476 = loc("<eval_with_key>.4":672:19)
#loc477 = loc("<eval_with_key>.4":673:19)
#loc478 = loc("<eval_with_key>.4":674:13)
#loc479 = loc("<eval_with_key>.4":677:14)
#loc480 = loc("<eval_with_key>.4":680:21)
#loc481 = loc("<eval_with_key>.4":681:14)
#loc482 = loc("<eval_with_key>.4":682:13)
#loc483 = loc("<eval_with_key>.4":683:14)
#loc484 = loc("<eval_with_key>.4":684:13)
#loc485 = loc("<eval_with_key>.4":685:15)
#loc486 = loc("<eval_with_key>.4":686:13)
#loc487 = loc("<eval_with_key>.4":687:13)
#loc488 = loc("<eval_with_key>.4":692:14)
#loc489 = loc("<eval_with_key>.4":694:19)
#loc490 = loc("<eval_with_key>.4":695:19)
#loc491 = loc("<eval_with_key>.4":696:13)
#loc492 = loc("<eval_with_key>.4":698:19)
#loc493 = loc("<eval_with_key>.4":699:19)
#loc494 = loc("<eval_with_key>.4":700:13)
#loc495 = loc("<eval_with_key>.4":703:14)
#loc496 = loc("<eval_with_key>.4":706:21)
#loc497 = loc("<eval_with_key>.4":707:13)
#loc498 = loc("<eval_with_key>.4":708:13)
#loc499 = loc("<eval_with_key>.4":709:27)
#loc500 = loc("<eval_with_key>.4":712:21)
#loc501 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":713:14))
#loc502 = loc("<eval_with_key>.4":713:14)
#loc503 = loc("<eval_with_key>.4":715:14)
#loc504 = loc("<eval_with_key>.4":716:13)
#loc505 = loc("<eval_with_key>.4":717:15)
#loc506 = loc("<eval_with_key>.4":718:13)
#loc507 = loc("<eval_with_key>.4":719:13)
#loc508 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":724:14))
#loc509 = loc("<eval_with_key>.4":724:14)
#loc510 = loc("<eval_with_key>.4":726:19)
#loc511 = loc("<eval_with_key>.4":727:19)
#loc512 = loc("<eval_with_key>.4":728:13)
#loc513 = loc("<eval_with_key>.4":730:19)
#loc514 = loc("<eval_with_key>.4":731:19)
#loc515 = loc("<eval_with_key>.4":732:13)
#loc516 = loc("<eval_with_key>.4":735:14)
#loc517 = loc("<eval_with_key>.4":738:21)
#loc518 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":739:14))
#loc519 = loc("<eval_with_key>.4":739:14)
#loc520 = loc("<eval_with_key>.4":740:13)
#loc521 = loc("<eval_with_key>.4":741:14)
#loc522 = loc("<eval_with_key>.4":742:13)
#loc523 = loc("<eval_with_key>.4":743:15)
#loc524 = loc("<eval_with_key>.4":744:13)
#loc525 = loc("<eval_with_key>.4":745:13)
#loc526 = loc(callsite(callsite("-":4227:13 at "-":5999:10) at "<eval_with_key>.4":750:14))
#loc527 = loc("<eval_with_key>.4":750:14)
#loc528 = loc("<eval_with_key>.4":752:19)
#loc529 = loc("<eval_with_key>.4":753:19)
#loc530 = loc("<eval_with_key>.4":754:13)
#loc531 = loc("<eval_with_key>.4":756:19)
#loc532 = loc("<eval_with_key>.4":757:19)
#loc533 = loc("<eval_with_key>.4":758:13)
#loc534 = loc("<eval_with_key>.4":761:14)
#loc535 = loc("<eval_with_key>.4":764:21)
#loc536 = loc("<eval_with_key>.4":767:21)
#loc537 = loc("<eval_with_key>.4":768:13)
#loc538 = loc("<eval_with_key>.4":769:13)
#loc539 = loc("<eval_with_key>.4":770:14)
#loc540 = loc("<eval_with_key>.4":771:13)
#loc541 = loc("<eval_with_key>.4":772:14)
#loc542 = loc("<eval_with_key>.4":773:13)
#loc543 = loc("<eval_with_key>.4":774:15)
#loc544 = loc("<eval_with_key>.4":775:13)
#loc545 = loc("<eval_with_key>.4":776:13)
#loc546 = loc("<eval_with_key>.4":781:14)
#loc547 = loc("<eval_with_key>.4":783:20)
#loc548 = loc("<eval_with_key>.4":784:20)
#loc549 = loc("<eval_with_key>.4":785:13)
#loc550 = loc("<eval_with_key>.4":787:20)
#loc551 = loc("<eval_with_key>.4":788:20)
#loc552 = loc("<eval_with_key>.4":789:13)
#loc553 = loc("<eval_with_key>.4":792:14)
#loc554 = loc("<eval_with_key>.4":795:21)
#loc555 = loc("<eval_with_key>.4":796:14)
#loc556 = loc("<eval_with_key>.4":797:13)
#loc557 = loc("<eval_with_key>.4":798:14)
#loc558 = loc("<eval_with_key>.4":799:13)
#loc559 = loc("<eval_with_key>.4":800:15)
#loc560 = loc("<eval_with_key>.4":801:13)
#loc561 = loc("<eval_with_key>.4":802:13)
#loc562 = loc("<eval_with_key>.4":807:14)
#loc563 = loc("<eval_with_key>.4":809:20)
#loc564 = loc("<eval_with_key>.4":810:20)
#loc565 = loc("<eval_with_key>.4":811:13)
#loc566 = loc("<eval_with_key>.4":813:20)
#loc567 = loc("<eval_with_key>.4":814:20)
#loc568 = loc("<eval_with_key>.4":815:13)
#loc569 = loc("<eval_with_key>.4":818:14)
#loc570 = loc("<eval_with_key>.4":821:21)
#loc571 = loc("<eval_with_key>.4":822:13)
#loc572 = loc("<eval_with_key>.4":823:13)
#loc573 = loc("<eval_with_key>.4":824:14)
#loc574 = loc("<eval_with_key>.4":825:13)
#loc575 = loc("<eval_with_key>.4":826:14)
#loc576 = loc("<eval_with_key>.4":827:13)
#loc577 = loc("<eval_with_key>.4":828:15)
#loc578 = loc("<eval_with_key>.4":829:13)
#loc579 = loc("<eval_with_key>.4":830:13)
#loc580 = loc("<eval_with_key>.4":835:14)
#loc581 = loc("<eval_with_key>.4":837:20)
#loc582 = loc("<eval_with_key>.4":838:20)
#loc583 = loc("<eval_with_key>.4":839:13)
#loc584 = loc("<eval_with_key>.4":841:20)
#loc585 = loc("<eval_with_key>.4":842:20)
#loc586 = loc("<eval_with_key>.4":843:13)
#loc587 = loc("<eval_with_key>.4":846:14)
#loc588 = loc("<eval_with_key>.4":849:21)
#loc589 = loc("<eval_with_key>.4":850:14)
#loc590 = loc("<eval_with_key>.4":851:13)
#loc591 = loc("<eval_with_key>.4":852:14)
#loc592 = loc("<eval_with_key>.4":853:13)
#loc593 = loc("<eval_with_key>.4":854:15)
#loc594 = loc("<eval_with_key>.4":855:13)
#loc595 = loc("<eval_with_key>.4":856:13)
#loc596 = loc("<eval_with_key>.4":861:14)
#loc597 = loc("<eval_with_key>.4":863:20)
#loc598 = loc("<eval_with_key>.4":864:20)
#loc599 = loc("<eval_with_key>.4":865:13)
#loc600 = loc("<eval_with_key>.4":867:20)
#loc601 = loc("<eval_with_key>.4":868:20)
#loc602 = loc("<eval_with_key>.4":869:13)
#loc603 = loc("<eval_with_key>.4":872:14)
#loc604 = loc("<eval_with_key>.4":875:21)
#loc605 = loc("<eval_with_key>.4":876:13)
#loc606 = loc("<eval_with_key>.4":877:13)
#loc607 = loc("<eval_with_key>.4":878:14)
#loc608 = loc("<eval_with_key>.4":879:13)
#loc609 = loc("<eval_with_key>.4":880:14)
#loc610 = loc("<eval_with_key>.4":881:13)
#loc611 = loc("<eval_with_key>.4":882:15)
#loc612 = loc("<eval_with_key>.4":883:13)
#loc613 = loc("<eval_with_key>.4":884:13)
#loc614 = loc("<eval_with_key>.4":889:14)
#loc615 = loc("<eval_with_key>.4":891:20)
#loc616 = loc("<eval_with_key>.4":892:20)
#loc617 = loc("<eval_with_key>.4":893:13)
#loc618 = loc("<eval_with_key>.4":895:20)
#loc619 = loc("<eval_with_key>.4":896:20)
#loc620 = loc("<eval_with_key>.4":897:13)
#loc621 = loc("<eval_with_key>.4":900:14)
#loc622 = loc("<eval_with_key>.4":903:21) |
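// The #locN aliases above are MLIR location attributes referenced via loc(#locN)
// throughout the module. Each loc("<eval_with_key>.4":LINE:COL) points at a
// line/column of the Python source that torch.fx synthesizes for the traced
// forward() (FX registers its generated code under the pseudo-filename
// "<eval_with_key>.N" rather than a real file on disk). The
// callsite(inner at outer) forms preserve an inlined call chain, innermost
// location first; the "-" buffer name in those entries presumably denotes a
// source buffer with no recorded filename.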