Created
April 22, 2024 18:32
-
-
Save AmosLewis/8ddf6301e0db6ff5b50418881d82abd6 to your computer and use it in GitHub Desktop.
This file has been truncated, but you can view the full file.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
module { | |
func.func @torch_jit(%arg0: !torch.vtensor<[1,3,224,224],f32>) -> !torch.vtensor<[1,3,896,896],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.13.1"} { | |
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x3x3x3xf32>) : !torch.vtensor<[64,3,3,3],f32> | |
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%131 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%132 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%133 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%134 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%135 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%136 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%137 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%138 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%139 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%140 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%141 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%142 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%143 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%144 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%145 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%146 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%147 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%148 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%149 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%150 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%151 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%152 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%153 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%154 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%155 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%156 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%157 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%158 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%159 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%160 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%161 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%162 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%163 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%164 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%165 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%166 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%167 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%168 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%169 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%170 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%171 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%172 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%173 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%174 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%175 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%176 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%177 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%178 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%179 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%180 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%181 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%182 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%183 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%184 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%185 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%186 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%187 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%188 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%189 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%190 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%191 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%192 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%193 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%194 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%195 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%196 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%197 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%198 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%199 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%200 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%201 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%202 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%203 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%204 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%205 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%206 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%207 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%208 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%209 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%210 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%211 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%212 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%213 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%214 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%215 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%216 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%217 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%218 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%219 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%220 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%221 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%222 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%223 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%224 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%225 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%226 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%227 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%228 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%229 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%230 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%231 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%232 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%233 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%234 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%235 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%236 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%237 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%238 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%239 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%240 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%241 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%242 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%243 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%244 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%245 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%246 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%247 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%248 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%249 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%250 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%251 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%252 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%253 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%254 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%255 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%256 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%257 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%258 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%259 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%260 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%261 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%262 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%263 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%264 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%265 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%266 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%267 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%268 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%269 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%270 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%271 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%272 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%273 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%274 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%275 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%276 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%277 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%278 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%279 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%280 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%281 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%282 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%283 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%284 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%285 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%286 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%287 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%288 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%289 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%290 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%291 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%292 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%293 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%294 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%295 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%296 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%297 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%298 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%299 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%300 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%301 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%302 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%303 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%304 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%305 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%306 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%307 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%308 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%309 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%310 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%311 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%312 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%313 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%314 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%315 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%316 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%317 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%318 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%319 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%320 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%321 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%322 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%323 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%324 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%325 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%326 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%327 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%328 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%329 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%330 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%331 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%332 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%333 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%334 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%335 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%336 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%337 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%338 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%339 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%340 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%341 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%342 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%343 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%344 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%345 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%346 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%347 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%348 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%349 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%350 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%351 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%352 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%353 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%354 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%355 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%356 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%357 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%358 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%359 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%360 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%361 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%362 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%363 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%364 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%365 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%366 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%367 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%368 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%369 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%370 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%371 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%372 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%373 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%374 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%375 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%376 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%377 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%378 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%379 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%380 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%381 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%382 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%383 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%384 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%385 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%386 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%387 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%388 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%389 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%390 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%391 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%392 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%393 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%394 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%395 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%396 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%397 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%398 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%399 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%400 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%401 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%402 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%403 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%404 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%405 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%406 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%407 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%408 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%409 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%410 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%411 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%412 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%413 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%414 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%415 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%416 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%417 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%418 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%419 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%420 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%421 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%422 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%423 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%424 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%425 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%426 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%427 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%428 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%429 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%430 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%431 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%432 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%433 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%434 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%435 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%436 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%437 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%438 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%439 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%440 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%441 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%442 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%443 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%444 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%445 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%446 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%447 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%448 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%449 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%450 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%451 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%452 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%453 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%454 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%455 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%456 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%457 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%458 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%459 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%460 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%461 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%462 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%463 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%464 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%465 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%466 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%467 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%468 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%469 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%470 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%471 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%472 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%473 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%474 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%475 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%476 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%477 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%478 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%479 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%480 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%481 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%482 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%483 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%484 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%485 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%486 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%487 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%488 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%489 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%490 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%491 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%492 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%493 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%494 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%495 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%496 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%497 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%498 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%499 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%500 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%501 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%502 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%503 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%504 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%505 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%506 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%507 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%508 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%509 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%510 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%511 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%512 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%513 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%514 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%515 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%516 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%517 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%518 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%519 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%520 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%521 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%522 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%523 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%524 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%525 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%526 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%527 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%528 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%529 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%530 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%531 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%532 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%533 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%534 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%535 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%536 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%537 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%538 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%539 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%540 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%541 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%542 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%543 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%544 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%545 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%546 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%547 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%548 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%549 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%550 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%551 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%552 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%553 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%554 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%555 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%556 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%557 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%558 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%559 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%560 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%561 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%562 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%563 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%564 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%565 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%566 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%567 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%568 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%569 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%570 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%571 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%572 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%573 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%574 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%575 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%576 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%577 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%578 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%579 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%580 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%581 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%582 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%583 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%584 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%585 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%586 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%587 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%588 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%589 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%590 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%591 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%592 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%593 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%594 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%595 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%596 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%597 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%598 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%599 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%600 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%601 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%602 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%603 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%604 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%605 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%606 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%607 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%608 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%609 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%610 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%611 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%612 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%613 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%614 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%615 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%616 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%617 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%618 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%619 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%620 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%621 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%622 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%623 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%624 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%625 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%626 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%627 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%628 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%629 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%630 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%631 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%632 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%633 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%634 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%635 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%636 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%637 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%638 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%639 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%640 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%641 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%642 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%643 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%644 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%645 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%646 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%647 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%648 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%649 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%650 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%651 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%652 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%653 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%654 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%655 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%656 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%657 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%658 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%659 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%660 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%661 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%662 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%663 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%664 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%665 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%666 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%667 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%668 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%669 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%670 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%671 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%672 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%673 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%674 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%675 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%676 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%677 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%678 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%679 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%680 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%681 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%682 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x64x3x3xf32>) : !torch.vtensor<[32,64,3,3],f32> | |
%683 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%684 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x96x3x3xf32>) : !torch.vtensor<[32,96,3,3],f32> | |
%685 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%686 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x128x3x3xf32>) : !torch.vtensor<[32,128,3,3],f32> | |
%687 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%688 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x160x3x3xf32>) : !torch.vtensor<[32,160,3,3],f32> | |
%689 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32> | |
%690 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x192x3x3xf32>) : !torch.vtensor<[64,192,3,3],f32> | |
%691 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%692 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32> | |
%693 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%694 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32> | |
%695 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%696 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32> | |
%697 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%698 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32> | |
%699 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32> | |
%700 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3x64x3x3xf32>) : !torch.vtensor<[3,64,3,3],f32> | |
%701 = torch.vtensor.literal(dense<[0.00537109375, 0.00732421875, 0.0163574219]> : tensor<3xf32>) : !torch.vtensor<[3],f32> | |
%none = torch.constant.none | |
%702 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%703 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%704 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12 = torch.constant.int 12 | |
%705 = torch.aten.item %703 : !torch.vtensor<[],f32> -> !torch.float | |
%706 = torch.aten.item %704 : !torch.vtensor<[],si8> -> !torch.int | |
%707 = torch.aten.quantize_per_tensor %702, %705, %706, %int12 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%708 = torch.aten.int_repr %707 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%709 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%710 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%711 = torch.aten.item %709 : !torch.vtensor<[],f32> -> !torch.float | |
%712 = torch.aten.item %710 : !torch.vtensor<[],si8> -> !torch.int | |
%713 = torch.aten._make_per_tensor_quantized_tensor %708, %711, %712 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%714 = torch.aten.dequantize.self %713 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%715 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%716 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%717 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_0 = torch.constant.int 12 | |
%718 = torch.aten.item %716 : !torch.vtensor<[],f32> -> !torch.float | |
%719 = torch.aten.item %717 : !torch.vtensor<[],si8> -> !torch.int | |
%720 = torch.aten.quantize_per_tensor %715, %718, %719, %int12_0 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%721 = torch.aten.int_repr %720 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%722 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%723 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%724 = torch.aten.item %722 : !torch.vtensor<[],f32> -> !torch.float | |
%725 = torch.aten.item %723 : !torch.vtensor<[],si8> -> !torch.int | |
%726 = torch.aten._make_per_tensor_quantized_tensor %721, %724, %725 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%727 = torch.aten.dequantize.self %726 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%728 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%729 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%730 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1 = torch.constant.int 12 | |
%731 = torch.aten.item %729 : !torch.vtensor<[],f32> -> !torch.float | |
%732 = torch.aten.item %730 : !torch.vtensor<[],si8> -> !torch.int | |
%733 = torch.aten.quantize_per_tensor %728, %731, %732, %int12_1 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%734 = torch.aten.int_repr %733 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%735 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%736 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%737 = torch.aten.item %735 : !torch.vtensor<[],f32> -> !torch.float | |
%738 = torch.aten.item %736 : !torch.vtensor<[],si8> -> !torch.int | |
%739 = torch.aten._make_per_tensor_quantized_tensor %734, %737, %738 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%740 = torch.aten.dequantize.self %739 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%741 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%742 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%743 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_2 = torch.constant.int 12 | |
%744 = torch.aten.item %742 : !torch.vtensor<[],f32> -> !torch.float | |
%745 = torch.aten.item %743 : !torch.vtensor<[],si8> -> !torch.int | |
%746 = torch.aten.quantize_per_tensor %741, %744, %745, %int12_2 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%747 = torch.aten.int_repr %746 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%748 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%749 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%750 = torch.aten.item %748 : !torch.vtensor<[],f32> -> !torch.float | |
%751 = torch.aten.item %749 : !torch.vtensor<[],si8> -> !torch.int | |
%752 = torch.aten._make_per_tensor_quantized_tensor %747, %750, %751 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%753 = torch.aten.dequantize.self %752 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%754 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%755 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%756 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_3 = torch.constant.int 12 | |
%757 = torch.aten.item %755 : !torch.vtensor<[],f32> -> !torch.float | |
%758 = torch.aten.item %756 : !torch.vtensor<[],si8> -> !torch.int | |
%759 = torch.aten.quantize_per_tensor %754, %757, %758, %int12_3 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%760 = torch.aten.int_repr %759 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%761 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%762 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%763 = torch.aten.item %761 : !torch.vtensor<[],f32> -> !torch.float | |
%764 = torch.aten.item %762 : !torch.vtensor<[],si8> -> !torch.int | |
%765 = torch.aten._make_per_tensor_quantized_tensor %760, %763, %764 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%766 = torch.aten.dequantize.self %765 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%767 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%768 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%769 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_4 = torch.constant.int 12 | |
%770 = torch.aten.item %768 : !torch.vtensor<[],f32> -> !torch.float | |
%771 = torch.aten.item %769 : !torch.vtensor<[],si8> -> !torch.int | |
%772 = torch.aten.quantize_per_tensor %767, %770, %771, %int12_4 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%773 = torch.aten.int_repr %772 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%774 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%775 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%776 = torch.aten.item %774 : !torch.vtensor<[],f32> -> !torch.float | |
%777 = torch.aten.item %775 : !torch.vtensor<[],si8> -> !torch.int | |
%778 = torch.aten._make_per_tensor_quantized_tensor %773, %776, %777 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%779 = torch.aten.dequantize.self %778 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%780 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%781 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%782 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_5 = torch.constant.int 12 | |
%783 = torch.aten.item %781 : !torch.vtensor<[],f32> -> !torch.float | |
%784 = torch.aten.item %782 : !torch.vtensor<[],si8> -> !torch.int | |
%785 = torch.aten.quantize_per_tensor %780, %783, %784, %int12_5 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%786 = torch.aten.int_repr %785 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%787 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%788 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%789 = torch.aten.item %787 : !torch.vtensor<[],f32> -> !torch.float | |
%790 = torch.aten.item %788 : !torch.vtensor<[],si8> -> !torch.int | |
%791 = torch.aten._make_per_tensor_quantized_tensor %786, %789, %790 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%792 = torch.aten.dequantize.self %791 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%793 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%794 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%795 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_6 = torch.constant.int 12 | |
%796 = torch.aten.item %794 : !torch.vtensor<[],f32> -> !torch.float | |
%797 = torch.aten.item %795 : !torch.vtensor<[],si8> -> !torch.int | |
%798 = torch.aten.quantize_per_tensor %793, %796, %797, %int12_6 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%799 = torch.aten.int_repr %798 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%800 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%801 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%802 = torch.aten.item %800 : !torch.vtensor<[],f32> -> !torch.float | |
%803 = torch.aten.item %801 : !torch.vtensor<[],si8> -> !torch.int | |
%804 = torch.aten._make_per_tensor_quantized_tensor %799, %802, %803 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%805 = torch.aten.dequantize.self %804 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%806 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%807 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%808 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_7 = torch.constant.int 12 | |
%809 = torch.aten.item %807 : !torch.vtensor<[],f32> -> !torch.float | |
%810 = torch.aten.item %808 : !torch.vtensor<[],si8> -> !torch.int | |
%811 = torch.aten.quantize_per_tensor %806, %809, %810, %int12_7 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%812 = torch.aten.int_repr %811 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%813 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%814 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%815 = torch.aten.item %813 : !torch.vtensor<[],f32> -> !torch.float | |
%816 = torch.aten.item %814 : !torch.vtensor<[],si8> -> !torch.int | |
%817 = torch.aten._make_per_tensor_quantized_tensor %812, %815, %816 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%818 = torch.aten.dequantize.self %817 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%819 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%820 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%821 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_8 = torch.constant.int 12 | |
%822 = torch.aten.item %820 : !torch.vtensor<[],f32> -> !torch.float | |
%823 = torch.aten.item %821 : !torch.vtensor<[],si8> -> !torch.int | |
%824 = torch.aten.quantize_per_tensor %819, %822, %823, %int12_8 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%825 = torch.aten.int_repr %824 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%826 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%827 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%828 = torch.aten.item %826 : !torch.vtensor<[],f32> -> !torch.float | |
%829 = torch.aten.item %827 : !torch.vtensor<[],si8> -> !torch.int | |
%830 = torch.aten._make_per_tensor_quantized_tensor %825, %828, %829 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%831 = torch.aten.dequantize.self %830 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%832 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%833 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%834 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_9 = torch.constant.int 12 | |
%835 = torch.aten.item %833 : !torch.vtensor<[],f32> -> !torch.float | |
%836 = torch.aten.item %834 : !torch.vtensor<[],si8> -> !torch.int | |
%837 = torch.aten.quantize_per_tensor %832, %835, %836, %int12_9 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%838 = torch.aten.int_repr %837 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%839 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%840 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%841 = torch.aten.item %839 : !torch.vtensor<[],f32> -> !torch.float | |
%842 = torch.aten.item %840 : !torch.vtensor<[],si8> -> !torch.int | |
%843 = torch.aten._make_per_tensor_quantized_tensor %838, %841, %842 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%844 = torch.aten.dequantize.self %843 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%845 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%846 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%847 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_10 = torch.constant.int 12 | |
%848 = torch.aten.item %846 : !torch.vtensor<[],f32> -> !torch.float | |
%849 = torch.aten.item %847 : !torch.vtensor<[],si8> -> !torch.int | |
%850 = torch.aten.quantize_per_tensor %845, %848, %849, %int12_10 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%851 = torch.aten.int_repr %850 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%852 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%853 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%854 = torch.aten.item %852 : !torch.vtensor<[],f32> -> !torch.float | |
%855 = torch.aten.item %853 : !torch.vtensor<[],si8> -> !torch.int | |
%856 = torch.aten._make_per_tensor_quantized_tensor %851, %854, %855 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%857 = torch.aten.dequantize.self %856 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%858 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%859 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%860 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_11 = torch.constant.int 12 | |
%861 = torch.aten.item %859 : !torch.vtensor<[],f32> -> !torch.float | |
%862 = torch.aten.item %860 : !torch.vtensor<[],si8> -> !torch.int | |
%863 = torch.aten.quantize_per_tensor %858, %861, %862, %int12_11 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%864 = torch.aten.int_repr %863 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%865 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%866 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%867 = torch.aten.item %865 : !torch.vtensor<[],f32> -> !torch.float | |
%868 = torch.aten.item %866 : !torch.vtensor<[],si8> -> !torch.int | |
%869 = torch.aten._make_per_tensor_quantized_tensor %864, %867, %868 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%870 = torch.aten.dequantize.self %869 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%871 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%872 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%873 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_12 = torch.constant.int 12 | |
%874 = torch.aten.item %872 : !torch.vtensor<[],f32> -> !torch.float | |
%875 = torch.aten.item %873 : !torch.vtensor<[],si8> -> !torch.int | |
%876 = torch.aten.quantize_per_tensor %871, %874, %875, %int12_12 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%877 = torch.aten.int_repr %876 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%878 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%879 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%880 = torch.aten.item %878 : !torch.vtensor<[],f32> -> !torch.float | |
%881 = torch.aten.item %879 : !torch.vtensor<[],si8> -> !torch.int | |
%882 = torch.aten._make_per_tensor_quantized_tensor %877, %880, %881 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%883 = torch.aten.dequantize.self %882 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%884 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%885 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%886 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_13 = torch.constant.int 12 | |
%887 = torch.aten.item %885 : !torch.vtensor<[],f32> -> !torch.float | |
%888 = torch.aten.item %886 : !torch.vtensor<[],si8> -> !torch.int | |
%889 = torch.aten.quantize_per_tensor %884, %887, %888, %int12_13 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%890 = torch.aten.int_repr %889 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%891 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%892 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%893 = torch.aten.item %891 : !torch.vtensor<[],f32> -> !torch.float | |
%894 = torch.aten.item %892 : !torch.vtensor<[],si8> -> !torch.int | |
%895 = torch.aten._make_per_tensor_quantized_tensor %890, %893, %894 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%896 = torch.aten.dequantize.self %895 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%897 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%898 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%899 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_14 = torch.constant.int 12 | |
%900 = torch.aten.item %898 : !torch.vtensor<[],f32> -> !torch.float | |
%901 = torch.aten.item %899 : !torch.vtensor<[],si8> -> !torch.int | |
%902 = torch.aten.quantize_per_tensor %897, %900, %901, %int12_14 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%903 = torch.aten.int_repr %902 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%904 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%905 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%906 = torch.aten.item %904 : !torch.vtensor<[],f32> -> !torch.float | |
%907 = torch.aten.item %905 : !torch.vtensor<[],si8> -> !torch.int | |
%908 = torch.aten._make_per_tensor_quantized_tensor %903, %906, %907 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%909 = torch.aten.dequantize.self %908 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%910 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%911 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%912 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_15 = torch.constant.int 12 | |
%913 = torch.aten.item %911 : !torch.vtensor<[],f32> -> !torch.float | |
%914 = torch.aten.item %912 : !torch.vtensor<[],si8> -> !torch.int | |
%915 = torch.aten.quantize_per_tensor %910, %913, %914, %int12_15 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%916 = torch.aten.int_repr %915 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%917 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%918 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%919 = torch.aten.item %917 : !torch.vtensor<[],f32> -> !torch.float | |
%920 = torch.aten.item %918 : !torch.vtensor<[],si8> -> !torch.int | |
%921 = torch.aten._make_per_tensor_quantized_tensor %916, %919, %920 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%922 = torch.aten.dequantize.self %921 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%923 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%924 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%925 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_16 = torch.constant.int 12 | |
%926 = torch.aten.item %924 : !torch.vtensor<[],f32> -> !torch.float | |
%927 = torch.aten.item %925 : !torch.vtensor<[],si8> -> !torch.int | |
%928 = torch.aten.quantize_per_tensor %923, %926, %927, %int12_16 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%929 = torch.aten.int_repr %928 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%930 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%931 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%932 = torch.aten.item %930 : !torch.vtensor<[],f32> -> !torch.float | |
%933 = torch.aten.item %931 : !torch.vtensor<[],si8> -> !torch.int | |
%934 = torch.aten._make_per_tensor_quantized_tensor %929, %932, %933 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%935 = torch.aten.dequantize.self %934 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%936 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%937 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%938 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_17 = torch.constant.int 12 | |
%939 = torch.aten.item %937 : !torch.vtensor<[],f32> -> !torch.float | |
%940 = torch.aten.item %938 : !torch.vtensor<[],si8> -> !torch.int | |
%941 = torch.aten.quantize_per_tensor %936, %939, %940, %int12_17 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%942 = torch.aten.int_repr %941 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%943 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%944 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%945 = torch.aten.item %943 : !torch.vtensor<[],f32> -> !torch.float | |
%946 = torch.aten.item %944 : !torch.vtensor<[],si8> -> !torch.int | |
%947 = torch.aten._make_per_tensor_quantized_tensor %942, %945, %946 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%948 = torch.aten.dequantize.self %947 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%949 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%950 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%951 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_18 = torch.constant.int 12 | |
%952 = torch.aten.item %950 : !torch.vtensor<[],f32> -> !torch.float | |
%953 = torch.aten.item %951 : !torch.vtensor<[],si8> -> !torch.int | |
%954 = torch.aten.quantize_per_tensor %949, %952, %953, %int12_18 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%955 = torch.aten.int_repr %954 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%956 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%957 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%958 = torch.aten.item %956 : !torch.vtensor<[],f32> -> !torch.float | |
%959 = torch.aten.item %957 : !torch.vtensor<[],si8> -> !torch.int | |
%960 = torch.aten._make_per_tensor_quantized_tensor %955, %958, %959 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%961 = torch.aten.dequantize.self %960 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%962 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%963 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%964 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_19 = torch.constant.int 12 | |
%965 = torch.aten.item %963 : !torch.vtensor<[],f32> -> !torch.float | |
%966 = torch.aten.item %964 : !torch.vtensor<[],si8> -> !torch.int | |
%967 = torch.aten.quantize_per_tensor %962, %965, %966, %int12_19 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%968 = torch.aten.int_repr %967 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%969 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%970 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%971 = torch.aten.item %969 : !torch.vtensor<[],f32> -> !torch.float | |
%972 = torch.aten.item %970 : !torch.vtensor<[],si8> -> !torch.int | |
%973 = torch.aten._make_per_tensor_quantized_tensor %968, %971, %972 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%974 = torch.aten.dequantize.self %973 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%975 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%976 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%977 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_20 = torch.constant.int 12 | |
%978 = torch.aten.item %976 : !torch.vtensor<[],f32> -> !torch.float | |
%979 = torch.aten.item %977 : !torch.vtensor<[],si8> -> !torch.int | |
%980 = torch.aten.quantize_per_tensor %975, %978, %979, %int12_20 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%981 = torch.aten.int_repr %980 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%982 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%983 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%984 = torch.aten.item %982 : !torch.vtensor<[],f32> -> !torch.float | |
%985 = torch.aten.item %983 : !torch.vtensor<[],si8> -> !torch.int | |
%986 = torch.aten._make_per_tensor_quantized_tensor %981, %984, %985 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%987 = torch.aten.dequantize.self %986 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%988 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%989 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%990 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_21 = torch.constant.int 12 | |
%991 = torch.aten.item %989 : !torch.vtensor<[],f32> -> !torch.float | |
%992 = torch.aten.item %990 : !torch.vtensor<[],si8> -> !torch.int | |
%993 = torch.aten.quantize_per_tensor %988, %991, %992, %int12_21 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%994 = torch.aten.int_repr %993 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%995 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%996 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%997 = torch.aten.item %995 : !torch.vtensor<[],f32> -> !torch.float | |
%998 = torch.aten.item %996 : !torch.vtensor<[],si8> -> !torch.int | |
%999 = torch.aten._make_per_tensor_quantized_tensor %994, %997, %998 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1000 = torch.aten.dequantize.self %999 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1001 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1002 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1003 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_22 = torch.constant.int 12 | |
%1004 = torch.aten.item %1002 : !torch.vtensor<[],f32> -> !torch.float | |
%1005 = torch.aten.item %1003 : !torch.vtensor<[],si8> -> !torch.int | |
%1006 = torch.aten.quantize_per_tensor %1001, %1004, %1005, %int12_22 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1007 = torch.aten.int_repr %1006 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1008 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1009 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1010 = torch.aten.item %1008 : !torch.vtensor<[],f32> -> !torch.float | |
%1011 = torch.aten.item %1009 : !torch.vtensor<[],si8> -> !torch.int | |
%1012 = torch.aten._make_per_tensor_quantized_tensor %1007, %1010, %1011 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1013 = torch.aten.dequantize.self %1012 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1014 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1015 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1016 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_23 = torch.constant.int 12 | |
%1017 = torch.aten.item %1015 : !torch.vtensor<[],f32> -> !torch.float | |
%1018 = torch.aten.item %1016 : !torch.vtensor<[],si8> -> !torch.int | |
%1019 = torch.aten.quantize_per_tensor %1014, %1017, %1018, %int12_23 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1020 = torch.aten.int_repr %1019 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1021 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1022 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1023 = torch.aten.item %1021 : !torch.vtensor<[],f32> -> !torch.float | |
%1024 = torch.aten.item %1022 : !torch.vtensor<[],si8> -> !torch.int | |
%1025 = torch.aten._make_per_tensor_quantized_tensor %1020, %1023, %1024 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1026 = torch.aten.dequantize.self %1025 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1027 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1028 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1029 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_24 = torch.constant.int 12 | |
%1030 = torch.aten.item %1028 : !torch.vtensor<[],f32> -> !torch.float | |
%1031 = torch.aten.item %1029 : !torch.vtensor<[],si8> -> !torch.int | |
%1032 = torch.aten.quantize_per_tensor %1027, %1030, %1031, %int12_24 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1033 = torch.aten.int_repr %1032 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1034 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1035 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1036 = torch.aten.item %1034 : !torch.vtensor<[],f32> -> !torch.float | |
%1037 = torch.aten.item %1035 : !torch.vtensor<[],si8> -> !torch.int | |
%1038 = torch.aten._make_per_tensor_quantized_tensor %1033, %1036, %1037 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1039 = torch.aten.dequantize.self %1038 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1040 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1041 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1042 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_25 = torch.constant.int 12 | |
%1043 = torch.aten.item %1041 : !torch.vtensor<[],f32> -> !torch.float | |
%1044 = torch.aten.item %1042 : !torch.vtensor<[],si8> -> !torch.int | |
%1045 = torch.aten.quantize_per_tensor %1040, %1043, %1044, %int12_25 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1046 = torch.aten.int_repr %1045 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1047 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1048 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1049 = torch.aten.item %1047 : !torch.vtensor<[],f32> -> !torch.float | |
%1050 = torch.aten.item %1048 : !torch.vtensor<[],si8> -> !torch.int | |
%1051 = torch.aten._make_per_tensor_quantized_tensor %1046, %1049, %1050 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1052 = torch.aten.dequantize.self %1051 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1053 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1054 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1055 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_26 = torch.constant.int 12 | |
%1056 = torch.aten.item %1054 : !torch.vtensor<[],f32> -> !torch.float | |
%1057 = torch.aten.item %1055 : !torch.vtensor<[],si8> -> !torch.int | |
%1058 = torch.aten.quantize_per_tensor %1053, %1056, %1057, %int12_26 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1059 = torch.aten.int_repr %1058 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1060 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1061 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1062 = torch.aten.item %1060 : !torch.vtensor<[],f32> -> !torch.float | |
%1063 = torch.aten.item %1061 : !torch.vtensor<[],si8> -> !torch.int | |
%1064 = torch.aten._make_per_tensor_quantized_tensor %1059, %1062, %1063 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1065 = torch.aten.dequantize.self %1064 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1066 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1067 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1068 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_27 = torch.constant.int 12 | |
%1069 = torch.aten.item %1067 : !torch.vtensor<[],f32> -> !torch.float | |
%1070 = torch.aten.item %1068 : !torch.vtensor<[],si8> -> !torch.int | |
%1071 = torch.aten.quantize_per_tensor %1066, %1069, %1070, %int12_27 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1072 = torch.aten.int_repr %1071 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1073 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1074 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1075 = torch.aten.item %1073 : !torch.vtensor<[],f32> -> !torch.float | |
%1076 = torch.aten.item %1074 : !torch.vtensor<[],si8> -> !torch.int | |
%1077 = torch.aten._make_per_tensor_quantized_tensor %1072, %1075, %1076 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1078 = torch.aten.dequantize.self %1077 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1079 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1080 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1081 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_28 = torch.constant.int 12 | |
%1082 = torch.aten.item %1080 : !torch.vtensor<[],f32> -> !torch.float | |
%1083 = torch.aten.item %1081 : !torch.vtensor<[],si8> -> !torch.int | |
%1084 = torch.aten.quantize_per_tensor %1079, %1082, %1083, %int12_28 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1085 = torch.aten.int_repr %1084 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1086 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1087 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1088 = torch.aten.item %1086 : !torch.vtensor<[],f32> -> !torch.float | |
%1089 = torch.aten.item %1087 : !torch.vtensor<[],si8> -> !torch.int | |
%1090 = torch.aten._make_per_tensor_quantized_tensor %1085, %1088, %1089 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1091 = torch.aten.dequantize.self %1090 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1092 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1093 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1094 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_29 = torch.constant.int 12 | |
%1095 = torch.aten.item %1093 : !torch.vtensor<[],f32> -> !torch.float | |
%1096 = torch.aten.item %1094 : !torch.vtensor<[],si8> -> !torch.int | |
%1097 = torch.aten.quantize_per_tensor %1092, %1095, %1096, %int12_29 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1098 = torch.aten.int_repr %1097 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1099 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1100 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1101 = torch.aten.item %1099 : !torch.vtensor<[],f32> -> !torch.float | |
%1102 = torch.aten.item %1100 : !torch.vtensor<[],si8> -> !torch.int | |
%1103 = torch.aten._make_per_tensor_quantized_tensor %1098, %1101, %1102 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1104 = torch.aten.dequantize.self %1103 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1105 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1106 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1107 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_30 = torch.constant.int 12 | |
%1108 = torch.aten.item %1106 : !torch.vtensor<[],f32> -> !torch.float | |
%1109 = torch.aten.item %1107 : !torch.vtensor<[],si8> -> !torch.int | |
%1110 = torch.aten.quantize_per_tensor %1105, %1108, %1109, %int12_30 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1111 = torch.aten.int_repr %1110 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1112 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1113 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1114 = torch.aten.item %1112 : !torch.vtensor<[],f32> -> !torch.float | |
%1115 = torch.aten.item %1113 : !torch.vtensor<[],si8> -> !torch.int | |
%1116 = torch.aten._make_per_tensor_quantized_tensor %1111, %1114, %1115 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1117 = torch.aten.dequantize.self %1116 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1118 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1119 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1120 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_31 = torch.constant.int 12 | |
%1121 = torch.aten.item %1119 : !torch.vtensor<[],f32> -> !torch.float | |
%1122 = torch.aten.item %1120 : !torch.vtensor<[],si8> -> !torch.int | |
%1123 = torch.aten.quantize_per_tensor %1118, %1121, %1122, %int12_31 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1124 = torch.aten.int_repr %1123 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1125 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1126 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1127 = torch.aten.item %1125 : !torch.vtensor<[],f32> -> !torch.float | |
%1128 = torch.aten.item %1126 : !torch.vtensor<[],si8> -> !torch.int | |
%1129 = torch.aten._make_per_tensor_quantized_tensor %1124, %1127, %1128 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1130 = torch.aten.dequantize.self %1129 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1131 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1132 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1133 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_32 = torch.constant.int 12 | |
%1134 = torch.aten.item %1132 : !torch.vtensor<[],f32> -> !torch.float | |
%1135 = torch.aten.item %1133 : !torch.vtensor<[],si8> -> !torch.int | |
%1136 = torch.aten.quantize_per_tensor %1131, %1134, %1135, %int12_32 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1137 = torch.aten.int_repr %1136 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1138 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1139 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1140 = torch.aten.item %1138 : !torch.vtensor<[],f32> -> !torch.float | |
%1141 = torch.aten.item %1139 : !torch.vtensor<[],si8> -> !torch.int | |
%1142 = torch.aten._make_per_tensor_quantized_tensor %1137, %1140, %1141 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1143 = torch.aten.dequantize.self %1142 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1144 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1145 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1146 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_33 = torch.constant.int 12 | |
%1147 = torch.aten.item %1145 : !torch.vtensor<[],f32> -> !torch.float | |
%1148 = torch.aten.item %1146 : !torch.vtensor<[],si8> -> !torch.int | |
%1149 = torch.aten.quantize_per_tensor %1144, %1147, %1148, %int12_33 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1150 = torch.aten.int_repr %1149 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1151 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1153 = torch.aten.item %1151 : !torch.vtensor<[],f32> -> !torch.float | |
%1154 = torch.aten.item %1152 : !torch.vtensor<[],si8> -> !torch.int | |
%1155 = torch.aten._make_per_tensor_quantized_tensor %1150, %1153, %1154 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1156 = torch.aten.dequantize.self %1155 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1157 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1158 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1159 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_34 = torch.constant.int 12 | |
%1160 = torch.aten.item %1158 : !torch.vtensor<[],f32> -> !torch.float | |
%1161 = torch.aten.item %1159 : !torch.vtensor<[],si8> -> !torch.int | |
%1162 = torch.aten.quantize_per_tensor %1157, %1160, %1161, %int12_34 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1163 = torch.aten.int_repr %1162 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1164 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1165 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1166 = torch.aten.item %1164 : !torch.vtensor<[],f32> -> !torch.float | |
%1167 = torch.aten.item %1165 : !torch.vtensor<[],si8> -> !torch.int | |
%1168 = torch.aten._make_per_tensor_quantized_tensor %1163, %1166, %1167 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1169 = torch.aten.dequantize.self %1168 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1170 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1171 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1172 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_35 = torch.constant.int 12 | |
%1173 = torch.aten.item %1171 : !torch.vtensor<[],f32> -> !torch.float | |
%1174 = torch.aten.item %1172 : !torch.vtensor<[],si8> -> !torch.int | |
%1175 = torch.aten.quantize_per_tensor %1170, %1173, %1174, %int12_35 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1176 = torch.aten.int_repr %1175 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1177 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1178 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1179 = torch.aten.item %1177 : !torch.vtensor<[],f32> -> !torch.float | |
%1180 = torch.aten.item %1178 : !torch.vtensor<[],si8> -> !torch.int | |
%1181 = torch.aten._make_per_tensor_quantized_tensor %1176, %1179, %1180 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1182 = torch.aten.dequantize.self %1181 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1183 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1184 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1185 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_36 = torch.constant.int 12 | |
%1186 = torch.aten.item %1184 : !torch.vtensor<[],f32> -> !torch.float | |
%1187 = torch.aten.item %1185 : !torch.vtensor<[],si8> -> !torch.int | |
%1188 = torch.aten.quantize_per_tensor %1183, %1186, %1187, %int12_36 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1189 = torch.aten.int_repr %1188 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1190 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1191 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1192 = torch.aten.item %1190 : !torch.vtensor<[],f32> -> !torch.float | |
%1193 = torch.aten.item %1191 : !torch.vtensor<[],si8> -> !torch.int | |
%1194 = torch.aten._make_per_tensor_quantized_tensor %1189, %1192, %1193 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1195 = torch.aten.dequantize.self %1194 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1196 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1197 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1198 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_37 = torch.constant.int 12 | |
%1199 = torch.aten.item %1197 : !torch.vtensor<[],f32> -> !torch.float | |
%1200 = torch.aten.item %1198 : !torch.vtensor<[],si8> -> !torch.int | |
%1201 = torch.aten.quantize_per_tensor %1196, %1199, %1200, %int12_37 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1202 = torch.aten.int_repr %1201 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1203 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1204 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1205 = torch.aten.item %1203 : !torch.vtensor<[],f32> -> !torch.float | |
%1206 = torch.aten.item %1204 : !torch.vtensor<[],si8> -> !torch.int | |
%1207 = torch.aten._make_per_tensor_quantized_tensor %1202, %1205, %1206 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1208 = torch.aten.dequantize.self %1207 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1209 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1210 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1211 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_38 = torch.constant.int 12 | |
%1212 = torch.aten.item %1210 : !torch.vtensor<[],f32> -> !torch.float | |
%1213 = torch.aten.item %1211 : !torch.vtensor<[],si8> -> !torch.int | |
%1214 = torch.aten.quantize_per_tensor %1209, %1212, %1213, %int12_38 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1215 = torch.aten.int_repr %1214 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1216 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1217 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1218 = torch.aten.item %1216 : !torch.vtensor<[],f32> -> !torch.float | |
%1219 = torch.aten.item %1217 : !torch.vtensor<[],si8> -> !torch.int | |
%1220 = torch.aten._make_per_tensor_quantized_tensor %1215, %1218, %1219 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1221 = torch.aten.dequantize.self %1220 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1222 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1223 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1224 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_39 = torch.constant.int 12 | |
%1225 = torch.aten.item %1223 : !torch.vtensor<[],f32> -> !torch.float | |
%1226 = torch.aten.item %1224 : !torch.vtensor<[],si8> -> !torch.int | |
%1227 = torch.aten.quantize_per_tensor %1222, %1225, %1226, %int12_39 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1228 = torch.aten.int_repr %1227 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1229 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1230 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1231 = torch.aten.item %1229 : !torch.vtensor<[],f32> -> !torch.float | |
%1232 = torch.aten.item %1230 : !torch.vtensor<[],si8> -> !torch.int | |
%1233 = torch.aten._make_per_tensor_quantized_tensor %1228, %1231, %1232 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1234 = torch.aten.dequantize.self %1233 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1235 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1236 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1237 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_40 = torch.constant.int 12 | |
%1238 = torch.aten.item %1236 : !torch.vtensor<[],f32> -> !torch.float | |
%1239 = torch.aten.item %1237 : !torch.vtensor<[],si8> -> !torch.int | |
%1240 = torch.aten.quantize_per_tensor %1235, %1238, %1239, %int12_40 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1241 = torch.aten.int_repr %1240 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1242 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1243 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1244 = torch.aten.item %1242 : !torch.vtensor<[],f32> -> !torch.float | |
%1245 = torch.aten.item %1243 : !torch.vtensor<[],si8> -> !torch.int | |
%1246 = torch.aten._make_per_tensor_quantized_tensor %1241, %1244, %1245 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1247 = torch.aten.dequantize.self %1246 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1248 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1249 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1250 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_41 = torch.constant.int 12 | |
%1251 = torch.aten.item %1249 : !torch.vtensor<[],f32> -> !torch.float | |
%1252 = torch.aten.item %1250 : !torch.vtensor<[],si8> -> !torch.int | |
%1253 = torch.aten.quantize_per_tensor %1248, %1251, %1252, %int12_41 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1254 = torch.aten.int_repr %1253 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1255 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1256 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1257 = torch.aten.item %1255 : !torch.vtensor<[],f32> -> !torch.float | |
%1258 = torch.aten.item %1256 : !torch.vtensor<[],si8> -> !torch.int | |
%1259 = torch.aten._make_per_tensor_quantized_tensor %1254, %1257, %1258 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1260 = torch.aten.dequantize.self %1259 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1261 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1262 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1263 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_42 = torch.constant.int 12 | |
%1264 = torch.aten.item %1262 : !torch.vtensor<[],f32> -> !torch.float | |
%1265 = torch.aten.item %1263 : !torch.vtensor<[],si8> -> !torch.int | |
%1266 = torch.aten.quantize_per_tensor %1261, %1264, %1265, %int12_42 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1267 = torch.aten.int_repr %1266 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1268 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1269 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1270 = torch.aten.item %1268 : !torch.vtensor<[],f32> -> !torch.float | |
%1271 = torch.aten.item %1269 : !torch.vtensor<[],si8> -> !torch.int | |
%1272 = torch.aten._make_per_tensor_quantized_tensor %1267, %1270, %1271 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1273 = torch.aten.dequantize.self %1272 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1274 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1275 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1276 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_43 = torch.constant.int 12 | |
%1277 = torch.aten.item %1275 : !torch.vtensor<[],f32> -> !torch.float | |
%1278 = torch.aten.item %1276 : !torch.vtensor<[],si8> -> !torch.int | |
%1279 = torch.aten.quantize_per_tensor %1274, %1277, %1278, %int12_43 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1280 = torch.aten.int_repr %1279 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1281 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1282 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1283 = torch.aten.item %1281 : !torch.vtensor<[],f32> -> !torch.float | |
%1284 = torch.aten.item %1282 : !torch.vtensor<[],si8> -> !torch.int | |
%1285 = torch.aten._make_per_tensor_quantized_tensor %1280, %1283, %1284 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1286 = torch.aten.dequantize.self %1285 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1287 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1288 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1289 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_44 = torch.constant.int 12 | |
%1290 = torch.aten.item %1288 : !torch.vtensor<[],f32> -> !torch.float | |
%1291 = torch.aten.item %1289 : !torch.vtensor<[],si8> -> !torch.int | |
%1292 = torch.aten.quantize_per_tensor %1287, %1290, %1291, %int12_44 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1293 = torch.aten.int_repr %1292 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1294 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1295 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1296 = torch.aten.item %1294 : !torch.vtensor<[],f32> -> !torch.float | |
%1297 = torch.aten.item %1295 : !torch.vtensor<[],si8> -> !torch.int | |
%1298 = torch.aten._make_per_tensor_quantized_tensor %1293, %1296, %1297 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1299 = torch.aten.dequantize.self %1298 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1300 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1301 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1302 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_45 = torch.constant.int 12 | |
%1303 = torch.aten.item %1301 : !torch.vtensor<[],f32> -> !torch.float | |
%1304 = torch.aten.item %1302 : !torch.vtensor<[],si8> -> !torch.int | |
%1305 = torch.aten.quantize_per_tensor %1300, %1303, %1304, %int12_45 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1306 = torch.aten.int_repr %1305 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1307 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1308 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1309 = torch.aten.item %1307 : !torch.vtensor<[],f32> -> !torch.float | |
%1310 = torch.aten.item %1308 : !torch.vtensor<[],si8> -> !torch.int | |
%1311 = torch.aten._make_per_tensor_quantized_tensor %1306, %1309, %1310 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1312 = torch.aten.dequantize.self %1311 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1313 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1314 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1315 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_46 = torch.constant.int 12 | |
%1316 = torch.aten.item %1314 : !torch.vtensor<[],f32> -> !torch.float | |
%1317 = torch.aten.item %1315 : !torch.vtensor<[],si8> -> !torch.int | |
%1318 = torch.aten.quantize_per_tensor %1313, %1316, %1317, %int12_46 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1319 = torch.aten.int_repr %1318 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1320 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1321 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1322 = torch.aten.item %1320 : !torch.vtensor<[],f32> -> !torch.float | |
%1323 = torch.aten.item %1321 : !torch.vtensor<[],si8> -> !torch.int | |
%1324 = torch.aten._make_per_tensor_quantized_tensor %1319, %1322, %1323 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1325 = torch.aten.dequantize.self %1324 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1326 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1327 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1328 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_47 = torch.constant.int 12 | |
%1329 = torch.aten.item %1327 : !torch.vtensor<[],f32> -> !torch.float | |
%1330 = torch.aten.item %1328 : !torch.vtensor<[],si8> -> !torch.int | |
%1331 = torch.aten.quantize_per_tensor %1326, %1329, %1330, %int12_47 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1332 = torch.aten.int_repr %1331 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1333 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1334 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1335 = torch.aten.item %1333 : !torch.vtensor<[],f32> -> !torch.float | |
%1336 = torch.aten.item %1334 : !torch.vtensor<[],si8> -> !torch.int | |
%1337 = torch.aten._make_per_tensor_quantized_tensor %1332, %1335, %1336 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1338 = torch.aten.dequantize.self %1337 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1339 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1340 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1341 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_48 = torch.constant.int 12 | |
%1342 = torch.aten.item %1340 : !torch.vtensor<[],f32> -> !torch.float | |
%1343 = torch.aten.item %1341 : !torch.vtensor<[],si8> -> !torch.int | |
%1344 = torch.aten.quantize_per_tensor %1339, %1342, %1343, %int12_48 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1345 = torch.aten.int_repr %1344 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1346 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1347 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1348 = torch.aten.item %1346 : !torch.vtensor<[],f32> -> !torch.float | |
%1349 = torch.aten.item %1347 : !torch.vtensor<[],si8> -> !torch.int | |
%1350 = torch.aten._make_per_tensor_quantized_tensor %1345, %1348, %1349 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1351 = torch.aten.dequantize.self %1350 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1352 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1353 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1354 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_49 = torch.constant.int 12 | |
%1355 = torch.aten.item %1353 : !torch.vtensor<[],f32> -> !torch.float | |
%1356 = torch.aten.item %1354 : !torch.vtensor<[],si8> -> !torch.int | |
%1357 = torch.aten.quantize_per_tensor %1352, %1355, %1356, %int12_49 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1358 = torch.aten.int_repr %1357 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1359 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1360 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1361 = torch.aten.item %1359 : !torch.vtensor<[],f32> -> !torch.float | |
%1362 = torch.aten.item %1360 : !torch.vtensor<[],si8> -> !torch.int | |
%1363 = torch.aten._make_per_tensor_quantized_tensor %1358, %1361, %1362 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1364 = torch.aten.dequantize.self %1363 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1365 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1366 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1367 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_50 = torch.constant.int 12 | |
%1368 = torch.aten.item %1366 : !torch.vtensor<[],f32> -> !torch.float | |
%1369 = torch.aten.item %1367 : !torch.vtensor<[],si8> -> !torch.int | |
%1370 = torch.aten.quantize_per_tensor %1365, %1368, %1369, %int12_50 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1371 = torch.aten.int_repr %1370 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1372 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1373 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1374 = torch.aten.item %1372 : !torch.vtensor<[],f32> -> !torch.float | |
%1375 = torch.aten.item %1373 : !torch.vtensor<[],si8> -> !torch.int | |
%1376 = torch.aten._make_per_tensor_quantized_tensor %1371, %1374, %1375 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1377 = torch.aten.dequantize.self %1376 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1378 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1379 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1380 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_51 = torch.constant.int 12 | |
%1381 = torch.aten.item %1379 : !torch.vtensor<[],f32> -> !torch.float | |
%1382 = torch.aten.item %1380 : !torch.vtensor<[],si8> -> !torch.int | |
%1383 = torch.aten.quantize_per_tensor %1378, %1381, %1382, %int12_51 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1384 = torch.aten.int_repr %1383 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1385 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1386 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1387 = torch.aten.item %1385 : !torch.vtensor<[],f32> -> !torch.float | |
%1388 = torch.aten.item %1386 : !torch.vtensor<[],si8> -> !torch.int | |
%1389 = torch.aten._make_per_tensor_quantized_tensor %1384, %1387, %1388 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1390 = torch.aten.dequantize.self %1389 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1391 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1392 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1393 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_52 = torch.constant.int 12 | |
%1394 = torch.aten.item %1392 : !torch.vtensor<[],f32> -> !torch.float | |
%1395 = torch.aten.item %1393 : !torch.vtensor<[],si8> -> !torch.int | |
%1396 = torch.aten.quantize_per_tensor %1391, %1394, %1395, %int12_52 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1397 = torch.aten.int_repr %1396 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1398 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1399 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1400 = torch.aten.item %1398 : !torch.vtensor<[],f32> -> !torch.float | |
%1401 = torch.aten.item %1399 : !torch.vtensor<[],si8> -> !torch.int | |
%1402 = torch.aten._make_per_tensor_quantized_tensor %1397, %1400, %1401 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1403 = torch.aten.dequantize.self %1402 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1404 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1405 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1406 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_53 = torch.constant.int 12 | |
%1407 = torch.aten.item %1405 : !torch.vtensor<[],f32> -> !torch.float | |
%1408 = torch.aten.item %1406 : !torch.vtensor<[],si8> -> !torch.int | |
%1409 = torch.aten.quantize_per_tensor %1404, %1407, %1408, %int12_53 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1410 = torch.aten.int_repr %1409 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1411 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1412 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1413 = torch.aten.item %1411 : !torch.vtensor<[],f32> -> !torch.float | |
%1414 = torch.aten.item %1412 : !torch.vtensor<[],si8> -> !torch.int | |
%1415 = torch.aten._make_per_tensor_quantized_tensor %1410, %1413, %1414 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1416 = torch.aten.dequantize.self %1415 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1417 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1418 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1419 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_54 = torch.constant.int 12 | |
%1420 = torch.aten.item %1418 : !torch.vtensor<[],f32> -> !torch.float | |
%1421 = torch.aten.item %1419 : !torch.vtensor<[],si8> -> !torch.int | |
%1422 = torch.aten.quantize_per_tensor %1417, %1420, %1421, %int12_54 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1423 = torch.aten.int_repr %1422 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1424 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1425 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1426 = torch.aten.item %1424 : !torch.vtensor<[],f32> -> !torch.float | |
%1427 = torch.aten.item %1425 : !torch.vtensor<[],si8> -> !torch.int | |
%1428 = torch.aten._make_per_tensor_quantized_tensor %1423, %1426, %1427 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1429 = torch.aten.dequantize.self %1428 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1430 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1431 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1432 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_55 = torch.constant.int 12 | |
%1433 = torch.aten.item %1431 : !torch.vtensor<[],f32> -> !torch.float | |
%1434 = torch.aten.item %1432 : !torch.vtensor<[],si8> -> !torch.int | |
%1435 = torch.aten.quantize_per_tensor %1430, %1433, %1434, %int12_55 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1436 = torch.aten.int_repr %1435 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1437 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1438 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1439 = torch.aten.item %1437 : !torch.vtensor<[],f32> -> !torch.float | |
%1440 = torch.aten.item %1438 : !torch.vtensor<[],si8> -> !torch.int | |
%1441 = torch.aten._make_per_tensor_quantized_tensor %1436, %1439, %1440 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1442 = torch.aten.dequantize.self %1441 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1443 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1444 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1445 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_56 = torch.constant.int 12 | |
%1446 = torch.aten.item %1444 : !torch.vtensor<[],f32> -> !torch.float | |
%1447 = torch.aten.item %1445 : !torch.vtensor<[],si8> -> !torch.int | |
%1448 = torch.aten.quantize_per_tensor %1443, %1446, %1447, %int12_56 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1449 = torch.aten.int_repr %1448 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1450 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1451 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1452 = torch.aten.item %1450 : !torch.vtensor<[],f32> -> !torch.float | |
%1453 = torch.aten.item %1451 : !torch.vtensor<[],si8> -> !torch.int | |
%1454 = torch.aten._make_per_tensor_quantized_tensor %1449, %1452, %1453 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1455 = torch.aten.dequantize.self %1454 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1456 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1457 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1458 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_57 = torch.constant.int 12 | |
%1459 = torch.aten.item %1457 : !torch.vtensor<[],f32> -> !torch.float | |
%1460 = torch.aten.item %1458 : !torch.vtensor<[],si8> -> !torch.int | |
%1461 = torch.aten.quantize_per_tensor %1456, %1459, %1460, %int12_57 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1462 = torch.aten.int_repr %1461 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1463 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1464 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1465 = torch.aten.item %1463 : !torch.vtensor<[],f32> -> !torch.float | |
%1466 = torch.aten.item %1464 : !torch.vtensor<[],si8> -> !torch.int | |
%1467 = torch.aten._make_per_tensor_quantized_tensor %1462, %1465, %1466 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1468 = torch.aten.dequantize.self %1467 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1469 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1470 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1471 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_58 = torch.constant.int 12 | |
%1472 = torch.aten.item %1470 : !torch.vtensor<[],f32> -> !torch.float | |
%1473 = torch.aten.item %1471 : !torch.vtensor<[],si8> -> !torch.int | |
%1474 = torch.aten.quantize_per_tensor %1469, %1472, %1473, %int12_58 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1475 = torch.aten.int_repr %1474 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1476 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1477 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1478 = torch.aten.item %1476 : !torch.vtensor<[],f32> -> !torch.float | |
%1479 = torch.aten.item %1477 : !torch.vtensor<[],si8> -> !torch.int | |
%1480 = torch.aten._make_per_tensor_quantized_tensor %1475, %1478, %1479 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1481 = torch.aten.dequantize.self %1480 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1482 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1483 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1484 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_59 = torch.constant.int 12 | |
%1485 = torch.aten.item %1483 : !torch.vtensor<[],f32> -> !torch.float | |
%1486 = torch.aten.item %1484 : !torch.vtensor<[],si8> -> !torch.int | |
%1487 = torch.aten.quantize_per_tensor %1482, %1485, %1486, %int12_59 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1488 = torch.aten.int_repr %1487 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1489 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1490 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1491 = torch.aten.item %1489 : !torch.vtensor<[],f32> -> !torch.float | |
%1492 = torch.aten.item %1490 : !torch.vtensor<[],si8> -> !torch.int | |
%1493 = torch.aten._make_per_tensor_quantized_tensor %1488, %1491, %1492 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1494 = torch.aten.dequantize.self %1493 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1495 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1496 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1497 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_60 = torch.constant.int 12 | |
%1498 = torch.aten.item %1496 : !torch.vtensor<[],f32> -> !torch.float | |
%1499 = torch.aten.item %1497 : !torch.vtensor<[],si8> -> !torch.int | |
%1500 = torch.aten.quantize_per_tensor %1495, %1498, %1499, %int12_60 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1501 = torch.aten.int_repr %1500 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1502 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1503 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1504 = torch.aten.item %1502 : !torch.vtensor<[],f32> -> !torch.float | |
%1505 = torch.aten.item %1503 : !torch.vtensor<[],si8> -> !torch.int | |
%1506 = torch.aten._make_per_tensor_quantized_tensor %1501, %1504, %1505 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1507 = torch.aten.dequantize.self %1506 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1508 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1509 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1510 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_61 = torch.constant.int 12 | |
%1511 = torch.aten.item %1509 : !torch.vtensor<[],f32> -> !torch.float | |
%1512 = torch.aten.item %1510 : !torch.vtensor<[],si8> -> !torch.int | |
%1513 = torch.aten.quantize_per_tensor %1508, %1511, %1512, %int12_61 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1514 = torch.aten.int_repr %1513 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1515 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1516 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1517 = torch.aten.item %1515 : !torch.vtensor<[],f32> -> !torch.float | |
%1518 = torch.aten.item %1516 : !torch.vtensor<[],si8> -> !torch.int | |
%1519 = torch.aten._make_per_tensor_quantized_tensor %1514, %1517, %1518 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1520 = torch.aten.dequantize.self %1519 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1521 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1522 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1523 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_62 = torch.constant.int 12 | |
%1524 = torch.aten.item %1522 : !torch.vtensor<[],f32> -> !torch.float | |
%1525 = torch.aten.item %1523 : !torch.vtensor<[],si8> -> !torch.int | |
%1526 = torch.aten.quantize_per_tensor %1521, %1524, %1525, %int12_62 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1527 = torch.aten.int_repr %1526 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1528 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1529 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1530 = torch.aten.item %1528 : !torch.vtensor<[],f32> -> !torch.float | |
%1531 = torch.aten.item %1529 : !torch.vtensor<[],si8> -> !torch.int | |
%1532 = torch.aten._make_per_tensor_quantized_tensor %1527, %1530, %1531 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1533 = torch.aten.dequantize.self %1532 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1534 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1535 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1536 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_63 = torch.constant.int 12 | |
%1537 = torch.aten.item %1535 : !torch.vtensor<[],f32> -> !torch.float | |
%1538 = torch.aten.item %1536 : !torch.vtensor<[],si8> -> !torch.int | |
%1539 = torch.aten.quantize_per_tensor %1534, %1537, %1538, %int12_63 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1540 = torch.aten.int_repr %1539 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1541 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1542 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1543 = torch.aten.item %1541 : !torch.vtensor<[],f32> -> !torch.float | |
%1544 = torch.aten.item %1542 : !torch.vtensor<[],si8> -> !torch.int | |
%1545 = torch.aten._make_per_tensor_quantized_tensor %1540, %1543, %1544 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1546 = torch.aten.dequantize.self %1545 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1547 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1548 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1549 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_64 = torch.constant.int 12 | |
%1550 = torch.aten.item %1548 : !torch.vtensor<[],f32> -> !torch.float | |
%1551 = torch.aten.item %1549 : !torch.vtensor<[],si8> -> !torch.int | |
%1552 = torch.aten.quantize_per_tensor %1547, %1550, %1551, %int12_64 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1553 = torch.aten.int_repr %1552 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1554 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1555 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1556 = torch.aten.item %1554 : !torch.vtensor<[],f32> -> !torch.float | |
%1557 = torch.aten.item %1555 : !torch.vtensor<[],si8> -> !torch.int | |
%1558 = torch.aten._make_per_tensor_quantized_tensor %1553, %1556, %1557 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1559 = torch.aten.dequantize.self %1558 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1560 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1561 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1562 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_65 = torch.constant.int 12 | |
%1563 = torch.aten.item %1561 : !torch.vtensor<[],f32> -> !torch.float | |
%1564 = torch.aten.item %1562 : !torch.vtensor<[],si8> -> !torch.int | |
%1565 = torch.aten.quantize_per_tensor %1560, %1563, %1564, %int12_65 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1566 = torch.aten.int_repr %1565 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1567 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1568 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1569 = torch.aten.item %1567 : !torch.vtensor<[],f32> -> !torch.float | |
%1570 = torch.aten.item %1568 : !torch.vtensor<[],si8> -> !torch.int | |
%1571 = torch.aten._make_per_tensor_quantized_tensor %1566, %1569, %1570 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1572 = torch.aten.dequantize.self %1571 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1573 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1574 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1575 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_66 = torch.constant.int 12 | |
%1576 = torch.aten.item %1574 : !torch.vtensor<[],f32> -> !torch.float | |
%1577 = torch.aten.item %1575 : !torch.vtensor<[],si8> -> !torch.int | |
%1578 = torch.aten.quantize_per_tensor %1573, %1576, %1577, %int12_66 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1579 = torch.aten.int_repr %1578 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1580 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1581 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1582 = torch.aten.item %1580 : !torch.vtensor<[],f32> -> !torch.float | |
%1583 = torch.aten.item %1581 : !torch.vtensor<[],si8> -> !torch.int | |
%1584 = torch.aten._make_per_tensor_quantized_tensor %1579, %1582, %1583 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1585 = torch.aten.dequantize.self %1584 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1586 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1587 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1588 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_67 = torch.constant.int 12 | |
%1589 = torch.aten.item %1587 : !torch.vtensor<[],f32> -> !torch.float | |
%1590 = torch.aten.item %1588 : !torch.vtensor<[],si8> -> !torch.int | |
%1591 = torch.aten.quantize_per_tensor %1586, %1589, %1590, %int12_67 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1592 = torch.aten.int_repr %1591 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1593 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1594 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1595 = torch.aten.item %1593 : !torch.vtensor<[],f32> -> !torch.float | |
%1596 = torch.aten.item %1594 : !torch.vtensor<[],si8> -> !torch.int | |
%1597 = torch.aten._make_per_tensor_quantized_tensor %1592, %1595, %1596 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1598 = torch.aten.dequantize.self %1597 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1599 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1600 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1601 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_68 = torch.constant.int 12 | |
%1602 = torch.aten.item %1600 : !torch.vtensor<[],f32> -> !torch.float | |
%1603 = torch.aten.item %1601 : !torch.vtensor<[],si8> -> !torch.int | |
%1604 = torch.aten.quantize_per_tensor %1599, %1602, %1603, %int12_68 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1605 = torch.aten.int_repr %1604 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1606 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1607 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1608 = torch.aten.item %1606 : !torch.vtensor<[],f32> -> !torch.float | |
%1609 = torch.aten.item %1607 : !torch.vtensor<[],si8> -> !torch.int | |
%1610 = torch.aten._make_per_tensor_quantized_tensor %1605, %1608, %1609 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1611 = torch.aten.dequantize.self %1610 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1612 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1613 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1614 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_69 = torch.constant.int 12 | |
%1615 = torch.aten.item %1613 : !torch.vtensor<[],f32> -> !torch.float | |
%1616 = torch.aten.item %1614 : !torch.vtensor<[],si8> -> !torch.int | |
%1617 = torch.aten.quantize_per_tensor %1612, %1615, %1616, %int12_69 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1618 = torch.aten.int_repr %1617 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1619 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1620 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1621 = torch.aten.item %1619 : !torch.vtensor<[],f32> -> !torch.float | |
%1622 = torch.aten.item %1620 : !torch.vtensor<[],si8> -> !torch.int | |
%1623 = torch.aten._make_per_tensor_quantized_tensor %1618, %1621, %1622 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1624 = torch.aten.dequantize.self %1623 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1625 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1626 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1627 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_70 = torch.constant.int 12 | |
%1628 = torch.aten.item %1626 : !torch.vtensor<[],f32> -> !torch.float | |
%1629 = torch.aten.item %1627 : !torch.vtensor<[],si8> -> !torch.int | |
%1630 = torch.aten.quantize_per_tensor %1625, %1628, %1629, %int12_70 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1631 = torch.aten.int_repr %1630 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1632 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1633 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1634 = torch.aten.item %1632 : !torch.vtensor<[],f32> -> !torch.float | |
%1635 = torch.aten.item %1633 : !torch.vtensor<[],si8> -> !torch.int | |
%1636 = torch.aten._make_per_tensor_quantized_tensor %1631, %1634, %1635 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1637 = torch.aten.dequantize.self %1636 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1638 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1639 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1640 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_71 = torch.constant.int 12 | |
%1641 = torch.aten.item %1639 : !torch.vtensor<[],f32> -> !torch.float | |
%1642 = torch.aten.item %1640 : !torch.vtensor<[],si8> -> !torch.int | |
%1643 = torch.aten.quantize_per_tensor %1638, %1641, %1642, %int12_71 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1644 = torch.aten.int_repr %1643 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1645 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1646 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1647 = torch.aten.item %1645 : !torch.vtensor<[],f32> -> !torch.float | |
%1648 = torch.aten.item %1646 : !torch.vtensor<[],si8> -> !torch.int | |
%1649 = torch.aten._make_per_tensor_quantized_tensor %1644, %1647, %1648 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1650 = torch.aten.dequantize.self %1649 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1651 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1652 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1653 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_72 = torch.constant.int 12 | |
%1654 = torch.aten.item %1652 : !torch.vtensor<[],f32> -> !torch.float | |
%1655 = torch.aten.item %1653 : !torch.vtensor<[],si8> -> !torch.int | |
%1656 = torch.aten.quantize_per_tensor %1651, %1654, %1655, %int12_72 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1657 = torch.aten.int_repr %1656 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1658 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1659 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1660 = torch.aten.item %1658 : !torch.vtensor<[],f32> -> !torch.float | |
%1661 = torch.aten.item %1659 : !torch.vtensor<[],si8> -> !torch.int | |
%1662 = torch.aten._make_per_tensor_quantized_tensor %1657, %1660, %1661 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1663 = torch.aten.dequantize.self %1662 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1664 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1665 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1666 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_73 = torch.constant.int 12 | |
%1667 = torch.aten.item %1665 : !torch.vtensor<[],f32> -> !torch.float | |
%1668 = torch.aten.item %1666 : !torch.vtensor<[],si8> -> !torch.int | |
%1669 = torch.aten.quantize_per_tensor %1664, %1667, %1668, %int12_73 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1670 = torch.aten.int_repr %1669 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1671 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1672 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1673 = torch.aten.item %1671 : !torch.vtensor<[],f32> -> !torch.float | |
%1674 = torch.aten.item %1672 : !torch.vtensor<[],si8> -> !torch.int | |
%1675 = torch.aten._make_per_tensor_quantized_tensor %1670, %1673, %1674 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1676 = torch.aten.dequantize.self %1675 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1677 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1678 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1679 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_74 = torch.constant.int 12 | |
%1680 = torch.aten.item %1678 : !torch.vtensor<[],f32> -> !torch.float | |
%1681 = torch.aten.item %1679 : !torch.vtensor<[],si8> -> !torch.int | |
%1682 = torch.aten.quantize_per_tensor %1677, %1680, %1681, %int12_74 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1683 = torch.aten.int_repr %1682 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1684 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1685 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1686 = torch.aten.item %1684 : !torch.vtensor<[],f32> -> !torch.float | |
%1687 = torch.aten.item %1685 : !torch.vtensor<[],si8> -> !torch.int | |
%1688 = torch.aten._make_per_tensor_quantized_tensor %1683, %1686, %1687 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1689 = torch.aten.dequantize.self %1688 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1690 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1691 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1692 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_75 = torch.constant.int 12 | |
%1693 = torch.aten.item %1691 : !torch.vtensor<[],f32> -> !torch.float | |
%1694 = torch.aten.item %1692 : !torch.vtensor<[],si8> -> !torch.int | |
%1695 = torch.aten.quantize_per_tensor %1690, %1693, %1694, %int12_75 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1696 = torch.aten.int_repr %1695 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1697 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1698 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1699 = torch.aten.item %1697 : !torch.vtensor<[],f32> -> !torch.float | |
%1700 = torch.aten.item %1698 : !torch.vtensor<[],si8> -> !torch.int | |
%1701 = torch.aten._make_per_tensor_quantized_tensor %1696, %1699, %1700 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1702 = torch.aten.dequantize.self %1701 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1703 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1704 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1705 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_76 = torch.constant.int 12 | |
%1706 = torch.aten.item %1704 : !torch.vtensor<[],f32> -> !torch.float | |
%1707 = torch.aten.item %1705 : !torch.vtensor<[],si8> -> !torch.int | |
%1708 = torch.aten.quantize_per_tensor %1703, %1706, %1707, %int12_76 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1709 = torch.aten.int_repr %1708 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1710 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1711 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1712 = torch.aten.item %1710 : !torch.vtensor<[],f32> -> !torch.float | |
%1713 = torch.aten.item %1711 : !torch.vtensor<[],si8> -> !torch.int | |
%1714 = torch.aten._make_per_tensor_quantized_tensor %1709, %1712, %1713 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1715 = torch.aten.dequantize.self %1714 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1716 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1717 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1718 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_77 = torch.constant.int 12 | |
%1719 = torch.aten.item %1717 : !torch.vtensor<[],f32> -> !torch.float | |
%1720 = torch.aten.item %1718 : !torch.vtensor<[],si8> -> !torch.int | |
%1721 = torch.aten.quantize_per_tensor %1716, %1719, %1720, %int12_77 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1722 = torch.aten.int_repr %1721 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1723 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1724 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1725 = torch.aten.item %1723 : !torch.vtensor<[],f32> -> !torch.float | |
%1726 = torch.aten.item %1724 : !torch.vtensor<[],si8> -> !torch.int | |
%1727 = torch.aten._make_per_tensor_quantized_tensor %1722, %1725, %1726 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1728 = torch.aten.dequantize.self %1727 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1729 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1730 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1731 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_78 = torch.constant.int 12 | |
%1732 = torch.aten.item %1730 : !torch.vtensor<[],f32> -> !torch.float | |
%1733 = torch.aten.item %1731 : !torch.vtensor<[],si8> -> !torch.int | |
%1734 = torch.aten.quantize_per_tensor %1729, %1732, %1733, %int12_78 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1735 = torch.aten.int_repr %1734 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1736 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1737 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1738 = torch.aten.item %1736 : !torch.vtensor<[],f32> -> !torch.float | |
%1739 = torch.aten.item %1737 : !torch.vtensor<[],si8> -> !torch.int | |
%1740 = torch.aten._make_per_tensor_quantized_tensor %1735, %1738, %1739 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1741 = torch.aten.dequantize.self %1740 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1742 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1743 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1744 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_79 = torch.constant.int 12 | |
%1745 = torch.aten.item %1743 : !torch.vtensor<[],f32> -> !torch.float | |
%1746 = torch.aten.item %1744 : !torch.vtensor<[],si8> -> !torch.int | |
%1747 = torch.aten.quantize_per_tensor %1742, %1745, %1746, %int12_79 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1748 = torch.aten.int_repr %1747 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1749 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1750 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1751 = torch.aten.item %1749 : !torch.vtensor<[],f32> -> !torch.float | |
%1752 = torch.aten.item %1750 : !torch.vtensor<[],si8> -> !torch.int | |
%1753 = torch.aten._make_per_tensor_quantized_tensor %1748, %1751, %1752 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1754 = torch.aten.dequantize.self %1753 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1755 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1756 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1757 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_80 = torch.constant.int 12 | |
%1758 = torch.aten.item %1756 : !torch.vtensor<[],f32> -> !torch.float | |
%1759 = torch.aten.item %1757 : !torch.vtensor<[],si8> -> !torch.int | |
%1760 = torch.aten.quantize_per_tensor %1755, %1758, %1759, %int12_80 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1761 = torch.aten.int_repr %1760 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1762 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1763 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1764 = torch.aten.item %1762 : !torch.vtensor<[],f32> -> !torch.float | |
%1765 = torch.aten.item %1763 : !torch.vtensor<[],si8> -> !torch.int | |
%1766 = torch.aten._make_per_tensor_quantized_tensor %1761, %1764, %1765 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1767 = torch.aten.dequantize.self %1766 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1768 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1769 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1770 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_81 = torch.constant.int 12 | |
%1771 = torch.aten.item %1769 : !torch.vtensor<[],f32> -> !torch.float | |
%1772 = torch.aten.item %1770 : !torch.vtensor<[],si8> -> !torch.int | |
%1773 = torch.aten.quantize_per_tensor %1768, %1771, %1772, %int12_81 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1774 = torch.aten.int_repr %1773 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1775 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1776 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1777 = torch.aten.item %1775 : !torch.vtensor<[],f32> -> !torch.float | |
%1778 = torch.aten.item %1776 : !torch.vtensor<[],si8> -> !torch.int | |
%1779 = torch.aten._make_per_tensor_quantized_tensor %1774, %1777, %1778 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1780 = torch.aten.dequantize.self %1779 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1781 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1782 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1783 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_82 = torch.constant.int 12 | |
%1784 = torch.aten.item %1782 : !torch.vtensor<[],f32> -> !torch.float | |
%1785 = torch.aten.item %1783 : !torch.vtensor<[],si8> -> !torch.int | |
%1786 = torch.aten.quantize_per_tensor %1781, %1784, %1785, %int12_82 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1787 = torch.aten.int_repr %1786 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1788 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1789 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1790 = torch.aten.item %1788 : !torch.vtensor<[],f32> -> !torch.float | |
%1791 = torch.aten.item %1789 : !torch.vtensor<[],si8> -> !torch.int | |
%1792 = torch.aten._make_per_tensor_quantized_tensor %1787, %1790, %1791 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1793 = torch.aten.dequantize.self %1792 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1794 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1795 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1796 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_83 = torch.constant.int 12 | |
%1797 = torch.aten.item %1795 : !torch.vtensor<[],f32> -> !torch.float | |
%1798 = torch.aten.item %1796 : !torch.vtensor<[],si8> -> !torch.int | |
%1799 = torch.aten.quantize_per_tensor %1794, %1797, %1798, %int12_83 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1800 = torch.aten.int_repr %1799 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1801 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1802 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1803 = torch.aten.item %1801 : !torch.vtensor<[],f32> -> !torch.float | |
%1804 = torch.aten.item %1802 : !torch.vtensor<[],si8> -> !torch.int | |
%1805 = torch.aten._make_per_tensor_quantized_tensor %1800, %1803, %1804 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1806 = torch.aten.dequantize.self %1805 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1807 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1808 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1809 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_84 = torch.constant.int 12 | |
%1810 = torch.aten.item %1808 : !torch.vtensor<[],f32> -> !torch.float | |
%1811 = torch.aten.item %1809 : !torch.vtensor<[],si8> -> !torch.int | |
%1812 = torch.aten.quantize_per_tensor %1807, %1810, %1811, %int12_84 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1813 = torch.aten.int_repr %1812 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1814 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1815 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1816 = torch.aten.item %1814 : !torch.vtensor<[],f32> -> !torch.float | |
%1817 = torch.aten.item %1815 : !torch.vtensor<[],si8> -> !torch.int | |
%1818 = torch.aten._make_per_tensor_quantized_tensor %1813, %1816, %1817 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1819 = torch.aten.dequantize.self %1818 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1820 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1821 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1822 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_85 = torch.constant.int 12 | |
%1823 = torch.aten.item %1821 : !torch.vtensor<[],f32> -> !torch.float | |
%1824 = torch.aten.item %1822 : !torch.vtensor<[],si8> -> !torch.int | |
%1825 = torch.aten.quantize_per_tensor %1820, %1823, %1824, %int12_85 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1826 = torch.aten.int_repr %1825 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1827 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1828 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1829 = torch.aten.item %1827 : !torch.vtensor<[],f32> -> !torch.float | |
%1830 = torch.aten.item %1828 : !torch.vtensor<[],si8> -> !torch.int | |
%1831 = torch.aten._make_per_tensor_quantized_tensor %1826, %1829, %1830 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1832 = torch.aten.dequantize.self %1831 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1833 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1834 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1835 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_86 = torch.constant.int 12 | |
%1836 = torch.aten.item %1834 : !torch.vtensor<[],f32> -> !torch.float | |
%1837 = torch.aten.item %1835 : !torch.vtensor<[],si8> -> !torch.int | |
%1838 = torch.aten.quantize_per_tensor %1833, %1836, %1837, %int12_86 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1839 = torch.aten.int_repr %1838 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1840 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1841 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1842 = torch.aten.item %1840 : !torch.vtensor<[],f32> -> !torch.float | |
%1843 = torch.aten.item %1841 : !torch.vtensor<[],si8> -> !torch.int | |
%1844 = torch.aten._make_per_tensor_quantized_tensor %1839, %1842, %1843 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1845 = torch.aten.dequantize.self %1844 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1846 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1847 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1848 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_87 = torch.constant.int 12 | |
%1849 = torch.aten.item %1847 : !torch.vtensor<[],f32> -> !torch.float | |
%1850 = torch.aten.item %1848 : !torch.vtensor<[],si8> -> !torch.int | |
%1851 = torch.aten.quantize_per_tensor %1846, %1849, %1850, %int12_87 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1852 = torch.aten.int_repr %1851 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1853 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1854 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1855 = torch.aten.item %1853 : !torch.vtensor<[],f32> -> !torch.float | |
%1856 = torch.aten.item %1854 : !torch.vtensor<[],si8> -> !torch.int | |
%1857 = torch.aten._make_per_tensor_quantized_tensor %1852, %1855, %1856 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1858 = torch.aten.dequantize.self %1857 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1859 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1860 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1861 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_88 = torch.constant.int 12 | |
%1862 = torch.aten.item %1860 : !torch.vtensor<[],f32> -> !torch.float | |
%1863 = torch.aten.item %1861 : !torch.vtensor<[],si8> -> !torch.int | |
%1864 = torch.aten.quantize_per_tensor %1859, %1862, %1863, %int12_88 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1865 = torch.aten.int_repr %1864 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1866 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1867 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1868 = torch.aten.item %1866 : !torch.vtensor<[],f32> -> !torch.float | |
%1869 = torch.aten.item %1867 : !torch.vtensor<[],si8> -> !torch.int | |
%1870 = torch.aten._make_per_tensor_quantized_tensor %1865, %1868, %1869 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1871 = torch.aten.dequantize.self %1870 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1872 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1873 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1874 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_89 = torch.constant.int 12 | |
%1875 = torch.aten.item %1873 : !torch.vtensor<[],f32> -> !torch.float | |
%1876 = torch.aten.item %1874 : !torch.vtensor<[],si8> -> !torch.int | |
%1877 = torch.aten.quantize_per_tensor %1872, %1875, %1876, %int12_89 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1878 = torch.aten.int_repr %1877 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1879 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1880 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1881 = torch.aten.item %1879 : !torch.vtensor<[],f32> -> !torch.float | |
%1882 = torch.aten.item %1880 : !torch.vtensor<[],si8> -> !torch.int | |
%1883 = torch.aten._make_per_tensor_quantized_tensor %1878, %1881, %1882 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1884 = torch.aten.dequantize.self %1883 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1885 = torch.vtensor.literal(dense<2.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1886 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1887 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_90 = torch.constant.int 12 | |
%1888 = torch.aten.item %1886 : !torch.vtensor<[],f32> -> !torch.float | |
%1889 = torch.aten.item %1887 : !torch.vtensor<[],si8> -> !torch.int | |
%1890 = torch.aten.quantize_per_tensor %1885, %1888, %1889, %int12_90 : !torch.vtensor<[],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1891 = torch.aten.int_repr %1890 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],si8> | |
%1892 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1893 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1894 = torch.aten.item %1892 : !torch.vtensor<[],f32> -> !torch.float | |
%1895 = torch.aten.item %1893 : !torch.vtensor<[],si8> -> !torch.int | |
%1896 = torch.aten._make_per_tensor_quantized_tensor %1891, %1894, %1895 : !torch.vtensor<[],si8>, !torch.float, !torch.int -> !torch.vtensor<[],!torch.qint8> | |
%1897 = torch.aten.dequantize.self %1896 : !torch.vtensor<[],!torch.qint8> -> !torch.vtensor<[],f32> | |
%1898 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1899 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_91 = torch.constant.int 12 | |
%1900 = torch.aten.item %1898 : !torch.vtensor<[],f32> -> !torch.float | |
%1901 = torch.aten.item %1899 : !torch.vtensor<[],si8> -> !torch.int | |
%1902 = torch.aten.quantize_per_tensor %arg0, %1900, %1901, %int12_91 : !torch.vtensor<[1,3,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,3,224,224],!torch.qint8> | |
%1903 = torch.aten.int_repr %1902 : !torch.vtensor<[1,3,224,224],!torch.qint8> -> !torch.vtensor<[1,3,224,224],si8> | |
%1904 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1905 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1906 = torch.aten.item %1904 : !torch.vtensor<[],f32> -> !torch.float | |
%1907 = torch.aten.item %1905 : !torch.vtensor<[],si8> -> !torch.int | |
%1908 = torch.aten._make_per_tensor_quantized_tensor %1903, %1906, %1907 : !torch.vtensor<[1,3,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,3,224,224],!torch.qint8> | |
%1909 = torch.aten.dequantize.self %1908 : !torch.vtensor<[1,3,224,224],!torch.qint8> -> !torch.vtensor<[1,3,224,224],f32> | |
%1910 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1911 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_92 = torch.constant.int 12 | |
%1912 = torch.aten.item %1910 : !torch.vtensor<[],f32> -> !torch.float | |
%1913 = torch.aten.item %1911 : !torch.vtensor<[],si8> -> !torch.int | |
%1914 = torch.aten.quantize_per_tensor %0, %1912, %1913, %int12_92 : !torch.vtensor<[64,3,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,3,3,3],!torch.qint8> | |
%1915 = torch.aten.int_repr %1914 : !torch.vtensor<[64,3,3,3],!torch.qint8> -> !torch.vtensor<[64,3,3,3],si8> | |
%1916 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1917 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1918 = torch.aten.item %1916 : !torch.vtensor<[],f32> -> !torch.float | |
%1919 = torch.aten.item %1917 : !torch.vtensor<[],si8> -> !torch.int | |
%1920 = torch.aten._make_per_tensor_quantized_tensor %1915, %1918, %1919 : !torch.vtensor<[64,3,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,3,3,3],!torch.qint8> | |
%1921 = torch.aten.dequantize.self %1920 : !torch.vtensor<[64,3,3,3],!torch.qint8> -> !torch.vtensor<[64,3,3,3],f32> | |
%1922 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1923 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_93 = torch.constant.int 12 | |
%1924 = torch.aten.item %1922 : !torch.vtensor<[],f32> -> !torch.float | |
%1925 = torch.aten.item %1923 : !torch.vtensor<[],si8> -> !torch.int | |
%1926 = torch.aten.quantize_per_tensor %1, %1924, %1925, %int12_93 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%1927 = torch.aten.int_repr %1926 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%1928 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1929 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1930 = torch.aten.item %1928 : !torch.vtensor<[],f32> -> !torch.float | |
%1931 = torch.aten.item %1929 : !torch.vtensor<[],si8> -> !torch.int | |
%1932 = torch.aten._make_per_tensor_quantized_tensor %1927, %1930, %1931 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%1933 = torch.aten.dequantize.self %1932 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1 = torch.constant.int 1 | |
%int1_94 = torch.constant.int 1 | |
%int1_95 = torch.constant.int 1 | |
%int1_96 = torch.constant.int 1 | |
%int1_97 = torch.constant.int 1 | |
%int1_98 = torch.constant.int 1 | |
%int0 = torch.constant.int 0 | |
%1934 = torch.prim.ListConstruct %int1, %int1_94 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1935 = torch.prim.ListConstruct %int1_95, %int1_96 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1936 = torch.prim.ListConstruct %int1_97, %int1_98 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1937 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false = torch.constant.bool false | |
%int1_99 = torch.constant.int 1 | |
%1938 = torch.aten.convolution %1909, %1921, %1933, %1936, %1934, %1935, %false, %1937, %int1_99 : !torch.vtensor<[1,3,224,224],f32>, !torch.vtensor<[64,3,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%1939 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1940 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_100 = torch.constant.int 12 | |
%1941 = torch.aten.item %1939 : !torch.vtensor<[],f32> -> !torch.float | |
%1942 = torch.aten.item %1940 : !torch.vtensor<[],si8> -> !torch.int | |
%1943 = torch.aten.quantize_per_tensor %1938, %1941, %1942, %int12_100 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%1944 = torch.aten.int_repr %1943 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%1945 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1946 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1947 = torch.aten.item %1945 : !torch.vtensor<[],f32> -> !torch.float | |
%1948 = torch.aten.item %1946 : !torch.vtensor<[],si8> -> !torch.int | |
%1949 = torch.aten._make_per_tensor_quantized_tensor %1944, %1947, %1948 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%1950 = torch.aten.dequantize.self %1949 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%1951 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1952 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_101 = torch.constant.int 12 | |
%1953 = torch.aten.item %1951 : !torch.vtensor<[],f32> -> !torch.float | |
%1954 = torch.aten.item %1952 : !torch.vtensor<[],si8> -> !torch.int | |
%1955 = torch.aten.quantize_per_tensor %2, %1953, %1954, %int12_101 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%1956 = torch.aten.int_repr %1955 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%1957 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1958 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1959 = torch.aten.item %1957 : !torch.vtensor<[],f32> -> !torch.float | |
%1960 = torch.aten.item %1958 : !torch.vtensor<[],si8> -> !torch.int | |
%1961 = torch.aten._make_per_tensor_quantized_tensor %1956, %1959, %1960 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%1962 = torch.aten.dequantize.self %1961 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%1963 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1964 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_102 = torch.constant.int 12 | |
%1965 = torch.aten.item %1963 : !torch.vtensor<[],f32> -> !torch.float | |
%1966 = torch.aten.item %1964 : !torch.vtensor<[],si8> -> !torch.int | |
%1967 = torch.aten.quantize_per_tensor %3, %1965, %1966, %int12_102 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%1968 = torch.aten.int_repr %1967 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%1969 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1970 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1971 = torch.aten.item %1969 : !torch.vtensor<[],f32> -> !torch.float | |
%1972 = torch.aten.item %1970 : !torch.vtensor<[],si8> -> !torch.int | |
%1973 = torch.aten._make_per_tensor_quantized_tensor %1968, %1971, %1972 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%1974 = torch.aten.dequantize.self %1973 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_103 = torch.constant.int 1 | |
%int1_104 = torch.constant.int 1 | |
%int1_105 = torch.constant.int 1 | |
%int1_106 = torch.constant.int 1 | |
%int1_107 = torch.constant.int 1 | |
%int1_108 = torch.constant.int 1 | |
%int0_109 = torch.constant.int 0 | |
%1975 = torch.prim.ListConstruct %int1_103, %int1_104 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1976 = torch.prim.ListConstruct %int1_105, %int1_106 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1977 = torch.prim.ListConstruct %int1_107, %int1_108 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1978 = torch.prim.ListConstruct %int0_109, %int0_109 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_110 = torch.constant.bool false | |
%int1_111 = torch.constant.int 1 | |
%1979 = torch.aten.convolution %1950, %1962, %1974, %1977, %1975, %1976, %false_110, %1978, %int1_111 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01 = torch.constant.float 0.1015625 | |
%1980 = torch.aten.leaky_relu %1979, %float1.015630e-01 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%1981 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1982 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_112 = torch.constant.int 12 | |
%1983 = torch.aten.item %1981 : !torch.vtensor<[],f32> -> !torch.float | |
%1984 = torch.aten.item %1982 : !torch.vtensor<[],si8> -> !torch.int | |
%1985 = torch.aten.quantize_per_tensor %1980, %1983, %1984, %int12_112 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%1986 = torch.aten.int_repr %1985 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%1987 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1988 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%1989 = torch.aten.item %1987 : !torch.vtensor<[],f32> -> !torch.float | |
%1990 = torch.aten.item %1988 : !torch.vtensor<[],si8> -> !torch.int | |
%1991 = torch.aten._make_per_tensor_quantized_tensor %1986, %1989, %1990 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%1992 = torch.aten.dequantize.self %1991 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%1993 = torch.prim.ListConstruct %1950, %1992 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_113 = torch.constant.int 1 | |
%1994 = torch.aten.cat %1993, %int1_113 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%1995 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%1996 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_114 = torch.constant.int 12 | |
%1997 = torch.aten.item %1995 : !torch.vtensor<[],f32> -> !torch.float | |
%1998 = torch.aten.item %1996 : !torch.vtensor<[],si8> -> !torch.int | |
%1999 = torch.aten.quantize_per_tensor %1994, %1997, %1998, %int12_114 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%2000 = torch.aten.int_repr %1999 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%2001 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2002 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2003 = torch.aten.item %2001 : !torch.vtensor<[],f32> -> !torch.float | |
%2004 = torch.aten.item %2002 : !torch.vtensor<[],si8> -> !torch.int | |
%2005 = torch.aten._make_per_tensor_quantized_tensor %2000, %2003, %2004 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%2006 = torch.aten.dequantize.self %2005 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%2007 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2008 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_115 = torch.constant.int 12 | |
%2009 = torch.aten.item %2007 : !torch.vtensor<[],f32> -> !torch.float | |
%2010 = torch.aten.item %2008 : !torch.vtensor<[],si8> -> !torch.int | |
%2011 = torch.aten.quantize_per_tensor %4, %2009, %2010, %int12_115 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%2012 = torch.aten.int_repr %2011 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%2013 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2014 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2015 = torch.aten.item %2013 : !torch.vtensor<[],f32> -> !torch.float | |
%2016 = torch.aten.item %2014 : !torch.vtensor<[],si8> -> !torch.int | |
%2017 = torch.aten._make_per_tensor_quantized_tensor %2012, %2015, %2016 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%2018 = torch.aten.dequantize.self %2017 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%2019 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2020 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_116 = torch.constant.int 12 | |
%2021 = torch.aten.item %2019 : !torch.vtensor<[],f32> -> !torch.float | |
%2022 = torch.aten.item %2020 : !torch.vtensor<[],si8> -> !torch.int | |
%2023 = torch.aten.quantize_per_tensor %5, %2021, %2022, %int12_116 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2024 = torch.aten.int_repr %2023 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2025 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2026 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2027 = torch.aten.item %2025 : !torch.vtensor<[],f32> -> !torch.float | |
%2028 = torch.aten.item %2026 : !torch.vtensor<[],si8> -> !torch.int | |
%2029 = torch.aten._make_per_tensor_quantized_tensor %2024, %2027, %2028 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2030 = torch.aten.dequantize.self %2029 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_117 = torch.constant.int 1 | |
%int1_118 = torch.constant.int 1 | |
%int1_119 = torch.constant.int 1 | |
%int1_120 = torch.constant.int 1 | |
%int1_121 = torch.constant.int 1 | |
%int1_122 = torch.constant.int 1 | |
%int0_123 = torch.constant.int 0 | |
%2031 = torch.prim.ListConstruct %int1_117, %int1_118 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2032 = torch.prim.ListConstruct %int1_119, %int1_120 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2033 = torch.prim.ListConstruct %int1_121, %int1_122 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2034 = torch.prim.ListConstruct %int0_123, %int0_123 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_124 = torch.constant.bool false | |
%int1_125 = torch.constant.int 1 | |
%2035 = torch.aten.convolution %2006, %2018, %2030, %2033, %2031, %2032, %false_124, %2034, %int1_125 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_126 = torch.constant.float 0.1015625 | |
%2036 = torch.aten.leaky_relu %2035, %float1.015630e-01_126 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2037 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2038 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_127 = torch.constant.int 12 | |
%2039 = torch.aten.item %2037 : !torch.vtensor<[],f32> -> !torch.float | |
%2040 = torch.aten.item %2038 : !torch.vtensor<[],si8> -> !torch.int | |
%2041 = torch.aten.quantize_per_tensor %2036, %2039, %2040, %int12_127 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2042 = torch.aten.int_repr %2041 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2043 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2044 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2045 = torch.aten.item %2043 : !torch.vtensor<[],f32> -> !torch.float | |
%2046 = torch.aten.item %2044 : !torch.vtensor<[],si8> -> !torch.int | |
%2047 = torch.aten._make_per_tensor_quantized_tensor %2042, %2045, %2046 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2048 = torch.aten.dequantize.self %2047 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2049 = torch.prim.ListConstruct %1950, %1992, %2048 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_128 = torch.constant.int 1 | |
%2050 = torch.aten.cat %2049, %int1_128 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%2051 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2052 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_129 = torch.constant.int 12 | |
%2053 = torch.aten.item %2051 : !torch.vtensor<[],f32> -> !torch.float | |
%2054 = torch.aten.item %2052 : !torch.vtensor<[],si8> -> !torch.int | |
%2055 = torch.aten.quantize_per_tensor %2050, %2053, %2054, %int12_129 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%2056 = torch.aten.int_repr %2055 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%2057 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2058 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2059 = torch.aten.item %2057 : !torch.vtensor<[],f32> -> !torch.float | |
%2060 = torch.aten.item %2058 : !torch.vtensor<[],si8> -> !torch.int | |
%2061 = torch.aten._make_per_tensor_quantized_tensor %2056, %2059, %2060 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%2062 = torch.aten.dequantize.self %2061 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%2063 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2064 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_130 = torch.constant.int 12 | |
%2065 = torch.aten.item %2063 : !torch.vtensor<[],f32> -> !torch.float | |
%2066 = torch.aten.item %2064 : !torch.vtensor<[],si8> -> !torch.int | |
%2067 = torch.aten.quantize_per_tensor %6, %2065, %2066, %int12_130 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%2068 = torch.aten.int_repr %2067 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%2069 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2070 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2071 = torch.aten.item %2069 : !torch.vtensor<[],f32> -> !torch.float | |
%2072 = torch.aten.item %2070 : !torch.vtensor<[],si8> -> !torch.int | |
%2073 = torch.aten._make_per_tensor_quantized_tensor %2068, %2071, %2072 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%2074 = torch.aten.dequantize.self %2073 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%2075 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2076 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_131 = torch.constant.int 12 | |
%2077 = torch.aten.item %2075 : !torch.vtensor<[],f32> -> !torch.float | |
%2078 = torch.aten.item %2076 : !torch.vtensor<[],si8> -> !torch.int | |
%2079 = torch.aten.quantize_per_tensor %7, %2077, %2078, %int12_131 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2080 = torch.aten.int_repr %2079 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2081 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2082 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2083 = torch.aten.item %2081 : !torch.vtensor<[],f32> -> !torch.float | |
%2084 = torch.aten.item %2082 : !torch.vtensor<[],si8> -> !torch.int | |
%2085 = torch.aten._make_per_tensor_quantized_tensor %2080, %2083, %2084 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2086 = torch.aten.dequantize.self %2085 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_132 = torch.constant.int 1 | |
%int1_133 = torch.constant.int 1 | |
%int1_134 = torch.constant.int 1 | |
%int1_135 = torch.constant.int 1 | |
%int1_136 = torch.constant.int 1 | |
%int1_137 = torch.constant.int 1 | |
%int0_138 = torch.constant.int 0 | |
%2087 = torch.prim.ListConstruct %int1_132, %int1_133 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2088 = torch.prim.ListConstruct %int1_134, %int1_135 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2089 = torch.prim.ListConstruct %int1_136, %int1_137 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2090 = torch.prim.ListConstruct %int0_138, %int0_138 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_139 = torch.constant.bool false | |
%int1_140 = torch.constant.int 1 | |
%2091 = torch.aten.convolution %2062, %2074, %2086, %2089, %2087, %2088, %false_139, %2090, %int1_140 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_141 = torch.constant.float 0.1015625 | |
%2092 = torch.aten.leaky_relu %2091, %float1.015630e-01_141 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2093 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2094 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_142 = torch.constant.int 12 | |
%2095 = torch.aten.item %2093 : !torch.vtensor<[],f32> -> !torch.float | |
%2096 = torch.aten.item %2094 : !torch.vtensor<[],si8> -> !torch.int | |
%2097 = torch.aten.quantize_per_tensor %2092, %2095, %2096, %int12_142 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2098 = torch.aten.int_repr %2097 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2099 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2100 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2101 = torch.aten.item %2099 : !torch.vtensor<[],f32> -> !torch.float | |
%2102 = torch.aten.item %2100 : !torch.vtensor<[],si8> -> !torch.int | |
%2103 = torch.aten._make_per_tensor_quantized_tensor %2098, %2101, %2102 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2104 = torch.aten.dequantize.self %2103 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2105 = torch.prim.ListConstruct %1950, %1992, %2048, %2104 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_143 = torch.constant.int 1 | |
%2106 = torch.aten.cat %2105, %int1_143 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%2107 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2108 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_144 = torch.constant.int 12 | |
%2109 = torch.aten.item %2107 : !torch.vtensor<[],f32> -> !torch.float | |
%2110 = torch.aten.item %2108 : !torch.vtensor<[],si8> -> !torch.int | |
%2111 = torch.aten.quantize_per_tensor %2106, %2109, %2110, %int12_144 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%2112 = torch.aten.int_repr %2111 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%2113 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2114 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2115 = torch.aten.item %2113 : !torch.vtensor<[],f32> -> !torch.float | |
%2116 = torch.aten.item %2114 : !torch.vtensor<[],si8> -> !torch.int | |
%2117 = torch.aten._make_per_tensor_quantized_tensor %2112, %2115, %2116 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%2118 = torch.aten.dequantize.self %2117 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%2119 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2120 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_145 = torch.constant.int 12 | |
%2121 = torch.aten.item %2119 : !torch.vtensor<[],f32> -> !torch.float | |
%2122 = torch.aten.item %2120 : !torch.vtensor<[],si8> -> !torch.int | |
%2123 = torch.aten.quantize_per_tensor %8, %2121, %2122, %int12_145 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%2124 = torch.aten.int_repr %2123 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%2125 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2126 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2127 = torch.aten.item %2125 : !torch.vtensor<[],f32> -> !torch.float | |
%2128 = torch.aten.item %2126 : !torch.vtensor<[],si8> -> !torch.int | |
%2129 = torch.aten._make_per_tensor_quantized_tensor %2124, %2127, %2128 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%2130 = torch.aten.dequantize.self %2129 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%2131 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2132 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_146 = torch.constant.int 12 | |
%2133 = torch.aten.item %2131 : !torch.vtensor<[],f32> -> !torch.float | |
%2134 = torch.aten.item %2132 : !torch.vtensor<[],si8> -> !torch.int | |
%2135 = torch.aten.quantize_per_tensor %9, %2133, %2134, %int12_146 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2136 = torch.aten.int_repr %2135 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2137 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2138 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2139 = torch.aten.item %2137 : !torch.vtensor<[],f32> -> !torch.float | |
%2140 = torch.aten.item %2138 : !torch.vtensor<[],si8> -> !torch.int | |
%2141 = torch.aten._make_per_tensor_quantized_tensor %2136, %2139, %2140 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2142 = torch.aten.dequantize.self %2141 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_147 = torch.constant.int 1 | |
%int1_148 = torch.constant.int 1 | |
%int1_149 = torch.constant.int 1 | |
%int1_150 = torch.constant.int 1 | |
%int1_151 = torch.constant.int 1 | |
%int1_152 = torch.constant.int 1 | |
%int0_153 = torch.constant.int 0 | |
%2143 = torch.prim.ListConstruct %int1_147, %int1_148 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2144 = torch.prim.ListConstruct %int1_149, %int1_150 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2145 = torch.prim.ListConstruct %int1_151, %int1_152 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2146 = torch.prim.ListConstruct %int0_153, %int0_153 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_154 = torch.constant.bool false | |
%int1_155 = torch.constant.int 1 | |
%2147 = torch.aten.convolution %2118, %2130, %2142, %2145, %2143, %2144, %false_154, %2146, %int1_155 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_156 = torch.constant.float 0.1015625 | |
%2148 = torch.aten.leaky_relu %2147, %float1.015630e-01_156 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2149 = torch.prim.ListConstruct %1950, %1992, %2048, %2104, %2148 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_157 = torch.constant.int 1 | |
%2150 = torch.aten.cat %2149, %int1_157 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%2151 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_158 = torch.constant.int 12 | |
%2153 = torch.aten.item %2151 : !torch.vtensor<[],f32> -> !torch.float | |
%2154 = torch.aten.item %2152 : !torch.vtensor<[],si8> -> !torch.int | |
%2155 = torch.aten.quantize_per_tensor %2150, %2153, %2154, %int12_158 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%2156 = torch.aten.int_repr %2155 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%2157 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2158 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2159 = torch.aten.item %2157 : !torch.vtensor<[],f32> -> !torch.float | |
%2160 = torch.aten.item %2158 : !torch.vtensor<[],si8> -> !torch.int | |
%2161 = torch.aten._make_per_tensor_quantized_tensor %2156, %2159, %2160 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%2162 = torch.aten.dequantize.self %2161 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%2163 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2164 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_159 = torch.constant.int 12 | |
%2165 = torch.aten.item %2163 : !torch.vtensor<[],f32> -> !torch.float | |
%2166 = torch.aten.item %2164 : !torch.vtensor<[],si8> -> !torch.int | |
%2167 = torch.aten.quantize_per_tensor %10, %2165, %2166, %int12_159 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%2168 = torch.aten.int_repr %2167 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%2169 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2170 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2171 = torch.aten.item %2169 : !torch.vtensor<[],f32> -> !torch.float | |
%2172 = torch.aten.item %2170 : !torch.vtensor<[],si8> -> !torch.int | |
%2173 = torch.aten._make_per_tensor_quantized_tensor %2168, %2171, %2172 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%2174 = torch.aten.dequantize.self %2173 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%2175 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2176 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_160 = torch.constant.int 12 | |
%2177 = torch.aten.item %2175 : !torch.vtensor<[],f32> -> !torch.float | |
%2178 = torch.aten.item %2176 : !torch.vtensor<[],si8> -> !torch.int | |
%2179 = torch.aten.quantize_per_tensor %11, %2177, %2178, %int12_160 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%2180 = torch.aten.int_repr %2179 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%2181 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2182 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2183 = torch.aten.item %2181 : !torch.vtensor<[],f32> -> !torch.float | |
%2184 = torch.aten.item %2182 : !torch.vtensor<[],si8> -> !torch.int | |
%2185 = torch.aten._make_per_tensor_quantized_tensor %2180, %2183, %2184 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%2186 = torch.aten.dequantize.self %2185 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_161 = torch.constant.int 1 | |
%int1_162 = torch.constant.int 1 | |
%int1_163 = torch.constant.int 1 | |
%int1_164 = torch.constant.int 1 | |
%int1_165 = torch.constant.int 1 | |
%int1_166 = torch.constant.int 1 | |
%int0_167 = torch.constant.int 0 | |
%2187 = torch.prim.ListConstruct %int1_161, %int1_162 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2188 = torch.prim.ListConstruct %int1_163, %int1_164 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2189 = torch.prim.ListConstruct %int1_165, %int1_166 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2190 = torch.prim.ListConstruct %int0_167, %int0_167 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_168 = torch.constant.bool false | |
%int1_169 = torch.constant.int 1 | |
%2191 = torch.aten.convolution %2162, %2174, %2186, %2189, %2187, %2188, %false_168, %2190, %int1_169 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%2192 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2193 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_170 = torch.constant.int 12 | |
%2194 = torch.aten.item %2192 : !torch.vtensor<[],f32> -> !torch.float | |
%2195 = torch.aten.item %2193 : !torch.vtensor<[],si8> -> !torch.int | |
%2196 = torch.aten.quantize_per_tensor %2191, %2194, %2195, %int12_170 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2197 = torch.aten.int_repr %2196 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2198 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2199 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2200 = torch.aten.item %2198 : !torch.vtensor<[],f32> -> !torch.float | |
%2201 = torch.aten.item %2199 : !torch.vtensor<[],si8> -> !torch.int | |
%2202 = torch.aten._make_per_tensor_quantized_tensor %2197, %2200, %2201 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2203 = torch.aten.dequantize.self %2202 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%2204 = torch.aten.mul.Tensor %2203, %714 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%2205 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2206 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_171 = torch.constant.int 12 | |
%2207 = torch.aten.item %2205 : !torch.vtensor<[],f32> -> !torch.float | |
%2208 = torch.aten.item %2206 : !torch.vtensor<[],si8> -> !torch.int | |
%2209 = torch.aten.quantize_per_tensor %2204, %2207, %2208, %int12_171 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2210 = torch.aten.int_repr %2209 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2211 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2212 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2213 = torch.aten.item %2211 : !torch.vtensor<[],f32> -> !torch.float | |
%2214 = torch.aten.item %2212 : !torch.vtensor<[],si8> -> !torch.int | |
%2215 = torch.aten._make_per_tensor_quantized_tensor %2210, %2213, %2214 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2216 = torch.aten.dequantize.self %2215 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_172 = torch.constant.int 1 | |
%2217 = torch.aten.add.Tensor %2216, %1950, %int1_172 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%2218 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2219 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_173 = torch.constant.int 12 | |
%2220 = torch.aten.item %2218 : !torch.vtensor<[],f32> -> !torch.float | |
%2221 = torch.aten.item %2219 : !torch.vtensor<[],si8> -> !torch.int | |
%2222 = torch.aten.quantize_per_tensor %2217, %2220, %2221, %int12_173 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2223 = torch.aten.int_repr %2222 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2224 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2225 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2226 = torch.aten.item %2224 : !torch.vtensor<[],f32> -> !torch.float | |
%2227 = torch.aten.item %2225 : !torch.vtensor<[],si8> -> !torch.int | |
%2228 = torch.aten._make_per_tensor_quantized_tensor %2223, %2226, %2227 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2229 = torch.aten.dequantize.self %2228 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%2230 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2231 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_174 = torch.constant.int 12 | |
%2232 = torch.aten.item %2230 : !torch.vtensor<[],f32> -> !torch.float | |
%2233 = torch.aten.item %2231 : !torch.vtensor<[],si8> -> !torch.int | |
%2234 = torch.aten.quantize_per_tensor %12, %2232, %2233, %int12_174 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%2235 = torch.aten.int_repr %2234 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%2236 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2237 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2238 = torch.aten.item %2236 : !torch.vtensor<[],f32> -> !torch.float | |
%2239 = torch.aten.item %2237 : !torch.vtensor<[],si8> -> !torch.int | |
%2240 = torch.aten._make_per_tensor_quantized_tensor %2235, %2238, %2239 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%2241 = torch.aten.dequantize.self %2240 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%2242 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2243 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_175 = torch.constant.int 12 | |
%2244 = torch.aten.item %2242 : !torch.vtensor<[],f32> -> !torch.float | |
%2245 = torch.aten.item %2243 : !torch.vtensor<[],si8> -> !torch.int | |
%2246 = torch.aten.quantize_per_tensor %13, %2244, %2245, %int12_175 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2247 = torch.aten.int_repr %2246 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2248 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2249 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2250 = torch.aten.item %2248 : !torch.vtensor<[],f32> -> !torch.float | |
%2251 = torch.aten.item %2249 : !torch.vtensor<[],si8> -> !torch.int | |
%2252 = torch.aten._make_per_tensor_quantized_tensor %2247, %2250, %2251 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2253 = torch.aten.dequantize.self %2252 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_176 = torch.constant.int 1 | |
%int1_177 = torch.constant.int 1 | |
%int1_178 = torch.constant.int 1 | |
%int1_179 = torch.constant.int 1 | |
%int1_180 = torch.constant.int 1 | |
%int1_181 = torch.constant.int 1 | |
%int0_182 = torch.constant.int 0 | |
%2254 = torch.prim.ListConstruct %int1_176, %int1_177 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2255 = torch.prim.ListConstruct %int1_178, %int1_179 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2256 = torch.prim.ListConstruct %int1_180, %int1_181 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2257 = torch.prim.ListConstruct %int0_182, %int0_182 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_183 = torch.constant.bool false | |
%int1_184 = torch.constant.int 1 | |
%2258 = torch.aten.convolution %2229, %2241, %2253, %2256, %2254, %2255, %false_183, %2257, %int1_184 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_185 = torch.constant.float 0.1015625 | |
%2259 = torch.aten.leaky_relu %2258, %float1.015630e-01_185 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2260 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2261 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_186 = torch.constant.int 12 | |
%2262 = torch.aten.item %2260 : !torch.vtensor<[],f32> -> !torch.float | |
%2263 = torch.aten.item %2261 : !torch.vtensor<[],si8> -> !torch.int | |
%2264 = torch.aten.quantize_per_tensor %2259, %2262, %2263, %int12_186 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2265 = torch.aten.int_repr %2264 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2266 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2267 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2268 = torch.aten.item %2266 : !torch.vtensor<[],f32> -> !torch.float | |
%2269 = torch.aten.item %2267 : !torch.vtensor<[],si8> -> !torch.int | |
%2270 = torch.aten._make_per_tensor_quantized_tensor %2265, %2268, %2269 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2271 = torch.aten.dequantize.self %2270 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2272 = torch.prim.ListConstruct %2229, %2271 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_187 = torch.constant.int 1 | |
%2273 = torch.aten.cat %2272, %int1_187 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%2274 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2275 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_188 = torch.constant.int 12 | |
%2276 = torch.aten.item %2274 : !torch.vtensor<[],f32> -> !torch.float | |
%2277 = torch.aten.item %2275 : !torch.vtensor<[],si8> -> !torch.int | |
%2278 = torch.aten.quantize_per_tensor %2273, %2276, %2277, %int12_188 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%2279 = torch.aten.int_repr %2278 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%2280 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2281 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2282 = torch.aten.item %2280 : !torch.vtensor<[],f32> -> !torch.float | |
%2283 = torch.aten.item %2281 : !torch.vtensor<[],si8> -> !torch.int | |
%2284 = torch.aten._make_per_tensor_quantized_tensor %2279, %2282, %2283 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%2285 = torch.aten.dequantize.self %2284 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%2286 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2287 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_189 = torch.constant.int 12 | |
%2288 = torch.aten.item %2286 : !torch.vtensor<[],f32> -> !torch.float | |
%2289 = torch.aten.item %2287 : !torch.vtensor<[],si8> -> !torch.int | |
%2290 = torch.aten.quantize_per_tensor %14, %2288, %2289, %int12_189 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%2291 = torch.aten.int_repr %2290 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%2292 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2293 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2294 = torch.aten.item %2292 : !torch.vtensor<[],f32> -> !torch.float | |
%2295 = torch.aten.item %2293 : !torch.vtensor<[],si8> -> !torch.int | |
%2296 = torch.aten._make_per_tensor_quantized_tensor %2291, %2294, %2295 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%2297 = torch.aten.dequantize.self %2296 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%2298 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2299 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_190 = torch.constant.int 12 | |
%2300 = torch.aten.item %2298 : !torch.vtensor<[],f32> -> !torch.float | |
%2301 = torch.aten.item %2299 : !torch.vtensor<[],si8> -> !torch.int | |
%2302 = torch.aten.quantize_per_tensor %15, %2300, %2301, %int12_190 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2303 = torch.aten.int_repr %2302 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2304 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2305 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2306 = torch.aten.item %2304 : !torch.vtensor<[],f32> -> !torch.float | |
%2307 = torch.aten.item %2305 : !torch.vtensor<[],si8> -> !torch.int | |
%2308 = torch.aten._make_per_tensor_quantized_tensor %2303, %2306, %2307 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2309 = torch.aten.dequantize.self %2308 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_191 = torch.constant.int 1 | |
%int1_192 = torch.constant.int 1 | |
%int1_193 = torch.constant.int 1 | |
%int1_194 = torch.constant.int 1 | |
%int1_195 = torch.constant.int 1 | |
%int1_196 = torch.constant.int 1 | |
%int0_197 = torch.constant.int 0 | |
%2310 = torch.prim.ListConstruct %int1_191, %int1_192 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2311 = torch.prim.ListConstruct %int1_193, %int1_194 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2312 = torch.prim.ListConstruct %int1_195, %int1_196 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2313 = torch.prim.ListConstruct %int0_197, %int0_197 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_198 = torch.constant.bool false | |
%int1_199 = torch.constant.int 1 | |
%2314 = torch.aten.convolution %2285, %2297, %2309, %2312, %2310, %2311, %false_198, %2313, %int1_199 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_200 = torch.constant.float 0.1015625 | |
%2315 = torch.aten.leaky_relu %2314, %float1.015630e-01_200 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2316 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2317 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_201 = torch.constant.int 12 | |
%2318 = torch.aten.item %2316 : !torch.vtensor<[],f32> -> !torch.float | |
%2319 = torch.aten.item %2317 : !torch.vtensor<[],si8> -> !torch.int | |
%2320 = torch.aten.quantize_per_tensor %2315, %2318, %2319, %int12_201 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2321 = torch.aten.int_repr %2320 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2322 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2323 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2324 = torch.aten.item %2322 : !torch.vtensor<[],f32> -> !torch.float | |
%2325 = torch.aten.item %2323 : !torch.vtensor<[],si8> -> !torch.int | |
%2326 = torch.aten._make_per_tensor_quantized_tensor %2321, %2324, %2325 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2327 = torch.aten.dequantize.self %2326 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2328 = torch.prim.ListConstruct %2229, %2271, %2327 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_202 = torch.constant.int 1 | |
%2329 = torch.aten.cat %2328, %int1_202 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%2330 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2331 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_203 = torch.constant.int 12 | |
%2332 = torch.aten.item %2330 : !torch.vtensor<[],f32> -> !torch.float | |
%2333 = torch.aten.item %2331 : !torch.vtensor<[],si8> -> !torch.int | |
%2334 = torch.aten.quantize_per_tensor %2329, %2332, %2333, %int12_203 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%2335 = torch.aten.int_repr %2334 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%2336 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2337 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2338 = torch.aten.item %2336 : !torch.vtensor<[],f32> -> !torch.float | |
%2339 = torch.aten.item %2337 : !torch.vtensor<[],si8> -> !torch.int | |
%2340 = torch.aten._make_per_tensor_quantized_tensor %2335, %2338, %2339 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%2341 = torch.aten.dequantize.self %2340 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%2342 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2343 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_204 = torch.constant.int 12 | |
%2344 = torch.aten.item %2342 : !torch.vtensor<[],f32> -> !torch.float | |
%2345 = torch.aten.item %2343 : !torch.vtensor<[],si8> -> !torch.int | |
%2346 = torch.aten.quantize_per_tensor %16, %2344, %2345, %int12_204 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%2347 = torch.aten.int_repr %2346 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%2348 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2349 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2350 = torch.aten.item %2348 : !torch.vtensor<[],f32> -> !torch.float | |
%2351 = torch.aten.item %2349 : !torch.vtensor<[],si8> -> !torch.int | |
%2352 = torch.aten._make_per_tensor_quantized_tensor %2347, %2350, %2351 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%2353 = torch.aten.dequantize.self %2352 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%2354 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2355 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_205 = torch.constant.int 12 | |
%2356 = torch.aten.item %2354 : !torch.vtensor<[],f32> -> !torch.float | |
%2357 = torch.aten.item %2355 : !torch.vtensor<[],si8> -> !torch.int | |
%2358 = torch.aten.quantize_per_tensor %17, %2356, %2357, %int12_205 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2359 = torch.aten.int_repr %2358 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2360 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2361 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2362 = torch.aten.item %2360 : !torch.vtensor<[],f32> -> !torch.float | |
%2363 = torch.aten.item %2361 : !torch.vtensor<[],si8> -> !torch.int | |
%2364 = torch.aten._make_per_tensor_quantized_tensor %2359, %2362, %2363 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2365 = torch.aten.dequantize.self %2364 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_206 = torch.constant.int 1 | |
%int1_207 = torch.constant.int 1 | |
%int1_208 = torch.constant.int 1 | |
%int1_209 = torch.constant.int 1 | |
%int1_210 = torch.constant.int 1 | |
%int1_211 = torch.constant.int 1 | |
%int0_212 = torch.constant.int 0 | |
%2366 = torch.prim.ListConstruct %int1_206, %int1_207 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2367 = torch.prim.ListConstruct %int1_208, %int1_209 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2368 = torch.prim.ListConstruct %int1_210, %int1_211 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2369 = torch.prim.ListConstruct %int0_212, %int0_212 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_213 = torch.constant.bool false | |
%int1_214 = torch.constant.int 1 | |
%2370 = torch.aten.convolution %2341, %2353, %2365, %2368, %2366, %2367, %false_213, %2369, %int1_214 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_215 = torch.constant.float 0.1015625 | |
%2371 = torch.aten.leaky_relu %2370, %float1.015630e-01_215 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2372 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2373 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_216 = torch.constant.int 12 | |
%2374 = torch.aten.item %2372 : !torch.vtensor<[],f32> -> !torch.float | |
%2375 = torch.aten.item %2373 : !torch.vtensor<[],si8> -> !torch.int | |
%2376 = torch.aten.quantize_per_tensor %2371, %2374, %2375, %int12_216 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2377 = torch.aten.int_repr %2376 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2378 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2379 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2380 = torch.aten.item %2378 : !torch.vtensor<[],f32> -> !torch.float | |
%2381 = torch.aten.item %2379 : !torch.vtensor<[],si8> -> !torch.int | |
%2382 = torch.aten._make_per_tensor_quantized_tensor %2377, %2380, %2381 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2383 = torch.aten.dequantize.self %2382 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2384 = torch.prim.ListConstruct %2229, %2271, %2327, %2383 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_217 = torch.constant.int 1 | |
%2385 = torch.aten.cat %2384, %int1_217 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%2386 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2387 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_218 = torch.constant.int 12 | |
%2388 = torch.aten.item %2386 : !torch.vtensor<[],f32> -> !torch.float | |
%2389 = torch.aten.item %2387 : !torch.vtensor<[],si8> -> !torch.int | |
%2390 = torch.aten.quantize_per_tensor %2385, %2388, %2389, %int12_218 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%2391 = torch.aten.int_repr %2390 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%2392 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2393 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2394 = torch.aten.item %2392 : !torch.vtensor<[],f32> -> !torch.float | |
%2395 = torch.aten.item %2393 : !torch.vtensor<[],si8> -> !torch.int | |
%2396 = torch.aten._make_per_tensor_quantized_tensor %2391, %2394, %2395 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%2397 = torch.aten.dequantize.self %2396 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%2398 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2399 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_219 = torch.constant.int 12 | |
%2400 = torch.aten.item %2398 : !torch.vtensor<[],f32> -> !torch.float | |
%2401 = torch.aten.item %2399 : !torch.vtensor<[],si8> -> !torch.int | |
%2402 = torch.aten.quantize_per_tensor %18, %2400, %2401, %int12_219 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%2403 = torch.aten.int_repr %2402 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%2404 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2405 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2406 = torch.aten.item %2404 : !torch.vtensor<[],f32> -> !torch.float | |
%2407 = torch.aten.item %2405 : !torch.vtensor<[],si8> -> !torch.int | |
%2408 = torch.aten._make_per_tensor_quantized_tensor %2403, %2406, %2407 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%2409 = torch.aten.dequantize.self %2408 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%2410 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2411 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_220 = torch.constant.int 12 | |
%2412 = torch.aten.item %2410 : !torch.vtensor<[],f32> -> !torch.float | |
%2413 = torch.aten.item %2411 : !torch.vtensor<[],si8> -> !torch.int | |
%2414 = torch.aten.quantize_per_tensor %19, %2412, %2413, %int12_220 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2415 = torch.aten.int_repr %2414 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2416 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2417 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2418 = torch.aten.item %2416 : !torch.vtensor<[],f32> -> !torch.float | |
%2419 = torch.aten.item %2417 : !torch.vtensor<[],si8> -> !torch.int | |
%2420 = torch.aten._make_per_tensor_quantized_tensor %2415, %2418, %2419 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2421 = torch.aten.dequantize.self %2420 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_221 = torch.constant.int 1 | |
%int1_222 = torch.constant.int 1 | |
%int1_223 = torch.constant.int 1 | |
%int1_224 = torch.constant.int 1 | |
%int1_225 = torch.constant.int 1 | |
%int1_226 = torch.constant.int 1 | |
%int0_227 = torch.constant.int 0 | |
%2422 = torch.prim.ListConstruct %int1_221, %int1_222 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2423 = torch.prim.ListConstruct %int1_223, %int1_224 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2424 = torch.prim.ListConstruct %int1_225, %int1_226 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2425 = torch.prim.ListConstruct %int0_227, %int0_227 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_228 = torch.constant.bool false | |
%int1_229 = torch.constant.int 1 | |
%2426 = torch.aten.convolution %2397, %2409, %2421, %2424, %2422, %2423, %false_228, %2425, %int1_229 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_230 = torch.constant.float 0.1015625 | |
%2427 = torch.aten.leaky_relu %2426, %float1.015630e-01_230 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2428 = torch.prim.ListConstruct %2229, %2271, %2327, %2383, %2427 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_231 = torch.constant.int 1 | |
%2429 = torch.aten.cat %2428, %int1_231 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%2430 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2431 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_232 = torch.constant.int 12 | |
%2432 = torch.aten.item %2430 : !torch.vtensor<[],f32> -> !torch.float | |
%2433 = torch.aten.item %2431 : !torch.vtensor<[],si8> -> !torch.int | |
%2434 = torch.aten.quantize_per_tensor %2429, %2432, %2433, %int12_232 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%2435 = torch.aten.int_repr %2434 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%2436 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2437 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2438 = torch.aten.item %2436 : !torch.vtensor<[],f32> -> !torch.float | |
%2439 = torch.aten.item %2437 : !torch.vtensor<[],si8> -> !torch.int | |
%2440 = torch.aten._make_per_tensor_quantized_tensor %2435, %2438, %2439 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%2441 = torch.aten.dequantize.self %2440 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%2442 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2443 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_233 = torch.constant.int 12 | |
%2444 = torch.aten.item %2442 : !torch.vtensor<[],f32> -> !torch.float | |
%2445 = torch.aten.item %2443 : !torch.vtensor<[],si8> -> !torch.int | |
%2446 = torch.aten.quantize_per_tensor %20, %2444, %2445, %int12_233 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%2447 = torch.aten.int_repr %2446 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%2448 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2449 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2450 = torch.aten.item %2448 : !torch.vtensor<[],f32> -> !torch.float | |
%2451 = torch.aten.item %2449 : !torch.vtensor<[],si8> -> !torch.int | |
%2452 = torch.aten._make_per_tensor_quantized_tensor %2447, %2450, %2451 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%2453 = torch.aten.dequantize.self %2452 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%2454 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2455 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_234 = torch.constant.int 12 | |
%2456 = torch.aten.item %2454 : !torch.vtensor<[],f32> -> !torch.float | |
%2457 = torch.aten.item %2455 : !torch.vtensor<[],si8> -> !torch.int | |
%2458 = torch.aten.quantize_per_tensor %21, %2456, %2457, %int12_234 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%2459 = torch.aten.int_repr %2458 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%2460 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2461 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2462 = torch.aten.item %2460 : !torch.vtensor<[],f32> -> !torch.float | |
%2463 = torch.aten.item %2461 : !torch.vtensor<[],si8> -> !torch.int | |
%2464 = torch.aten._make_per_tensor_quantized_tensor %2459, %2462, %2463 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%2465 = torch.aten.dequantize.self %2464 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_235 = torch.constant.int 1 | |
%int1_236 = torch.constant.int 1 | |
%int1_237 = torch.constant.int 1 | |
%int1_238 = torch.constant.int 1 | |
%int1_239 = torch.constant.int 1 | |
%int1_240 = torch.constant.int 1 | |
%int0_241 = torch.constant.int 0 | |
%2466 = torch.prim.ListConstruct %int1_235, %int1_236 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2467 = torch.prim.ListConstruct %int1_237, %int1_238 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2468 = torch.prim.ListConstruct %int1_239, %int1_240 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2469 = torch.prim.ListConstruct %int0_241, %int0_241 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_242 = torch.constant.bool false | |
%int1_243 = torch.constant.int 1 | |
%2470 = torch.aten.convolution %2441, %2453, %2465, %2468, %2466, %2467, %false_242, %2469, %int1_243 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%2471 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2472 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_244 = torch.constant.int 12 | |
%2473 = torch.aten.item %2471 : !torch.vtensor<[],f32> -> !torch.float | |
%2474 = torch.aten.item %2472 : !torch.vtensor<[],si8> -> !torch.int | |
%2475 = torch.aten.quantize_per_tensor %2470, %2473, %2474, %int12_244 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2476 = torch.aten.int_repr %2475 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2477 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2478 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2479 = torch.aten.item %2477 : !torch.vtensor<[],f32> -> !torch.float | |
%2480 = torch.aten.item %2478 : !torch.vtensor<[],si8> -> !torch.int | |
%2481 = torch.aten._make_per_tensor_quantized_tensor %2476, %2479, %2480 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2482 = torch.aten.dequantize.self %2481 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%2483 = torch.aten.mul.Tensor %2482, %727 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%2484 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2485 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_245 = torch.constant.int 12 | |
%2486 = torch.aten.item %2484 : !torch.vtensor<[],f32> -> !torch.float | |
%2487 = torch.aten.item %2485 : !torch.vtensor<[],si8> -> !torch.int | |
%2488 = torch.aten.quantize_per_tensor %2483, %2486, %2487, %int12_245 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2489 = torch.aten.int_repr %2488 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2490 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2491 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2492 = torch.aten.item %2490 : !torch.vtensor<[],f32> -> !torch.float | |
%2493 = torch.aten.item %2491 : !torch.vtensor<[],si8> -> !torch.int | |
%2494 = torch.aten._make_per_tensor_quantized_tensor %2489, %2492, %2493 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2495 = torch.aten.dequantize.self %2494 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_246 = torch.constant.int 1 | |
%2496 = torch.aten.add.Tensor %2495, %2229, %int1_246 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%2497 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2498 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_247 = torch.constant.int 12 | |
%2499 = torch.aten.item %2497 : !torch.vtensor<[],f32> -> !torch.float | |
%2500 = torch.aten.item %2498 : !torch.vtensor<[],si8> -> !torch.int | |
%2501 = torch.aten.quantize_per_tensor %2496, %2499, %2500, %int12_247 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2502 = torch.aten.int_repr %2501 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2503 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2504 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2505 = torch.aten.item %2503 : !torch.vtensor<[],f32> -> !torch.float | |
%2506 = torch.aten.item %2504 : !torch.vtensor<[],si8> -> !torch.int | |
%2507 = torch.aten._make_per_tensor_quantized_tensor %2502, %2505, %2506 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2508 = torch.aten.dequantize.self %2507 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%2509 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2510 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_248 = torch.constant.int 12 | |
%2511 = torch.aten.item %2509 : !torch.vtensor<[],f32> -> !torch.float | |
%2512 = torch.aten.item %2510 : !torch.vtensor<[],si8> -> !torch.int | |
%2513 = torch.aten.quantize_per_tensor %22, %2511, %2512, %int12_248 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%2514 = torch.aten.int_repr %2513 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%2515 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2516 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2517 = torch.aten.item %2515 : !torch.vtensor<[],f32> -> !torch.float | |
%2518 = torch.aten.item %2516 : !torch.vtensor<[],si8> -> !torch.int | |
%2519 = torch.aten._make_per_tensor_quantized_tensor %2514, %2517, %2518 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%2520 = torch.aten.dequantize.self %2519 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%2521 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2522 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_249 = torch.constant.int 12 | |
%2523 = torch.aten.item %2521 : !torch.vtensor<[],f32> -> !torch.float | |
%2524 = torch.aten.item %2522 : !torch.vtensor<[],si8> -> !torch.int | |
%2525 = torch.aten.quantize_per_tensor %23, %2523, %2524, %int12_249 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2526 = torch.aten.int_repr %2525 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2527 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2528 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2529 = torch.aten.item %2527 : !torch.vtensor<[],f32> -> !torch.float | |
%2530 = torch.aten.item %2528 : !torch.vtensor<[],si8> -> !torch.int | |
%2531 = torch.aten._make_per_tensor_quantized_tensor %2526, %2529, %2530 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2532 = torch.aten.dequantize.self %2531 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_250 = torch.constant.int 1 | |
%int1_251 = torch.constant.int 1 | |
%int1_252 = torch.constant.int 1 | |
%int1_253 = torch.constant.int 1 | |
%int1_254 = torch.constant.int 1 | |
%int1_255 = torch.constant.int 1 | |
%int0_256 = torch.constant.int 0 | |
%2533 = torch.prim.ListConstruct %int1_250, %int1_251 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2534 = torch.prim.ListConstruct %int1_252, %int1_253 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2535 = torch.prim.ListConstruct %int1_254, %int1_255 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2536 = torch.prim.ListConstruct %int0_256, %int0_256 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_257 = torch.constant.bool false | |
%int1_258 = torch.constant.int 1 | |
%2537 = torch.aten.convolution %2508, %2520, %2532, %2535, %2533, %2534, %false_257, %2536, %int1_258 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_259 = torch.constant.float 0.1015625 | |
%2538 = torch.aten.leaky_relu %2537, %float1.015630e-01_259 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2539 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2540 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_260 = torch.constant.int 12 | |
%2541 = torch.aten.item %2539 : !torch.vtensor<[],f32> -> !torch.float | |
%2542 = torch.aten.item %2540 : !torch.vtensor<[],si8> -> !torch.int | |
%2543 = torch.aten.quantize_per_tensor %2538, %2541, %2542, %int12_260 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2544 = torch.aten.int_repr %2543 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2545 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2546 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2547 = torch.aten.item %2545 : !torch.vtensor<[],f32> -> !torch.float | |
%2548 = torch.aten.item %2546 : !torch.vtensor<[],si8> -> !torch.int | |
%2549 = torch.aten._make_per_tensor_quantized_tensor %2544, %2547, %2548 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2550 = torch.aten.dequantize.self %2549 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2551 = torch.prim.ListConstruct %2508, %2550 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_261 = torch.constant.int 1 | |
%2552 = torch.aten.cat %2551, %int1_261 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%2553 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2554 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_262 = torch.constant.int 12 | |
%2555 = torch.aten.item %2553 : !torch.vtensor<[],f32> -> !torch.float | |
%2556 = torch.aten.item %2554 : !torch.vtensor<[],si8> -> !torch.int | |
%2557 = torch.aten.quantize_per_tensor %2552, %2555, %2556, %int12_262 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%2558 = torch.aten.int_repr %2557 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%2559 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2560 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2561 = torch.aten.item %2559 : !torch.vtensor<[],f32> -> !torch.float | |
%2562 = torch.aten.item %2560 : !torch.vtensor<[],si8> -> !torch.int | |
%2563 = torch.aten._make_per_tensor_quantized_tensor %2558, %2561, %2562 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%2564 = torch.aten.dequantize.self %2563 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%2565 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2566 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_263 = torch.constant.int 12 | |
%2567 = torch.aten.item %2565 : !torch.vtensor<[],f32> -> !torch.float | |
%2568 = torch.aten.item %2566 : !torch.vtensor<[],si8> -> !torch.int | |
%2569 = torch.aten.quantize_per_tensor %24, %2567, %2568, %int12_263 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%2570 = torch.aten.int_repr %2569 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%2571 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2572 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2573 = torch.aten.item %2571 : !torch.vtensor<[],f32> -> !torch.float | |
%2574 = torch.aten.item %2572 : !torch.vtensor<[],si8> -> !torch.int | |
%2575 = torch.aten._make_per_tensor_quantized_tensor %2570, %2573, %2574 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%2576 = torch.aten.dequantize.self %2575 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%2577 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2578 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_264 = torch.constant.int 12 | |
%2579 = torch.aten.item %2577 : !torch.vtensor<[],f32> -> !torch.float | |
%2580 = torch.aten.item %2578 : !torch.vtensor<[],si8> -> !torch.int | |
%2581 = torch.aten.quantize_per_tensor %25, %2579, %2580, %int12_264 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2582 = torch.aten.int_repr %2581 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2583 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2584 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2585 = torch.aten.item %2583 : !torch.vtensor<[],f32> -> !torch.float | |
%2586 = torch.aten.item %2584 : !torch.vtensor<[],si8> -> !torch.int | |
%2587 = torch.aten._make_per_tensor_quantized_tensor %2582, %2585, %2586 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2588 = torch.aten.dequantize.self %2587 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_265 = torch.constant.int 1 | |
%int1_266 = torch.constant.int 1 | |
%int1_267 = torch.constant.int 1 | |
%int1_268 = torch.constant.int 1 | |
%int1_269 = torch.constant.int 1 | |
%int1_270 = torch.constant.int 1 | |
%int0_271 = torch.constant.int 0 | |
%2589 = torch.prim.ListConstruct %int1_265, %int1_266 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2590 = torch.prim.ListConstruct %int1_267, %int1_268 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2591 = torch.prim.ListConstruct %int1_269, %int1_270 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2592 = torch.prim.ListConstruct %int0_271, %int0_271 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_272 = torch.constant.bool false | |
%int1_273 = torch.constant.int 1 | |
%2593 = torch.aten.convolution %2564, %2576, %2588, %2591, %2589, %2590, %false_272, %2592, %int1_273 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_274 = torch.constant.float 0.1015625 | |
%2594 = torch.aten.leaky_relu %2593, %float1.015630e-01_274 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2595 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2596 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_275 = torch.constant.int 12 | |
%2597 = torch.aten.item %2595 : !torch.vtensor<[],f32> -> !torch.float | |
%2598 = torch.aten.item %2596 : !torch.vtensor<[],si8> -> !torch.int | |
%2599 = torch.aten.quantize_per_tensor %2594, %2597, %2598, %int12_275 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2600 = torch.aten.int_repr %2599 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2601 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2602 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2603 = torch.aten.item %2601 : !torch.vtensor<[],f32> -> !torch.float | |
%2604 = torch.aten.item %2602 : !torch.vtensor<[],si8> -> !torch.int | |
%2605 = torch.aten._make_per_tensor_quantized_tensor %2600, %2603, %2604 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2606 = torch.aten.dequantize.self %2605 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2607 = torch.prim.ListConstruct %2508, %2550, %2606 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_276 = torch.constant.int 1 | |
%2608 = torch.aten.cat %2607, %int1_276 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%2609 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2610 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_277 = torch.constant.int 12 | |
%2611 = torch.aten.item %2609 : !torch.vtensor<[],f32> -> !torch.float | |
%2612 = torch.aten.item %2610 : !torch.vtensor<[],si8> -> !torch.int | |
%2613 = torch.aten.quantize_per_tensor %2608, %2611, %2612, %int12_277 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%2614 = torch.aten.int_repr %2613 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%2615 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2616 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2617 = torch.aten.item %2615 : !torch.vtensor<[],f32> -> !torch.float | |
%2618 = torch.aten.item %2616 : !torch.vtensor<[],si8> -> !torch.int | |
%2619 = torch.aten._make_per_tensor_quantized_tensor %2614, %2617, %2618 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%2620 = torch.aten.dequantize.self %2619 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%2621 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2622 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_278 = torch.constant.int 12 | |
%2623 = torch.aten.item %2621 : !torch.vtensor<[],f32> -> !torch.float | |
%2624 = torch.aten.item %2622 : !torch.vtensor<[],si8> -> !torch.int | |
%2625 = torch.aten.quantize_per_tensor %26, %2623, %2624, %int12_278 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%2626 = torch.aten.int_repr %2625 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%2627 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2628 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2629 = torch.aten.item %2627 : !torch.vtensor<[],f32> -> !torch.float | |
%2630 = torch.aten.item %2628 : !torch.vtensor<[],si8> -> !torch.int | |
%2631 = torch.aten._make_per_tensor_quantized_tensor %2626, %2629, %2630 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%2632 = torch.aten.dequantize.self %2631 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%2633 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2634 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_279 = torch.constant.int 12 | |
%2635 = torch.aten.item %2633 : !torch.vtensor<[],f32> -> !torch.float | |
%2636 = torch.aten.item %2634 : !torch.vtensor<[],si8> -> !torch.int | |
%2637 = torch.aten.quantize_per_tensor %27, %2635, %2636, %int12_279 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2638 = torch.aten.int_repr %2637 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2639 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2640 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2641 = torch.aten.item %2639 : !torch.vtensor<[],f32> -> !torch.float | |
%2642 = torch.aten.item %2640 : !torch.vtensor<[],si8> -> !torch.int | |
%2643 = torch.aten._make_per_tensor_quantized_tensor %2638, %2641, %2642 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2644 = torch.aten.dequantize.self %2643 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_280 = torch.constant.int 1 | |
%int1_281 = torch.constant.int 1 | |
%int1_282 = torch.constant.int 1 | |
%int1_283 = torch.constant.int 1 | |
%int1_284 = torch.constant.int 1 | |
%int1_285 = torch.constant.int 1 | |
%int0_286 = torch.constant.int 0 | |
%2645 = torch.prim.ListConstruct %int1_280, %int1_281 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2646 = torch.prim.ListConstruct %int1_282, %int1_283 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2647 = torch.prim.ListConstruct %int1_284, %int1_285 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2648 = torch.prim.ListConstruct %int0_286, %int0_286 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_287 = torch.constant.bool false | |
%int1_288 = torch.constant.int 1 | |
%2649 = torch.aten.convolution %2620, %2632, %2644, %2647, %2645, %2646, %false_287, %2648, %int1_288 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_289 = torch.constant.float 0.1015625 | |
%2650 = torch.aten.leaky_relu %2649, %float1.015630e-01_289 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2651 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2652 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_290 = torch.constant.int 12 | |
%2653 = torch.aten.item %2651 : !torch.vtensor<[],f32> -> !torch.float | |
%2654 = torch.aten.item %2652 : !torch.vtensor<[],si8> -> !torch.int | |
%2655 = torch.aten.quantize_per_tensor %2650, %2653, %2654, %int12_290 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2656 = torch.aten.int_repr %2655 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2657 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2658 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2659 = torch.aten.item %2657 : !torch.vtensor<[],f32> -> !torch.float | |
%2660 = torch.aten.item %2658 : !torch.vtensor<[],si8> -> !torch.int | |
%2661 = torch.aten._make_per_tensor_quantized_tensor %2656, %2659, %2660 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2662 = torch.aten.dequantize.self %2661 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2663 = torch.prim.ListConstruct %2508, %2550, %2606, %2662 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_291 = torch.constant.int 1 | |
%2664 = torch.aten.cat %2663, %int1_291 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%2665 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2666 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_292 = torch.constant.int 12 | |
%2667 = torch.aten.item %2665 : !torch.vtensor<[],f32> -> !torch.float | |
%2668 = torch.aten.item %2666 : !torch.vtensor<[],si8> -> !torch.int | |
%2669 = torch.aten.quantize_per_tensor %2664, %2667, %2668, %int12_292 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%2670 = torch.aten.int_repr %2669 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%2671 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2672 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2673 = torch.aten.item %2671 : !torch.vtensor<[],f32> -> !torch.float | |
%2674 = torch.aten.item %2672 : !torch.vtensor<[],si8> -> !torch.int | |
%2675 = torch.aten._make_per_tensor_quantized_tensor %2670, %2673, %2674 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%2676 = torch.aten.dequantize.self %2675 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%2677 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2678 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_293 = torch.constant.int 12 | |
%2679 = torch.aten.item %2677 : !torch.vtensor<[],f32> -> !torch.float | |
%2680 = torch.aten.item %2678 : !torch.vtensor<[],si8> -> !torch.int | |
%2681 = torch.aten.quantize_per_tensor %28, %2679, %2680, %int12_293 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%2682 = torch.aten.int_repr %2681 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%2683 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2684 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2685 = torch.aten.item %2683 : !torch.vtensor<[],f32> -> !torch.float | |
%2686 = torch.aten.item %2684 : !torch.vtensor<[],si8> -> !torch.int | |
%2687 = torch.aten._make_per_tensor_quantized_tensor %2682, %2685, %2686 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%2688 = torch.aten.dequantize.self %2687 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%2689 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2690 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_294 = torch.constant.int 12 | |
%2691 = torch.aten.item %2689 : !torch.vtensor<[],f32> -> !torch.float | |
%2692 = torch.aten.item %2690 : !torch.vtensor<[],si8> -> !torch.int | |
%2693 = torch.aten.quantize_per_tensor %29, %2691, %2692, %int12_294 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2694 = torch.aten.int_repr %2693 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2695 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2696 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2697 = torch.aten.item %2695 : !torch.vtensor<[],f32> -> !torch.float | |
%2698 = torch.aten.item %2696 : !torch.vtensor<[],si8> -> !torch.int | |
%2699 = torch.aten._make_per_tensor_quantized_tensor %2694, %2697, %2698 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2700 = torch.aten.dequantize.self %2699 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_295 = torch.constant.int 1 | |
%int1_296 = torch.constant.int 1 | |
%int1_297 = torch.constant.int 1 | |
%int1_298 = torch.constant.int 1 | |
%int1_299 = torch.constant.int 1 | |
%int1_300 = torch.constant.int 1 | |
%int0_301 = torch.constant.int 0 | |
%2701 = torch.prim.ListConstruct %int1_295, %int1_296 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2702 = torch.prim.ListConstruct %int1_297, %int1_298 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2703 = torch.prim.ListConstruct %int1_299, %int1_300 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2704 = torch.prim.ListConstruct %int0_301, %int0_301 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_302 = torch.constant.bool false | |
%int1_303 = torch.constant.int 1 | |
%2705 = torch.aten.convolution %2676, %2688, %2700, %2703, %2701, %2702, %false_302, %2704, %int1_303 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_304 = torch.constant.float 0.1015625 | |
%2706 = torch.aten.leaky_relu %2705, %float1.015630e-01_304 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2707 = torch.prim.ListConstruct %2508, %2550, %2606, %2662, %2706 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_305 = torch.constant.int 1 | |
%2708 = torch.aten.cat %2707, %int1_305 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%2709 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2710 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_306 = torch.constant.int 12 | |
%2711 = torch.aten.item %2709 : !torch.vtensor<[],f32> -> !torch.float | |
%2712 = torch.aten.item %2710 : !torch.vtensor<[],si8> -> !torch.int | |
%2713 = torch.aten.quantize_per_tensor %2708, %2711, %2712, %int12_306 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%2714 = torch.aten.int_repr %2713 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%2715 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2716 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2717 = torch.aten.item %2715 : !torch.vtensor<[],f32> -> !torch.float | |
%2718 = torch.aten.item %2716 : !torch.vtensor<[],si8> -> !torch.int | |
%2719 = torch.aten._make_per_tensor_quantized_tensor %2714, %2717, %2718 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%2720 = torch.aten.dequantize.self %2719 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%2721 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2722 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_307 = torch.constant.int 12 | |
%2723 = torch.aten.item %2721 : !torch.vtensor<[],f32> -> !torch.float | |
%2724 = torch.aten.item %2722 : !torch.vtensor<[],si8> -> !torch.int | |
%2725 = torch.aten.quantize_per_tensor %30, %2723, %2724, %int12_307 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%2726 = torch.aten.int_repr %2725 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%2727 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2728 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2729 = torch.aten.item %2727 : !torch.vtensor<[],f32> -> !torch.float | |
%2730 = torch.aten.item %2728 : !torch.vtensor<[],si8> -> !torch.int | |
%2731 = torch.aten._make_per_tensor_quantized_tensor %2726, %2729, %2730 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%2732 = torch.aten.dequantize.self %2731 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%2733 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2734 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_308 = torch.constant.int 12 | |
%2735 = torch.aten.item %2733 : !torch.vtensor<[],f32> -> !torch.float | |
%2736 = torch.aten.item %2734 : !torch.vtensor<[],si8> -> !torch.int | |
%2737 = torch.aten.quantize_per_tensor %31, %2735, %2736, %int12_308 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%2738 = torch.aten.int_repr %2737 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%2739 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2740 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2741 = torch.aten.item %2739 : !torch.vtensor<[],f32> -> !torch.float | |
%2742 = torch.aten.item %2740 : !torch.vtensor<[],si8> -> !torch.int | |
%2743 = torch.aten._make_per_tensor_quantized_tensor %2738, %2741, %2742 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%2744 = torch.aten.dequantize.self %2743 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_309 = torch.constant.int 1 | |
%int1_310 = torch.constant.int 1 | |
%int1_311 = torch.constant.int 1 | |
%int1_312 = torch.constant.int 1 | |
%int1_313 = torch.constant.int 1 | |
%int1_314 = torch.constant.int 1 | |
%int0_315 = torch.constant.int 0 | |
%2745 = torch.prim.ListConstruct %int1_309, %int1_310 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2746 = torch.prim.ListConstruct %int1_311, %int1_312 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2747 = torch.prim.ListConstruct %int1_313, %int1_314 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2748 = torch.prim.ListConstruct %int0_315, %int0_315 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_316 = torch.constant.bool false | |
%int1_317 = torch.constant.int 1 | |
%2749 = torch.aten.convolution %2720, %2732, %2744, %2747, %2745, %2746, %false_316, %2748, %int1_317 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%2750 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2751 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_318 = torch.constant.int 12 | |
%2752 = torch.aten.item %2750 : !torch.vtensor<[],f32> -> !torch.float | |
%2753 = torch.aten.item %2751 : !torch.vtensor<[],si8> -> !torch.int | |
%2754 = torch.aten.quantize_per_tensor %2749, %2752, %2753, %int12_318 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2755 = torch.aten.int_repr %2754 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2756 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2757 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2758 = torch.aten.item %2756 : !torch.vtensor<[],f32> -> !torch.float | |
%2759 = torch.aten.item %2757 : !torch.vtensor<[],si8> -> !torch.int | |
%2760 = torch.aten._make_per_tensor_quantized_tensor %2755, %2758, %2759 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2761 = torch.aten.dequantize.self %2760 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%2762 = torch.aten.mul.Tensor %2761, %740 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%2763 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2764 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_319 = torch.constant.int 12 | |
%2765 = torch.aten.item %2763 : !torch.vtensor<[],f32> -> !torch.float | |
%2766 = torch.aten.item %2764 : !torch.vtensor<[],si8> -> !torch.int | |
%2767 = torch.aten.quantize_per_tensor %2762, %2765, %2766, %int12_319 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2768 = torch.aten.int_repr %2767 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2769 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2770 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2771 = torch.aten.item %2769 : !torch.vtensor<[],f32> -> !torch.float | |
%2772 = torch.aten.item %2770 : !torch.vtensor<[],si8> -> !torch.int | |
%2773 = torch.aten._make_per_tensor_quantized_tensor %2768, %2771, %2772 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2774 = torch.aten.dequantize.self %2773 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_320 = torch.constant.int 1 | |
%2775 = torch.aten.add.Tensor %2774, %2508, %int1_320 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%2776 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2777 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_321 = torch.constant.int 12 | |
%2778 = torch.aten.item %2776 : !torch.vtensor<[],f32> -> !torch.float | |
%2779 = torch.aten.item %2777 : !torch.vtensor<[],si8> -> !torch.int | |
%2780 = torch.aten.quantize_per_tensor %2775, %2778, %2779, %int12_321 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2781 = torch.aten.int_repr %2780 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2782 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2783 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2784 = torch.aten.item %2782 : !torch.vtensor<[],f32> -> !torch.float | |
%2785 = torch.aten.item %2783 : !torch.vtensor<[],si8> -> !torch.int | |
%2786 = torch.aten._make_per_tensor_quantized_tensor %2781, %2784, %2785 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2787 = torch.aten.dequantize.self %2786 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%2788 = torch.aten.mul.Tensor %2787, %753 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%2789 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2790 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_322 = torch.constant.int 12 | |
%2791 = torch.aten.item %2789 : !torch.vtensor<[],f32> -> !torch.float | |
%2792 = torch.aten.item %2790 : !torch.vtensor<[],si8> -> !torch.int | |
%2793 = torch.aten.quantize_per_tensor %2788, %2791, %2792, %int12_322 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2794 = torch.aten.int_repr %2793 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2795 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2796 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2797 = torch.aten.item %2795 : !torch.vtensor<[],f32> -> !torch.float | |
%2798 = torch.aten.item %2796 : !torch.vtensor<[],si8> -> !torch.int | |
%2799 = torch.aten._make_per_tensor_quantized_tensor %2794, %2797, %2798 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2800 = torch.aten.dequantize.self %2799 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_323 = torch.constant.int 1 | |
%2801 = torch.aten.add.Tensor %2800, %1950, %int1_323 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%2802 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2803 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_324 = torch.constant.int 12 | |
%2804 = torch.aten.item %2802 : !torch.vtensor<[],f32> -> !torch.float | |
%2805 = torch.aten.item %2803 : !torch.vtensor<[],si8> -> !torch.int | |
%2806 = torch.aten.quantize_per_tensor %2801, %2804, %2805, %int12_324 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2807 = torch.aten.int_repr %2806 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%2808 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2809 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2810 = torch.aten.item %2808 : !torch.vtensor<[],f32> -> !torch.float | |
%2811 = torch.aten.item %2809 : !torch.vtensor<[],si8> -> !torch.int | |
%2812 = torch.aten._make_per_tensor_quantized_tensor %2807, %2810, %2811 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%2813 = torch.aten.dequantize.self %2812 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%2814 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2815 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_325 = torch.constant.int 12 | |
%2816 = torch.aten.item %2814 : !torch.vtensor<[],f32> -> !torch.float | |
%2817 = torch.aten.item %2815 : !torch.vtensor<[],si8> -> !torch.int | |
%2818 = torch.aten.quantize_per_tensor %32, %2816, %2817, %int12_325 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%2819 = torch.aten.int_repr %2818 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%2820 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2821 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2822 = torch.aten.item %2820 : !torch.vtensor<[],f32> -> !torch.float | |
%2823 = torch.aten.item %2821 : !torch.vtensor<[],si8> -> !torch.int | |
%2824 = torch.aten._make_per_tensor_quantized_tensor %2819, %2822, %2823 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%2825 = torch.aten.dequantize.self %2824 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%2826 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2827 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_326 = torch.constant.int 12 | |
%2828 = torch.aten.item %2826 : !torch.vtensor<[],f32> -> !torch.float | |
%2829 = torch.aten.item %2827 : !torch.vtensor<[],si8> -> !torch.int | |
%2830 = torch.aten.quantize_per_tensor %33, %2828, %2829, %int12_326 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2831 = torch.aten.int_repr %2830 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2832 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2833 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2834 = torch.aten.item %2832 : !torch.vtensor<[],f32> -> !torch.float | |
%2835 = torch.aten.item %2833 : !torch.vtensor<[],si8> -> !torch.int | |
%2836 = torch.aten._make_per_tensor_quantized_tensor %2831, %2834, %2835 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2837 = torch.aten.dequantize.self %2836 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_327 = torch.constant.int 1 | |
%int1_328 = torch.constant.int 1 | |
%int1_329 = torch.constant.int 1 | |
%int1_330 = torch.constant.int 1 | |
%int1_331 = torch.constant.int 1 | |
%int1_332 = torch.constant.int 1 | |
%int0_333 = torch.constant.int 0 | |
%2838 = torch.prim.ListConstruct %int1_327, %int1_328 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2839 = torch.prim.ListConstruct %int1_329, %int1_330 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2840 = torch.prim.ListConstruct %int1_331, %int1_332 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2841 = torch.prim.ListConstruct %int0_333, %int0_333 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_334 = torch.constant.bool false | |
%int1_335 = torch.constant.int 1 | |
%2842 = torch.aten.convolution %2813, %2825, %2837, %2840, %2838, %2839, %false_334, %2841, %int1_335 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_336 = torch.constant.float 0.1015625 | |
%2843 = torch.aten.leaky_relu %2842, %float1.015630e-01_336 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2844 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2845 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_337 = torch.constant.int 12 | |
%2846 = torch.aten.item %2844 : !torch.vtensor<[],f32> -> !torch.float | |
%2847 = torch.aten.item %2845 : !torch.vtensor<[],si8> -> !torch.int | |
%2848 = torch.aten.quantize_per_tensor %2843, %2846, %2847, %int12_337 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2849 = torch.aten.int_repr %2848 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2850 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2851 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2852 = torch.aten.item %2850 : !torch.vtensor<[],f32> -> !torch.float | |
%2853 = torch.aten.item %2851 : !torch.vtensor<[],si8> -> !torch.int | |
%2854 = torch.aten._make_per_tensor_quantized_tensor %2849, %2852, %2853 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2855 = torch.aten.dequantize.self %2854 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2856 = torch.prim.ListConstruct %2813, %2855 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_338 = torch.constant.int 1 | |
%2857 = torch.aten.cat %2856, %int1_338 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%2858 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2859 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_339 = torch.constant.int 12 | |
%2860 = torch.aten.item %2858 : !torch.vtensor<[],f32> -> !torch.float | |
%2861 = torch.aten.item %2859 : !torch.vtensor<[],si8> -> !torch.int | |
%2862 = torch.aten.quantize_per_tensor %2857, %2860, %2861, %int12_339 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%2863 = torch.aten.int_repr %2862 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%2864 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2865 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2866 = torch.aten.item %2864 : !torch.vtensor<[],f32> -> !torch.float | |
%2867 = torch.aten.item %2865 : !torch.vtensor<[],si8> -> !torch.int | |
%2868 = torch.aten._make_per_tensor_quantized_tensor %2863, %2866, %2867 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%2869 = torch.aten.dequantize.self %2868 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%2870 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2871 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_340 = torch.constant.int 12 | |
%2872 = torch.aten.item %2870 : !torch.vtensor<[],f32> -> !torch.float | |
%2873 = torch.aten.item %2871 : !torch.vtensor<[],si8> -> !torch.int | |
%2874 = torch.aten.quantize_per_tensor %34, %2872, %2873, %int12_340 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%2875 = torch.aten.int_repr %2874 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%2876 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2877 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2878 = torch.aten.item %2876 : !torch.vtensor<[],f32> -> !torch.float | |
%2879 = torch.aten.item %2877 : !torch.vtensor<[],si8> -> !torch.int | |
%2880 = torch.aten._make_per_tensor_quantized_tensor %2875, %2878, %2879 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%2881 = torch.aten.dequantize.self %2880 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%2882 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2883 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_341 = torch.constant.int 12 | |
%2884 = torch.aten.item %2882 : !torch.vtensor<[],f32> -> !torch.float | |
%2885 = torch.aten.item %2883 : !torch.vtensor<[],si8> -> !torch.int | |
%2886 = torch.aten.quantize_per_tensor %35, %2884, %2885, %int12_341 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2887 = torch.aten.int_repr %2886 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2888 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2889 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2890 = torch.aten.item %2888 : !torch.vtensor<[],f32> -> !torch.float | |
%2891 = torch.aten.item %2889 : !torch.vtensor<[],si8> -> !torch.int | |
%2892 = torch.aten._make_per_tensor_quantized_tensor %2887, %2890, %2891 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2893 = torch.aten.dequantize.self %2892 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_342 = torch.constant.int 1 | |
%int1_343 = torch.constant.int 1 | |
%int1_344 = torch.constant.int 1 | |
%int1_345 = torch.constant.int 1 | |
%int1_346 = torch.constant.int 1 | |
%int1_347 = torch.constant.int 1 | |
%int0_348 = torch.constant.int 0 | |
%2894 = torch.prim.ListConstruct %int1_342, %int1_343 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2895 = torch.prim.ListConstruct %int1_344, %int1_345 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2896 = torch.prim.ListConstruct %int1_346, %int1_347 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2897 = torch.prim.ListConstruct %int0_348, %int0_348 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_349 = torch.constant.bool false | |
%int1_350 = torch.constant.int 1 | |
%2898 = torch.aten.convolution %2869, %2881, %2893, %2896, %2894, %2895, %false_349, %2897, %int1_350 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_351 = torch.constant.float 0.1015625 | |
%2899 = torch.aten.leaky_relu %2898, %float1.015630e-01_351 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2900 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2901 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_352 = torch.constant.int 12 | |
%2902 = torch.aten.item %2900 : !torch.vtensor<[],f32> -> !torch.float | |
%2903 = torch.aten.item %2901 : !torch.vtensor<[],si8> -> !torch.int | |
%2904 = torch.aten.quantize_per_tensor %2899, %2902, %2903, %int12_352 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2905 = torch.aten.int_repr %2904 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2906 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2907 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2908 = torch.aten.item %2906 : !torch.vtensor<[],f32> -> !torch.float | |
%2909 = torch.aten.item %2907 : !torch.vtensor<[],si8> -> !torch.int | |
%2910 = torch.aten._make_per_tensor_quantized_tensor %2905, %2908, %2909 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2911 = torch.aten.dequantize.self %2910 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2912 = torch.prim.ListConstruct %2813, %2855, %2911 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_353 = torch.constant.int 1 | |
%2913 = torch.aten.cat %2912, %int1_353 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%2914 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2915 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_354 = torch.constant.int 12 | |
%2916 = torch.aten.item %2914 : !torch.vtensor<[],f32> -> !torch.float | |
%2917 = torch.aten.item %2915 : !torch.vtensor<[],si8> -> !torch.int | |
%2918 = torch.aten.quantize_per_tensor %2913, %2916, %2917, %int12_354 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%2919 = torch.aten.int_repr %2918 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%2920 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2921 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2922 = torch.aten.item %2920 : !torch.vtensor<[],f32> -> !torch.float | |
%2923 = torch.aten.item %2921 : !torch.vtensor<[],si8> -> !torch.int | |
%2924 = torch.aten._make_per_tensor_quantized_tensor %2919, %2922, %2923 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%2925 = torch.aten.dequantize.self %2924 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%2926 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2927 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_355 = torch.constant.int 12 | |
%2928 = torch.aten.item %2926 : !torch.vtensor<[],f32> -> !torch.float | |
%2929 = torch.aten.item %2927 : !torch.vtensor<[],si8> -> !torch.int | |
%2930 = torch.aten.quantize_per_tensor %36, %2928, %2929, %int12_355 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%2931 = torch.aten.int_repr %2930 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%2932 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2933 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2934 = torch.aten.item %2932 : !torch.vtensor<[],f32> -> !torch.float | |
%2935 = torch.aten.item %2933 : !torch.vtensor<[],si8> -> !torch.int | |
%2936 = torch.aten._make_per_tensor_quantized_tensor %2931, %2934, %2935 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%2937 = torch.aten.dequantize.self %2936 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%2938 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2939 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_356 = torch.constant.int 12 | |
%2940 = torch.aten.item %2938 : !torch.vtensor<[],f32> -> !torch.float | |
%2941 = torch.aten.item %2939 : !torch.vtensor<[],si8> -> !torch.int | |
%2942 = torch.aten.quantize_per_tensor %37, %2940, %2941, %int12_356 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2943 = torch.aten.int_repr %2942 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%2944 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2945 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2946 = torch.aten.item %2944 : !torch.vtensor<[],f32> -> !torch.float | |
%2947 = torch.aten.item %2945 : !torch.vtensor<[],si8> -> !torch.int | |
%2948 = torch.aten._make_per_tensor_quantized_tensor %2943, %2946, %2947 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2949 = torch.aten.dequantize.self %2948 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_357 = torch.constant.int 1 | |
%int1_358 = torch.constant.int 1 | |
%int1_359 = torch.constant.int 1 | |
%int1_360 = torch.constant.int 1 | |
%int1_361 = torch.constant.int 1 | |
%int1_362 = torch.constant.int 1 | |
%int0_363 = torch.constant.int 0 | |
%2950 = torch.prim.ListConstruct %int1_357, %int1_358 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2951 = torch.prim.ListConstruct %int1_359, %int1_360 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2952 = torch.prim.ListConstruct %int1_361, %int1_362 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2953 = torch.prim.ListConstruct %int0_363, %int0_363 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_364 = torch.constant.bool false | |
%int1_365 = torch.constant.int 1 | |
%2954 = torch.aten.convolution %2925, %2937, %2949, %2952, %2950, %2951, %false_364, %2953, %int1_365 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_366 = torch.constant.float 0.1015625 | |
%2955 = torch.aten.leaky_relu %2954, %float1.015630e-01_366 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%2956 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2957 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_367 = torch.constant.int 12 | |
%2958 = torch.aten.item %2956 : !torch.vtensor<[],f32> -> !torch.float | |
%2959 = torch.aten.item %2957 : !torch.vtensor<[],si8> -> !torch.int | |
%2960 = torch.aten.quantize_per_tensor %2955, %2958, %2959, %int12_367 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2961 = torch.aten.int_repr %2960 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%2962 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2963 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2964 = torch.aten.item %2962 : !torch.vtensor<[],f32> -> !torch.float | |
%2965 = torch.aten.item %2963 : !torch.vtensor<[],si8> -> !torch.int | |
%2966 = torch.aten._make_per_tensor_quantized_tensor %2961, %2964, %2965 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%2967 = torch.aten.dequantize.self %2966 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%2968 = torch.prim.ListConstruct %2813, %2855, %2911, %2967 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_368 = torch.constant.int 1 | |
%2969 = torch.aten.cat %2968, %int1_368 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%2970 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2971 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_369 = torch.constant.int 12 | |
%2972 = torch.aten.item %2970 : !torch.vtensor<[],f32> -> !torch.float | |
%2973 = torch.aten.item %2971 : !torch.vtensor<[],si8> -> !torch.int | |
%2974 = torch.aten.quantize_per_tensor %2969, %2972, %2973, %int12_369 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%2975 = torch.aten.int_repr %2974 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%2976 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2977 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2978 = torch.aten.item %2976 : !torch.vtensor<[],f32> -> !torch.float | |
%2979 = torch.aten.item %2977 : !torch.vtensor<[],si8> -> !torch.int | |
%2980 = torch.aten._make_per_tensor_quantized_tensor %2975, %2978, %2979 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%2981 = torch.aten.dequantize.self %2980 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%2982 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2983 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_370 = torch.constant.int 12 | |
%2984 = torch.aten.item %2982 : !torch.vtensor<[],f32> -> !torch.float | |
%2985 = torch.aten.item %2983 : !torch.vtensor<[],si8> -> !torch.int | |
%2986 = torch.aten.quantize_per_tensor %38, %2984, %2985, %int12_370 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%2987 = torch.aten.int_repr %2986 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%2988 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2989 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%2990 = torch.aten.item %2988 : !torch.vtensor<[],f32> -> !torch.float | |
%2991 = torch.aten.item %2989 : !torch.vtensor<[],si8> -> !torch.int | |
%2992 = torch.aten._make_per_tensor_quantized_tensor %2987, %2990, %2991 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%2993 = torch.aten.dequantize.self %2992 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%2994 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%2995 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_371 = torch.constant.int 12 | |
%2996 = torch.aten.item %2994 : !torch.vtensor<[],f32> -> !torch.float | |
%2997 = torch.aten.item %2995 : !torch.vtensor<[],si8> -> !torch.int | |
%2998 = torch.aten.quantize_per_tensor %39, %2996, %2997, %int12_371 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%2999 = torch.aten.int_repr %2998 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3000 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3001 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3002 = torch.aten.item %3000 : !torch.vtensor<[],f32> -> !torch.float | |
%3003 = torch.aten.item %3001 : !torch.vtensor<[],si8> -> !torch.int | |
%3004 = torch.aten._make_per_tensor_quantized_tensor %2999, %3002, %3003 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3005 = torch.aten.dequantize.self %3004 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_372 = torch.constant.int 1 | |
%int1_373 = torch.constant.int 1 | |
%int1_374 = torch.constant.int 1 | |
%int1_375 = torch.constant.int 1 | |
%int1_376 = torch.constant.int 1 | |
%int1_377 = torch.constant.int 1 | |
%int0_378 = torch.constant.int 0 | |
%3006 = torch.prim.ListConstruct %int1_372, %int1_373 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3007 = torch.prim.ListConstruct %int1_374, %int1_375 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3008 = torch.prim.ListConstruct %int1_376, %int1_377 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3009 = torch.prim.ListConstruct %int0_378, %int0_378 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_379 = torch.constant.bool false | |
%int1_380 = torch.constant.int 1 | |
%3010 = torch.aten.convolution %2981, %2993, %3005, %3008, %3006, %3007, %false_379, %3009, %int1_380 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_381 = torch.constant.float 0.1015625 | |
%3011 = torch.aten.leaky_relu %3010, %float1.015630e-01_381 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3012 = torch.prim.ListConstruct %2813, %2855, %2911, %2967, %3011 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_382 = torch.constant.int 1 | |
%3013 = torch.aten.cat %3012, %int1_382 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%3014 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3015 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_383 = torch.constant.int 12 | |
%3016 = torch.aten.item %3014 : !torch.vtensor<[],f32> -> !torch.float | |
%3017 = torch.aten.item %3015 : !torch.vtensor<[],si8> -> !torch.int | |
%3018 = torch.aten.quantize_per_tensor %3013, %3016, %3017, %int12_383 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%3019 = torch.aten.int_repr %3018 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%3020 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3021 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3022 = torch.aten.item %3020 : !torch.vtensor<[],f32> -> !torch.float | |
%3023 = torch.aten.item %3021 : !torch.vtensor<[],si8> -> !torch.int | |
%3024 = torch.aten._make_per_tensor_quantized_tensor %3019, %3022, %3023 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%3025 = torch.aten.dequantize.self %3024 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%3026 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3027 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_384 = torch.constant.int 12 | |
%3028 = torch.aten.item %3026 : !torch.vtensor<[],f32> -> !torch.float | |
%3029 = torch.aten.item %3027 : !torch.vtensor<[],si8> -> !torch.int | |
%3030 = torch.aten.quantize_per_tensor %40, %3028, %3029, %int12_384 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%3031 = torch.aten.int_repr %3030 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%3032 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3033 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3034 = torch.aten.item %3032 : !torch.vtensor<[],f32> -> !torch.float | |
%3035 = torch.aten.item %3033 : !torch.vtensor<[],si8> -> !torch.int | |
%3036 = torch.aten._make_per_tensor_quantized_tensor %3031, %3034, %3035 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%3037 = torch.aten.dequantize.self %3036 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%3038 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3039 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_385 = torch.constant.int 12 | |
%3040 = torch.aten.item %3038 : !torch.vtensor<[],f32> -> !torch.float | |
%3041 = torch.aten.item %3039 : !torch.vtensor<[],si8> -> !torch.int | |
%3042 = torch.aten.quantize_per_tensor %41, %3040, %3041, %int12_385 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%3043 = torch.aten.int_repr %3042 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%3044 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3045 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3046 = torch.aten.item %3044 : !torch.vtensor<[],f32> -> !torch.float | |
%3047 = torch.aten.item %3045 : !torch.vtensor<[],si8> -> !torch.int | |
%3048 = torch.aten._make_per_tensor_quantized_tensor %3043, %3046, %3047 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%3049 = torch.aten.dequantize.self %3048 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_386 = torch.constant.int 1 | |
%int1_387 = torch.constant.int 1 | |
%int1_388 = torch.constant.int 1 | |
%int1_389 = torch.constant.int 1 | |
%int1_390 = torch.constant.int 1 | |
%int1_391 = torch.constant.int 1 | |
%int0_392 = torch.constant.int 0 | |
%3050 = torch.prim.ListConstruct %int1_386, %int1_387 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3051 = torch.prim.ListConstruct %int1_388, %int1_389 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3052 = torch.prim.ListConstruct %int1_390, %int1_391 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3053 = torch.prim.ListConstruct %int0_392, %int0_392 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_393 = torch.constant.bool false | |
%int1_394 = torch.constant.int 1 | |
%3054 = torch.aten.convolution %3025, %3037, %3049, %3052, %3050, %3051, %false_393, %3053, %int1_394 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%3055 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3056 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_395 = torch.constant.int 12 | |
%3057 = torch.aten.item %3055 : !torch.vtensor<[],f32> -> !torch.float | |
%3058 = torch.aten.item %3056 : !torch.vtensor<[],si8> -> !torch.int | |
%3059 = torch.aten.quantize_per_tensor %3054, %3057, %3058, %int12_395 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3060 = torch.aten.int_repr %3059 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3061 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3062 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3063 = torch.aten.item %3061 : !torch.vtensor<[],f32> -> !torch.float | |
%3064 = torch.aten.item %3062 : !torch.vtensor<[],si8> -> !torch.int | |
%3065 = torch.aten._make_per_tensor_quantized_tensor %3060, %3063, %3064 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3066 = torch.aten.dequantize.self %3065 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%3067 = torch.aten.mul.Tensor %3066, %766 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%3068 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3069 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_396 = torch.constant.int 12 | |
%3070 = torch.aten.item %3068 : !torch.vtensor<[],f32> -> !torch.float | |
%3071 = torch.aten.item %3069 : !torch.vtensor<[],si8> -> !torch.int | |
%3072 = torch.aten.quantize_per_tensor %3067, %3070, %3071, %int12_396 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3073 = torch.aten.int_repr %3072 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3074 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3075 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3076 = torch.aten.item %3074 : !torch.vtensor<[],f32> -> !torch.float | |
%3077 = torch.aten.item %3075 : !torch.vtensor<[],si8> -> !torch.int | |
%3078 = torch.aten._make_per_tensor_quantized_tensor %3073, %3076, %3077 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3079 = torch.aten.dequantize.self %3078 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_397 = torch.constant.int 1 | |
%3080 = torch.aten.add.Tensor %3079, %2813, %int1_397 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%3081 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3082 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_398 = torch.constant.int 12 | |
%3083 = torch.aten.item %3081 : !torch.vtensor<[],f32> -> !torch.float | |
%3084 = torch.aten.item %3082 : !torch.vtensor<[],si8> -> !torch.int | |
%3085 = torch.aten.quantize_per_tensor %3080, %3083, %3084, %int12_398 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3086 = torch.aten.int_repr %3085 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3087 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3088 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3089 = torch.aten.item %3087 : !torch.vtensor<[],f32> -> !torch.float | |
%3090 = torch.aten.item %3088 : !torch.vtensor<[],si8> -> !torch.int | |
%3091 = torch.aten._make_per_tensor_quantized_tensor %3086, %3089, %3090 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3092 = torch.aten.dequantize.self %3091 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%3093 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3094 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_399 = torch.constant.int 12 | |
%3095 = torch.aten.item %3093 : !torch.vtensor<[],f32> -> !torch.float | |
%3096 = torch.aten.item %3094 : !torch.vtensor<[],si8> -> !torch.int | |
%3097 = torch.aten.quantize_per_tensor %42, %3095, %3096, %int12_399 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%3098 = torch.aten.int_repr %3097 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%3099 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3100 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3101 = torch.aten.item %3099 : !torch.vtensor<[],f32> -> !torch.float | |
%3102 = torch.aten.item %3100 : !torch.vtensor<[],si8> -> !torch.int | |
%3103 = torch.aten._make_per_tensor_quantized_tensor %3098, %3101, %3102 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%3104 = torch.aten.dequantize.self %3103 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%3105 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3106 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_400 = torch.constant.int 12 | |
%3107 = torch.aten.item %3105 : !torch.vtensor<[],f32> -> !torch.float | |
%3108 = torch.aten.item %3106 : !torch.vtensor<[],si8> -> !torch.int | |
%3109 = torch.aten.quantize_per_tensor %43, %3107, %3108, %int12_400 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3110 = torch.aten.int_repr %3109 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3111 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3112 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3113 = torch.aten.item %3111 : !torch.vtensor<[],f32> -> !torch.float | |
%3114 = torch.aten.item %3112 : !torch.vtensor<[],si8> -> !torch.int | |
%3115 = torch.aten._make_per_tensor_quantized_tensor %3110, %3113, %3114 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3116 = torch.aten.dequantize.self %3115 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_401 = torch.constant.int 1 | |
%int1_402 = torch.constant.int 1 | |
%int1_403 = torch.constant.int 1 | |
%int1_404 = torch.constant.int 1 | |
%int1_405 = torch.constant.int 1 | |
%int1_406 = torch.constant.int 1 | |
%int0_407 = torch.constant.int 0 | |
%3117 = torch.prim.ListConstruct %int1_401, %int1_402 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3118 = torch.prim.ListConstruct %int1_403, %int1_404 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3119 = torch.prim.ListConstruct %int1_405, %int1_406 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3120 = torch.prim.ListConstruct %int0_407, %int0_407 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_408 = torch.constant.bool false | |
%int1_409 = torch.constant.int 1 | |
%3121 = torch.aten.convolution %3092, %3104, %3116, %3119, %3117, %3118, %false_408, %3120, %int1_409 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_410 = torch.constant.float 0.1015625 | |
%3122 = torch.aten.leaky_relu %3121, %float1.015630e-01_410 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3123 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3124 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_411 = torch.constant.int 12 | |
%3125 = torch.aten.item %3123 : !torch.vtensor<[],f32> -> !torch.float | |
%3126 = torch.aten.item %3124 : !torch.vtensor<[],si8> -> !torch.int | |
%3127 = torch.aten.quantize_per_tensor %3122, %3125, %3126, %int12_411 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3128 = torch.aten.int_repr %3127 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3129 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3130 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3131 = torch.aten.item %3129 : !torch.vtensor<[],f32> -> !torch.float | |
%3132 = torch.aten.item %3130 : !torch.vtensor<[],si8> -> !torch.int | |
%3133 = torch.aten._make_per_tensor_quantized_tensor %3128, %3131, %3132 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3134 = torch.aten.dequantize.self %3133 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3135 = torch.prim.ListConstruct %3092, %3134 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_412 = torch.constant.int 1 | |
%3136 = torch.aten.cat %3135, %int1_412 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%3137 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3138 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_413 = torch.constant.int 12 | |
%3139 = torch.aten.item %3137 : !torch.vtensor<[],f32> -> !torch.float | |
%3140 = torch.aten.item %3138 : !torch.vtensor<[],si8> -> !torch.int | |
%3141 = torch.aten.quantize_per_tensor %3136, %3139, %3140, %int12_413 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%3142 = torch.aten.int_repr %3141 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%3143 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3144 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3145 = torch.aten.item %3143 : !torch.vtensor<[],f32> -> !torch.float | |
%3146 = torch.aten.item %3144 : !torch.vtensor<[],si8> -> !torch.int | |
%3147 = torch.aten._make_per_tensor_quantized_tensor %3142, %3145, %3146 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%3148 = torch.aten.dequantize.self %3147 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%3149 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3150 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_414 = torch.constant.int 12 | |
%3151 = torch.aten.item %3149 : !torch.vtensor<[],f32> -> !torch.float | |
%3152 = torch.aten.item %3150 : !torch.vtensor<[],si8> -> !torch.int | |
%3153 = torch.aten.quantize_per_tensor %44, %3151, %3152, %int12_414 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%3154 = torch.aten.int_repr %3153 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%3155 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3156 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3157 = torch.aten.item %3155 : !torch.vtensor<[],f32> -> !torch.float | |
%3158 = torch.aten.item %3156 : !torch.vtensor<[],si8> -> !torch.int | |
%3159 = torch.aten._make_per_tensor_quantized_tensor %3154, %3157, %3158 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%3160 = torch.aten.dequantize.self %3159 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%3161 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3162 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_415 = torch.constant.int 12 | |
%3163 = torch.aten.item %3161 : !torch.vtensor<[],f32> -> !torch.float | |
%3164 = torch.aten.item %3162 : !torch.vtensor<[],si8> -> !torch.int | |
%3165 = torch.aten.quantize_per_tensor %45, %3163, %3164, %int12_415 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3166 = torch.aten.int_repr %3165 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3167 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3168 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3169 = torch.aten.item %3167 : !torch.vtensor<[],f32> -> !torch.float | |
%3170 = torch.aten.item %3168 : !torch.vtensor<[],si8> -> !torch.int | |
%3171 = torch.aten._make_per_tensor_quantized_tensor %3166, %3169, %3170 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3172 = torch.aten.dequantize.self %3171 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_416 = torch.constant.int 1 | |
%int1_417 = torch.constant.int 1 | |
%int1_418 = torch.constant.int 1 | |
%int1_419 = torch.constant.int 1 | |
%int1_420 = torch.constant.int 1 | |
%int1_421 = torch.constant.int 1 | |
%int0_422 = torch.constant.int 0 | |
%3173 = torch.prim.ListConstruct %int1_416, %int1_417 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3174 = torch.prim.ListConstruct %int1_418, %int1_419 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3175 = torch.prim.ListConstruct %int1_420, %int1_421 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3176 = torch.prim.ListConstruct %int0_422, %int0_422 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_423 = torch.constant.bool false | |
%int1_424 = torch.constant.int 1 | |
%3177 = torch.aten.convolution %3148, %3160, %3172, %3175, %3173, %3174, %false_423, %3176, %int1_424 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_425 = torch.constant.float 0.1015625 | |
%3178 = torch.aten.leaky_relu %3177, %float1.015630e-01_425 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3179 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3180 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_426 = torch.constant.int 12 | |
%3181 = torch.aten.item %3179 : !torch.vtensor<[],f32> -> !torch.float | |
%3182 = torch.aten.item %3180 : !torch.vtensor<[],si8> -> !torch.int | |
%3183 = torch.aten.quantize_per_tensor %3178, %3181, %3182, %int12_426 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3184 = torch.aten.int_repr %3183 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3185 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3186 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3187 = torch.aten.item %3185 : !torch.vtensor<[],f32> -> !torch.float | |
%3188 = torch.aten.item %3186 : !torch.vtensor<[],si8> -> !torch.int | |
%3189 = torch.aten._make_per_tensor_quantized_tensor %3184, %3187, %3188 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3190 = torch.aten.dequantize.self %3189 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3191 = torch.prim.ListConstruct %3092, %3134, %3190 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_427 = torch.constant.int 1 | |
%3192 = torch.aten.cat %3191, %int1_427 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%3193 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3194 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_428 = torch.constant.int 12 | |
%3195 = torch.aten.item %3193 : !torch.vtensor<[],f32> -> !torch.float | |
%3196 = torch.aten.item %3194 : !torch.vtensor<[],si8> -> !torch.int | |
%3197 = torch.aten.quantize_per_tensor %3192, %3195, %3196, %int12_428 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%3198 = torch.aten.int_repr %3197 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%3199 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3200 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3201 = torch.aten.item %3199 : !torch.vtensor<[],f32> -> !torch.float | |
%3202 = torch.aten.item %3200 : !torch.vtensor<[],si8> -> !torch.int | |
%3203 = torch.aten._make_per_tensor_quantized_tensor %3198, %3201, %3202 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%3204 = torch.aten.dequantize.self %3203 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%3205 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3206 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_429 = torch.constant.int 12 | |
%3207 = torch.aten.item %3205 : !torch.vtensor<[],f32> -> !torch.float | |
%3208 = torch.aten.item %3206 : !torch.vtensor<[],si8> -> !torch.int | |
%3209 = torch.aten.quantize_per_tensor %46, %3207, %3208, %int12_429 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%3210 = torch.aten.int_repr %3209 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%3211 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3212 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3213 = torch.aten.item %3211 : !torch.vtensor<[],f32> -> !torch.float | |
%3214 = torch.aten.item %3212 : !torch.vtensor<[],si8> -> !torch.int | |
%3215 = torch.aten._make_per_tensor_quantized_tensor %3210, %3213, %3214 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%3216 = torch.aten.dequantize.self %3215 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%3217 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3218 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_430 = torch.constant.int 12 | |
%3219 = torch.aten.item %3217 : !torch.vtensor<[],f32> -> !torch.float | |
%3220 = torch.aten.item %3218 : !torch.vtensor<[],si8> -> !torch.int | |
%3221 = torch.aten.quantize_per_tensor %47, %3219, %3220, %int12_430 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3222 = torch.aten.int_repr %3221 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3223 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3224 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3225 = torch.aten.item %3223 : !torch.vtensor<[],f32> -> !torch.float | |
%3226 = torch.aten.item %3224 : !torch.vtensor<[],si8> -> !torch.int | |
%3227 = torch.aten._make_per_tensor_quantized_tensor %3222, %3225, %3226 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3228 = torch.aten.dequantize.self %3227 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_431 = torch.constant.int 1 | |
%int1_432 = torch.constant.int 1 | |
%int1_433 = torch.constant.int 1 | |
%int1_434 = torch.constant.int 1 | |
%int1_435 = torch.constant.int 1 | |
%int1_436 = torch.constant.int 1 | |
%int0_437 = torch.constant.int 0 | |
%3229 = torch.prim.ListConstruct %int1_431, %int1_432 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3230 = torch.prim.ListConstruct %int1_433, %int1_434 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3231 = torch.prim.ListConstruct %int1_435, %int1_436 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3232 = torch.prim.ListConstruct %int0_437, %int0_437 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_438 = torch.constant.bool false | |
%int1_439 = torch.constant.int 1 | |
%3233 = torch.aten.convolution %3204, %3216, %3228, %3231, %3229, %3230, %false_438, %3232, %int1_439 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_440 = torch.constant.float 0.1015625 | |
%3234 = torch.aten.leaky_relu %3233, %float1.015630e-01_440 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3235 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3236 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_441 = torch.constant.int 12 | |
%3237 = torch.aten.item %3235 : !torch.vtensor<[],f32> -> !torch.float | |
%3238 = torch.aten.item %3236 : !torch.vtensor<[],si8> -> !torch.int | |
%3239 = torch.aten.quantize_per_tensor %3234, %3237, %3238, %int12_441 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3240 = torch.aten.int_repr %3239 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3241 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3242 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3243 = torch.aten.item %3241 : !torch.vtensor<[],f32> -> !torch.float | |
%3244 = torch.aten.item %3242 : !torch.vtensor<[],si8> -> !torch.int | |
%3245 = torch.aten._make_per_tensor_quantized_tensor %3240, %3243, %3244 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3246 = torch.aten.dequantize.self %3245 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3247 = torch.prim.ListConstruct %3092, %3134, %3190, %3246 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_442 = torch.constant.int 1 | |
%3248 = torch.aten.cat %3247, %int1_442 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%3249 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3250 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_443 = torch.constant.int 12 | |
%3251 = torch.aten.item %3249 : !torch.vtensor<[],f32> -> !torch.float | |
%3252 = torch.aten.item %3250 : !torch.vtensor<[],si8> -> !torch.int | |
%3253 = torch.aten.quantize_per_tensor %3248, %3251, %3252, %int12_443 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%3254 = torch.aten.int_repr %3253 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%3255 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3256 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3257 = torch.aten.item %3255 : !torch.vtensor<[],f32> -> !torch.float | |
%3258 = torch.aten.item %3256 : !torch.vtensor<[],si8> -> !torch.int | |
%3259 = torch.aten._make_per_tensor_quantized_tensor %3254, %3257, %3258 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%3260 = torch.aten.dequantize.self %3259 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%3261 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3262 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_444 = torch.constant.int 12 | |
%3263 = torch.aten.item %3261 : !torch.vtensor<[],f32> -> !torch.float | |
%3264 = torch.aten.item %3262 : !torch.vtensor<[],si8> -> !torch.int | |
%3265 = torch.aten.quantize_per_tensor %48, %3263, %3264, %int12_444 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%3266 = torch.aten.int_repr %3265 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%3267 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3268 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3269 = torch.aten.item %3267 : !torch.vtensor<[],f32> -> !torch.float | |
%3270 = torch.aten.item %3268 : !torch.vtensor<[],si8> -> !torch.int | |
%3271 = torch.aten._make_per_tensor_quantized_tensor %3266, %3269, %3270 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%3272 = torch.aten.dequantize.self %3271 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%3273 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3274 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_445 = torch.constant.int 12 | |
%3275 = torch.aten.item %3273 : !torch.vtensor<[],f32> -> !torch.float | |
%3276 = torch.aten.item %3274 : !torch.vtensor<[],si8> -> !torch.int | |
%3277 = torch.aten.quantize_per_tensor %49, %3275, %3276, %int12_445 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3278 = torch.aten.int_repr %3277 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3279 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3280 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3281 = torch.aten.item %3279 : !torch.vtensor<[],f32> -> !torch.float | |
%3282 = torch.aten.item %3280 : !torch.vtensor<[],si8> -> !torch.int | |
%3283 = torch.aten._make_per_tensor_quantized_tensor %3278, %3281, %3282 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3284 = torch.aten.dequantize.self %3283 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_446 = torch.constant.int 1 | |
%int1_447 = torch.constant.int 1 | |
%int1_448 = torch.constant.int 1 | |
%int1_449 = torch.constant.int 1 | |
%int1_450 = torch.constant.int 1 | |
%int1_451 = torch.constant.int 1 | |
%int0_452 = torch.constant.int 0 | |
%3285 = torch.prim.ListConstruct %int1_446, %int1_447 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3286 = torch.prim.ListConstruct %int1_448, %int1_449 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3287 = torch.prim.ListConstruct %int1_450, %int1_451 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3288 = torch.prim.ListConstruct %int0_452, %int0_452 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_453 = torch.constant.bool false | |
%int1_454 = torch.constant.int 1 | |
%3289 = torch.aten.convolution %3260, %3272, %3284, %3287, %3285, %3286, %false_453, %3288, %int1_454 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_455 = torch.constant.float 0.1015625 | |
%3290 = torch.aten.leaky_relu %3289, %float1.015630e-01_455 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3291 = torch.prim.ListConstruct %3092, %3134, %3190, %3246, %3290 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_456 = torch.constant.int 1 | |
%3292 = torch.aten.cat %3291, %int1_456 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%3293 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3294 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_457 = torch.constant.int 12 | |
%3295 = torch.aten.item %3293 : !torch.vtensor<[],f32> -> !torch.float | |
%3296 = torch.aten.item %3294 : !torch.vtensor<[],si8> -> !torch.int | |
%3297 = torch.aten.quantize_per_tensor %3292, %3295, %3296, %int12_457 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%3298 = torch.aten.int_repr %3297 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%3299 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3300 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3301 = torch.aten.item %3299 : !torch.vtensor<[],f32> -> !torch.float | |
%3302 = torch.aten.item %3300 : !torch.vtensor<[],si8> -> !torch.int | |
%3303 = torch.aten._make_per_tensor_quantized_tensor %3298, %3301, %3302 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%3304 = torch.aten.dequantize.self %3303 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%3305 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3306 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_458 = torch.constant.int 12 | |
%3307 = torch.aten.item %3305 : !torch.vtensor<[],f32> -> !torch.float | |
%3308 = torch.aten.item %3306 : !torch.vtensor<[],si8> -> !torch.int | |
%3309 = torch.aten.quantize_per_tensor %50, %3307, %3308, %int12_458 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%3310 = torch.aten.int_repr %3309 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%3311 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3312 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3313 = torch.aten.item %3311 : !torch.vtensor<[],f32> -> !torch.float | |
%3314 = torch.aten.item %3312 : !torch.vtensor<[],si8> -> !torch.int | |
%3315 = torch.aten._make_per_tensor_quantized_tensor %3310, %3313, %3314 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%3316 = torch.aten.dequantize.self %3315 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%3317 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3318 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_459 = torch.constant.int 12 | |
%3319 = torch.aten.item %3317 : !torch.vtensor<[],f32> -> !torch.float | |
%3320 = torch.aten.item %3318 : !torch.vtensor<[],si8> -> !torch.int | |
%3321 = torch.aten.quantize_per_tensor %51, %3319, %3320, %int12_459 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%3322 = torch.aten.int_repr %3321 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%3323 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3324 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3325 = torch.aten.item %3323 : !torch.vtensor<[],f32> -> !torch.float | |
%3326 = torch.aten.item %3324 : !torch.vtensor<[],si8> -> !torch.int | |
%3327 = torch.aten._make_per_tensor_quantized_tensor %3322, %3325, %3326 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%3328 = torch.aten.dequantize.self %3327 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_460 = torch.constant.int 1 | |
%int1_461 = torch.constant.int 1 | |
%int1_462 = torch.constant.int 1 | |
%int1_463 = torch.constant.int 1 | |
%int1_464 = torch.constant.int 1 | |
%int1_465 = torch.constant.int 1 | |
%int0_466 = torch.constant.int 0 | |
%3329 = torch.prim.ListConstruct %int1_460, %int1_461 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3330 = torch.prim.ListConstruct %int1_462, %int1_463 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3331 = torch.prim.ListConstruct %int1_464, %int1_465 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3332 = torch.prim.ListConstruct %int0_466, %int0_466 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_467 = torch.constant.bool false | |
%int1_468 = torch.constant.int 1 | |
%3333 = torch.aten.convolution %3304, %3316, %3328, %3331, %3329, %3330, %false_467, %3332, %int1_468 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%3334 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3335 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_469 = torch.constant.int 12 | |
%3336 = torch.aten.item %3334 : !torch.vtensor<[],f32> -> !torch.float | |
%3337 = torch.aten.item %3335 : !torch.vtensor<[],si8> -> !torch.int | |
%3338 = torch.aten.quantize_per_tensor %3333, %3336, %3337, %int12_469 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3339 = torch.aten.int_repr %3338 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3340 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3341 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3342 = torch.aten.item %3340 : !torch.vtensor<[],f32> -> !torch.float | |
%3343 = torch.aten.item %3341 : !torch.vtensor<[],si8> -> !torch.int | |
%3344 = torch.aten._make_per_tensor_quantized_tensor %3339, %3342, %3343 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3345 = torch.aten.dequantize.self %3344 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%3346 = torch.aten.mul.Tensor %3345, %779 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%3347 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3348 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_470 = torch.constant.int 12 | |
%3349 = torch.aten.item %3347 : !torch.vtensor<[],f32> -> !torch.float | |
%3350 = torch.aten.item %3348 : !torch.vtensor<[],si8> -> !torch.int | |
%3351 = torch.aten.quantize_per_tensor %3346, %3349, %3350, %int12_470 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3352 = torch.aten.int_repr %3351 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3353 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3354 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3355 = torch.aten.item %3353 : !torch.vtensor<[],f32> -> !torch.float | |
%3356 = torch.aten.item %3354 : !torch.vtensor<[],si8> -> !torch.int | |
%3357 = torch.aten._make_per_tensor_quantized_tensor %3352, %3355, %3356 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3358 = torch.aten.dequantize.self %3357 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_471 = torch.constant.int 1 | |
%3359 = torch.aten.add.Tensor %3358, %3092, %int1_471 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%3360 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3361 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_472 = torch.constant.int 12 | |
%3362 = torch.aten.item %3360 : !torch.vtensor<[],f32> -> !torch.float | |
%3363 = torch.aten.item %3361 : !torch.vtensor<[],si8> -> !torch.int | |
%3364 = torch.aten.quantize_per_tensor %3359, %3362, %3363, %int12_472 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3365 = torch.aten.int_repr %3364 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3366 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3367 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3368 = torch.aten.item %3366 : !torch.vtensor<[],f32> -> !torch.float | |
%3369 = torch.aten.item %3367 : !torch.vtensor<[],si8> -> !torch.int | |
%3370 = torch.aten._make_per_tensor_quantized_tensor %3365, %3368, %3369 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3371 = torch.aten.dequantize.self %3370 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%3372 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3373 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_473 = torch.constant.int 12 | |
%3374 = torch.aten.item %3372 : !torch.vtensor<[],f32> -> !torch.float | |
%3375 = torch.aten.item %3373 : !torch.vtensor<[],si8> -> !torch.int | |
%3376 = torch.aten.quantize_per_tensor %52, %3374, %3375, %int12_473 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%3377 = torch.aten.int_repr %3376 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%3378 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3379 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3380 = torch.aten.item %3378 : !torch.vtensor<[],f32> -> !torch.float | |
%3381 = torch.aten.item %3379 : !torch.vtensor<[],si8> -> !torch.int | |
%3382 = torch.aten._make_per_tensor_quantized_tensor %3377, %3380, %3381 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%3383 = torch.aten.dequantize.self %3382 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%3384 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3385 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_474 = torch.constant.int 12 | |
%3386 = torch.aten.item %3384 : !torch.vtensor<[],f32> -> !torch.float | |
%3387 = torch.aten.item %3385 : !torch.vtensor<[],si8> -> !torch.int | |
%3388 = torch.aten.quantize_per_tensor %53, %3386, %3387, %int12_474 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3389 = torch.aten.int_repr %3388 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3390 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3391 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3392 = torch.aten.item %3390 : !torch.vtensor<[],f32> -> !torch.float | |
%3393 = torch.aten.item %3391 : !torch.vtensor<[],si8> -> !torch.int | |
%3394 = torch.aten._make_per_tensor_quantized_tensor %3389, %3392, %3393 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3395 = torch.aten.dequantize.self %3394 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_475 = torch.constant.int 1 | |
%int1_476 = torch.constant.int 1 | |
%int1_477 = torch.constant.int 1 | |
%int1_478 = torch.constant.int 1 | |
%int1_479 = torch.constant.int 1 | |
%int1_480 = torch.constant.int 1 | |
%int0_481 = torch.constant.int 0 | |
%3396 = torch.prim.ListConstruct %int1_475, %int1_476 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3397 = torch.prim.ListConstruct %int1_477, %int1_478 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3398 = torch.prim.ListConstruct %int1_479, %int1_480 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3399 = torch.prim.ListConstruct %int0_481, %int0_481 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_482 = torch.constant.bool false | |
%int1_483 = torch.constant.int 1 | |
%3400 = torch.aten.convolution %3371, %3383, %3395, %3398, %3396, %3397, %false_482, %3399, %int1_483 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_484 = torch.constant.float 0.1015625 | |
%3401 = torch.aten.leaky_relu %3400, %float1.015630e-01_484 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3402 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3403 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_485 = torch.constant.int 12 | |
%3404 = torch.aten.item %3402 : !torch.vtensor<[],f32> -> !torch.float | |
%3405 = torch.aten.item %3403 : !torch.vtensor<[],si8> -> !torch.int | |
%3406 = torch.aten.quantize_per_tensor %3401, %3404, %3405, %int12_485 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3407 = torch.aten.int_repr %3406 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3408 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3409 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3410 = torch.aten.item %3408 : !torch.vtensor<[],f32> -> !torch.float | |
%3411 = torch.aten.item %3409 : !torch.vtensor<[],si8> -> !torch.int | |
%3412 = torch.aten._make_per_tensor_quantized_tensor %3407, %3410, %3411 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3413 = torch.aten.dequantize.self %3412 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3414 = torch.prim.ListConstruct %3371, %3413 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_486 = torch.constant.int 1 | |
%3415 = torch.aten.cat %3414, %int1_486 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%3416 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3417 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_487 = torch.constant.int 12 | |
%3418 = torch.aten.item %3416 : !torch.vtensor<[],f32> -> !torch.float | |
%3419 = torch.aten.item %3417 : !torch.vtensor<[],si8> -> !torch.int | |
%3420 = torch.aten.quantize_per_tensor %3415, %3418, %3419, %int12_487 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%3421 = torch.aten.int_repr %3420 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%3422 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3424 = torch.aten.item %3422 : !torch.vtensor<[],f32> -> !torch.float | |
%3425 = torch.aten.item %3423 : !torch.vtensor<[],si8> -> !torch.int | |
%3426 = torch.aten._make_per_tensor_quantized_tensor %3421, %3424, %3425 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%3427 = torch.aten.dequantize.self %3426 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%3428 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_488 = torch.constant.int 12 | |
%3430 = torch.aten.item %3428 : !torch.vtensor<[],f32> -> !torch.float | |
%3431 = torch.aten.item %3429 : !torch.vtensor<[],si8> -> !torch.int | |
%3432 = torch.aten.quantize_per_tensor %54, %3430, %3431, %int12_488 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%3433 = torch.aten.int_repr %3432 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%3434 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3435 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3436 = torch.aten.item %3434 : !torch.vtensor<[],f32> -> !torch.float | |
%3437 = torch.aten.item %3435 : !torch.vtensor<[],si8> -> !torch.int | |
%3438 = torch.aten._make_per_tensor_quantized_tensor %3433, %3436, %3437 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%3439 = torch.aten.dequantize.self %3438 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%3440 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3441 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_489 = torch.constant.int 12 | |
%3442 = torch.aten.item %3440 : !torch.vtensor<[],f32> -> !torch.float | |
%3443 = torch.aten.item %3441 : !torch.vtensor<[],si8> -> !torch.int | |
%3444 = torch.aten.quantize_per_tensor %55, %3442, %3443, %int12_489 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3445 = torch.aten.int_repr %3444 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3446 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3447 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3448 = torch.aten.item %3446 : !torch.vtensor<[],f32> -> !torch.float | |
%3449 = torch.aten.item %3447 : !torch.vtensor<[],si8> -> !torch.int | |
%3450 = torch.aten._make_per_tensor_quantized_tensor %3445, %3448, %3449 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3451 = torch.aten.dequantize.self %3450 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_490 = torch.constant.int 1 | |
%int1_491 = torch.constant.int 1 | |
%int1_492 = torch.constant.int 1 | |
%int1_493 = torch.constant.int 1 | |
%int1_494 = torch.constant.int 1 | |
%int1_495 = torch.constant.int 1 | |
%int0_496 = torch.constant.int 0 | |
%3452 = torch.prim.ListConstruct %int1_490, %int1_491 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3453 = torch.prim.ListConstruct %int1_492, %int1_493 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3454 = torch.prim.ListConstruct %int1_494, %int1_495 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3455 = torch.prim.ListConstruct %int0_496, %int0_496 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_497 = torch.constant.bool false | |
%int1_498 = torch.constant.int 1 | |
%3456 = torch.aten.convolution %3427, %3439, %3451, %3454, %3452, %3453, %false_497, %3455, %int1_498 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_499 = torch.constant.float 0.1015625 | |
%3457 = torch.aten.leaky_relu %3456, %float1.015630e-01_499 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3458 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3459 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_500 = torch.constant.int 12 | |
%3460 = torch.aten.item %3458 : !torch.vtensor<[],f32> -> !torch.float | |
%3461 = torch.aten.item %3459 : !torch.vtensor<[],si8> -> !torch.int | |
%3462 = torch.aten.quantize_per_tensor %3457, %3460, %3461, %int12_500 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3463 = torch.aten.int_repr %3462 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3464 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3465 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3466 = torch.aten.item %3464 : !torch.vtensor<[],f32> -> !torch.float | |
%3467 = torch.aten.item %3465 : !torch.vtensor<[],si8> -> !torch.int | |
%3468 = torch.aten._make_per_tensor_quantized_tensor %3463, %3466, %3467 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3469 = torch.aten.dequantize.self %3468 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3470 = torch.prim.ListConstruct %3371, %3413, %3469 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_501 = torch.constant.int 1 | |
%3471 = torch.aten.cat %3470, %int1_501 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%3472 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3473 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_502 = torch.constant.int 12 | |
%3474 = torch.aten.item %3472 : !torch.vtensor<[],f32> -> !torch.float | |
%3475 = torch.aten.item %3473 : !torch.vtensor<[],si8> -> !torch.int | |
%3476 = torch.aten.quantize_per_tensor %3471, %3474, %3475, %int12_502 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%3477 = torch.aten.int_repr %3476 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%3478 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3479 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3480 = torch.aten.item %3478 : !torch.vtensor<[],f32> -> !torch.float | |
%3481 = torch.aten.item %3479 : !torch.vtensor<[],si8> -> !torch.int | |
%3482 = torch.aten._make_per_tensor_quantized_tensor %3477, %3480, %3481 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%3483 = torch.aten.dequantize.self %3482 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%3484 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3485 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_503 = torch.constant.int 12 | |
%3486 = torch.aten.item %3484 : !torch.vtensor<[],f32> -> !torch.float | |
%3487 = torch.aten.item %3485 : !torch.vtensor<[],si8> -> !torch.int | |
%3488 = torch.aten.quantize_per_tensor %56, %3486, %3487, %int12_503 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%3489 = torch.aten.int_repr %3488 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%3490 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3491 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3492 = torch.aten.item %3490 : !torch.vtensor<[],f32> -> !torch.float | |
%3493 = torch.aten.item %3491 : !torch.vtensor<[],si8> -> !torch.int | |
%3494 = torch.aten._make_per_tensor_quantized_tensor %3489, %3492, %3493 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%3495 = torch.aten.dequantize.self %3494 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%3496 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3497 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_504 = torch.constant.int 12 | |
%3498 = torch.aten.item %3496 : !torch.vtensor<[],f32> -> !torch.float | |
%3499 = torch.aten.item %3497 : !torch.vtensor<[],si8> -> !torch.int | |
%3500 = torch.aten.quantize_per_tensor %57, %3498, %3499, %int12_504 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3501 = torch.aten.int_repr %3500 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3502 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3503 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3504 = torch.aten.item %3502 : !torch.vtensor<[],f32> -> !torch.float | |
%3505 = torch.aten.item %3503 : !torch.vtensor<[],si8> -> !torch.int | |
%3506 = torch.aten._make_per_tensor_quantized_tensor %3501, %3504, %3505 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3507 = torch.aten.dequantize.self %3506 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_505 = torch.constant.int 1 | |
%int1_506 = torch.constant.int 1 | |
%int1_507 = torch.constant.int 1 | |
%int1_508 = torch.constant.int 1 | |
%int1_509 = torch.constant.int 1 | |
%int1_510 = torch.constant.int 1 | |
%int0_511 = torch.constant.int 0 | |
%3508 = torch.prim.ListConstruct %int1_505, %int1_506 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3509 = torch.prim.ListConstruct %int1_507, %int1_508 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3510 = torch.prim.ListConstruct %int1_509, %int1_510 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3511 = torch.prim.ListConstruct %int0_511, %int0_511 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_512 = torch.constant.bool false | |
%int1_513 = torch.constant.int 1 | |
%3512 = torch.aten.convolution %3483, %3495, %3507, %3510, %3508, %3509, %false_512, %3511, %int1_513 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_514 = torch.constant.float 0.1015625 | |
%3513 = torch.aten.leaky_relu %3512, %float1.015630e-01_514 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3514 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3515 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_515 = torch.constant.int 12 | |
%3516 = torch.aten.item %3514 : !torch.vtensor<[],f32> -> !torch.float | |
%3517 = torch.aten.item %3515 : !torch.vtensor<[],si8> -> !torch.int | |
%3518 = torch.aten.quantize_per_tensor %3513, %3516, %3517, %int12_515 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3519 = torch.aten.int_repr %3518 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3520 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3521 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3522 = torch.aten.item %3520 : !torch.vtensor<[],f32> -> !torch.float | |
%3523 = torch.aten.item %3521 : !torch.vtensor<[],si8> -> !torch.int | |
%3524 = torch.aten._make_per_tensor_quantized_tensor %3519, %3522, %3523 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3525 = torch.aten.dequantize.self %3524 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3526 = torch.prim.ListConstruct %3371, %3413, %3469, %3525 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_516 = torch.constant.int 1 | |
%3527 = torch.aten.cat %3526, %int1_516 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%3528 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3529 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_517 = torch.constant.int 12 | |
%3530 = torch.aten.item %3528 : !torch.vtensor<[],f32> -> !torch.float | |
%3531 = torch.aten.item %3529 : !torch.vtensor<[],si8> -> !torch.int | |
%3532 = torch.aten.quantize_per_tensor %3527, %3530, %3531, %int12_517 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%3533 = torch.aten.int_repr %3532 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%3534 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3535 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3536 = torch.aten.item %3534 : !torch.vtensor<[],f32> -> !torch.float | |
%3537 = torch.aten.item %3535 : !torch.vtensor<[],si8> -> !torch.int | |
%3538 = torch.aten._make_per_tensor_quantized_tensor %3533, %3536, %3537 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%3539 = torch.aten.dequantize.self %3538 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%3540 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3541 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_518 = torch.constant.int 12 | |
%3542 = torch.aten.item %3540 : !torch.vtensor<[],f32> -> !torch.float | |
%3543 = torch.aten.item %3541 : !torch.vtensor<[],si8> -> !torch.int | |
%3544 = torch.aten.quantize_per_tensor %58, %3542, %3543, %int12_518 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%3545 = torch.aten.int_repr %3544 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%3546 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3547 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3548 = torch.aten.item %3546 : !torch.vtensor<[],f32> -> !torch.float | |
%3549 = torch.aten.item %3547 : !torch.vtensor<[],si8> -> !torch.int | |
%3550 = torch.aten._make_per_tensor_quantized_tensor %3545, %3548, %3549 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%3551 = torch.aten.dequantize.self %3550 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%3552 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3553 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_519 = torch.constant.int 12 | |
%3554 = torch.aten.item %3552 : !torch.vtensor<[],f32> -> !torch.float | |
%3555 = torch.aten.item %3553 : !torch.vtensor<[],si8> -> !torch.int | |
%3556 = torch.aten.quantize_per_tensor %59, %3554, %3555, %int12_519 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3557 = torch.aten.int_repr %3556 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3558 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3559 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3560 = torch.aten.item %3558 : !torch.vtensor<[],f32> -> !torch.float | |
%3561 = torch.aten.item %3559 : !torch.vtensor<[],si8> -> !torch.int | |
%3562 = torch.aten._make_per_tensor_quantized_tensor %3557, %3560, %3561 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3563 = torch.aten.dequantize.self %3562 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_520 = torch.constant.int 1 | |
%int1_521 = torch.constant.int 1 | |
%int1_522 = torch.constant.int 1 | |
%int1_523 = torch.constant.int 1 | |
%int1_524 = torch.constant.int 1 | |
%int1_525 = torch.constant.int 1 | |
%int0_526 = torch.constant.int 0 | |
%3564 = torch.prim.ListConstruct %int1_520, %int1_521 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3565 = torch.prim.ListConstruct %int1_522, %int1_523 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3566 = torch.prim.ListConstruct %int1_524, %int1_525 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3567 = torch.prim.ListConstruct %int0_526, %int0_526 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_527 = torch.constant.bool false | |
%int1_528 = torch.constant.int 1 | |
%3568 = torch.aten.convolution %3539, %3551, %3563, %3566, %3564, %3565, %false_527, %3567, %int1_528 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_529 = torch.constant.float 0.1015625 | |
%3569 = torch.aten.leaky_relu %3568, %float1.015630e-01_529 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3570 = torch.prim.ListConstruct %3371, %3413, %3469, %3525, %3569 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_530 = torch.constant.int 1 | |
%3571 = torch.aten.cat %3570, %int1_530 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%3572 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3573 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_531 = torch.constant.int 12 | |
%3574 = torch.aten.item %3572 : !torch.vtensor<[],f32> -> !torch.float | |
%3575 = torch.aten.item %3573 : !torch.vtensor<[],si8> -> !torch.int | |
%3576 = torch.aten.quantize_per_tensor %3571, %3574, %3575, %int12_531 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%3577 = torch.aten.int_repr %3576 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%3578 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3579 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3580 = torch.aten.item %3578 : !torch.vtensor<[],f32> -> !torch.float | |
%3581 = torch.aten.item %3579 : !torch.vtensor<[],si8> -> !torch.int | |
%3582 = torch.aten._make_per_tensor_quantized_tensor %3577, %3580, %3581 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%3583 = torch.aten.dequantize.self %3582 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%3584 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3585 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_532 = torch.constant.int 12 | |
%3586 = torch.aten.item %3584 : !torch.vtensor<[],f32> -> !torch.float | |
%3587 = torch.aten.item %3585 : !torch.vtensor<[],si8> -> !torch.int | |
%3588 = torch.aten.quantize_per_tensor %60, %3586, %3587, %int12_532 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%3589 = torch.aten.int_repr %3588 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%3590 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3591 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3592 = torch.aten.item %3590 : !torch.vtensor<[],f32> -> !torch.float | |
%3593 = torch.aten.item %3591 : !torch.vtensor<[],si8> -> !torch.int | |
%3594 = torch.aten._make_per_tensor_quantized_tensor %3589, %3592, %3593 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%3595 = torch.aten.dequantize.self %3594 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%3596 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3597 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_533 = torch.constant.int 12 | |
%3598 = torch.aten.item %3596 : !torch.vtensor<[],f32> -> !torch.float | |
%3599 = torch.aten.item %3597 : !torch.vtensor<[],si8> -> !torch.int | |
%3600 = torch.aten.quantize_per_tensor %61, %3598, %3599, %int12_533 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%3601 = torch.aten.int_repr %3600 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%3602 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3603 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3604 = torch.aten.item %3602 : !torch.vtensor<[],f32> -> !torch.float | |
%3605 = torch.aten.item %3603 : !torch.vtensor<[],si8> -> !torch.int | |
%3606 = torch.aten._make_per_tensor_quantized_tensor %3601, %3604, %3605 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%3607 = torch.aten.dequantize.self %3606 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_534 = torch.constant.int 1 | |
%int1_535 = torch.constant.int 1 | |
%int1_536 = torch.constant.int 1 | |
%int1_537 = torch.constant.int 1 | |
%int1_538 = torch.constant.int 1 | |
%int1_539 = torch.constant.int 1 | |
%int0_540 = torch.constant.int 0 | |
%3608 = torch.prim.ListConstruct %int1_534, %int1_535 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3609 = torch.prim.ListConstruct %int1_536, %int1_537 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3610 = torch.prim.ListConstruct %int1_538, %int1_539 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3611 = torch.prim.ListConstruct %int0_540, %int0_540 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_541 = torch.constant.bool false | |
%int1_542 = torch.constant.int 1 | |
%3612 = torch.aten.convolution %3583, %3595, %3607, %3610, %3608, %3609, %false_541, %3611, %int1_542 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%3613 = torch.vtensor.literal(dense<8.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3614 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_543 = torch.constant.int 12 | |
%3615 = torch.aten.item %3613 : !torch.vtensor<[],f32> -> !torch.float | |
%3616 = torch.aten.item %3614 : !torch.vtensor<[],si8> -> !torch.int | |
%3617 = torch.aten.quantize_per_tensor %3612, %3615, %3616, %int12_543 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3618 = torch.aten.int_repr %3617 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3619 = torch.vtensor.literal(dense<8.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3620 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3621 = torch.aten.item %3619 : !torch.vtensor<[],f32> -> !torch.float | |
%3622 = torch.aten.item %3620 : !torch.vtensor<[],si8> -> !torch.int | |
%3623 = torch.aten._make_per_tensor_quantized_tensor %3618, %3621, %3622 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3624 = torch.aten.dequantize.self %3623 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%3625 = torch.aten.mul.Tensor %3624, %792 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%3626 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3627 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_544 = torch.constant.int 12 | |
%3628 = torch.aten.item %3626 : !torch.vtensor<[],f32> -> !torch.float | |
%3629 = torch.aten.item %3627 : !torch.vtensor<[],si8> -> !torch.int | |
%3630 = torch.aten.quantize_per_tensor %3625, %3628, %3629, %int12_544 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3631 = torch.aten.int_repr %3630 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3632 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3633 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3634 = torch.aten.item %3632 : !torch.vtensor<[],f32> -> !torch.float | |
%3635 = torch.aten.item %3633 : !torch.vtensor<[],si8> -> !torch.int | |
%3636 = torch.aten._make_per_tensor_quantized_tensor %3631, %3634, %3635 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3637 = torch.aten.dequantize.self %3636 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_545 = torch.constant.int 1 | |
%3638 = torch.aten.add.Tensor %3637, %3371, %int1_545 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%3639 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3640 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_546 = torch.constant.int 12 | |
%3641 = torch.aten.item %3639 : !torch.vtensor<[],f32> -> !torch.float | |
%3642 = torch.aten.item %3640 : !torch.vtensor<[],si8> -> !torch.int | |
%3643 = torch.aten.quantize_per_tensor %3638, %3641, %3642, %int12_546 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3644 = torch.aten.int_repr %3643 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3645 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3646 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3647 = torch.aten.item %3645 : !torch.vtensor<[],f32> -> !torch.float | |
%3648 = torch.aten.item %3646 : !torch.vtensor<[],si8> -> !torch.int | |
%3649 = torch.aten._make_per_tensor_quantized_tensor %3644, %3647, %3648 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3650 = torch.aten.dequantize.self %3649 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%3651 = torch.aten.mul.Tensor %3650, %805 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%3652 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3653 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_547 = torch.constant.int 12 | |
%3654 = torch.aten.item %3652 : !torch.vtensor<[],f32> -> !torch.float | |
%3655 = torch.aten.item %3653 : !torch.vtensor<[],si8> -> !torch.int | |
%3656 = torch.aten.quantize_per_tensor %3651, %3654, %3655, %int12_547 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3657 = torch.aten.int_repr %3656 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3658 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3659 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3660 = torch.aten.item %3658 : !torch.vtensor<[],f32> -> !torch.float | |
%3661 = torch.aten.item %3659 : !torch.vtensor<[],si8> -> !torch.int | |
%3662 = torch.aten._make_per_tensor_quantized_tensor %3657, %3660, %3661 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3663 = torch.aten.dequantize.self %3662 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_548 = torch.constant.int 1 | |
%3664 = torch.aten.add.Tensor %3663, %2813, %int1_548 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%3665 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3666 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_549 = torch.constant.int 12 | |
%3667 = torch.aten.item %3665 : !torch.vtensor<[],f32> -> !torch.float | |
%3668 = torch.aten.item %3666 : !torch.vtensor<[],si8> -> !torch.int | |
%3669 = torch.aten.quantize_per_tensor %3664, %3667, %3668, %int12_549 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3670 = torch.aten.int_repr %3669 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3671 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3672 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3673 = torch.aten.item %3671 : !torch.vtensor<[],f32> -> !torch.float | |
%3674 = torch.aten.item %3672 : !torch.vtensor<[],si8> -> !torch.int | |
%3675 = torch.aten._make_per_tensor_quantized_tensor %3670, %3673, %3674 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3676 = torch.aten.dequantize.self %3675 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%3677 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3678 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_550 = torch.constant.int 12 | |
%3679 = torch.aten.item %3677 : !torch.vtensor<[],f32> -> !torch.float | |
%3680 = torch.aten.item %3678 : !torch.vtensor<[],si8> -> !torch.int | |
%3681 = torch.aten.quantize_per_tensor %62, %3679, %3680, %int12_550 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%3682 = torch.aten.int_repr %3681 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%3683 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3684 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3685 = torch.aten.item %3683 : !torch.vtensor<[],f32> -> !torch.float | |
%3686 = torch.aten.item %3684 : !torch.vtensor<[],si8> -> !torch.int | |
%3687 = torch.aten._make_per_tensor_quantized_tensor %3682, %3685, %3686 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%3688 = torch.aten.dequantize.self %3687 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%3689 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3690 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_551 = torch.constant.int 12 | |
%3691 = torch.aten.item %3689 : !torch.vtensor<[],f32> -> !torch.float | |
%3692 = torch.aten.item %3690 : !torch.vtensor<[],si8> -> !torch.int | |
%3693 = torch.aten.quantize_per_tensor %63, %3691, %3692, %int12_551 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3694 = torch.aten.int_repr %3693 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3695 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3696 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3697 = torch.aten.item %3695 : !torch.vtensor<[],f32> -> !torch.float | |
%3698 = torch.aten.item %3696 : !torch.vtensor<[],si8> -> !torch.int | |
%3699 = torch.aten._make_per_tensor_quantized_tensor %3694, %3697, %3698 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3700 = torch.aten.dequantize.self %3699 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_552 = torch.constant.int 1 | |
%int1_553 = torch.constant.int 1 | |
%int1_554 = torch.constant.int 1 | |
%int1_555 = torch.constant.int 1 | |
%int1_556 = torch.constant.int 1 | |
%int1_557 = torch.constant.int 1 | |
%int0_558 = torch.constant.int 0 | |
%3701 = torch.prim.ListConstruct %int1_552, %int1_553 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3702 = torch.prim.ListConstruct %int1_554, %int1_555 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3703 = torch.prim.ListConstruct %int1_556, %int1_557 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3704 = torch.prim.ListConstruct %int0_558, %int0_558 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_559 = torch.constant.bool false | |
%int1_560 = torch.constant.int 1 | |
%3705 = torch.aten.convolution %3676, %3688, %3700, %3703, %3701, %3702, %false_559, %3704, %int1_560 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_561 = torch.constant.float 0.1015625 | |
%3706 = torch.aten.leaky_relu %3705, %float1.015630e-01_561 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3707 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3708 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_562 = torch.constant.int 12 | |
%3709 = torch.aten.item %3707 : !torch.vtensor<[],f32> -> !torch.float | |
%3710 = torch.aten.item %3708 : !torch.vtensor<[],si8> -> !torch.int | |
%3711 = torch.aten.quantize_per_tensor %3706, %3709, %3710, %int12_562 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3712 = torch.aten.int_repr %3711 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3713 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3714 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3715 = torch.aten.item %3713 : !torch.vtensor<[],f32> -> !torch.float | |
%3716 = torch.aten.item %3714 : !torch.vtensor<[],si8> -> !torch.int | |
%3717 = torch.aten._make_per_tensor_quantized_tensor %3712, %3715, %3716 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3718 = torch.aten.dequantize.self %3717 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3719 = torch.prim.ListConstruct %3676, %3718 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_563 = torch.constant.int 1 | |
%3720 = torch.aten.cat %3719, %int1_563 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%3721 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3722 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_564 = torch.constant.int 12 | |
%3723 = torch.aten.item %3721 : !torch.vtensor<[],f32> -> !torch.float | |
%3724 = torch.aten.item %3722 : !torch.vtensor<[],si8> -> !torch.int | |
%3725 = torch.aten.quantize_per_tensor %3720, %3723, %3724, %int12_564 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%3726 = torch.aten.int_repr %3725 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%3727 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3728 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3729 = torch.aten.item %3727 : !torch.vtensor<[],f32> -> !torch.float | |
%3730 = torch.aten.item %3728 : !torch.vtensor<[],si8> -> !torch.int | |
%3731 = torch.aten._make_per_tensor_quantized_tensor %3726, %3729, %3730 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%3732 = torch.aten.dequantize.self %3731 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%3733 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3734 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_565 = torch.constant.int 12 | |
%3735 = torch.aten.item %3733 : !torch.vtensor<[],f32> -> !torch.float | |
%3736 = torch.aten.item %3734 : !torch.vtensor<[],si8> -> !torch.int | |
%3737 = torch.aten.quantize_per_tensor %64, %3735, %3736, %int12_565 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%3738 = torch.aten.int_repr %3737 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%3739 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3740 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3741 = torch.aten.item %3739 : !torch.vtensor<[],f32> -> !torch.float | |
%3742 = torch.aten.item %3740 : !torch.vtensor<[],si8> -> !torch.int | |
%3743 = torch.aten._make_per_tensor_quantized_tensor %3738, %3741, %3742 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%3744 = torch.aten.dequantize.self %3743 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%3745 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3746 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_566 = torch.constant.int 12 | |
%3747 = torch.aten.item %3745 : !torch.vtensor<[],f32> -> !torch.float | |
%3748 = torch.aten.item %3746 : !torch.vtensor<[],si8> -> !torch.int | |
%3749 = torch.aten.quantize_per_tensor %65, %3747, %3748, %int12_566 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3750 = torch.aten.int_repr %3749 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3751 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3752 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3753 = torch.aten.item %3751 : !torch.vtensor<[],f32> -> !torch.float | |
%3754 = torch.aten.item %3752 : !torch.vtensor<[],si8> -> !torch.int | |
%3755 = torch.aten._make_per_tensor_quantized_tensor %3750, %3753, %3754 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3756 = torch.aten.dequantize.self %3755 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_567 = torch.constant.int 1 | |
%int1_568 = torch.constant.int 1 | |
%int1_569 = torch.constant.int 1 | |
%int1_570 = torch.constant.int 1 | |
%int1_571 = torch.constant.int 1 | |
%int1_572 = torch.constant.int 1 | |
%int0_573 = torch.constant.int 0 | |
%3757 = torch.prim.ListConstruct %int1_567, %int1_568 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3758 = torch.prim.ListConstruct %int1_569, %int1_570 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3759 = torch.prim.ListConstruct %int1_571, %int1_572 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3760 = torch.prim.ListConstruct %int0_573, %int0_573 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_574 = torch.constant.bool false | |
%int1_575 = torch.constant.int 1 | |
%3761 = torch.aten.convolution %3732, %3744, %3756, %3759, %3757, %3758, %false_574, %3760, %int1_575 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_576 = torch.constant.float 0.1015625 | |
%3762 = torch.aten.leaky_relu %3761, %float1.015630e-01_576 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3763 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3764 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_577 = torch.constant.int 12 | |
%3765 = torch.aten.item %3763 : !torch.vtensor<[],f32> -> !torch.float | |
%3766 = torch.aten.item %3764 : !torch.vtensor<[],si8> -> !torch.int | |
%3767 = torch.aten.quantize_per_tensor %3762, %3765, %3766, %int12_577 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3768 = torch.aten.int_repr %3767 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3769 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3770 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3771 = torch.aten.item %3769 : !torch.vtensor<[],f32> -> !torch.float | |
%3772 = torch.aten.item %3770 : !torch.vtensor<[],si8> -> !torch.int | |
%3773 = torch.aten._make_per_tensor_quantized_tensor %3768, %3771, %3772 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3774 = torch.aten.dequantize.self %3773 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3775 = torch.prim.ListConstruct %3676, %3718, %3774 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_578 = torch.constant.int 1 | |
%3776 = torch.aten.cat %3775, %int1_578 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%3777 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3778 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_579 = torch.constant.int 12 | |
%3779 = torch.aten.item %3777 : !torch.vtensor<[],f32> -> !torch.float | |
%3780 = torch.aten.item %3778 : !torch.vtensor<[],si8> -> !torch.int | |
%3781 = torch.aten.quantize_per_tensor %3776, %3779, %3780, %int12_579 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%3782 = torch.aten.int_repr %3781 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%3783 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3784 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3785 = torch.aten.item %3783 : !torch.vtensor<[],f32> -> !torch.float | |
%3786 = torch.aten.item %3784 : !torch.vtensor<[],si8> -> !torch.int | |
%3787 = torch.aten._make_per_tensor_quantized_tensor %3782, %3785, %3786 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%3788 = torch.aten.dequantize.self %3787 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%3789 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3790 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_580 = torch.constant.int 12 | |
%3791 = torch.aten.item %3789 : !torch.vtensor<[],f32> -> !torch.float | |
%3792 = torch.aten.item %3790 : !torch.vtensor<[],si8> -> !torch.int | |
%3793 = torch.aten.quantize_per_tensor %66, %3791, %3792, %int12_580 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%3794 = torch.aten.int_repr %3793 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%3795 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3796 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3797 = torch.aten.item %3795 : !torch.vtensor<[],f32> -> !torch.float | |
%3798 = torch.aten.item %3796 : !torch.vtensor<[],si8> -> !torch.int | |
%3799 = torch.aten._make_per_tensor_quantized_tensor %3794, %3797, %3798 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%3800 = torch.aten.dequantize.self %3799 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%3801 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3802 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_581 = torch.constant.int 12 | |
%3803 = torch.aten.item %3801 : !torch.vtensor<[],f32> -> !torch.float | |
%3804 = torch.aten.item %3802 : !torch.vtensor<[],si8> -> !torch.int | |
%3805 = torch.aten.quantize_per_tensor %67, %3803, %3804, %int12_581 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3806 = torch.aten.int_repr %3805 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3807 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3808 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3809 = torch.aten.item %3807 : !torch.vtensor<[],f32> -> !torch.float | |
%3810 = torch.aten.item %3808 : !torch.vtensor<[],si8> -> !torch.int | |
%3811 = torch.aten._make_per_tensor_quantized_tensor %3806, %3809, %3810 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3812 = torch.aten.dequantize.self %3811 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_582 = torch.constant.int 1 | |
%int1_583 = torch.constant.int 1 | |
%int1_584 = torch.constant.int 1 | |
%int1_585 = torch.constant.int 1 | |
%int1_586 = torch.constant.int 1 | |
%int1_587 = torch.constant.int 1 | |
%int0_588 = torch.constant.int 0 | |
%3813 = torch.prim.ListConstruct %int1_582, %int1_583 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3814 = torch.prim.ListConstruct %int1_584, %int1_585 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3815 = torch.prim.ListConstruct %int1_586, %int1_587 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3816 = torch.prim.ListConstruct %int0_588, %int0_588 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_589 = torch.constant.bool false | |
%int1_590 = torch.constant.int 1 | |
%3817 = torch.aten.convolution %3788, %3800, %3812, %3815, %3813, %3814, %false_589, %3816, %int1_590 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_591 = torch.constant.float 0.1015625 | |
%3818 = torch.aten.leaky_relu %3817, %float1.015630e-01_591 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3819 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3820 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_592 = torch.constant.int 12 | |
%3821 = torch.aten.item %3819 : !torch.vtensor<[],f32> -> !torch.float | |
%3822 = torch.aten.item %3820 : !torch.vtensor<[],si8> -> !torch.int | |
%3823 = torch.aten.quantize_per_tensor %3818, %3821, %3822, %int12_592 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3824 = torch.aten.int_repr %3823 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3825 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3826 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3827 = torch.aten.item %3825 : !torch.vtensor<[],f32> -> !torch.float | |
%3828 = torch.aten.item %3826 : !torch.vtensor<[],si8> -> !torch.int | |
%3829 = torch.aten._make_per_tensor_quantized_tensor %3824, %3827, %3828 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3830 = torch.aten.dequantize.self %3829 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3831 = torch.prim.ListConstruct %3676, %3718, %3774, %3830 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_593 = torch.constant.int 1 | |
%3832 = torch.aten.cat %3831, %int1_593 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%3833 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3834 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_594 = torch.constant.int 12 | |
%3835 = torch.aten.item %3833 : !torch.vtensor<[],f32> -> !torch.float | |
%3836 = torch.aten.item %3834 : !torch.vtensor<[],si8> -> !torch.int | |
%3837 = torch.aten.quantize_per_tensor %3832, %3835, %3836, %int12_594 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%3838 = torch.aten.int_repr %3837 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%3839 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3840 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3841 = torch.aten.item %3839 : !torch.vtensor<[],f32> -> !torch.float | |
%3842 = torch.aten.item %3840 : !torch.vtensor<[],si8> -> !torch.int | |
%3843 = torch.aten._make_per_tensor_quantized_tensor %3838, %3841, %3842 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%3844 = torch.aten.dequantize.self %3843 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%3845 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3846 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_595 = torch.constant.int 12 | |
%3847 = torch.aten.item %3845 : !torch.vtensor<[],f32> -> !torch.float | |
%3848 = torch.aten.item %3846 : !torch.vtensor<[],si8> -> !torch.int | |
%3849 = torch.aten.quantize_per_tensor %68, %3847, %3848, %int12_595 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%3850 = torch.aten.int_repr %3849 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%3851 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3852 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3853 = torch.aten.item %3851 : !torch.vtensor<[],f32> -> !torch.float | |
%3854 = torch.aten.item %3852 : !torch.vtensor<[],si8> -> !torch.int | |
%3855 = torch.aten._make_per_tensor_quantized_tensor %3850, %3853, %3854 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%3856 = torch.aten.dequantize.self %3855 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%3857 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3858 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_596 = torch.constant.int 12 | |
%3859 = torch.aten.item %3857 : !torch.vtensor<[],f32> -> !torch.float | |
%3860 = torch.aten.item %3858 : !torch.vtensor<[],si8> -> !torch.int | |
%3861 = torch.aten.quantize_per_tensor %69, %3859, %3860, %int12_596 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3862 = torch.aten.int_repr %3861 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3863 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3864 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3865 = torch.aten.item %3863 : !torch.vtensor<[],f32> -> !torch.float | |
%3866 = torch.aten.item %3864 : !torch.vtensor<[],si8> -> !torch.int | |
%3867 = torch.aten._make_per_tensor_quantized_tensor %3862, %3865, %3866 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3868 = torch.aten.dequantize.self %3867 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_597 = torch.constant.int 1 | |
%int1_598 = torch.constant.int 1 | |
%int1_599 = torch.constant.int 1 | |
%int1_600 = torch.constant.int 1 | |
%int1_601 = torch.constant.int 1 | |
%int1_602 = torch.constant.int 1 | |
%int0_603 = torch.constant.int 0 | |
%3869 = torch.prim.ListConstruct %int1_597, %int1_598 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3870 = torch.prim.ListConstruct %int1_599, %int1_600 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3871 = torch.prim.ListConstruct %int1_601, %int1_602 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3872 = torch.prim.ListConstruct %int0_603, %int0_603 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_604 = torch.constant.bool false | |
%int1_605 = torch.constant.int 1 | |
%3873 = torch.aten.convolution %3844, %3856, %3868, %3871, %3869, %3870, %false_604, %3872, %int1_605 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_606 = torch.constant.float 0.1015625 | |
%3874 = torch.aten.leaky_relu %3873, %float1.015630e-01_606 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3875 = torch.prim.ListConstruct %3676, %3718, %3774, %3830, %3874 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_607 = torch.constant.int 1 | |
%3876 = torch.aten.cat %3875, %int1_607 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%3877 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3878 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_608 = torch.constant.int 12 | |
%3879 = torch.aten.item %3877 : !torch.vtensor<[],f32> -> !torch.float | |
%3880 = torch.aten.item %3878 : !torch.vtensor<[],si8> -> !torch.int | |
%3881 = torch.aten.quantize_per_tensor %3876, %3879, %3880, %int12_608 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%3882 = torch.aten.int_repr %3881 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%3883 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3884 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3885 = torch.aten.item %3883 : !torch.vtensor<[],f32> -> !torch.float | |
%3886 = torch.aten.item %3884 : !torch.vtensor<[],si8> -> !torch.int | |
%3887 = torch.aten._make_per_tensor_quantized_tensor %3882, %3885, %3886 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%3888 = torch.aten.dequantize.self %3887 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%3889 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3890 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_609 = torch.constant.int 12 | |
%3891 = torch.aten.item %3889 : !torch.vtensor<[],f32> -> !torch.float | |
%3892 = torch.aten.item %3890 : !torch.vtensor<[],si8> -> !torch.int | |
%3893 = torch.aten.quantize_per_tensor %70, %3891, %3892, %int12_609 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%3894 = torch.aten.int_repr %3893 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%3895 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3896 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3897 = torch.aten.item %3895 : !torch.vtensor<[],f32> -> !torch.float | |
%3898 = torch.aten.item %3896 : !torch.vtensor<[],si8> -> !torch.int | |
%3899 = torch.aten._make_per_tensor_quantized_tensor %3894, %3897, %3898 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%3900 = torch.aten.dequantize.self %3899 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%3901 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3902 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_610 = torch.constant.int 12 | |
%3903 = torch.aten.item %3901 : !torch.vtensor<[],f32> -> !torch.float | |
%3904 = torch.aten.item %3902 : !torch.vtensor<[],si8> -> !torch.int | |
%3905 = torch.aten.quantize_per_tensor %71, %3903, %3904, %int12_610 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%3906 = torch.aten.int_repr %3905 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%3907 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3908 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3909 = torch.aten.item %3907 : !torch.vtensor<[],f32> -> !torch.float | |
%3910 = torch.aten.item %3908 : !torch.vtensor<[],si8> -> !torch.int | |
%3911 = torch.aten._make_per_tensor_quantized_tensor %3906, %3909, %3910 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%3912 = torch.aten.dequantize.self %3911 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_611 = torch.constant.int 1 | |
%int1_612 = torch.constant.int 1 | |
%int1_613 = torch.constant.int 1 | |
%int1_614 = torch.constant.int 1 | |
%int1_615 = torch.constant.int 1 | |
%int1_616 = torch.constant.int 1 | |
%int0_617 = torch.constant.int 0 | |
%3913 = torch.prim.ListConstruct %int1_611, %int1_612 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3914 = torch.prim.ListConstruct %int1_613, %int1_614 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3915 = torch.prim.ListConstruct %int1_615, %int1_616 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3916 = torch.prim.ListConstruct %int0_617, %int0_617 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_618 = torch.constant.bool false | |
%int1_619 = torch.constant.int 1 | |
%3917 = torch.aten.convolution %3888, %3900, %3912, %3915, %3913, %3914, %false_618, %3916, %int1_619 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%3918 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3919 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_620 = torch.constant.int 12 | |
%3920 = torch.aten.item %3918 : !torch.vtensor<[],f32> -> !torch.float | |
%3921 = torch.aten.item %3919 : !torch.vtensor<[],si8> -> !torch.int | |
%3922 = torch.aten.quantize_per_tensor %3917, %3920, %3921, %int12_620 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3923 = torch.aten.int_repr %3922 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3924 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3925 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3926 = torch.aten.item %3924 : !torch.vtensor<[],f32> -> !torch.float | |
%3927 = torch.aten.item %3925 : !torch.vtensor<[],si8> -> !torch.int | |
%3928 = torch.aten._make_per_tensor_quantized_tensor %3923, %3926, %3927 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3929 = torch.aten.dequantize.self %3928 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%3930 = torch.aten.mul.Tensor %3929, %818 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%3931 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3932 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_621 = torch.constant.int 12 | |
%3933 = torch.aten.item %3931 : !torch.vtensor<[],f32> -> !torch.float | |
%3934 = torch.aten.item %3932 : !torch.vtensor<[],si8> -> !torch.int | |
%3935 = torch.aten.quantize_per_tensor %3930, %3933, %3934, %int12_621 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3936 = torch.aten.int_repr %3935 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3937 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3938 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3939 = torch.aten.item %3937 : !torch.vtensor<[],f32> -> !torch.float | |
%3940 = torch.aten.item %3938 : !torch.vtensor<[],si8> -> !torch.int | |
%3941 = torch.aten._make_per_tensor_quantized_tensor %3936, %3939, %3940 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3942 = torch.aten.dequantize.self %3941 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_622 = torch.constant.int 1 | |
%3943 = torch.aten.add.Tensor %3942, %3676, %int1_622 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%3944 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3945 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_623 = torch.constant.int 12 | |
%3946 = torch.aten.item %3944 : !torch.vtensor<[],f32> -> !torch.float | |
%3947 = torch.aten.item %3945 : !torch.vtensor<[],si8> -> !torch.int | |
%3948 = torch.aten.quantize_per_tensor %3943, %3946, %3947, %int12_623 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3949 = torch.aten.int_repr %3948 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%3950 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3951 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3952 = torch.aten.item %3950 : !torch.vtensor<[],f32> -> !torch.float | |
%3953 = torch.aten.item %3951 : !torch.vtensor<[],si8> -> !torch.int | |
%3954 = torch.aten._make_per_tensor_quantized_tensor %3949, %3952, %3953 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%3955 = torch.aten.dequantize.self %3954 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%3956 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3957 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_624 = torch.constant.int 12 | |
%3958 = torch.aten.item %3956 : !torch.vtensor<[],f32> -> !torch.float | |
%3959 = torch.aten.item %3957 : !torch.vtensor<[],si8> -> !torch.int | |
%3960 = torch.aten.quantize_per_tensor %72, %3958, %3959, %int12_624 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%3961 = torch.aten.int_repr %3960 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%3962 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3963 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3964 = torch.aten.item %3962 : !torch.vtensor<[],f32> -> !torch.float | |
%3965 = torch.aten.item %3963 : !torch.vtensor<[],si8> -> !torch.int | |
%3966 = torch.aten._make_per_tensor_quantized_tensor %3961, %3964, %3965 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%3967 = torch.aten.dequantize.self %3966 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%3968 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3969 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_625 = torch.constant.int 12 | |
%3970 = torch.aten.item %3968 : !torch.vtensor<[],f32> -> !torch.float | |
%3971 = torch.aten.item %3969 : !torch.vtensor<[],si8> -> !torch.int | |
%3972 = torch.aten.quantize_per_tensor %73, %3970, %3971, %int12_625 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3973 = torch.aten.int_repr %3972 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%3974 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3975 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3976 = torch.aten.item %3974 : !torch.vtensor<[],f32> -> !torch.float | |
%3977 = torch.aten.item %3975 : !torch.vtensor<[],si8> -> !torch.int | |
%3978 = torch.aten._make_per_tensor_quantized_tensor %3973, %3976, %3977 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%3979 = torch.aten.dequantize.self %3978 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_626 = torch.constant.int 1 | |
%int1_627 = torch.constant.int 1 | |
%int1_628 = torch.constant.int 1 | |
%int1_629 = torch.constant.int 1 | |
%int1_630 = torch.constant.int 1 | |
%int1_631 = torch.constant.int 1 | |
%int0_632 = torch.constant.int 0 | |
%3980 = torch.prim.ListConstruct %int1_626, %int1_627 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3981 = torch.prim.ListConstruct %int1_628, %int1_629 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3982 = torch.prim.ListConstruct %int1_630, %int1_631 : (!torch.int, !torch.int) -> !torch.list<int> | |
%3983 = torch.prim.ListConstruct %int0_632, %int0_632 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_633 = torch.constant.bool false | |
%int1_634 = torch.constant.int 1 | |
%3984 = torch.aten.convolution %3955, %3967, %3979, %3982, %3980, %3981, %false_633, %3983, %int1_634 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_635 = torch.constant.float 0.1015625 | |
%3985 = torch.aten.leaky_relu %3984, %float1.015630e-01_635 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%3986 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3987 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_636 = torch.constant.int 12 | |
%3988 = torch.aten.item %3986 : !torch.vtensor<[],f32> -> !torch.float | |
%3989 = torch.aten.item %3987 : !torch.vtensor<[],si8> -> !torch.int | |
%3990 = torch.aten.quantize_per_tensor %3985, %3988, %3989, %int12_636 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3991 = torch.aten.int_repr %3990 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%3992 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%3993 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%3994 = torch.aten.item %3992 : !torch.vtensor<[],f32> -> !torch.float | |
%3995 = torch.aten.item %3993 : !torch.vtensor<[],si8> -> !torch.int | |
%3996 = torch.aten._make_per_tensor_quantized_tensor %3991, %3994, %3995 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%3997 = torch.aten.dequantize.self %3996 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%3998 = torch.prim.ListConstruct %3955, %3997 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_637 = torch.constant.int 1 | |
%3999 = torch.aten.cat %3998, %int1_637 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%4000 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4001 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_638 = torch.constant.int 12 | |
%4002 = torch.aten.item %4000 : !torch.vtensor<[],f32> -> !torch.float | |
%4003 = torch.aten.item %4001 : !torch.vtensor<[],si8> -> !torch.int | |
%4004 = torch.aten.quantize_per_tensor %3999, %4002, %4003, %int12_638 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%4005 = torch.aten.int_repr %4004 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%4006 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4007 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4008 = torch.aten.item %4006 : !torch.vtensor<[],f32> -> !torch.float | |
%4009 = torch.aten.item %4007 : !torch.vtensor<[],si8> -> !torch.int | |
%4010 = torch.aten._make_per_tensor_quantized_tensor %4005, %4008, %4009 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%4011 = torch.aten.dequantize.self %4010 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%4012 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4013 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_639 = torch.constant.int 12 | |
%4014 = torch.aten.item %4012 : !torch.vtensor<[],f32> -> !torch.float | |
%4015 = torch.aten.item %4013 : !torch.vtensor<[],si8> -> !torch.int | |
%4016 = torch.aten.quantize_per_tensor %74, %4014, %4015, %int12_639 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%4017 = torch.aten.int_repr %4016 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%4018 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4019 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4020 = torch.aten.item %4018 : !torch.vtensor<[],f32> -> !torch.float | |
%4021 = torch.aten.item %4019 : !torch.vtensor<[],si8> -> !torch.int | |
%4022 = torch.aten._make_per_tensor_quantized_tensor %4017, %4020, %4021 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%4023 = torch.aten.dequantize.self %4022 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%4024 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4025 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_640 = torch.constant.int 12 | |
%4026 = torch.aten.item %4024 : !torch.vtensor<[],f32> -> !torch.float | |
%4027 = torch.aten.item %4025 : !torch.vtensor<[],si8> -> !torch.int | |
%4028 = torch.aten.quantize_per_tensor %75, %4026, %4027, %int12_640 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4029 = torch.aten.int_repr %4028 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4030 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4031 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4032 = torch.aten.item %4030 : !torch.vtensor<[],f32> -> !torch.float | |
%4033 = torch.aten.item %4031 : !torch.vtensor<[],si8> -> !torch.int | |
%4034 = torch.aten._make_per_tensor_quantized_tensor %4029, %4032, %4033 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4035 = torch.aten.dequantize.self %4034 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_641 = torch.constant.int 1 | |
%int1_642 = torch.constant.int 1 | |
%int1_643 = torch.constant.int 1 | |
%int1_644 = torch.constant.int 1 | |
%int1_645 = torch.constant.int 1 | |
%int1_646 = torch.constant.int 1 | |
%int0_647 = torch.constant.int 0 | |
%4036 = torch.prim.ListConstruct %int1_641, %int1_642 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4037 = torch.prim.ListConstruct %int1_643, %int1_644 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4038 = torch.prim.ListConstruct %int1_645, %int1_646 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4039 = torch.prim.ListConstruct %int0_647, %int0_647 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_648 = torch.constant.bool false | |
%int1_649 = torch.constant.int 1 | |
%4040 = torch.aten.convolution %4011, %4023, %4035, %4038, %4036, %4037, %false_648, %4039, %int1_649 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_650 = torch.constant.float 0.1015625 | |
%4041 = torch.aten.leaky_relu %4040, %float1.015630e-01_650 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4042 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4043 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_651 = torch.constant.int 12 | |
%4044 = torch.aten.item %4042 : !torch.vtensor<[],f32> -> !torch.float | |
%4045 = torch.aten.item %4043 : !torch.vtensor<[],si8> -> !torch.int | |
%4046 = torch.aten.quantize_per_tensor %4041, %4044, %4045, %int12_651 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4047 = torch.aten.int_repr %4046 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4048 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4049 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4050 = torch.aten.item %4048 : !torch.vtensor<[],f32> -> !torch.float | |
%4051 = torch.aten.item %4049 : !torch.vtensor<[],si8> -> !torch.int | |
%4052 = torch.aten._make_per_tensor_quantized_tensor %4047, %4050, %4051 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4053 = torch.aten.dequantize.self %4052 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4054 = torch.prim.ListConstruct %3955, %3997, %4053 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_652 = torch.constant.int 1 | |
%4055 = torch.aten.cat %4054, %int1_652 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%4056 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4057 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_653 = torch.constant.int 12 | |
%4058 = torch.aten.item %4056 : !torch.vtensor<[],f32> -> !torch.float | |
%4059 = torch.aten.item %4057 : !torch.vtensor<[],si8> -> !torch.int | |
%4060 = torch.aten.quantize_per_tensor %4055, %4058, %4059, %int12_653 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%4061 = torch.aten.int_repr %4060 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%4062 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4063 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4064 = torch.aten.item %4062 : !torch.vtensor<[],f32> -> !torch.float | |
%4065 = torch.aten.item %4063 : !torch.vtensor<[],si8> -> !torch.int | |
%4066 = torch.aten._make_per_tensor_quantized_tensor %4061, %4064, %4065 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%4067 = torch.aten.dequantize.self %4066 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%4068 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4069 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_654 = torch.constant.int 12 | |
%4070 = torch.aten.item %4068 : !torch.vtensor<[],f32> -> !torch.float | |
%4071 = torch.aten.item %4069 : !torch.vtensor<[],si8> -> !torch.int | |
%4072 = torch.aten.quantize_per_tensor %76, %4070, %4071, %int12_654 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%4073 = torch.aten.int_repr %4072 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%4074 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4075 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4076 = torch.aten.item %4074 : !torch.vtensor<[],f32> -> !torch.float | |
%4077 = torch.aten.item %4075 : !torch.vtensor<[],si8> -> !torch.int | |
%4078 = torch.aten._make_per_tensor_quantized_tensor %4073, %4076, %4077 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%4079 = torch.aten.dequantize.self %4078 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%4080 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4081 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_655 = torch.constant.int 12 | |
%4082 = torch.aten.item %4080 : !torch.vtensor<[],f32> -> !torch.float | |
%4083 = torch.aten.item %4081 : !torch.vtensor<[],si8> -> !torch.int | |
%4084 = torch.aten.quantize_per_tensor %77, %4082, %4083, %int12_655 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4085 = torch.aten.int_repr %4084 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4086 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4087 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4088 = torch.aten.item %4086 : !torch.vtensor<[],f32> -> !torch.float | |
%4089 = torch.aten.item %4087 : !torch.vtensor<[],si8> -> !torch.int | |
%4090 = torch.aten._make_per_tensor_quantized_tensor %4085, %4088, %4089 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4091 = torch.aten.dequantize.self %4090 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_656 = torch.constant.int 1 | |
%int1_657 = torch.constant.int 1 | |
%int1_658 = torch.constant.int 1 | |
%int1_659 = torch.constant.int 1 | |
%int1_660 = torch.constant.int 1 | |
%int1_661 = torch.constant.int 1 | |
%int0_662 = torch.constant.int 0 | |
%4092 = torch.prim.ListConstruct %int1_656, %int1_657 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4093 = torch.prim.ListConstruct %int1_658, %int1_659 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4094 = torch.prim.ListConstruct %int1_660, %int1_661 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4095 = torch.prim.ListConstruct %int0_662, %int0_662 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_663 = torch.constant.bool false | |
%int1_664 = torch.constant.int 1 | |
%4096 = torch.aten.convolution %4067, %4079, %4091, %4094, %4092, %4093, %false_663, %4095, %int1_664 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_665 = torch.constant.float 0.1015625 | |
%4097 = torch.aten.leaky_relu %4096, %float1.015630e-01_665 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4098 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4099 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_666 = torch.constant.int 12 | |
%4100 = torch.aten.item %4098 : !torch.vtensor<[],f32> -> !torch.float | |
%4101 = torch.aten.item %4099 : !torch.vtensor<[],si8> -> !torch.int | |
%4102 = torch.aten.quantize_per_tensor %4097, %4100, %4101, %int12_666 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4103 = torch.aten.int_repr %4102 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4104 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4105 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4106 = torch.aten.item %4104 : !torch.vtensor<[],f32> -> !torch.float | |
%4107 = torch.aten.item %4105 : !torch.vtensor<[],si8> -> !torch.int | |
%4108 = torch.aten._make_per_tensor_quantized_tensor %4103, %4106, %4107 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4109 = torch.aten.dequantize.self %4108 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4110 = torch.prim.ListConstruct %3955, %3997, %4053, %4109 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_667 = torch.constant.int 1 | |
%4111 = torch.aten.cat %4110, %int1_667 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%4112 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4113 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_668 = torch.constant.int 12 | |
%4114 = torch.aten.item %4112 : !torch.vtensor<[],f32> -> !torch.float | |
%4115 = torch.aten.item %4113 : !torch.vtensor<[],si8> -> !torch.int | |
%4116 = torch.aten.quantize_per_tensor %4111, %4114, %4115, %int12_668 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%4117 = torch.aten.int_repr %4116 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%4118 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4119 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4120 = torch.aten.item %4118 : !torch.vtensor<[],f32> -> !torch.float | |
%4121 = torch.aten.item %4119 : !torch.vtensor<[],si8> -> !torch.int | |
%4122 = torch.aten._make_per_tensor_quantized_tensor %4117, %4120, %4121 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%4123 = torch.aten.dequantize.self %4122 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%4124 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4125 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_669 = torch.constant.int 12 | |
%4126 = torch.aten.item %4124 : !torch.vtensor<[],f32> -> !torch.float | |
%4127 = torch.aten.item %4125 : !torch.vtensor<[],si8> -> !torch.int | |
%4128 = torch.aten.quantize_per_tensor %78, %4126, %4127, %int12_669 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%4129 = torch.aten.int_repr %4128 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%4130 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4131 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4132 = torch.aten.item %4130 : !torch.vtensor<[],f32> -> !torch.float | |
%4133 = torch.aten.item %4131 : !torch.vtensor<[],si8> -> !torch.int | |
%4134 = torch.aten._make_per_tensor_quantized_tensor %4129, %4132, %4133 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%4135 = torch.aten.dequantize.self %4134 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%4136 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4137 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_670 = torch.constant.int 12 | |
%4138 = torch.aten.item %4136 : !torch.vtensor<[],f32> -> !torch.float | |
%4139 = torch.aten.item %4137 : !torch.vtensor<[],si8> -> !torch.int | |
%4140 = torch.aten.quantize_per_tensor %79, %4138, %4139, %int12_670 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4141 = torch.aten.int_repr %4140 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4142 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4143 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4144 = torch.aten.item %4142 : !torch.vtensor<[],f32> -> !torch.float | |
%4145 = torch.aten.item %4143 : !torch.vtensor<[],si8> -> !torch.int | |
%4146 = torch.aten._make_per_tensor_quantized_tensor %4141, %4144, %4145 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4147 = torch.aten.dequantize.self %4146 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_671 = torch.constant.int 1 | |
%int1_672 = torch.constant.int 1 | |
%int1_673 = torch.constant.int 1 | |
%int1_674 = torch.constant.int 1 | |
%int1_675 = torch.constant.int 1 | |
%int1_676 = torch.constant.int 1 | |
%int0_677 = torch.constant.int 0 | |
%4148 = torch.prim.ListConstruct %int1_671, %int1_672 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4149 = torch.prim.ListConstruct %int1_673, %int1_674 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4150 = torch.prim.ListConstruct %int1_675, %int1_676 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4151 = torch.prim.ListConstruct %int0_677, %int0_677 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_678 = torch.constant.bool false | |
%int1_679 = torch.constant.int 1 | |
%4152 = torch.aten.convolution %4123, %4135, %4147, %4150, %4148, %4149, %false_678, %4151, %int1_679 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_680 = torch.constant.float 0.1015625 | |
%4153 = torch.aten.leaky_relu %4152, %float1.015630e-01_680 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4154 = torch.prim.ListConstruct %3955, %3997, %4053, %4109, %4153 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_681 = torch.constant.int 1 | |
%4155 = torch.aten.cat %4154, %int1_681 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%4156 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4157 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_682 = torch.constant.int 12 | |
%4158 = torch.aten.item %4156 : !torch.vtensor<[],f32> -> !torch.float | |
%4159 = torch.aten.item %4157 : !torch.vtensor<[],si8> -> !torch.int | |
%4160 = torch.aten.quantize_per_tensor %4155, %4158, %4159, %int12_682 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%4161 = torch.aten.int_repr %4160 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%4162 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4163 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4164 = torch.aten.item %4162 : !torch.vtensor<[],f32> -> !torch.float | |
%4165 = torch.aten.item %4163 : !torch.vtensor<[],si8> -> !torch.int | |
%4166 = torch.aten._make_per_tensor_quantized_tensor %4161, %4164, %4165 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%4167 = torch.aten.dequantize.self %4166 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%4168 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4169 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_683 = torch.constant.int 12 | |
%4170 = torch.aten.item %4168 : !torch.vtensor<[],f32> -> !torch.float | |
%4171 = torch.aten.item %4169 : !torch.vtensor<[],si8> -> !torch.int | |
%4172 = torch.aten.quantize_per_tensor %80, %4170, %4171, %int12_683 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%4173 = torch.aten.int_repr %4172 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%4174 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4175 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4176 = torch.aten.item %4174 : !torch.vtensor<[],f32> -> !torch.float | |
%4177 = torch.aten.item %4175 : !torch.vtensor<[],si8> -> !torch.int | |
%4178 = torch.aten._make_per_tensor_quantized_tensor %4173, %4176, %4177 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%4179 = torch.aten.dequantize.self %4178 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%4180 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4181 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_684 = torch.constant.int 12 | |
%4182 = torch.aten.item %4180 : !torch.vtensor<[],f32> -> !torch.float | |
%4183 = torch.aten.item %4181 : !torch.vtensor<[],si8> -> !torch.int | |
%4184 = torch.aten.quantize_per_tensor %81, %4182, %4183, %int12_684 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%4185 = torch.aten.int_repr %4184 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%4186 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4187 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4188 = torch.aten.item %4186 : !torch.vtensor<[],f32> -> !torch.float | |
%4189 = torch.aten.item %4187 : !torch.vtensor<[],si8> -> !torch.int | |
%4190 = torch.aten._make_per_tensor_quantized_tensor %4185, %4188, %4189 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%4191 = torch.aten.dequantize.self %4190 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_685 = torch.constant.int 1 | |
%int1_686 = torch.constant.int 1 | |
%int1_687 = torch.constant.int 1 | |
%int1_688 = torch.constant.int 1 | |
%int1_689 = torch.constant.int 1 | |
%int1_690 = torch.constant.int 1 | |
%int0_691 = torch.constant.int 0 | |
%4192 = torch.prim.ListConstruct %int1_685, %int1_686 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4193 = torch.prim.ListConstruct %int1_687, %int1_688 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4194 = torch.prim.ListConstruct %int1_689, %int1_690 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4195 = torch.prim.ListConstruct %int0_691, %int0_691 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_692 = torch.constant.bool false | |
%int1_693 = torch.constant.int 1 | |
%4196 = torch.aten.convolution %4167, %4179, %4191, %4194, %4192, %4193, %false_692, %4195, %int1_693 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%4197 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4198 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_694 = torch.constant.int 12 | |
%4199 = torch.aten.item %4197 : !torch.vtensor<[],f32> -> !torch.float | |
%4200 = torch.aten.item %4198 : !torch.vtensor<[],si8> -> !torch.int | |
%4201 = torch.aten.quantize_per_tensor %4196, %4199, %4200, %int12_694 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4202 = torch.aten.int_repr %4201 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4203 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4204 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4205 = torch.aten.item %4203 : !torch.vtensor<[],f32> -> !torch.float | |
%4206 = torch.aten.item %4204 : !torch.vtensor<[],si8> -> !torch.int | |
%4207 = torch.aten._make_per_tensor_quantized_tensor %4202, %4205, %4206 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4208 = torch.aten.dequantize.self %4207 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%4209 = torch.aten.mul.Tensor %4208, %831 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%4210 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4211 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_695 = torch.constant.int 12 | |
%4212 = torch.aten.item %4210 : !torch.vtensor<[],f32> -> !torch.float | |
%4213 = torch.aten.item %4211 : !torch.vtensor<[],si8> -> !torch.int | |
%4214 = torch.aten.quantize_per_tensor %4209, %4212, %4213, %int12_695 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4215 = torch.aten.int_repr %4214 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4216 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4217 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4218 = torch.aten.item %4216 : !torch.vtensor<[],f32> -> !torch.float | |
%4219 = torch.aten.item %4217 : !torch.vtensor<[],si8> -> !torch.int | |
%4220 = torch.aten._make_per_tensor_quantized_tensor %4215, %4218, %4219 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4221 = torch.aten.dequantize.self %4220 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_696 = torch.constant.int 1 | |
%4222 = torch.aten.add.Tensor %4221, %3955, %int1_696 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%4223 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4224 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_697 = torch.constant.int 12 | |
%4225 = torch.aten.item %4223 : !torch.vtensor<[],f32> -> !torch.float | |
%4226 = torch.aten.item %4224 : !torch.vtensor<[],si8> -> !torch.int | |
%4227 = torch.aten.quantize_per_tensor %4222, %4225, %4226, %int12_697 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4228 = torch.aten.int_repr %4227 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4229 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4230 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4231 = torch.aten.item %4229 : !torch.vtensor<[],f32> -> !torch.float | |
%4232 = torch.aten.item %4230 : !torch.vtensor<[],si8> -> !torch.int | |
%4233 = torch.aten._make_per_tensor_quantized_tensor %4228, %4231, %4232 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4234 = torch.aten.dequantize.self %4233 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%4235 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4236 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_698 = torch.constant.int 12 | |
%4237 = torch.aten.item %4235 : !torch.vtensor<[],f32> -> !torch.float | |
%4238 = torch.aten.item %4236 : !torch.vtensor<[],si8> -> !torch.int | |
%4239 = torch.aten.quantize_per_tensor %82, %4237, %4238, %int12_698 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%4240 = torch.aten.int_repr %4239 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%4241 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4242 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4243 = torch.aten.item %4241 : !torch.vtensor<[],f32> -> !torch.float | |
%4244 = torch.aten.item %4242 : !torch.vtensor<[],si8> -> !torch.int | |
%4245 = torch.aten._make_per_tensor_quantized_tensor %4240, %4243, %4244 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%4246 = torch.aten.dequantize.self %4245 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%4247 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4248 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_699 = torch.constant.int 12 | |
%4249 = torch.aten.item %4247 : !torch.vtensor<[],f32> -> !torch.float | |
%4250 = torch.aten.item %4248 : !torch.vtensor<[],si8> -> !torch.int | |
%4251 = torch.aten.quantize_per_tensor %83, %4249, %4250, %int12_699 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4252 = torch.aten.int_repr %4251 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4253 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4254 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4255 = torch.aten.item %4253 : !torch.vtensor<[],f32> -> !torch.float | |
%4256 = torch.aten.item %4254 : !torch.vtensor<[],si8> -> !torch.int | |
%4257 = torch.aten._make_per_tensor_quantized_tensor %4252, %4255, %4256 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4258 = torch.aten.dequantize.self %4257 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_700 = torch.constant.int 1 | |
%int1_701 = torch.constant.int 1 | |
%int1_702 = torch.constant.int 1 | |
%int1_703 = torch.constant.int 1 | |
%int1_704 = torch.constant.int 1 | |
%int1_705 = torch.constant.int 1 | |
%int0_706 = torch.constant.int 0 | |
%4259 = torch.prim.ListConstruct %int1_700, %int1_701 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4260 = torch.prim.ListConstruct %int1_702, %int1_703 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4261 = torch.prim.ListConstruct %int1_704, %int1_705 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4262 = torch.prim.ListConstruct %int0_706, %int0_706 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_707 = torch.constant.bool false | |
%int1_708 = torch.constant.int 1 | |
%4263 = torch.aten.convolution %4234, %4246, %4258, %4261, %4259, %4260, %false_707, %4262, %int1_708 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_709 = torch.constant.float 0.1015625 | |
%4264 = torch.aten.leaky_relu %4263, %float1.015630e-01_709 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4265 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4266 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_710 = torch.constant.int 12 | |
%4267 = torch.aten.item %4265 : !torch.vtensor<[],f32> -> !torch.float | |
%4268 = torch.aten.item %4266 : !torch.vtensor<[],si8> -> !torch.int | |
%4269 = torch.aten.quantize_per_tensor %4264, %4267, %4268, %int12_710 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4270 = torch.aten.int_repr %4269 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4271 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4272 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4273 = torch.aten.item %4271 : !torch.vtensor<[],f32> -> !torch.float | |
%4274 = torch.aten.item %4272 : !torch.vtensor<[],si8> -> !torch.int | |
%4275 = torch.aten._make_per_tensor_quantized_tensor %4270, %4273, %4274 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4276 = torch.aten.dequantize.self %4275 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4277 = torch.prim.ListConstruct %4234, %4276 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_711 = torch.constant.int 1 | |
%4278 = torch.aten.cat %4277, %int1_711 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%4279 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4280 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_712 = torch.constant.int 12 | |
%4281 = torch.aten.item %4279 : !torch.vtensor<[],f32> -> !torch.float | |
%4282 = torch.aten.item %4280 : !torch.vtensor<[],si8> -> !torch.int | |
%4283 = torch.aten.quantize_per_tensor %4278, %4281, %4282, %int12_712 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%4284 = torch.aten.int_repr %4283 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%4285 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4286 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4287 = torch.aten.item %4285 : !torch.vtensor<[],f32> -> !torch.float | |
%4288 = torch.aten.item %4286 : !torch.vtensor<[],si8> -> !torch.int | |
%4289 = torch.aten._make_per_tensor_quantized_tensor %4284, %4287, %4288 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%4290 = torch.aten.dequantize.self %4289 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%4291 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4292 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_713 = torch.constant.int 12 | |
%4293 = torch.aten.item %4291 : !torch.vtensor<[],f32> -> !torch.float | |
%4294 = torch.aten.item %4292 : !torch.vtensor<[],si8> -> !torch.int | |
%4295 = torch.aten.quantize_per_tensor %84, %4293, %4294, %int12_713 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%4296 = torch.aten.int_repr %4295 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%4297 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4298 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4299 = torch.aten.item %4297 : !torch.vtensor<[],f32> -> !torch.float | |
%4300 = torch.aten.item %4298 : !torch.vtensor<[],si8> -> !torch.int | |
%4301 = torch.aten._make_per_tensor_quantized_tensor %4296, %4299, %4300 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%4302 = torch.aten.dequantize.self %4301 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%4303 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4304 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_714 = torch.constant.int 12 | |
%4305 = torch.aten.item %4303 : !torch.vtensor<[],f32> -> !torch.float | |
%4306 = torch.aten.item %4304 : !torch.vtensor<[],si8> -> !torch.int | |
%4307 = torch.aten.quantize_per_tensor %85, %4305, %4306, %int12_714 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4308 = torch.aten.int_repr %4307 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4309 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4310 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4311 = torch.aten.item %4309 : !torch.vtensor<[],f32> -> !torch.float | |
%4312 = torch.aten.item %4310 : !torch.vtensor<[],si8> -> !torch.int | |
%4313 = torch.aten._make_per_tensor_quantized_tensor %4308, %4311, %4312 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4314 = torch.aten.dequantize.self %4313 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_715 = torch.constant.int 1 | |
%int1_716 = torch.constant.int 1 | |
%int1_717 = torch.constant.int 1 | |
%int1_718 = torch.constant.int 1 | |
%int1_719 = torch.constant.int 1 | |
%int1_720 = torch.constant.int 1 | |
%int0_721 = torch.constant.int 0 | |
%4315 = torch.prim.ListConstruct %int1_715, %int1_716 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4316 = torch.prim.ListConstruct %int1_717, %int1_718 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4317 = torch.prim.ListConstruct %int1_719, %int1_720 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4318 = torch.prim.ListConstruct %int0_721, %int0_721 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_722 = torch.constant.bool false | |
%int1_723 = torch.constant.int 1 | |
%4319 = torch.aten.convolution %4290, %4302, %4314, %4317, %4315, %4316, %false_722, %4318, %int1_723 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_724 = torch.constant.float 0.1015625 | |
%4320 = torch.aten.leaky_relu %4319, %float1.015630e-01_724 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4321 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4322 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_725 = torch.constant.int 12 | |
%4323 = torch.aten.item %4321 : !torch.vtensor<[],f32> -> !torch.float | |
%4324 = torch.aten.item %4322 : !torch.vtensor<[],si8> -> !torch.int | |
%4325 = torch.aten.quantize_per_tensor %4320, %4323, %4324, %int12_725 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4326 = torch.aten.int_repr %4325 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4327 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4328 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4329 = torch.aten.item %4327 : !torch.vtensor<[],f32> -> !torch.float | |
%4330 = torch.aten.item %4328 : !torch.vtensor<[],si8> -> !torch.int | |
%4331 = torch.aten._make_per_tensor_quantized_tensor %4326, %4329, %4330 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4332 = torch.aten.dequantize.self %4331 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4333 = torch.prim.ListConstruct %4234, %4276, %4332 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_726 = torch.constant.int 1 | |
%4334 = torch.aten.cat %4333, %int1_726 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%4335 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4336 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_727 = torch.constant.int 12 | |
%4337 = torch.aten.item %4335 : !torch.vtensor<[],f32> -> !torch.float | |
%4338 = torch.aten.item %4336 : !torch.vtensor<[],si8> -> !torch.int | |
%4339 = torch.aten.quantize_per_tensor %4334, %4337, %4338, %int12_727 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%4340 = torch.aten.int_repr %4339 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%4341 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4342 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4343 = torch.aten.item %4341 : !torch.vtensor<[],f32> -> !torch.float | |
%4344 = torch.aten.item %4342 : !torch.vtensor<[],si8> -> !torch.int | |
%4345 = torch.aten._make_per_tensor_quantized_tensor %4340, %4343, %4344 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%4346 = torch.aten.dequantize.self %4345 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%4347 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4348 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_728 = torch.constant.int 12 | |
%4349 = torch.aten.item %4347 : !torch.vtensor<[],f32> -> !torch.float | |
%4350 = torch.aten.item %4348 : !torch.vtensor<[],si8> -> !torch.int | |
%4351 = torch.aten.quantize_per_tensor %86, %4349, %4350, %int12_728 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%4352 = torch.aten.int_repr %4351 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%4353 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4354 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4355 = torch.aten.item %4353 : !torch.vtensor<[],f32> -> !torch.float | |
%4356 = torch.aten.item %4354 : !torch.vtensor<[],si8> -> !torch.int | |
%4357 = torch.aten._make_per_tensor_quantized_tensor %4352, %4355, %4356 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%4358 = torch.aten.dequantize.self %4357 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%4359 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4360 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_729 = torch.constant.int 12 | |
%4361 = torch.aten.item %4359 : !torch.vtensor<[],f32> -> !torch.float | |
%4362 = torch.aten.item %4360 : !torch.vtensor<[],si8> -> !torch.int | |
%4363 = torch.aten.quantize_per_tensor %87, %4361, %4362, %int12_729 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4364 = torch.aten.int_repr %4363 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4365 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4366 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4367 = torch.aten.item %4365 : !torch.vtensor<[],f32> -> !torch.float | |
%4368 = torch.aten.item %4366 : !torch.vtensor<[],si8> -> !torch.int | |
%4369 = torch.aten._make_per_tensor_quantized_tensor %4364, %4367, %4368 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4370 = torch.aten.dequantize.self %4369 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_730 = torch.constant.int 1 | |
%int1_731 = torch.constant.int 1 | |
%int1_732 = torch.constant.int 1 | |
%int1_733 = torch.constant.int 1 | |
%int1_734 = torch.constant.int 1 | |
%int1_735 = torch.constant.int 1 | |
%int0_736 = torch.constant.int 0 | |
%4371 = torch.prim.ListConstruct %int1_730, %int1_731 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4372 = torch.prim.ListConstruct %int1_732, %int1_733 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4373 = torch.prim.ListConstruct %int1_734, %int1_735 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4374 = torch.prim.ListConstruct %int0_736, %int0_736 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_737 = torch.constant.bool false | |
%int1_738 = torch.constant.int 1 | |
%4375 = torch.aten.convolution %4346, %4358, %4370, %4373, %4371, %4372, %false_737, %4374, %int1_738 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_739 = torch.constant.float 0.1015625 | |
%4376 = torch.aten.leaky_relu %4375, %float1.015630e-01_739 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4377 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4378 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_740 = torch.constant.int 12 | |
%4379 = torch.aten.item %4377 : !torch.vtensor<[],f32> -> !torch.float | |
%4380 = torch.aten.item %4378 : !torch.vtensor<[],si8> -> !torch.int | |
%4381 = torch.aten.quantize_per_tensor %4376, %4379, %4380, %int12_740 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4382 = torch.aten.int_repr %4381 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4383 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4384 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4385 = torch.aten.item %4383 : !torch.vtensor<[],f32> -> !torch.float | |
%4386 = torch.aten.item %4384 : !torch.vtensor<[],si8> -> !torch.int | |
%4387 = torch.aten._make_per_tensor_quantized_tensor %4382, %4385, %4386 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4388 = torch.aten.dequantize.self %4387 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4389 = torch.prim.ListConstruct %4234, %4276, %4332, %4388 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_741 = torch.constant.int 1 | |
%4390 = torch.aten.cat %4389, %int1_741 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%4391 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4392 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_742 = torch.constant.int 12 | |
%4393 = torch.aten.item %4391 : !torch.vtensor<[],f32> -> !torch.float | |
%4394 = torch.aten.item %4392 : !torch.vtensor<[],si8> -> !torch.int | |
%4395 = torch.aten.quantize_per_tensor %4390, %4393, %4394, %int12_742 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%4396 = torch.aten.int_repr %4395 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%4397 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4398 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4399 = torch.aten.item %4397 : !torch.vtensor<[],f32> -> !torch.float | |
%4400 = torch.aten.item %4398 : !torch.vtensor<[],si8> -> !torch.int | |
%4401 = torch.aten._make_per_tensor_quantized_tensor %4396, %4399, %4400 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%4402 = torch.aten.dequantize.self %4401 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%4403 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4404 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_743 = torch.constant.int 12 | |
%4405 = torch.aten.item %4403 : !torch.vtensor<[],f32> -> !torch.float | |
%4406 = torch.aten.item %4404 : !torch.vtensor<[],si8> -> !torch.int | |
%4407 = torch.aten.quantize_per_tensor %88, %4405, %4406, %int12_743 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%4408 = torch.aten.int_repr %4407 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%4409 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4410 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4411 = torch.aten.item %4409 : !torch.vtensor<[],f32> -> !torch.float | |
%4412 = torch.aten.item %4410 : !torch.vtensor<[],si8> -> !torch.int | |
%4413 = torch.aten._make_per_tensor_quantized_tensor %4408, %4411, %4412 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%4414 = torch.aten.dequantize.self %4413 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%4415 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4416 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_744 = torch.constant.int 12 | |
%4417 = torch.aten.item %4415 : !torch.vtensor<[],f32> -> !torch.float | |
%4418 = torch.aten.item %4416 : !torch.vtensor<[],si8> -> !torch.int | |
%4419 = torch.aten.quantize_per_tensor %89, %4417, %4418, %int12_744 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4420 = torch.aten.int_repr %4419 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4421 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4422 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4423 = torch.aten.item %4421 : !torch.vtensor<[],f32> -> !torch.float | |
%4424 = torch.aten.item %4422 : !torch.vtensor<[],si8> -> !torch.int | |
%4425 = torch.aten._make_per_tensor_quantized_tensor %4420, %4423, %4424 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4426 = torch.aten.dequantize.self %4425 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_745 = torch.constant.int 1 | |
%int1_746 = torch.constant.int 1 | |
%int1_747 = torch.constant.int 1 | |
%int1_748 = torch.constant.int 1 | |
%int1_749 = torch.constant.int 1 | |
%int1_750 = torch.constant.int 1 | |
%int0_751 = torch.constant.int 0 | |
%4427 = torch.prim.ListConstruct %int1_745, %int1_746 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4428 = torch.prim.ListConstruct %int1_747, %int1_748 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4429 = torch.prim.ListConstruct %int1_749, %int1_750 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4430 = torch.prim.ListConstruct %int0_751, %int0_751 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_752 = torch.constant.bool false | |
%int1_753 = torch.constant.int 1 | |
%4431 = torch.aten.convolution %4402, %4414, %4426, %4429, %4427, %4428, %false_752, %4430, %int1_753 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_754 = torch.constant.float 0.1015625 | |
%4432 = torch.aten.leaky_relu %4431, %float1.015630e-01_754 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4433 = torch.prim.ListConstruct %4234, %4276, %4332, %4388, %4432 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_755 = torch.constant.int 1 | |
%4434 = torch.aten.cat %4433, %int1_755 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%4435 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4436 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_756 = torch.constant.int 12 | |
%4437 = torch.aten.item %4435 : !torch.vtensor<[],f32> -> !torch.float | |
%4438 = torch.aten.item %4436 : !torch.vtensor<[],si8> -> !torch.int | |
%4439 = torch.aten.quantize_per_tensor %4434, %4437, %4438, %int12_756 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%4440 = torch.aten.int_repr %4439 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%4441 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4442 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4443 = torch.aten.item %4441 : !torch.vtensor<[],f32> -> !torch.float | |
%4444 = torch.aten.item %4442 : !torch.vtensor<[],si8> -> !torch.int | |
%4445 = torch.aten._make_per_tensor_quantized_tensor %4440, %4443, %4444 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%4446 = torch.aten.dequantize.self %4445 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%4447 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4448 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_757 = torch.constant.int 12 | |
%4449 = torch.aten.item %4447 : !torch.vtensor<[],f32> -> !torch.float | |
%4450 = torch.aten.item %4448 : !torch.vtensor<[],si8> -> !torch.int | |
%4451 = torch.aten.quantize_per_tensor %90, %4449, %4450, %int12_757 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%4452 = torch.aten.int_repr %4451 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%4453 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4454 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4455 = torch.aten.item %4453 : !torch.vtensor<[],f32> -> !torch.float | |
%4456 = torch.aten.item %4454 : !torch.vtensor<[],si8> -> !torch.int | |
%4457 = torch.aten._make_per_tensor_quantized_tensor %4452, %4455, %4456 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%4458 = torch.aten.dequantize.self %4457 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%4459 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4460 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_758 = torch.constant.int 12 | |
%4461 = torch.aten.item %4459 : !torch.vtensor<[],f32> -> !torch.float | |
%4462 = torch.aten.item %4460 : !torch.vtensor<[],si8> -> !torch.int | |
%4463 = torch.aten.quantize_per_tensor %91, %4461, %4462, %int12_758 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%4464 = torch.aten.int_repr %4463 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%4465 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4466 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4467 = torch.aten.item %4465 : !torch.vtensor<[],f32> -> !torch.float | |
%4468 = torch.aten.item %4466 : !torch.vtensor<[],si8> -> !torch.int | |
%4469 = torch.aten._make_per_tensor_quantized_tensor %4464, %4467, %4468 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%4470 = torch.aten.dequantize.self %4469 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_759 = torch.constant.int 1 | |
%int1_760 = torch.constant.int 1 | |
%int1_761 = torch.constant.int 1 | |
%int1_762 = torch.constant.int 1 | |
%int1_763 = torch.constant.int 1 | |
%int1_764 = torch.constant.int 1 | |
%int0_765 = torch.constant.int 0 | |
%4471 = torch.prim.ListConstruct %int1_759, %int1_760 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4472 = torch.prim.ListConstruct %int1_761, %int1_762 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4473 = torch.prim.ListConstruct %int1_763, %int1_764 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4474 = torch.prim.ListConstruct %int0_765, %int0_765 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_766 = torch.constant.bool false | |
%int1_767 = torch.constant.int 1 | |
%4475 = torch.aten.convolution %4446, %4458, %4470, %4473, %4471, %4472, %false_766, %4474, %int1_767 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%4476 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4477 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_768 = torch.constant.int 12 | |
%4478 = torch.aten.item %4476 : !torch.vtensor<[],f32> -> !torch.float | |
%4479 = torch.aten.item %4477 : !torch.vtensor<[],si8> -> !torch.int | |
%4480 = torch.aten.quantize_per_tensor %4475, %4478, %4479, %int12_768 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4481 = torch.aten.int_repr %4480 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4482 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4483 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4484 = torch.aten.item %4482 : !torch.vtensor<[],f32> -> !torch.float | |
%4485 = torch.aten.item %4483 : !torch.vtensor<[],si8> -> !torch.int | |
%4486 = torch.aten._make_per_tensor_quantized_tensor %4481, %4484, %4485 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4487 = torch.aten.dequantize.self %4486 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%4488 = torch.aten.mul.Tensor %4487, %844 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%4489 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4490 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_769 = torch.constant.int 12 | |
%4491 = torch.aten.item %4489 : !torch.vtensor<[],f32> -> !torch.float | |
%4492 = torch.aten.item %4490 : !torch.vtensor<[],si8> -> !torch.int | |
%4493 = torch.aten.quantize_per_tensor %4488, %4491, %4492, %int12_769 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4494 = torch.aten.int_repr %4493 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4495 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4496 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4497 = torch.aten.item %4495 : !torch.vtensor<[],f32> -> !torch.float | |
%4498 = torch.aten.item %4496 : !torch.vtensor<[],si8> -> !torch.int | |
%4499 = torch.aten._make_per_tensor_quantized_tensor %4494, %4497, %4498 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4500 = torch.aten.dequantize.self %4499 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_770 = torch.constant.int 1 | |
%4501 = torch.aten.add.Tensor %4500, %4234, %int1_770 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%4502 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4503 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_771 = torch.constant.int 12 | |
%4504 = torch.aten.item %4502 : !torch.vtensor<[],f32> -> !torch.float | |
%4505 = torch.aten.item %4503 : !torch.vtensor<[],si8> -> !torch.int | |
%4506 = torch.aten.quantize_per_tensor %4501, %4504, %4505, %int12_771 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4507 = torch.aten.int_repr %4506 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4508 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4509 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4510 = torch.aten.item %4508 : !torch.vtensor<[],f32> -> !torch.float | |
%4511 = torch.aten.item %4509 : !torch.vtensor<[],si8> -> !torch.int | |
%4512 = torch.aten._make_per_tensor_quantized_tensor %4507, %4510, %4511 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4513 = torch.aten.dequantize.self %4512 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%4514 = torch.aten.mul.Tensor %4513, %857 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%4515 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4516 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_772 = torch.constant.int 12 | |
%4517 = torch.aten.item %4515 : !torch.vtensor<[],f32> -> !torch.float | |
%4518 = torch.aten.item %4516 : !torch.vtensor<[],si8> -> !torch.int | |
%4519 = torch.aten.quantize_per_tensor %4514, %4517, %4518, %int12_772 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4520 = torch.aten.int_repr %4519 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4521 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4522 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4523 = torch.aten.item %4521 : !torch.vtensor<[],f32> -> !torch.float | |
%4524 = torch.aten.item %4522 : !torch.vtensor<[],si8> -> !torch.int | |
%4525 = torch.aten._make_per_tensor_quantized_tensor %4520, %4523, %4524 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4526 = torch.aten.dequantize.self %4525 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_773 = torch.constant.int 1 | |
%4527 = torch.aten.add.Tensor %4526, %3676, %int1_773 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%4528 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4529 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_774 = torch.constant.int 12 | |
%4530 = torch.aten.item %4528 : !torch.vtensor<[],f32> -> !torch.float | |
%4531 = torch.aten.item %4529 : !torch.vtensor<[],si8> -> !torch.int | |
%4532 = torch.aten.quantize_per_tensor %4527, %4530, %4531, %int12_774 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4533 = torch.aten.int_repr %4532 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4534 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4535 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4536 = torch.aten.item %4534 : !torch.vtensor<[],f32> -> !torch.float | |
%4537 = torch.aten.item %4535 : !torch.vtensor<[],si8> -> !torch.int | |
%4538 = torch.aten._make_per_tensor_quantized_tensor %4533, %4536, %4537 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4539 = torch.aten.dequantize.self %4538 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%4540 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4541 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_775 = torch.constant.int 12 | |
%4542 = torch.aten.item %4540 : !torch.vtensor<[],f32> -> !torch.float | |
%4543 = torch.aten.item %4541 : !torch.vtensor<[],si8> -> !torch.int | |
%4544 = torch.aten.quantize_per_tensor %92, %4542, %4543, %int12_775 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%4545 = torch.aten.int_repr %4544 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%4546 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4547 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4548 = torch.aten.item %4546 : !torch.vtensor<[],f32> -> !torch.float | |
%4549 = torch.aten.item %4547 : !torch.vtensor<[],si8> -> !torch.int | |
%4550 = torch.aten._make_per_tensor_quantized_tensor %4545, %4548, %4549 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%4551 = torch.aten.dequantize.self %4550 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%4552 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4553 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_776 = torch.constant.int 12 | |
%4554 = torch.aten.item %4552 : !torch.vtensor<[],f32> -> !torch.float | |
%4555 = torch.aten.item %4553 : !torch.vtensor<[],si8> -> !torch.int | |
%4556 = torch.aten.quantize_per_tensor %93, %4554, %4555, %int12_776 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4557 = torch.aten.int_repr %4556 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4558 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4559 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4560 = torch.aten.item %4558 : !torch.vtensor<[],f32> -> !torch.float | |
%4561 = torch.aten.item %4559 : !torch.vtensor<[],si8> -> !torch.int | |
%4562 = torch.aten._make_per_tensor_quantized_tensor %4557, %4560, %4561 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4563 = torch.aten.dequantize.self %4562 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_777 = torch.constant.int 1 | |
%int1_778 = torch.constant.int 1 | |
%int1_779 = torch.constant.int 1 | |
%int1_780 = torch.constant.int 1 | |
%int1_781 = torch.constant.int 1 | |
%int1_782 = torch.constant.int 1 | |
%int0_783 = torch.constant.int 0 | |
%4564 = torch.prim.ListConstruct %int1_777, %int1_778 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4565 = torch.prim.ListConstruct %int1_779, %int1_780 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4566 = torch.prim.ListConstruct %int1_781, %int1_782 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4567 = torch.prim.ListConstruct %int0_783, %int0_783 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_784 = torch.constant.bool false | |
%int1_785 = torch.constant.int 1 | |
%4568 = torch.aten.convolution %4539, %4551, %4563, %4566, %4564, %4565, %false_784, %4567, %int1_785 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_786 = torch.constant.float 0.1015625 | |
%4569 = torch.aten.leaky_relu %4568, %float1.015630e-01_786 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4570 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4571 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_787 = torch.constant.int 12 | |
%4572 = torch.aten.item %4570 : !torch.vtensor<[],f32> -> !torch.float | |
%4573 = torch.aten.item %4571 : !torch.vtensor<[],si8> -> !torch.int | |
%4574 = torch.aten.quantize_per_tensor %4569, %4572, %4573, %int12_787 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4575 = torch.aten.int_repr %4574 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4576 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4577 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4578 = torch.aten.item %4576 : !torch.vtensor<[],f32> -> !torch.float | |
%4579 = torch.aten.item %4577 : !torch.vtensor<[],si8> -> !torch.int | |
%4580 = torch.aten._make_per_tensor_quantized_tensor %4575, %4578, %4579 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4581 = torch.aten.dequantize.self %4580 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4582 = torch.prim.ListConstruct %4539, %4581 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_788 = torch.constant.int 1 | |
%4583 = torch.aten.cat %4582, %int1_788 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%4584 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4585 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_789 = torch.constant.int 12 | |
%4586 = torch.aten.item %4584 : !torch.vtensor<[],f32> -> !torch.float | |
%4587 = torch.aten.item %4585 : !torch.vtensor<[],si8> -> !torch.int | |
%4588 = torch.aten.quantize_per_tensor %4583, %4586, %4587, %int12_789 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%4589 = torch.aten.int_repr %4588 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%4590 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4591 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4592 = torch.aten.item %4590 : !torch.vtensor<[],f32> -> !torch.float | |
%4593 = torch.aten.item %4591 : !torch.vtensor<[],si8> -> !torch.int | |
%4594 = torch.aten._make_per_tensor_quantized_tensor %4589, %4592, %4593 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%4595 = torch.aten.dequantize.self %4594 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%4596 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4597 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_790 = torch.constant.int 12 | |
%4598 = torch.aten.item %4596 : !torch.vtensor<[],f32> -> !torch.float | |
%4599 = torch.aten.item %4597 : !torch.vtensor<[],si8> -> !torch.int | |
%4600 = torch.aten.quantize_per_tensor %94, %4598, %4599, %int12_790 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%4601 = torch.aten.int_repr %4600 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%4602 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4603 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4604 = torch.aten.item %4602 : !torch.vtensor<[],f32> -> !torch.float | |
%4605 = torch.aten.item %4603 : !torch.vtensor<[],si8> -> !torch.int | |
%4606 = torch.aten._make_per_tensor_quantized_tensor %4601, %4604, %4605 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%4607 = torch.aten.dequantize.self %4606 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%4608 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4609 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_791 = torch.constant.int 12 | |
%4610 = torch.aten.item %4608 : !torch.vtensor<[],f32> -> !torch.float | |
%4611 = torch.aten.item %4609 : !torch.vtensor<[],si8> -> !torch.int | |
%4612 = torch.aten.quantize_per_tensor %95, %4610, %4611, %int12_791 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4613 = torch.aten.int_repr %4612 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4614 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4615 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4616 = torch.aten.item %4614 : !torch.vtensor<[],f32> -> !torch.float | |
%4617 = torch.aten.item %4615 : !torch.vtensor<[],si8> -> !torch.int | |
%4618 = torch.aten._make_per_tensor_quantized_tensor %4613, %4616, %4617 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4619 = torch.aten.dequantize.self %4618 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_792 = torch.constant.int 1 | |
%int1_793 = torch.constant.int 1 | |
%int1_794 = torch.constant.int 1 | |
%int1_795 = torch.constant.int 1 | |
%int1_796 = torch.constant.int 1 | |
%int1_797 = torch.constant.int 1 | |
%int0_798 = torch.constant.int 0 | |
%4620 = torch.prim.ListConstruct %int1_792, %int1_793 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4621 = torch.prim.ListConstruct %int1_794, %int1_795 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4622 = torch.prim.ListConstruct %int1_796, %int1_797 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4623 = torch.prim.ListConstruct %int0_798, %int0_798 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_799 = torch.constant.bool false | |
%int1_800 = torch.constant.int 1 | |
%4624 = torch.aten.convolution %4595, %4607, %4619, %4622, %4620, %4621, %false_799, %4623, %int1_800 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_801 = torch.constant.float 0.1015625 | |
%4625 = torch.aten.leaky_relu %4624, %float1.015630e-01_801 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4626 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4627 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_802 = torch.constant.int 12 | |
%4628 = torch.aten.item %4626 : !torch.vtensor<[],f32> -> !torch.float | |
%4629 = torch.aten.item %4627 : !torch.vtensor<[],si8> -> !torch.int | |
%4630 = torch.aten.quantize_per_tensor %4625, %4628, %4629, %int12_802 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4631 = torch.aten.int_repr %4630 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4632 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4633 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4634 = torch.aten.item %4632 : !torch.vtensor<[],f32> -> !torch.float | |
%4635 = torch.aten.item %4633 : !torch.vtensor<[],si8> -> !torch.int | |
%4636 = torch.aten._make_per_tensor_quantized_tensor %4631, %4634, %4635 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4637 = torch.aten.dequantize.self %4636 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4638 = torch.prim.ListConstruct %4539, %4581, %4637 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_803 = torch.constant.int 1 | |
%4639 = torch.aten.cat %4638, %int1_803 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%4640 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4641 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_804 = torch.constant.int 12 | |
%4642 = torch.aten.item %4640 : !torch.vtensor<[],f32> -> !torch.float | |
%4643 = torch.aten.item %4641 : !torch.vtensor<[],si8> -> !torch.int | |
%4644 = torch.aten.quantize_per_tensor %4639, %4642, %4643, %int12_804 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%4645 = torch.aten.int_repr %4644 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%4646 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4647 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4648 = torch.aten.item %4646 : !torch.vtensor<[],f32> -> !torch.float | |
%4649 = torch.aten.item %4647 : !torch.vtensor<[],si8> -> !torch.int | |
%4650 = torch.aten._make_per_tensor_quantized_tensor %4645, %4648, %4649 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%4651 = torch.aten.dequantize.self %4650 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%4652 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4653 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_805 = torch.constant.int 12 | |
%4654 = torch.aten.item %4652 : !torch.vtensor<[],f32> -> !torch.float | |
%4655 = torch.aten.item %4653 : !torch.vtensor<[],si8> -> !torch.int | |
%4656 = torch.aten.quantize_per_tensor %96, %4654, %4655, %int12_805 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%4657 = torch.aten.int_repr %4656 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%4658 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4659 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4660 = torch.aten.item %4658 : !torch.vtensor<[],f32> -> !torch.float | |
%4661 = torch.aten.item %4659 : !torch.vtensor<[],si8> -> !torch.int | |
%4662 = torch.aten._make_per_tensor_quantized_tensor %4657, %4660, %4661 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%4663 = torch.aten.dequantize.self %4662 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%4664 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4665 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_806 = torch.constant.int 12 | |
%4666 = torch.aten.item %4664 : !torch.vtensor<[],f32> -> !torch.float | |
%4667 = torch.aten.item %4665 : !torch.vtensor<[],si8> -> !torch.int | |
%4668 = torch.aten.quantize_per_tensor %97, %4666, %4667, %int12_806 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4669 = torch.aten.int_repr %4668 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4670 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4671 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4672 = torch.aten.item %4670 : !torch.vtensor<[],f32> -> !torch.float | |
%4673 = torch.aten.item %4671 : !torch.vtensor<[],si8> -> !torch.int | |
%4674 = torch.aten._make_per_tensor_quantized_tensor %4669, %4672, %4673 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4675 = torch.aten.dequantize.self %4674 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_807 = torch.constant.int 1 | |
%int1_808 = torch.constant.int 1 | |
%int1_809 = torch.constant.int 1 | |
%int1_810 = torch.constant.int 1 | |
%int1_811 = torch.constant.int 1 | |
%int1_812 = torch.constant.int 1 | |
%int0_813 = torch.constant.int 0 | |
%4676 = torch.prim.ListConstruct %int1_807, %int1_808 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4677 = torch.prim.ListConstruct %int1_809, %int1_810 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4678 = torch.prim.ListConstruct %int1_811, %int1_812 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4679 = torch.prim.ListConstruct %int0_813, %int0_813 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_814 = torch.constant.bool false | |
%int1_815 = torch.constant.int 1 | |
%4680 = torch.aten.convolution %4651, %4663, %4675, %4678, %4676, %4677, %false_814, %4679, %int1_815 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_816 = torch.constant.float 0.1015625 | |
%4681 = torch.aten.leaky_relu %4680, %float1.015630e-01_816 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4682 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4683 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_817 = torch.constant.int 12 | |
%4684 = torch.aten.item %4682 : !torch.vtensor<[],f32> -> !torch.float | |
%4685 = torch.aten.item %4683 : !torch.vtensor<[],si8> -> !torch.int | |
%4686 = torch.aten.quantize_per_tensor %4681, %4684, %4685, %int12_817 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4687 = torch.aten.int_repr %4686 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4688 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4689 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4690 = torch.aten.item %4688 : !torch.vtensor<[],f32> -> !torch.float | |
%4691 = torch.aten.item %4689 : !torch.vtensor<[],si8> -> !torch.int | |
%4692 = torch.aten._make_per_tensor_quantized_tensor %4687, %4690, %4691 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4693 = torch.aten.dequantize.self %4692 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4694 = torch.prim.ListConstruct %4539, %4581, %4637, %4693 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_818 = torch.constant.int 1 | |
%4695 = torch.aten.cat %4694, %int1_818 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%4696 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4697 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_819 = torch.constant.int 12 | |
%4698 = torch.aten.item %4696 : !torch.vtensor<[],f32> -> !torch.float | |
%4699 = torch.aten.item %4697 : !torch.vtensor<[],si8> -> !torch.int | |
%4700 = torch.aten.quantize_per_tensor %4695, %4698, %4699, %int12_819 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%4701 = torch.aten.int_repr %4700 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%4702 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4703 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4704 = torch.aten.item %4702 : !torch.vtensor<[],f32> -> !torch.float | |
%4705 = torch.aten.item %4703 : !torch.vtensor<[],si8> -> !torch.int | |
%4706 = torch.aten._make_per_tensor_quantized_tensor %4701, %4704, %4705 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%4707 = torch.aten.dequantize.self %4706 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%4708 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4709 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_820 = torch.constant.int 12 | |
%4710 = torch.aten.item %4708 : !torch.vtensor<[],f32> -> !torch.float | |
%4711 = torch.aten.item %4709 : !torch.vtensor<[],si8> -> !torch.int | |
%4712 = torch.aten.quantize_per_tensor %98, %4710, %4711, %int12_820 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%4713 = torch.aten.int_repr %4712 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%4714 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4715 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4716 = torch.aten.item %4714 : !torch.vtensor<[],f32> -> !torch.float | |
%4717 = torch.aten.item %4715 : !torch.vtensor<[],si8> -> !torch.int | |
%4718 = torch.aten._make_per_tensor_quantized_tensor %4713, %4716, %4717 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%4719 = torch.aten.dequantize.self %4718 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%4720 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4721 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_821 = torch.constant.int 12 | |
%4722 = torch.aten.item %4720 : !torch.vtensor<[],f32> -> !torch.float | |
%4723 = torch.aten.item %4721 : !torch.vtensor<[],si8> -> !torch.int | |
%4724 = torch.aten.quantize_per_tensor %99, %4722, %4723, %int12_821 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4725 = torch.aten.int_repr %4724 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4726 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4727 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4728 = torch.aten.item %4726 : !torch.vtensor<[],f32> -> !torch.float | |
%4729 = torch.aten.item %4727 : !torch.vtensor<[],si8> -> !torch.int | |
%4730 = torch.aten._make_per_tensor_quantized_tensor %4725, %4728, %4729 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4731 = torch.aten.dequantize.self %4730 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_822 = torch.constant.int 1 | |
%int1_823 = torch.constant.int 1 | |
%int1_824 = torch.constant.int 1 | |
%int1_825 = torch.constant.int 1 | |
%int1_826 = torch.constant.int 1 | |
%int1_827 = torch.constant.int 1 | |
%int0_828 = torch.constant.int 0 | |
%4732 = torch.prim.ListConstruct %int1_822, %int1_823 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4733 = torch.prim.ListConstruct %int1_824, %int1_825 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4734 = torch.prim.ListConstruct %int1_826, %int1_827 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4735 = torch.prim.ListConstruct %int0_828, %int0_828 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_829 = torch.constant.bool false | |
%int1_830 = torch.constant.int 1 | |
%4736 = torch.aten.convolution %4707, %4719, %4731, %4734, %4732, %4733, %false_829, %4735, %int1_830 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_831 = torch.constant.float 0.1015625 | |
%4737 = torch.aten.leaky_relu %4736, %float1.015630e-01_831 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4738 = torch.prim.ListConstruct %4539, %4581, %4637, %4693, %4737 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_832 = torch.constant.int 1 | |
%4739 = torch.aten.cat %4738, %int1_832 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%4740 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4741 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_833 = torch.constant.int 12 | |
%4742 = torch.aten.item %4740 : !torch.vtensor<[],f32> -> !torch.float | |
%4743 = torch.aten.item %4741 : !torch.vtensor<[],si8> -> !torch.int | |
%4744 = torch.aten.quantize_per_tensor %4739, %4742, %4743, %int12_833 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%4745 = torch.aten.int_repr %4744 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%4746 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4747 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4748 = torch.aten.item %4746 : !torch.vtensor<[],f32> -> !torch.float | |
%4749 = torch.aten.item %4747 : !torch.vtensor<[],si8> -> !torch.int | |
%4750 = torch.aten._make_per_tensor_quantized_tensor %4745, %4748, %4749 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%4751 = torch.aten.dequantize.self %4750 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%4752 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4753 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_834 = torch.constant.int 12 | |
%4754 = torch.aten.item %4752 : !torch.vtensor<[],f32> -> !torch.float | |
%4755 = torch.aten.item %4753 : !torch.vtensor<[],si8> -> !torch.int | |
%4756 = torch.aten.quantize_per_tensor %100, %4754, %4755, %int12_834 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%4757 = torch.aten.int_repr %4756 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%4758 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4759 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4760 = torch.aten.item %4758 : !torch.vtensor<[],f32> -> !torch.float | |
%4761 = torch.aten.item %4759 : !torch.vtensor<[],si8> -> !torch.int | |
%4762 = torch.aten._make_per_tensor_quantized_tensor %4757, %4760, %4761 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%4763 = torch.aten.dequantize.self %4762 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%4764 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4765 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_835 = torch.constant.int 12 | |
%4766 = torch.aten.item %4764 : !torch.vtensor<[],f32> -> !torch.float | |
%4767 = torch.aten.item %4765 : !torch.vtensor<[],si8> -> !torch.int | |
%4768 = torch.aten.quantize_per_tensor %101, %4766, %4767, %int12_835 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%4769 = torch.aten.int_repr %4768 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%4770 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4771 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4772 = torch.aten.item %4770 : !torch.vtensor<[],f32> -> !torch.float | |
%4773 = torch.aten.item %4771 : !torch.vtensor<[],si8> -> !torch.int | |
%4774 = torch.aten._make_per_tensor_quantized_tensor %4769, %4772, %4773 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%4775 = torch.aten.dequantize.self %4774 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_836 = torch.constant.int 1 | |
%int1_837 = torch.constant.int 1 | |
%int1_838 = torch.constant.int 1 | |
%int1_839 = torch.constant.int 1 | |
%int1_840 = torch.constant.int 1 | |
%int1_841 = torch.constant.int 1 | |
%int0_842 = torch.constant.int 0 | |
%4776 = torch.prim.ListConstruct %int1_836, %int1_837 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4777 = torch.prim.ListConstruct %int1_838, %int1_839 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4778 = torch.prim.ListConstruct %int1_840, %int1_841 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4779 = torch.prim.ListConstruct %int0_842, %int0_842 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_843 = torch.constant.bool false | |
%int1_844 = torch.constant.int 1 | |
%4780 = torch.aten.convolution %4751, %4763, %4775, %4778, %4776, %4777, %false_843, %4779, %int1_844 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%4781 = torch.vtensor.literal(dense<4.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4782 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_845 = torch.constant.int 12 | |
%4783 = torch.aten.item %4781 : !torch.vtensor<[],f32> -> !torch.float | |
%4784 = torch.aten.item %4782 : !torch.vtensor<[],si8> -> !torch.int | |
%4785 = torch.aten.quantize_per_tensor %4780, %4783, %4784, %int12_845 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4786 = torch.aten.int_repr %4785 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4787 = torch.vtensor.literal(dense<4.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4788 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4789 = torch.aten.item %4787 : !torch.vtensor<[],f32> -> !torch.float | |
%4790 = torch.aten.item %4788 : !torch.vtensor<[],si8> -> !torch.int | |
%4791 = torch.aten._make_per_tensor_quantized_tensor %4786, %4789, %4790 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4792 = torch.aten.dequantize.self %4791 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%4793 = torch.aten.mul.Tensor %4792, %870 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%4794 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4795 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_846 = torch.constant.int 12 | |
%4796 = torch.aten.item %4794 : !torch.vtensor<[],f32> -> !torch.float | |
%4797 = torch.aten.item %4795 : !torch.vtensor<[],si8> -> !torch.int | |
%4798 = torch.aten.quantize_per_tensor %4793, %4796, %4797, %int12_846 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4799 = torch.aten.int_repr %4798 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4800 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4801 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4802 = torch.aten.item %4800 : !torch.vtensor<[],f32> -> !torch.float | |
%4803 = torch.aten.item %4801 : !torch.vtensor<[],si8> -> !torch.int | |
%4804 = torch.aten._make_per_tensor_quantized_tensor %4799, %4802, %4803 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4805 = torch.aten.dequantize.self %4804 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_847 = torch.constant.int 1 | |
%4806 = torch.aten.add.Tensor %4805, %4539, %int1_847 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%4807 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4808 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_848 = torch.constant.int 12 | |
%4809 = torch.aten.item %4807 : !torch.vtensor<[],f32> -> !torch.float | |
%4810 = torch.aten.item %4808 : !torch.vtensor<[],si8> -> !torch.int | |
%4811 = torch.aten.quantize_per_tensor %4806, %4809, %4810, %int12_848 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4812 = torch.aten.int_repr %4811 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%4813 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4814 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4815 = torch.aten.item %4813 : !torch.vtensor<[],f32> -> !torch.float | |
%4816 = torch.aten.item %4814 : !torch.vtensor<[],si8> -> !torch.int | |
%4817 = torch.aten._make_per_tensor_quantized_tensor %4812, %4815, %4816 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%4818 = torch.aten.dequantize.self %4817 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%4819 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4820 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_849 = torch.constant.int 12 | |
%4821 = torch.aten.item %4819 : !torch.vtensor<[],f32> -> !torch.float | |
%4822 = torch.aten.item %4820 : !torch.vtensor<[],si8> -> !torch.int | |
%4823 = torch.aten.quantize_per_tensor %102, %4821, %4822, %int12_849 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%4824 = torch.aten.int_repr %4823 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%4825 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4826 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4827 = torch.aten.item %4825 : !torch.vtensor<[],f32> -> !torch.float | |
%4828 = torch.aten.item %4826 : !torch.vtensor<[],si8> -> !torch.int | |
%4829 = torch.aten._make_per_tensor_quantized_tensor %4824, %4827, %4828 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%4830 = torch.aten.dequantize.self %4829 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%4831 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4832 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_850 = torch.constant.int 12 | |
%4833 = torch.aten.item %4831 : !torch.vtensor<[],f32> -> !torch.float | |
%4834 = torch.aten.item %4832 : !torch.vtensor<[],si8> -> !torch.int | |
%4835 = torch.aten.quantize_per_tensor %103, %4833, %4834, %int12_850 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4836 = torch.aten.int_repr %4835 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4837 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4838 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4839 = torch.aten.item %4837 : !torch.vtensor<[],f32> -> !torch.float | |
%4840 = torch.aten.item %4838 : !torch.vtensor<[],si8> -> !torch.int | |
%4841 = torch.aten._make_per_tensor_quantized_tensor %4836, %4839, %4840 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4842 = torch.aten.dequantize.self %4841 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_851 = torch.constant.int 1 | |
%int1_852 = torch.constant.int 1 | |
%int1_853 = torch.constant.int 1 | |
%int1_854 = torch.constant.int 1 | |
%int1_855 = torch.constant.int 1 | |
%int1_856 = torch.constant.int 1 | |
%int0_857 = torch.constant.int 0 | |
%4843 = torch.prim.ListConstruct %int1_851, %int1_852 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4844 = torch.prim.ListConstruct %int1_853, %int1_854 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4845 = torch.prim.ListConstruct %int1_855, %int1_856 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4846 = torch.prim.ListConstruct %int0_857, %int0_857 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_858 = torch.constant.bool false | |
%int1_859 = torch.constant.int 1 | |
%4847 = torch.aten.convolution %4818, %4830, %4842, %4845, %4843, %4844, %false_858, %4846, %int1_859 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_860 = torch.constant.float 0.1015625 | |
%4848 = torch.aten.leaky_relu %4847, %float1.015630e-01_860 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4849 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4850 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_861 = torch.constant.int 12 | |
%4851 = torch.aten.item %4849 : !torch.vtensor<[],f32> -> !torch.float | |
%4852 = torch.aten.item %4850 : !torch.vtensor<[],si8> -> !torch.int | |
%4853 = torch.aten.quantize_per_tensor %4848, %4851, %4852, %int12_861 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4854 = torch.aten.int_repr %4853 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4855 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4856 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4857 = torch.aten.item %4855 : !torch.vtensor<[],f32> -> !torch.float | |
%4858 = torch.aten.item %4856 : !torch.vtensor<[],si8> -> !torch.int | |
%4859 = torch.aten._make_per_tensor_quantized_tensor %4854, %4857, %4858 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4860 = torch.aten.dequantize.self %4859 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4861 = torch.prim.ListConstruct %4818, %4860 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_862 = torch.constant.int 1 | |
%4862 = torch.aten.cat %4861, %int1_862 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%4863 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4864 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_863 = torch.constant.int 12 | |
%4865 = torch.aten.item %4863 : !torch.vtensor<[],f32> -> !torch.float | |
%4866 = torch.aten.item %4864 : !torch.vtensor<[],si8> -> !torch.int | |
%4867 = torch.aten.quantize_per_tensor %4862, %4865, %4866, %int12_863 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%4868 = torch.aten.int_repr %4867 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%4869 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4870 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4871 = torch.aten.item %4869 : !torch.vtensor<[],f32> -> !torch.float | |
%4872 = torch.aten.item %4870 : !torch.vtensor<[],si8> -> !torch.int | |
%4873 = torch.aten._make_per_tensor_quantized_tensor %4868, %4871, %4872 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%4874 = torch.aten.dequantize.self %4873 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%4875 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4876 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_864 = torch.constant.int 12 | |
%4877 = torch.aten.item %4875 : !torch.vtensor<[],f32> -> !torch.float | |
%4878 = torch.aten.item %4876 : !torch.vtensor<[],si8> -> !torch.int | |
%4879 = torch.aten.quantize_per_tensor %104, %4877, %4878, %int12_864 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%4880 = torch.aten.int_repr %4879 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%4881 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4882 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4883 = torch.aten.item %4881 : !torch.vtensor<[],f32> -> !torch.float | |
%4884 = torch.aten.item %4882 : !torch.vtensor<[],si8> -> !torch.int | |
%4885 = torch.aten._make_per_tensor_quantized_tensor %4880, %4883, %4884 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%4886 = torch.aten.dequantize.self %4885 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%4887 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4888 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_865 = torch.constant.int 12 | |
%4889 = torch.aten.item %4887 : !torch.vtensor<[],f32> -> !torch.float | |
%4890 = torch.aten.item %4888 : !torch.vtensor<[],si8> -> !torch.int | |
%4891 = torch.aten.quantize_per_tensor %105, %4889, %4890, %int12_865 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4892 = torch.aten.int_repr %4891 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4893 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4894 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4895 = torch.aten.item %4893 : !torch.vtensor<[],f32> -> !torch.float | |
%4896 = torch.aten.item %4894 : !torch.vtensor<[],si8> -> !torch.int | |
%4897 = torch.aten._make_per_tensor_quantized_tensor %4892, %4895, %4896 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4898 = torch.aten.dequantize.self %4897 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_866 = torch.constant.int 1 | |
%int1_867 = torch.constant.int 1 | |
%int1_868 = torch.constant.int 1 | |
%int1_869 = torch.constant.int 1 | |
%int1_870 = torch.constant.int 1 | |
%int1_871 = torch.constant.int 1 | |
%int0_872 = torch.constant.int 0 | |
%4899 = torch.prim.ListConstruct %int1_866, %int1_867 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4900 = torch.prim.ListConstruct %int1_868, %int1_869 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4901 = torch.prim.ListConstruct %int1_870, %int1_871 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4902 = torch.prim.ListConstruct %int0_872, %int0_872 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_873 = torch.constant.bool false | |
%int1_874 = torch.constant.int 1 | |
%4903 = torch.aten.convolution %4874, %4886, %4898, %4901, %4899, %4900, %false_873, %4902, %int1_874 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_875 = torch.constant.float 0.1015625 | |
%4904 = torch.aten.leaky_relu %4903, %float1.015630e-01_875 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4905 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4906 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_876 = torch.constant.int 12 | |
%4907 = torch.aten.item %4905 : !torch.vtensor<[],f32> -> !torch.float | |
%4908 = torch.aten.item %4906 : !torch.vtensor<[],si8> -> !torch.int | |
%4909 = torch.aten.quantize_per_tensor %4904, %4907, %4908, %int12_876 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4910 = torch.aten.int_repr %4909 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4911 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4912 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4913 = torch.aten.item %4911 : !torch.vtensor<[],f32> -> !torch.float | |
%4914 = torch.aten.item %4912 : !torch.vtensor<[],si8> -> !torch.int | |
%4915 = torch.aten._make_per_tensor_quantized_tensor %4910, %4913, %4914 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4916 = torch.aten.dequantize.self %4915 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4917 = torch.prim.ListConstruct %4818, %4860, %4916 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_877 = torch.constant.int 1 | |
%4918 = torch.aten.cat %4917, %int1_877 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%4919 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4920 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_878 = torch.constant.int 12 | |
%4921 = torch.aten.item %4919 : !torch.vtensor<[],f32> -> !torch.float | |
%4922 = torch.aten.item %4920 : !torch.vtensor<[],si8> -> !torch.int | |
%4923 = torch.aten.quantize_per_tensor %4918, %4921, %4922, %int12_878 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%4924 = torch.aten.int_repr %4923 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%4925 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4926 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4927 = torch.aten.item %4925 : !torch.vtensor<[],f32> -> !torch.float | |
%4928 = torch.aten.item %4926 : !torch.vtensor<[],si8> -> !torch.int | |
%4929 = torch.aten._make_per_tensor_quantized_tensor %4924, %4927, %4928 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%4930 = torch.aten.dequantize.self %4929 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%4931 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4932 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_879 = torch.constant.int 12 | |
%4933 = torch.aten.item %4931 : !torch.vtensor<[],f32> -> !torch.float | |
%4934 = torch.aten.item %4932 : !torch.vtensor<[],si8> -> !torch.int | |
%4935 = torch.aten.quantize_per_tensor %106, %4933, %4934, %int12_879 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%4936 = torch.aten.int_repr %4935 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%4937 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4938 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4939 = torch.aten.item %4937 : !torch.vtensor<[],f32> -> !torch.float | |
%4940 = torch.aten.item %4938 : !torch.vtensor<[],si8> -> !torch.int | |
%4941 = torch.aten._make_per_tensor_quantized_tensor %4936, %4939, %4940 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%4942 = torch.aten.dequantize.self %4941 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%4943 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4944 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_880 = torch.constant.int 12 | |
%4945 = torch.aten.item %4943 : !torch.vtensor<[],f32> -> !torch.float | |
%4946 = torch.aten.item %4944 : !torch.vtensor<[],si8> -> !torch.int | |
%4947 = torch.aten.quantize_per_tensor %107, %4945, %4946, %int12_880 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4948 = torch.aten.int_repr %4947 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%4949 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4950 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4951 = torch.aten.item %4949 : !torch.vtensor<[],f32> -> !torch.float | |
%4952 = torch.aten.item %4950 : !torch.vtensor<[],si8> -> !torch.int | |
%4953 = torch.aten._make_per_tensor_quantized_tensor %4948, %4951, %4952 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%4954 = torch.aten.dequantize.self %4953 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_881 = torch.constant.int 1 | |
%int1_882 = torch.constant.int 1 | |
%int1_883 = torch.constant.int 1 | |
%int1_884 = torch.constant.int 1 | |
%int1_885 = torch.constant.int 1 | |
%int1_886 = torch.constant.int 1 | |
%int0_887 = torch.constant.int 0 | |
%4955 = torch.prim.ListConstruct %int1_881, %int1_882 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4956 = torch.prim.ListConstruct %int1_883, %int1_884 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4957 = torch.prim.ListConstruct %int1_885, %int1_886 : (!torch.int, !torch.int) -> !torch.list<int> | |
%4958 = torch.prim.ListConstruct %int0_887, %int0_887 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_888 = torch.constant.bool false | |
%int1_889 = torch.constant.int 1 | |
%4959 = torch.aten.convolution %4930, %4942, %4954, %4957, %4955, %4956, %false_888, %4958, %int1_889 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_890 = torch.constant.float 0.1015625 | |
%4960 = torch.aten.leaky_relu %4959, %float1.015630e-01_890 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%4961 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4962 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_891 = torch.constant.int 12 | |
%4963 = torch.aten.item %4961 : !torch.vtensor<[],f32> -> !torch.float | |
%4964 = torch.aten.item %4962 : !torch.vtensor<[],si8> -> !torch.int | |
%4965 = torch.aten.quantize_per_tensor %4960, %4963, %4964, %int12_891 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4966 = torch.aten.int_repr %4965 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%4967 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4968 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4969 = torch.aten.item %4967 : !torch.vtensor<[],f32> -> !torch.float | |
%4970 = torch.aten.item %4968 : !torch.vtensor<[],si8> -> !torch.int | |
%4971 = torch.aten._make_per_tensor_quantized_tensor %4966, %4969, %4970 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%4972 = torch.aten.dequantize.self %4971 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%4973 = torch.prim.ListConstruct %4818, %4860, %4916, %4972 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_892 = torch.constant.int 1 | |
%4974 = torch.aten.cat %4973, %int1_892 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%4975 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4976 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_893 = torch.constant.int 12 | |
%4977 = torch.aten.item %4975 : !torch.vtensor<[],f32> -> !torch.float | |
%4978 = torch.aten.item %4976 : !torch.vtensor<[],si8> -> !torch.int | |
%4979 = torch.aten.quantize_per_tensor %4974, %4977, %4978, %int12_893 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%4980 = torch.aten.int_repr %4979 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%4981 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4982 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4983 = torch.aten.item %4981 : !torch.vtensor<[],f32> -> !torch.float | |
%4984 = torch.aten.item %4982 : !torch.vtensor<[],si8> -> !torch.int | |
%4985 = torch.aten._make_per_tensor_quantized_tensor %4980, %4983, %4984 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%4986 = torch.aten.dequantize.self %4985 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%4987 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4988 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_894 = torch.constant.int 12 | |
%4989 = torch.aten.item %4987 : !torch.vtensor<[],f32> -> !torch.float | |
%4990 = torch.aten.item %4988 : !torch.vtensor<[],si8> -> !torch.int | |
%4991 = torch.aten.quantize_per_tensor %108, %4989, %4990, %int12_894 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%4992 = torch.aten.int_repr %4991 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%4993 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%4994 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%4995 = torch.aten.item %4993 : !torch.vtensor<[],f32> -> !torch.float | |
%4996 = torch.aten.item %4994 : !torch.vtensor<[],si8> -> !torch.int | |
%4997 = torch.aten._make_per_tensor_quantized_tensor %4992, %4995, %4996 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%4998 = torch.aten.dequantize.self %4997 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%4999 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5000 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_895 = torch.constant.int 12 | |
%5001 = torch.aten.item %4999 : !torch.vtensor<[],f32> -> !torch.float | |
%5002 = torch.aten.item %5000 : !torch.vtensor<[],si8> -> !torch.int | |
%5003 = torch.aten.quantize_per_tensor %109, %5001, %5002, %int12_895 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5004 = torch.aten.int_repr %5003 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5005 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5006 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5007 = torch.aten.item %5005 : !torch.vtensor<[],f32> -> !torch.float | |
%5008 = torch.aten.item %5006 : !torch.vtensor<[],si8> -> !torch.int | |
%5009 = torch.aten._make_per_tensor_quantized_tensor %5004, %5007, %5008 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5010 = torch.aten.dequantize.self %5009 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_896 = torch.constant.int 1 | |
%int1_897 = torch.constant.int 1 | |
%int1_898 = torch.constant.int 1 | |
%int1_899 = torch.constant.int 1 | |
%int1_900 = torch.constant.int 1 | |
%int1_901 = torch.constant.int 1 | |
%int0_902 = torch.constant.int 0 | |
%5011 = torch.prim.ListConstruct %int1_896, %int1_897 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5012 = torch.prim.ListConstruct %int1_898, %int1_899 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5013 = torch.prim.ListConstruct %int1_900, %int1_901 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5014 = torch.prim.ListConstruct %int0_902, %int0_902 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_903 = torch.constant.bool false | |
%int1_904 = torch.constant.int 1 | |
%5015 = torch.aten.convolution %4986, %4998, %5010, %5013, %5011, %5012, %false_903, %5014, %int1_904 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_905 = torch.constant.float 0.1015625 | |
%5016 = torch.aten.leaky_relu %5015, %float1.015630e-01_905 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5017 = torch.prim.ListConstruct %4818, %4860, %4916, %4972, %5016 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_906 = torch.constant.int 1 | |
%5018 = torch.aten.cat %5017, %int1_906 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%5019 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5020 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_907 = torch.constant.int 12 | |
%5021 = torch.aten.item %5019 : !torch.vtensor<[],f32> -> !torch.float | |
%5022 = torch.aten.item %5020 : !torch.vtensor<[],si8> -> !torch.int | |
%5023 = torch.aten.quantize_per_tensor %5018, %5021, %5022, %int12_907 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%5024 = torch.aten.int_repr %5023 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%5025 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5026 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5027 = torch.aten.item %5025 : !torch.vtensor<[],f32> -> !torch.float | |
%5028 = torch.aten.item %5026 : !torch.vtensor<[],si8> -> !torch.int | |
%5029 = torch.aten._make_per_tensor_quantized_tensor %5024, %5027, %5028 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%5030 = torch.aten.dequantize.self %5029 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%5031 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5032 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_908 = torch.constant.int 12 | |
%5033 = torch.aten.item %5031 : !torch.vtensor<[],f32> -> !torch.float | |
%5034 = torch.aten.item %5032 : !torch.vtensor<[],si8> -> !torch.int | |
%5035 = torch.aten.quantize_per_tensor %110, %5033, %5034, %int12_908 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%5036 = torch.aten.int_repr %5035 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%5037 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5038 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5039 = torch.aten.item %5037 : !torch.vtensor<[],f32> -> !torch.float | |
%5040 = torch.aten.item %5038 : !torch.vtensor<[],si8> -> !torch.int | |
%5041 = torch.aten._make_per_tensor_quantized_tensor %5036, %5039, %5040 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%5042 = torch.aten.dequantize.self %5041 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%5043 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5044 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_909 = torch.constant.int 12 | |
%5045 = torch.aten.item %5043 : !torch.vtensor<[],f32> -> !torch.float | |
%5046 = torch.aten.item %5044 : !torch.vtensor<[],si8> -> !torch.int | |
%5047 = torch.aten.quantize_per_tensor %111, %5045, %5046, %int12_909 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%5048 = torch.aten.int_repr %5047 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%5049 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5050 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5051 = torch.aten.item %5049 : !torch.vtensor<[],f32> -> !torch.float | |
%5052 = torch.aten.item %5050 : !torch.vtensor<[],si8> -> !torch.int | |
%5053 = torch.aten._make_per_tensor_quantized_tensor %5048, %5051, %5052 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%5054 = torch.aten.dequantize.self %5053 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_910 = torch.constant.int 1 | |
%int1_911 = torch.constant.int 1 | |
%int1_912 = torch.constant.int 1 | |
%int1_913 = torch.constant.int 1 | |
%int1_914 = torch.constant.int 1 | |
%int1_915 = torch.constant.int 1 | |
%int0_916 = torch.constant.int 0 | |
%5055 = torch.prim.ListConstruct %int1_910, %int1_911 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5056 = torch.prim.ListConstruct %int1_912, %int1_913 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5057 = torch.prim.ListConstruct %int1_914, %int1_915 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5058 = torch.prim.ListConstruct %int0_916, %int0_916 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_917 = torch.constant.bool false | |
%int1_918 = torch.constant.int 1 | |
%5059 = torch.aten.convolution %5030, %5042, %5054, %5057, %5055, %5056, %false_917, %5058, %int1_918 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%5060 = torch.vtensor.literal(dense<4.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5061 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_919 = torch.constant.int 12 | |
%5062 = torch.aten.item %5060 : !torch.vtensor<[],f32> -> !torch.float | |
%5063 = torch.aten.item %5061 : !torch.vtensor<[],si8> -> !torch.int | |
%5064 = torch.aten.quantize_per_tensor %5059, %5062, %5063, %int12_919 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5065 = torch.aten.int_repr %5064 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5066 = torch.vtensor.literal(dense<4.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5067 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5068 = torch.aten.item %5066 : !torch.vtensor<[],f32> -> !torch.float | |
%5069 = torch.aten.item %5067 : !torch.vtensor<[],si8> -> !torch.int | |
%5070 = torch.aten._make_per_tensor_quantized_tensor %5065, %5068, %5069 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5071 = torch.aten.dequantize.self %5070 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%5072 = torch.aten.mul.Tensor %5071, %883 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%5073 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5074 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_920 = torch.constant.int 12 | |
%5075 = torch.aten.item %5073 : !torch.vtensor<[],f32> -> !torch.float | |
%5076 = torch.aten.item %5074 : !torch.vtensor<[],si8> -> !torch.int | |
%5077 = torch.aten.quantize_per_tensor %5072, %5075, %5076, %int12_920 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5078 = torch.aten.int_repr %5077 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5079 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5080 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5081 = torch.aten.item %5079 : !torch.vtensor<[],f32> -> !torch.float | |
%5082 = torch.aten.item %5080 : !torch.vtensor<[],si8> -> !torch.int | |
%5083 = torch.aten._make_per_tensor_quantized_tensor %5078, %5081, %5082 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5084 = torch.aten.dequantize.self %5083 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_921 = torch.constant.int 1 | |
%5085 = torch.aten.add.Tensor %5084, %4818, %int1_921 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%5086 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5087 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_922 = torch.constant.int 12 | |
%5088 = torch.aten.item %5086 : !torch.vtensor<[],f32> -> !torch.float | |
%5089 = torch.aten.item %5087 : !torch.vtensor<[],si8> -> !torch.int | |
%5090 = torch.aten.quantize_per_tensor %5085, %5088, %5089, %int12_922 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5091 = torch.aten.int_repr %5090 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5092 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5093 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5094 = torch.aten.item %5092 : !torch.vtensor<[],f32> -> !torch.float | |
%5095 = torch.aten.item %5093 : !torch.vtensor<[],si8> -> !torch.int | |
%5096 = torch.aten._make_per_tensor_quantized_tensor %5091, %5094, %5095 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5097 = torch.aten.dequantize.self %5096 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%5098 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5099 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_923 = torch.constant.int 12 | |
%5100 = torch.aten.item %5098 : !torch.vtensor<[],f32> -> !torch.float | |
%5101 = torch.aten.item %5099 : !torch.vtensor<[],si8> -> !torch.int | |
%5102 = torch.aten.quantize_per_tensor %112, %5100, %5101, %int12_923 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%5103 = torch.aten.int_repr %5102 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%5104 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5105 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5106 = torch.aten.item %5104 : !torch.vtensor<[],f32> -> !torch.float | |
%5107 = torch.aten.item %5105 : !torch.vtensor<[],si8> -> !torch.int | |
%5108 = torch.aten._make_per_tensor_quantized_tensor %5103, %5106, %5107 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%5109 = torch.aten.dequantize.self %5108 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%5110 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5111 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_924 = torch.constant.int 12 | |
%5112 = torch.aten.item %5110 : !torch.vtensor<[],f32> -> !torch.float | |
%5113 = torch.aten.item %5111 : !torch.vtensor<[],si8> -> !torch.int | |
%5114 = torch.aten.quantize_per_tensor %113, %5112, %5113, %int12_924 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5115 = torch.aten.int_repr %5114 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5116 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5117 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5118 = torch.aten.item %5116 : !torch.vtensor<[],f32> -> !torch.float | |
%5119 = torch.aten.item %5117 : !torch.vtensor<[],si8> -> !torch.int | |
%5120 = torch.aten._make_per_tensor_quantized_tensor %5115, %5118, %5119 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5121 = torch.aten.dequantize.self %5120 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_925 = torch.constant.int 1 | |
%int1_926 = torch.constant.int 1 | |
%int1_927 = torch.constant.int 1 | |
%int1_928 = torch.constant.int 1 | |
%int1_929 = torch.constant.int 1 | |
%int1_930 = torch.constant.int 1 | |
%int0_931 = torch.constant.int 0 | |
%5122 = torch.prim.ListConstruct %int1_925, %int1_926 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5123 = torch.prim.ListConstruct %int1_927, %int1_928 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5124 = torch.prim.ListConstruct %int1_929, %int1_930 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5125 = torch.prim.ListConstruct %int0_931, %int0_931 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_932 = torch.constant.bool false | |
%int1_933 = torch.constant.int 1 | |
%5126 = torch.aten.convolution %5097, %5109, %5121, %5124, %5122, %5123, %false_932, %5125, %int1_933 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_934 = torch.constant.float 0.1015625 | |
%5127 = torch.aten.leaky_relu %5126, %float1.015630e-01_934 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5128 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5129 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_935 = torch.constant.int 12 | |
%5130 = torch.aten.item %5128 : !torch.vtensor<[],f32> -> !torch.float | |
%5131 = torch.aten.item %5129 : !torch.vtensor<[],si8> -> !torch.int | |
%5132 = torch.aten.quantize_per_tensor %5127, %5130, %5131, %int12_935 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5133 = torch.aten.int_repr %5132 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5134 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5135 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5136 = torch.aten.item %5134 : !torch.vtensor<[],f32> -> !torch.float | |
%5137 = torch.aten.item %5135 : !torch.vtensor<[],si8> -> !torch.int | |
%5138 = torch.aten._make_per_tensor_quantized_tensor %5133, %5136, %5137 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5139 = torch.aten.dequantize.self %5138 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%5140 = torch.prim.ListConstruct %5097, %5139 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_936 = torch.constant.int 1 | |
%5141 = torch.aten.cat %5140, %int1_936 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%5142 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5143 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_937 = torch.constant.int 12 | |
%5144 = torch.aten.item %5142 : !torch.vtensor<[],f32> -> !torch.float | |
%5145 = torch.aten.item %5143 : !torch.vtensor<[],si8> -> !torch.int | |
%5146 = torch.aten.quantize_per_tensor %5141, %5144, %5145, %int12_937 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%5147 = torch.aten.int_repr %5146 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%5148 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5149 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5150 = torch.aten.item %5148 : !torch.vtensor<[],f32> -> !torch.float | |
%5151 = torch.aten.item %5149 : !torch.vtensor<[],si8> -> !torch.int | |
%5152 = torch.aten._make_per_tensor_quantized_tensor %5147, %5150, %5151 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%5153 = torch.aten.dequantize.self %5152 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%5154 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5155 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_938 = torch.constant.int 12 | |
%5156 = torch.aten.item %5154 : !torch.vtensor<[],f32> -> !torch.float | |
%5157 = torch.aten.item %5155 : !torch.vtensor<[],si8> -> !torch.int | |
%5158 = torch.aten.quantize_per_tensor %114, %5156, %5157, %int12_938 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%5159 = torch.aten.int_repr %5158 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%5160 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5161 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5162 = torch.aten.item %5160 : !torch.vtensor<[],f32> -> !torch.float | |
%5163 = torch.aten.item %5161 : !torch.vtensor<[],si8> -> !torch.int | |
%5164 = torch.aten._make_per_tensor_quantized_tensor %5159, %5162, %5163 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%5165 = torch.aten.dequantize.self %5164 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%5166 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5167 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_939 = torch.constant.int 12 | |
%5168 = torch.aten.item %5166 : !torch.vtensor<[],f32> -> !torch.float | |
%5169 = torch.aten.item %5167 : !torch.vtensor<[],si8> -> !torch.int | |
%5170 = torch.aten.quantize_per_tensor %115, %5168, %5169, %int12_939 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5171 = torch.aten.int_repr %5170 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5172 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5173 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5174 = torch.aten.item %5172 : !torch.vtensor<[],f32> -> !torch.float | |
%5175 = torch.aten.item %5173 : !torch.vtensor<[],si8> -> !torch.int | |
%5176 = torch.aten._make_per_tensor_quantized_tensor %5171, %5174, %5175 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5177 = torch.aten.dequantize.self %5176 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_940 = torch.constant.int 1 | |
%int1_941 = torch.constant.int 1 | |
%int1_942 = torch.constant.int 1 | |
%int1_943 = torch.constant.int 1 | |
%int1_944 = torch.constant.int 1 | |
%int1_945 = torch.constant.int 1 | |
%int0_946 = torch.constant.int 0 | |
%5178 = torch.prim.ListConstruct %int1_940, %int1_941 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5179 = torch.prim.ListConstruct %int1_942, %int1_943 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5180 = torch.prim.ListConstruct %int1_944, %int1_945 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5181 = torch.prim.ListConstruct %int0_946, %int0_946 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_947 = torch.constant.bool false | |
%int1_948 = torch.constant.int 1 | |
%5182 = torch.aten.convolution %5153, %5165, %5177, %5180, %5178, %5179, %false_947, %5181, %int1_948 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_949 = torch.constant.float 0.1015625 | |
%5183 = torch.aten.leaky_relu %5182, %float1.015630e-01_949 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5184 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5185 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_950 = torch.constant.int 12 | |
%5186 = torch.aten.item %5184 : !torch.vtensor<[],f32> -> !torch.float | |
%5187 = torch.aten.item %5185 : !torch.vtensor<[],si8> -> !torch.int | |
%5188 = torch.aten.quantize_per_tensor %5183, %5186, %5187, %int12_950 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5189 = torch.aten.int_repr %5188 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5190 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5191 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5192 = torch.aten.item %5190 : !torch.vtensor<[],f32> -> !torch.float | |
%5193 = torch.aten.item %5191 : !torch.vtensor<[],si8> -> !torch.int | |
%5194 = torch.aten._make_per_tensor_quantized_tensor %5189, %5192, %5193 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5195 = torch.aten.dequantize.self %5194 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%5196 = torch.prim.ListConstruct %5097, %5139, %5195 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_951 = torch.constant.int 1 | |
%5197 = torch.aten.cat %5196, %int1_951 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%5198 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5199 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_952 = torch.constant.int 12 | |
%5200 = torch.aten.item %5198 : !torch.vtensor<[],f32> -> !torch.float | |
%5201 = torch.aten.item %5199 : !torch.vtensor<[],si8> -> !torch.int | |
%5202 = torch.aten.quantize_per_tensor %5197, %5200, %5201, %int12_952 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%5203 = torch.aten.int_repr %5202 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%5204 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5205 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5206 = torch.aten.item %5204 : !torch.vtensor<[],f32> -> !torch.float | |
%5207 = torch.aten.item %5205 : !torch.vtensor<[],si8> -> !torch.int | |
%5208 = torch.aten._make_per_tensor_quantized_tensor %5203, %5206, %5207 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%5209 = torch.aten.dequantize.self %5208 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%5210 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5211 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_953 = torch.constant.int 12 | |
%5212 = torch.aten.item %5210 : !torch.vtensor<[],f32> -> !torch.float | |
%5213 = torch.aten.item %5211 : !torch.vtensor<[],si8> -> !torch.int | |
%5214 = torch.aten.quantize_per_tensor %116, %5212, %5213, %int12_953 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%5215 = torch.aten.int_repr %5214 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%5216 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5217 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5218 = torch.aten.item %5216 : !torch.vtensor<[],f32> -> !torch.float | |
%5219 = torch.aten.item %5217 : !torch.vtensor<[],si8> -> !torch.int | |
%5220 = torch.aten._make_per_tensor_quantized_tensor %5215, %5218, %5219 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%5221 = torch.aten.dequantize.self %5220 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%5222 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5223 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_954 = torch.constant.int 12 | |
%5224 = torch.aten.item %5222 : !torch.vtensor<[],f32> -> !torch.float | |
%5225 = torch.aten.item %5223 : !torch.vtensor<[],si8> -> !torch.int | |
%5226 = torch.aten.quantize_per_tensor %117, %5224, %5225, %int12_954 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5227 = torch.aten.int_repr %5226 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5228 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5229 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5230 = torch.aten.item %5228 : !torch.vtensor<[],f32> -> !torch.float | |
%5231 = torch.aten.item %5229 : !torch.vtensor<[],si8> -> !torch.int | |
%5232 = torch.aten._make_per_tensor_quantized_tensor %5227, %5230, %5231 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5233 = torch.aten.dequantize.self %5232 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_955 = torch.constant.int 1 | |
%int1_956 = torch.constant.int 1 | |
%int1_957 = torch.constant.int 1 | |
%int1_958 = torch.constant.int 1 | |
%int1_959 = torch.constant.int 1 | |
%int1_960 = torch.constant.int 1 | |
%int0_961 = torch.constant.int 0 | |
%5234 = torch.prim.ListConstruct %int1_955, %int1_956 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5235 = torch.prim.ListConstruct %int1_957, %int1_958 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5236 = torch.prim.ListConstruct %int1_959, %int1_960 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5237 = torch.prim.ListConstruct %int0_961, %int0_961 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_962 = torch.constant.bool false | |
%int1_963 = torch.constant.int 1 | |
%5238 = torch.aten.convolution %5209, %5221, %5233, %5236, %5234, %5235, %false_962, %5237, %int1_963 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_964 = torch.constant.float 0.1015625 | |
%5239 = torch.aten.leaky_relu %5238, %float1.015630e-01_964 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5240 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5241 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_965 = torch.constant.int 12 | |
%5242 = torch.aten.item %5240 : !torch.vtensor<[],f32> -> !torch.float | |
%5243 = torch.aten.item %5241 : !torch.vtensor<[],si8> -> !torch.int | |
%5244 = torch.aten.quantize_per_tensor %5239, %5242, %5243, %int12_965 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5245 = torch.aten.int_repr %5244 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5246 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5247 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5248 = torch.aten.item %5246 : !torch.vtensor<[],f32> -> !torch.float | |
%5249 = torch.aten.item %5247 : !torch.vtensor<[],si8> -> !torch.int | |
%5250 = torch.aten._make_per_tensor_quantized_tensor %5245, %5248, %5249 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5251 = torch.aten.dequantize.self %5250 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%5252 = torch.prim.ListConstruct %5097, %5139, %5195, %5251 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_966 = torch.constant.int 1 | |
%5253 = torch.aten.cat %5252, %int1_966 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%5254 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5255 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_967 = torch.constant.int 12 | |
%5256 = torch.aten.item %5254 : !torch.vtensor<[],f32> -> !torch.float | |
%5257 = torch.aten.item %5255 : !torch.vtensor<[],si8> -> !torch.int | |
%5258 = torch.aten.quantize_per_tensor %5253, %5256, %5257, %int12_967 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%5259 = torch.aten.int_repr %5258 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%5260 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5261 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5262 = torch.aten.item %5260 : !torch.vtensor<[],f32> -> !torch.float | |
%5263 = torch.aten.item %5261 : !torch.vtensor<[],si8> -> !torch.int | |
%5264 = torch.aten._make_per_tensor_quantized_tensor %5259, %5262, %5263 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%5265 = torch.aten.dequantize.self %5264 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%5266 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5267 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_968 = torch.constant.int 12 | |
%5268 = torch.aten.item %5266 : !torch.vtensor<[],f32> -> !torch.float | |
%5269 = torch.aten.item %5267 : !torch.vtensor<[],si8> -> !torch.int | |
%5270 = torch.aten.quantize_per_tensor %118, %5268, %5269, %int12_968 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%5271 = torch.aten.int_repr %5270 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%5272 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5273 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5274 = torch.aten.item %5272 : !torch.vtensor<[],f32> -> !torch.float | |
%5275 = torch.aten.item %5273 : !torch.vtensor<[],si8> -> !torch.int | |
%5276 = torch.aten._make_per_tensor_quantized_tensor %5271, %5274, %5275 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%5277 = torch.aten.dequantize.self %5276 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%5278 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5279 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_969 = torch.constant.int 12 | |
%5280 = torch.aten.item %5278 : !torch.vtensor<[],f32> -> !torch.float | |
%5281 = torch.aten.item %5279 : !torch.vtensor<[],si8> -> !torch.int | |
%5282 = torch.aten.quantize_per_tensor %119, %5280, %5281, %int12_969 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5283 = torch.aten.int_repr %5282 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5284 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5285 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5286 = torch.aten.item %5284 : !torch.vtensor<[],f32> -> !torch.float | |
%5287 = torch.aten.item %5285 : !torch.vtensor<[],si8> -> !torch.int | |
%5288 = torch.aten._make_per_tensor_quantized_tensor %5283, %5286, %5287 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5289 = torch.aten.dequantize.self %5288 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_970 = torch.constant.int 1 | |
%int1_971 = torch.constant.int 1 | |
%int1_972 = torch.constant.int 1 | |
%int1_973 = torch.constant.int 1 | |
%int1_974 = torch.constant.int 1 | |
%int1_975 = torch.constant.int 1 | |
%int0_976 = torch.constant.int 0 | |
%5290 = torch.prim.ListConstruct %int1_970, %int1_971 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5291 = torch.prim.ListConstruct %int1_972, %int1_973 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5292 = torch.prim.ListConstruct %int1_974, %int1_975 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5293 = torch.prim.ListConstruct %int0_976, %int0_976 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_977 = torch.constant.bool false | |
%int1_978 = torch.constant.int 1 | |
%5294 = torch.aten.convolution %5265, %5277, %5289, %5292, %5290, %5291, %false_977, %5293, %int1_978 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_979 = torch.constant.float 0.1015625 | |
%5295 = torch.aten.leaky_relu %5294, %float1.015630e-01_979 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5296 = torch.prim.ListConstruct %5097, %5139, %5195, %5251, %5295 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_980 = torch.constant.int 1 | |
%5297 = torch.aten.cat %5296, %int1_980 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%5298 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5299 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_981 = torch.constant.int 12 | |
%5300 = torch.aten.item %5298 : !torch.vtensor<[],f32> -> !torch.float | |
%5301 = torch.aten.item %5299 : !torch.vtensor<[],si8> -> !torch.int | |
%5302 = torch.aten.quantize_per_tensor %5297, %5300, %5301, %int12_981 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%5303 = torch.aten.int_repr %5302 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%5304 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5305 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5306 = torch.aten.item %5304 : !torch.vtensor<[],f32> -> !torch.float | |
%5307 = torch.aten.item %5305 : !torch.vtensor<[],si8> -> !torch.int | |
%5308 = torch.aten._make_per_tensor_quantized_tensor %5303, %5306, %5307 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%5309 = torch.aten.dequantize.self %5308 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%5310 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5311 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_982 = torch.constant.int 12 | |
%5312 = torch.aten.item %5310 : !torch.vtensor<[],f32> -> !torch.float | |
%5313 = torch.aten.item %5311 : !torch.vtensor<[],si8> -> !torch.int | |
%5314 = torch.aten.quantize_per_tensor %120, %5312, %5313, %int12_982 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%5315 = torch.aten.int_repr %5314 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%5316 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5317 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5318 = torch.aten.item %5316 : !torch.vtensor<[],f32> -> !torch.float | |
%5319 = torch.aten.item %5317 : !torch.vtensor<[],si8> -> !torch.int | |
%5320 = torch.aten._make_per_tensor_quantized_tensor %5315, %5318, %5319 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%5321 = torch.aten.dequantize.self %5320 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%5322 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5323 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_983 = torch.constant.int 12 | |
%5324 = torch.aten.item %5322 : !torch.vtensor<[],f32> -> !torch.float | |
%5325 = torch.aten.item %5323 : !torch.vtensor<[],si8> -> !torch.int | |
%5326 = torch.aten.quantize_per_tensor %121, %5324, %5325, %int12_983 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%5327 = torch.aten.int_repr %5326 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%5328 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5329 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5330 = torch.aten.item %5328 : !torch.vtensor<[],f32> -> !torch.float | |
%5331 = torch.aten.item %5329 : !torch.vtensor<[],si8> -> !torch.int | |
%5332 = torch.aten._make_per_tensor_quantized_tensor %5327, %5330, %5331 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%5333 = torch.aten.dequantize.self %5332 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_984 = torch.constant.int 1 | |
%int1_985 = torch.constant.int 1 | |
%int1_986 = torch.constant.int 1 | |
%int1_987 = torch.constant.int 1 | |
%int1_988 = torch.constant.int 1 | |
%int1_989 = torch.constant.int 1 | |
%int0_990 = torch.constant.int 0 | |
%5334 = torch.prim.ListConstruct %int1_984, %int1_985 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5335 = torch.prim.ListConstruct %int1_986, %int1_987 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5336 = torch.prim.ListConstruct %int1_988, %int1_989 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5337 = torch.prim.ListConstruct %int0_990, %int0_990 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_991 = torch.constant.bool false | |
%int1_992 = torch.constant.int 1 | |
%5338 = torch.aten.convolution %5309, %5321, %5333, %5336, %5334, %5335, %false_991, %5337, %int1_992 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%5339 = torch.vtensor.literal(dense<4.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5340 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_993 = torch.constant.int 12 | |
%5341 = torch.aten.item %5339 : !torch.vtensor<[],f32> -> !torch.float | |
%5342 = torch.aten.item %5340 : !torch.vtensor<[],si8> -> !torch.int | |
%5343 = torch.aten.quantize_per_tensor %5338, %5341, %5342, %int12_993 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5344 = torch.aten.int_repr %5343 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5345 = torch.vtensor.literal(dense<4.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5346 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5347 = torch.aten.item %5345 : !torch.vtensor<[],f32> -> !torch.float | |
%5348 = torch.aten.item %5346 : !torch.vtensor<[],si8> -> !torch.int | |
%5349 = torch.aten._make_per_tensor_quantized_tensor %5344, %5347, %5348 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5350 = torch.aten.dequantize.self %5349 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%5351 = torch.aten.mul.Tensor %5350, %896 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%5352 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5353 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_994 = torch.constant.int 12 | |
%5354 = torch.aten.item %5352 : !torch.vtensor<[],f32> -> !torch.float | |
%5355 = torch.aten.item %5353 : !torch.vtensor<[],si8> -> !torch.int | |
%5356 = torch.aten.quantize_per_tensor %5351, %5354, %5355, %int12_994 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5357 = torch.aten.int_repr %5356 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5358 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5359 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5360 = torch.aten.item %5358 : !torch.vtensor<[],f32> -> !torch.float | |
%5361 = torch.aten.item %5359 : !torch.vtensor<[],si8> -> !torch.int | |
%5362 = torch.aten._make_per_tensor_quantized_tensor %5357, %5360, %5361 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5363 = torch.aten.dequantize.self %5362 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_995 = torch.constant.int 1 | |
%5364 = torch.aten.add.Tensor %5363, %5097, %int1_995 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%5365 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5366 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_996 = torch.constant.int 12 | |
%5367 = torch.aten.item %5365 : !torch.vtensor<[],f32> -> !torch.float | |
%5368 = torch.aten.item %5366 : !torch.vtensor<[],si8> -> !torch.int | |
%5369 = torch.aten.quantize_per_tensor %5364, %5367, %5368, %int12_996 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5370 = torch.aten.int_repr %5369 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5371 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5372 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5373 = torch.aten.item %5371 : !torch.vtensor<[],f32> -> !torch.float | |
%5374 = torch.aten.item %5372 : !torch.vtensor<[],si8> -> !torch.int | |
%5375 = torch.aten._make_per_tensor_quantized_tensor %5370, %5373, %5374 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5376 = torch.aten.dequantize.self %5375 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%5377 = torch.aten.mul.Tensor %5376, %909 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%5378 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5379 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_997 = torch.constant.int 12 | |
%5380 = torch.aten.item %5378 : !torch.vtensor<[],f32> -> !torch.float | |
%5381 = torch.aten.item %5379 : !torch.vtensor<[],si8> -> !torch.int | |
%5382 = torch.aten.quantize_per_tensor %5377, %5380, %5381, %int12_997 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5383 = torch.aten.int_repr %5382 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5384 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5385 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5386 = torch.aten.item %5384 : !torch.vtensor<[],f32> -> !torch.float | |
%5387 = torch.aten.item %5385 : !torch.vtensor<[],si8> -> !torch.int | |
%5388 = torch.aten._make_per_tensor_quantized_tensor %5383, %5386, %5387 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5389 = torch.aten.dequantize.self %5388 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_998 = torch.constant.int 1 | |
%5390 = torch.aten.add.Tensor %5389, %4539, %int1_998 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%5391 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5392 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_999 = torch.constant.int 12 | |
%5393 = torch.aten.item %5391 : !torch.vtensor<[],f32> -> !torch.float | |
%5394 = torch.aten.item %5392 : !torch.vtensor<[],si8> -> !torch.int | |
%5395 = torch.aten.quantize_per_tensor %5390, %5393, %5394, %int12_999 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5396 = torch.aten.int_repr %5395 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5397 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5398 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5399 = torch.aten.item %5397 : !torch.vtensor<[],f32> -> !torch.float | |
%5400 = torch.aten.item %5398 : !torch.vtensor<[],si8> -> !torch.int | |
%5401 = torch.aten._make_per_tensor_quantized_tensor %5396, %5399, %5400 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5402 = torch.aten.dequantize.self %5401 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%5403 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5404 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1000 = torch.constant.int 12 | |
%5405 = torch.aten.item %5403 : !torch.vtensor<[],f32> -> !torch.float | |
%5406 = torch.aten.item %5404 : !torch.vtensor<[],si8> -> !torch.int | |
%5407 = torch.aten.quantize_per_tensor %122, %5405, %5406, %int12_1000 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%5408 = torch.aten.int_repr %5407 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%5409 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5410 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5411 = torch.aten.item %5409 : !torch.vtensor<[],f32> -> !torch.float | |
%5412 = torch.aten.item %5410 : !torch.vtensor<[],si8> -> !torch.int | |
%5413 = torch.aten._make_per_tensor_quantized_tensor %5408, %5411, %5412 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%5414 = torch.aten.dequantize.self %5413 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%5415 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5416 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1001 = torch.constant.int 12 | |
%5417 = torch.aten.item %5415 : !torch.vtensor<[],f32> -> !torch.float | |
%5418 = torch.aten.item %5416 : !torch.vtensor<[],si8> -> !torch.int | |
%5419 = torch.aten.quantize_per_tensor %123, %5417, %5418, %int12_1001 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5420 = torch.aten.int_repr %5419 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5421 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5422 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5423 = torch.aten.item %5421 : !torch.vtensor<[],f32> -> !torch.float | |
%5424 = torch.aten.item %5422 : !torch.vtensor<[],si8> -> !torch.int | |
%5425 = torch.aten._make_per_tensor_quantized_tensor %5420, %5423, %5424 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5426 = torch.aten.dequantize.self %5425 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1002 = torch.constant.int 1 | |
%int1_1003 = torch.constant.int 1 | |
%int1_1004 = torch.constant.int 1 | |
%int1_1005 = torch.constant.int 1 | |
%int1_1006 = torch.constant.int 1 | |
%int1_1007 = torch.constant.int 1 | |
%int0_1008 = torch.constant.int 0 | |
%5427 = torch.prim.ListConstruct %int1_1002, %int1_1003 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5428 = torch.prim.ListConstruct %int1_1004, %int1_1005 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5429 = torch.prim.ListConstruct %int1_1006, %int1_1007 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5430 = torch.prim.ListConstruct %int0_1008, %int0_1008 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1009 = torch.constant.bool false | |
%int1_1010 = torch.constant.int 1 | |
%5431 = torch.aten.convolution %5402, %5414, %5426, %5429, %5427, %5428, %false_1009, %5430, %int1_1010 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1011 = torch.constant.float 0.1015625 | |
%5432 = torch.aten.leaky_relu %5431, %float1.015630e-01_1011 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5433 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5434 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1012 = torch.constant.int 12 | |
%5435 = torch.aten.item %5433 : !torch.vtensor<[],f32> -> !torch.float | |
%5436 = torch.aten.item %5434 : !torch.vtensor<[],si8> -> !torch.int | |
%5437 = torch.aten.quantize_per_tensor %5432, %5435, %5436, %int12_1012 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5438 = torch.aten.int_repr %5437 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5439 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5440 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5441 = torch.aten.item %5439 : !torch.vtensor<[],f32> -> !torch.float | |
%5442 = torch.aten.item %5440 : !torch.vtensor<[],si8> -> !torch.int | |
%5443 = torch.aten._make_per_tensor_quantized_tensor %5438, %5441, %5442 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5444 = torch.aten.dequantize.self %5443 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%5445 = torch.prim.ListConstruct %5402, %5444 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1013 = torch.constant.int 1 | |
%5446 = torch.aten.cat %5445, %int1_1013 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%5447 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5448 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1014 = torch.constant.int 12 | |
%5449 = torch.aten.item %5447 : !torch.vtensor<[],f32> -> !torch.float | |
%5450 = torch.aten.item %5448 : !torch.vtensor<[],si8> -> !torch.int | |
%5451 = torch.aten.quantize_per_tensor %5446, %5449, %5450, %int12_1014 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%5452 = torch.aten.int_repr %5451 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%5453 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5454 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5455 = torch.aten.item %5453 : !torch.vtensor<[],f32> -> !torch.float | |
%5456 = torch.aten.item %5454 : !torch.vtensor<[],si8> -> !torch.int | |
%5457 = torch.aten._make_per_tensor_quantized_tensor %5452, %5455, %5456 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%5458 = torch.aten.dequantize.self %5457 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%5459 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5460 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1015 = torch.constant.int 12 | |
%5461 = torch.aten.item %5459 : !torch.vtensor<[],f32> -> !torch.float | |
%5462 = torch.aten.item %5460 : !torch.vtensor<[],si8> -> !torch.int | |
%5463 = torch.aten.quantize_per_tensor %124, %5461, %5462, %int12_1015 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%5464 = torch.aten.int_repr %5463 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%5465 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5466 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5467 = torch.aten.item %5465 : !torch.vtensor<[],f32> -> !torch.float | |
%5468 = torch.aten.item %5466 : !torch.vtensor<[],si8> -> !torch.int | |
%5469 = torch.aten._make_per_tensor_quantized_tensor %5464, %5467, %5468 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%5470 = torch.aten.dequantize.self %5469 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%5471 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5472 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1016 = torch.constant.int 12 | |
%5473 = torch.aten.item %5471 : !torch.vtensor<[],f32> -> !torch.float | |
%5474 = torch.aten.item %5472 : !torch.vtensor<[],si8> -> !torch.int | |
%5475 = torch.aten.quantize_per_tensor %125, %5473, %5474, %int12_1016 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5476 = torch.aten.int_repr %5475 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5477 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5478 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5479 = torch.aten.item %5477 : !torch.vtensor<[],f32> -> !torch.float | |
%5480 = torch.aten.item %5478 : !torch.vtensor<[],si8> -> !torch.int | |
%5481 = torch.aten._make_per_tensor_quantized_tensor %5476, %5479, %5480 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5482 = torch.aten.dequantize.self %5481 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1017 = torch.constant.int 1 | |
%int1_1018 = torch.constant.int 1 | |
%int1_1019 = torch.constant.int 1 | |
%int1_1020 = torch.constant.int 1 | |
%int1_1021 = torch.constant.int 1 | |
%int1_1022 = torch.constant.int 1 | |
%int0_1023 = torch.constant.int 0 | |
%5483 = torch.prim.ListConstruct %int1_1017, %int1_1018 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5484 = torch.prim.ListConstruct %int1_1019, %int1_1020 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5485 = torch.prim.ListConstruct %int1_1021, %int1_1022 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5486 = torch.prim.ListConstruct %int0_1023, %int0_1023 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1024 = torch.constant.bool false | |
%int1_1025 = torch.constant.int 1 | |
%5487 = torch.aten.convolution %5458, %5470, %5482, %5485, %5483, %5484, %false_1024, %5486, %int1_1025 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1026 = torch.constant.float 0.1015625 | |
%5488 = torch.aten.leaky_relu %5487, %float1.015630e-01_1026 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5489 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5490 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1027 = torch.constant.int 12 | |
%5491 = torch.aten.item %5489 : !torch.vtensor<[],f32> -> !torch.float | |
%5492 = torch.aten.item %5490 : !torch.vtensor<[],si8> -> !torch.int | |
%5493 = torch.aten.quantize_per_tensor %5488, %5491, %5492, %int12_1027 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5494 = torch.aten.int_repr %5493 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5495 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5496 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5497 = torch.aten.item %5495 : !torch.vtensor<[],f32> -> !torch.float | |
%5498 = torch.aten.item %5496 : !torch.vtensor<[],si8> -> !torch.int | |
%5499 = torch.aten._make_per_tensor_quantized_tensor %5494, %5497, %5498 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5500 = torch.aten.dequantize.self %5499 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%5501 = torch.prim.ListConstruct %5402, %5444, %5500 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1028 = torch.constant.int 1 | |
%5502 = torch.aten.cat %5501, %int1_1028 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%5503 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5504 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1029 = torch.constant.int 12 | |
%5505 = torch.aten.item %5503 : !torch.vtensor<[],f32> -> !torch.float | |
%5506 = torch.aten.item %5504 : !torch.vtensor<[],si8> -> !torch.int | |
%5507 = torch.aten.quantize_per_tensor %5502, %5505, %5506, %int12_1029 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%5508 = torch.aten.int_repr %5507 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%5509 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5510 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5511 = torch.aten.item %5509 : !torch.vtensor<[],f32> -> !torch.float | |
%5512 = torch.aten.item %5510 : !torch.vtensor<[],si8> -> !torch.int | |
%5513 = torch.aten._make_per_tensor_quantized_tensor %5508, %5511, %5512 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%5514 = torch.aten.dequantize.self %5513 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%5515 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5516 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1030 = torch.constant.int 12 | |
%5517 = torch.aten.item %5515 : !torch.vtensor<[],f32> -> !torch.float | |
%5518 = torch.aten.item %5516 : !torch.vtensor<[],si8> -> !torch.int | |
%5519 = torch.aten.quantize_per_tensor %126, %5517, %5518, %int12_1030 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%5520 = torch.aten.int_repr %5519 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%5521 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5522 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5523 = torch.aten.item %5521 : !torch.vtensor<[],f32> -> !torch.float | |
%5524 = torch.aten.item %5522 : !torch.vtensor<[],si8> -> !torch.int | |
%5525 = torch.aten._make_per_tensor_quantized_tensor %5520, %5523, %5524 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%5526 = torch.aten.dequantize.self %5525 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%5527 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5528 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1031 = torch.constant.int 12 | |
%5529 = torch.aten.item %5527 : !torch.vtensor<[],f32> -> !torch.float | |
%5530 = torch.aten.item %5528 : !torch.vtensor<[],si8> -> !torch.int | |
%5531 = torch.aten.quantize_per_tensor %127, %5529, %5530, %int12_1031 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5532 = torch.aten.int_repr %5531 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5533 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5534 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5535 = torch.aten.item %5533 : !torch.vtensor<[],f32> -> !torch.float | |
%5536 = torch.aten.item %5534 : !torch.vtensor<[],si8> -> !torch.int | |
%5537 = torch.aten._make_per_tensor_quantized_tensor %5532, %5535, %5536 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5538 = torch.aten.dequantize.self %5537 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1032 = torch.constant.int 1 | |
%int1_1033 = torch.constant.int 1 | |
%int1_1034 = torch.constant.int 1 | |
%int1_1035 = torch.constant.int 1 | |
%int1_1036 = torch.constant.int 1 | |
%int1_1037 = torch.constant.int 1 | |
%int0_1038 = torch.constant.int 0 | |
%5539 = torch.prim.ListConstruct %int1_1032, %int1_1033 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5540 = torch.prim.ListConstruct %int1_1034, %int1_1035 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5541 = torch.prim.ListConstruct %int1_1036, %int1_1037 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5542 = torch.prim.ListConstruct %int0_1038, %int0_1038 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1039 = torch.constant.bool false | |
%int1_1040 = torch.constant.int 1 | |
%5543 = torch.aten.convolution %5514, %5526, %5538, %5541, %5539, %5540, %false_1039, %5542, %int1_1040 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1041 = torch.constant.float 0.1015625 | |
%5544 = torch.aten.leaky_relu %5543, %float1.015630e-01_1041 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5545 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5546 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1042 = torch.constant.int 12 | |
%5547 = torch.aten.item %5545 : !torch.vtensor<[],f32> -> !torch.float | |
%5548 = torch.aten.item %5546 : !torch.vtensor<[],si8> -> !torch.int | |
%5549 = torch.aten.quantize_per_tensor %5544, %5547, %5548, %int12_1042 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5550 = torch.aten.int_repr %5549 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5551 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5552 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5553 = torch.aten.item %5551 : !torch.vtensor<[],f32> -> !torch.float | |
%5554 = torch.aten.item %5552 : !torch.vtensor<[],si8> -> !torch.int | |
%5555 = torch.aten._make_per_tensor_quantized_tensor %5550, %5553, %5554 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5556 = torch.aten.dequantize.self %5555 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%5557 = torch.prim.ListConstruct %5402, %5444, %5500, %5556 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1043 = torch.constant.int 1 | |
%5558 = torch.aten.cat %5557, %int1_1043 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%5559 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5560 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1044 = torch.constant.int 12 | |
%5561 = torch.aten.item %5559 : !torch.vtensor<[],f32> -> !torch.float | |
%5562 = torch.aten.item %5560 : !torch.vtensor<[],si8> -> !torch.int | |
%5563 = torch.aten.quantize_per_tensor %5558, %5561, %5562, %int12_1044 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%5564 = torch.aten.int_repr %5563 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%5565 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5566 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5567 = torch.aten.item %5565 : !torch.vtensor<[],f32> -> !torch.float | |
%5568 = torch.aten.item %5566 : !torch.vtensor<[],si8> -> !torch.int | |
%5569 = torch.aten._make_per_tensor_quantized_tensor %5564, %5567, %5568 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%5570 = torch.aten.dequantize.self %5569 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%5571 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5572 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1045 = torch.constant.int 12 | |
%5573 = torch.aten.item %5571 : !torch.vtensor<[],f32> -> !torch.float | |
%5574 = torch.aten.item %5572 : !torch.vtensor<[],si8> -> !torch.int | |
%5575 = torch.aten.quantize_per_tensor %128, %5573, %5574, %int12_1045 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%5576 = torch.aten.int_repr %5575 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%5577 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5578 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5579 = torch.aten.item %5577 : !torch.vtensor<[],f32> -> !torch.float | |
%5580 = torch.aten.item %5578 : !torch.vtensor<[],si8> -> !torch.int | |
%5581 = torch.aten._make_per_tensor_quantized_tensor %5576, %5579, %5580 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%5582 = torch.aten.dequantize.self %5581 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%5583 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5584 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1046 = torch.constant.int 12 | |
%5585 = torch.aten.item %5583 : !torch.vtensor<[],f32> -> !torch.float | |
%5586 = torch.aten.item %5584 : !torch.vtensor<[],si8> -> !torch.int | |
%5587 = torch.aten.quantize_per_tensor %129, %5585, %5586, %int12_1046 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5588 = torch.aten.int_repr %5587 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5589 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5590 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5591 = torch.aten.item %5589 : !torch.vtensor<[],f32> -> !torch.float | |
%5592 = torch.aten.item %5590 : !torch.vtensor<[],si8> -> !torch.int | |
%5593 = torch.aten._make_per_tensor_quantized_tensor %5588, %5591, %5592 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5594 = torch.aten.dequantize.self %5593 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1047 = torch.constant.int 1 | |
%int1_1048 = torch.constant.int 1 | |
%int1_1049 = torch.constant.int 1 | |
%int1_1050 = torch.constant.int 1 | |
%int1_1051 = torch.constant.int 1 | |
%int1_1052 = torch.constant.int 1 | |
%int0_1053 = torch.constant.int 0 | |
%5595 = torch.prim.ListConstruct %int1_1047, %int1_1048 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5596 = torch.prim.ListConstruct %int1_1049, %int1_1050 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5597 = torch.prim.ListConstruct %int1_1051, %int1_1052 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5598 = torch.prim.ListConstruct %int0_1053, %int0_1053 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1054 = torch.constant.bool false | |
%int1_1055 = torch.constant.int 1 | |
%5599 = torch.aten.convolution %5570, %5582, %5594, %5597, %5595, %5596, %false_1054, %5598, %int1_1055 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1056 = torch.constant.float 0.1015625 | |
%5600 = torch.aten.leaky_relu %5599, %float1.015630e-01_1056 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5601 = torch.prim.ListConstruct %5402, %5444, %5500, %5556, %5600 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1057 = torch.constant.int 1 | |
%5602 = torch.aten.cat %5601, %int1_1057 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%5603 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5604 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1058 = torch.constant.int 12 | |
%5605 = torch.aten.item %5603 : !torch.vtensor<[],f32> -> !torch.float | |
%5606 = torch.aten.item %5604 : !torch.vtensor<[],si8> -> !torch.int | |
%5607 = torch.aten.quantize_per_tensor %5602, %5605, %5606, %int12_1058 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%5608 = torch.aten.int_repr %5607 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%5609 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5610 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5611 = torch.aten.item %5609 : !torch.vtensor<[],f32> -> !torch.float | |
%5612 = torch.aten.item %5610 : !torch.vtensor<[],si8> -> !torch.int | |
%5613 = torch.aten._make_per_tensor_quantized_tensor %5608, %5611, %5612 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%5614 = torch.aten.dequantize.self %5613 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%5615 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5616 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1059 = torch.constant.int 12 | |
%5617 = torch.aten.item %5615 : !torch.vtensor<[],f32> -> !torch.float | |
%5618 = torch.aten.item %5616 : !torch.vtensor<[],si8> -> !torch.int | |
%5619 = torch.aten.quantize_per_tensor %130, %5617, %5618, %int12_1059 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%5620 = torch.aten.int_repr %5619 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%5621 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5622 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5623 = torch.aten.item %5621 : !torch.vtensor<[],f32> -> !torch.float | |
%5624 = torch.aten.item %5622 : !torch.vtensor<[],si8> -> !torch.int | |
%5625 = torch.aten._make_per_tensor_quantized_tensor %5620, %5623, %5624 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%5626 = torch.aten.dequantize.self %5625 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%5627 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5628 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1060 = torch.constant.int 12 | |
%5629 = torch.aten.item %5627 : !torch.vtensor<[],f32> -> !torch.float | |
%5630 = torch.aten.item %5628 : !torch.vtensor<[],si8> -> !torch.int | |
%5631 = torch.aten.quantize_per_tensor %131, %5629, %5630, %int12_1060 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%5632 = torch.aten.int_repr %5631 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%5633 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5634 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5635 = torch.aten.item %5633 : !torch.vtensor<[],f32> -> !torch.float | |
%5636 = torch.aten.item %5634 : !torch.vtensor<[],si8> -> !torch.int | |
%5637 = torch.aten._make_per_tensor_quantized_tensor %5632, %5635, %5636 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%5638 = torch.aten.dequantize.self %5637 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_1061 = torch.constant.int 1 | |
%int1_1062 = torch.constant.int 1 | |
%int1_1063 = torch.constant.int 1 | |
%int1_1064 = torch.constant.int 1 | |
%int1_1065 = torch.constant.int 1 | |
%int1_1066 = torch.constant.int 1 | |
%int0_1067 = torch.constant.int 0 | |
%5639 = torch.prim.ListConstruct %int1_1061, %int1_1062 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5640 = torch.prim.ListConstruct %int1_1063, %int1_1064 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5641 = torch.prim.ListConstruct %int1_1065, %int1_1066 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5642 = torch.prim.ListConstruct %int0_1067, %int0_1067 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1068 = torch.constant.bool false | |
%int1_1069 = torch.constant.int 1 | |
%5643 = torch.aten.convolution %5614, %5626, %5638, %5641, %5639, %5640, %false_1068, %5642, %int1_1069 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%5644 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5645 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1070 = torch.constant.int 12 | |
%5646 = torch.aten.item %5644 : !torch.vtensor<[],f32> -> !torch.float | |
%5647 = torch.aten.item %5645 : !torch.vtensor<[],si8> -> !torch.int | |
%5648 = torch.aten.quantize_per_tensor %5643, %5646, %5647, %int12_1070 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5649 = torch.aten.int_repr %5648 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5650 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5651 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5652 = torch.aten.item %5650 : !torch.vtensor<[],f32> -> !torch.float | |
%5653 = torch.aten.item %5651 : !torch.vtensor<[],si8> -> !torch.int | |
%5654 = torch.aten._make_per_tensor_quantized_tensor %5649, %5652, %5653 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5655 = torch.aten.dequantize.self %5654 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%5656 = torch.aten.mul.Tensor %5655, %922 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%5657 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5658 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1071 = torch.constant.int 12 | |
%5659 = torch.aten.item %5657 : !torch.vtensor<[],f32> -> !torch.float | |
%5660 = torch.aten.item %5658 : !torch.vtensor<[],si8> -> !torch.int | |
%5661 = torch.aten.quantize_per_tensor %5656, %5659, %5660, %int12_1071 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5662 = torch.aten.int_repr %5661 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5663 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5664 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5665 = torch.aten.item %5663 : !torch.vtensor<[],f32> -> !torch.float | |
%5666 = torch.aten.item %5664 : !torch.vtensor<[],si8> -> !torch.int | |
%5667 = torch.aten._make_per_tensor_quantized_tensor %5662, %5665, %5666 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5668 = torch.aten.dequantize.self %5667 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1072 = torch.constant.int 1 | |
%5669 = torch.aten.add.Tensor %5668, %5402, %int1_1072 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%5670 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5671 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1073 = torch.constant.int 12 | |
%5672 = torch.aten.item %5670 : !torch.vtensor<[],f32> -> !torch.float | |
%5673 = torch.aten.item %5671 : !torch.vtensor<[],si8> -> !torch.int | |
%5674 = torch.aten.quantize_per_tensor %5669, %5672, %5673, %int12_1073 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5675 = torch.aten.int_repr %5674 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5676 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5677 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5678 = torch.aten.item %5676 : !torch.vtensor<[],f32> -> !torch.float | |
%5679 = torch.aten.item %5677 : !torch.vtensor<[],si8> -> !torch.int | |
%5680 = torch.aten._make_per_tensor_quantized_tensor %5675, %5678, %5679 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5681 = torch.aten.dequantize.self %5680 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%5682 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5683 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1074 = torch.constant.int 12 | |
%5684 = torch.aten.item %5682 : !torch.vtensor<[],f32> -> !torch.float | |
%5685 = torch.aten.item %5683 : !torch.vtensor<[],si8> -> !torch.int | |
%5686 = torch.aten.quantize_per_tensor %132, %5684, %5685, %int12_1074 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%5687 = torch.aten.int_repr %5686 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%5688 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5689 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5690 = torch.aten.item %5688 : !torch.vtensor<[],f32> -> !torch.float | |
%5691 = torch.aten.item %5689 : !torch.vtensor<[],si8> -> !torch.int | |
%5692 = torch.aten._make_per_tensor_quantized_tensor %5687, %5690, %5691 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%5693 = torch.aten.dequantize.self %5692 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%5694 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5695 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1075 = torch.constant.int 12 | |
%5696 = torch.aten.item %5694 : !torch.vtensor<[],f32> -> !torch.float | |
%5697 = torch.aten.item %5695 : !torch.vtensor<[],si8> -> !torch.int | |
%5698 = torch.aten.quantize_per_tensor %133, %5696, %5697, %int12_1075 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5699 = torch.aten.int_repr %5698 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5700 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5701 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5702 = torch.aten.item %5700 : !torch.vtensor<[],f32> -> !torch.float | |
%5703 = torch.aten.item %5701 : !torch.vtensor<[],si8> -> !torch.int | |
%5704 = torch.aten._make_per_tensor_quantized_tensor %5699, %5702, %5703 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5705 = torch.aten.dequantize.self %5704 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1076 = torch.constant.int 1 | |
%int1_1077 = torch.constant.int 1 | |
%int1_1078 = torch.constant.int 1 | |
%int1_1079 = torch.constant.int 1 | |
%int1_1080 = torch.constant.int 1 | |
%int1_1081 = torch.constant.int 1 | |
%int0_1082 = torch.constant.int 0 | |
%5706 = torch.prim.ListConstruct %int1_1076, %int1_1077 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5707 = torch.prim.ListConstruct %int1_1078, %int1_1079 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5708 = torch.prim.ListConstruct %int1_1080, %int1_1081 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5709 = torch.prim.ListConstruct %int0_1082, %int0_1082 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1083 = torch.constant.bool false | |
%int1_1084 = torch.constant.int 1 | |
%5710 = torch.aten.convolution %5681, %5693, %5705, %5708, %5706, %5707, %false_1083, %5709, %int1_1084 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1085 = torch.constant.float 0.1015625 | |
%5711 = torch.aten.leaky_relu %5710, %float1.015630e-01_1085 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5712 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5713 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1086 = torch.constant.int 12 | |
%5714 = torch.aten.item %5712 : !torch.vtensor<[],f32> -> !torch.float | |
%5715 = torch.aten.item %5713 : !torch.vtensor<[],si8> -> !torch.int | |
%5716 = torch.aten.quantize_per_tensor %5711, %5714, %5715, %int12_1086 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5717 = torch.aten.int_repr %5716 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5718 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5719 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5720 = torch.aten.item %5718 : !torch.vtensor<[],f32> -> !torch.float | |
%5721 = torch.aten.item %5719 : !torch.vtensor<[],si8> -> !torch.int | |
%5722 = torch.aten._make_per_tensor_quantized_tensor %5717, %5720, %5721 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5723 = torch.aten.dequantize.self %5722 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%5724 = torch.prim.ListConstruct %5681, %5723 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1087 = torch.constant.int 1 | |
%5725 = torch.aten.cat %5724, %int1_1087 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%5726 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5727 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1088 = torch.constant.int 12 | |
%5728 = torch.aten.item %5726 : !torch.vtensor<[],f32> -> !torch.float | |
%5729 = torch.aten.item %5727 : !torch.vtensor<[],si8> -> !torch.int | |
%5730 = torch.aten.quantize_per_tensor %5725, %5728, %5729, %int12_1088 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%5731 = torch.aten.int_repr %5730 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%5732 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5733 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5734 = torch.aten.item %5732 : !torch.vtensor<[],f32> -> !torch.float | |
%5735 = torch.aten.item %5733 : !torch.vtensor<[],si8> -> !torch.int | |
%5736 = torch.aten._make_per_tensor_quantized_tensor %5731, %5734, %5735 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%5737 = torch.aten.dequantize.self %5736 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%5738 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5739 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1089 = torch.constant.int 12 | |
%5740 = torch.aten.item %5738 : !torch.vtensor<[],f32> -> !torch.float | |
%5741 = torch.aten.item %5739 : !torch.vtensor<[],si8> -> !torch.int | |
%5742 = torch.aten.quantize_per_tensor %134, %5740, %5741, %int12_1089 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%5743 = torch.aten.int_repr %5742 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%5744 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5745 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5746 = torch.aten.item %5744 : !torch.vtensor<[],f32> -> !torch.float | |
%5747 = torch.aten.item %5745 : !torch.vtensor<[],si8> -> !torch.int | |
%5748 = torch.aten._make_per_tensor_quantized_tensor %5743, %5746, %5747 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%5749 = torch.aten.dequantize.self %5748 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%5750 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5751 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1090 = torch.constant.int 12 | |
%5752 = torch.aten.item %5750 : !torch.vtensor<[],f32> -> !torch.float | |
%5753 = torch.aten.item %5751 : !torch.vtensor<[],si8> -> !torch.int | |
%5754 = torch.aten.quantize_per_tensor %135, %5752, %5753, %int12_1090 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5755 = torch.aten.int_repr %5754 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5756 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5757 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5758 = torch.aten.item %5756 : !torch.vtensor<[],f32> -> !torch.float | |
%5759 = torch.aten.item %5757 : !torch.vtensor<[],si8> -> !torch.int | |
%5760 = torch.aten._make_per_tensor_quantized_tensor %5755, %5758, %5759 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5761 = torch.aten.dequantize.self %5760 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1091 = torch.constant.int 1 | |
%int1_1092 = torch.constant.int 1 | |
%int1_1093 = torch.constant.int 1 | |
%int1_1094 = torch.constant.int 1 | |
%int1_1095 = torch.constant.int 1 | |
%int1_1096 = torch.constant.int 1 | |
%int0_1097 = torch.constant.int 0 | |
%5762 = torch.prim.ListConstruct %int1_1091, %int1_1092 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5763 = torch.prim.ListConstruct %int1_1093, %int1_1094 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5764 = torch.prim.ListConstruct %int1_1095, %int1_1096 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5765 = torch.prim.ListConstruct %int0_1097, %int0_1097 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1098 = torch.constant.bool false | |
%int1_1099 = torch.constant.int 1 | |
%5766 = torch.aten.convolution %5737, %5749, %5761, %5764, %5762, %5763, %false_1098, %5765, %int1_1099 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1100 = torch.constant.float 0.1015625 | |
%5767 = torch.aten.leaky_relu %5766, %float1.015630e-01_1100 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5768 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5769 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1101 = torch.constant.int 12 | |
%5770 = torch.aten.item %5768 : !torch.vtensor<[],f32> -> !torch.float | |
%5771 = torch.aten.item %5769 : !torch.vtensor<[],si8> -> !torch.int | |
%5772 = torch.aten.quantize_per_tensor %5767, %5770, %5771, %int12_1101 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5773 = torch.aten.int_repr %5772 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5774 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5775 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5776 = torch.aten.item %5774 : !torch.vtensor<[],f32> -> !torch.float | |
%5777 = torch.aten.item %5775 : !torch.vtensor<[],si8> -> !torch.int | |
%5778 = torch.aten._make_per_tensor_quantized_tensor %5773, %5776, %5777 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5779 = torch.aten.dequantize.self %5778 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%5780 = torch.prim.ListConstruct %5681, %5723, %5779 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1102 = torch.constant.int 1 | |
%5781 = torch.aten.cat %5780, %int1_1102 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%5782 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5783 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1103 = torch.constant.int 12 | |
%5784 = torch.aten.item %5782 : !torch.vtensor<[],f32> -> !torch.float | |
%5785 = torch.aten.item %5783 : !torch.vtensor<[],si8> -> !torch.int | |
%5786 = torch.aten.quantize_per_tensor %5781, %5784, %5785, %int12_1103 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%5787 = torch.aten.int_repr %5786 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%5788 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5789 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5790 = torch.aten.item %5788 : !torch.vtensor<[],f32> -> !torch.float | |
%5791 = torch.aten.item %5789 : !torch.vtensor<[],si8> -> !torch.int | |
%5792 = torch.aten._make_per_tensor_quantized_tensor %5787, %5790, %5791 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%5793 = torch.aten.dequantize.self %5792 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%5794 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5795 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1104 = torch.constant.int 12 | |
%5796 = torch.aten.item %5794 : !torch.vtensor<[],f32> -> !torch.float | |
%5797 = torch.aten.item %5795 : !torch.vtensor<[],si8> -> !torch.int | |
%5798 = torch.aten.quantize_per_tensor %136, %5796, %5797, %int12_1104 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%5799 = torch.aten.int_repr %5798 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%5800 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5801 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5802 = torch.aten.item %5800 : !torch.vtensor<[],f32> -> !torch.float | |
%5803 = torch.aten.item %5801 : !torch.vtensor<[],si8> -> !torch.int | |
%5804 = torch.aten._make_per_tensor_quantized_tensor %5799, %5802, %5803 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%5805 = torch.aten.dequantize.self %5804 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%5806 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5807 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1105 = torch.constant.int 12 | |
%5808 = torch.aten.item %5806 : !torch.vtensor<[],f32> -> !torch.float | |
%5809 = torch.aten.item %5807 : !torch.vtensor<[],si8> -> !torch.int | |
%5810 = torch.aten.quantize_per_tensor %137, %5808, %5809, %int12_1105 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5811 = torch.aten.int_repr %5810 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5812 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5813 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5814 = torch.aten.item %5812 : !torch.vtensor<[],f32> -> !torch.float | |
%5815 = torch.aten.item %5813 : !torch.vtensor<[],si8> -> !torch.int | |
%5816 = torch.aten._make_per_tensor_quantized_tensor %5811, %5814, %5815 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5817 = torch.aten.dequantize.self %5816 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1106 = torch.constant.int 1 | |
%int1_1107 = torch.constant.int 1 | |
%int1_1108 = torch.constant.int 1 | |
%int1_1109 = torch.constant.int 1 | |
%int1_1110 = torch.constant.int 1 | |
%int1_1111 = torch.constant.int 1 | |
%int0_1112 = torch.constant.int 0 | |
%5818 = torch.prim.ListConstruct %int1_1106, %int1_1107 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5819 = torch.prim.ListConstruct %int1_1108, %int1_1109 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5820 = torch.prim.ListConstruct %int1_1110, %int1_1111 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5821 = torch.prim.ListConstruct %int0_1112, %int0_1112 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1113 = torch.constant.bool false | |
%int1_1114 = torch.constant.int 1 | |
%5822 = torch.aten.convolution %5793, %5805, %5817, %5820, %5818, %5819, %false_1113, %5821, %int1_1114 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1115 = torch.constant.float 0.1015625 | |
%5823 = torch.aten.leaky_relu %5822, %float1.015630e-01_1115 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5824 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5825 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1116 = torch.constant.int 12 | |
%5826 = torch.aten.item %5824 : !torch.vtensor<[],f32> -> !torch.float | |
%5827 = torch.aten.item %5825 : !torch.vtensor<[],si8> -> !torch.int | |
%5828 = torch.aten.quantize_per_tensor %5823, %5826, %5827, %int12_1116 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5829 = torch.aten.int_repr %5828 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5830 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5831 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5832 = torch.aten.item %5830 : !torch.vtensor<[],f32> -> !torch.float | |
%5833 = torch.aten.item %5831 : !torch.vtensor<[],si8> -> !torch.int | |
%5834 = torch.aten._make_per_tensor_quantized_tensor %5829, %5832, %5833 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5835 = torch.aten.dequantize.self %5834 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%5836 = torch.prim.ListConstruct %5681, %5723, %5779, %5835 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1117 = torch.constant.int 1 | |
%5837 = torch.aten.cat %5836, %int1_1117 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%5838 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5839 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1118 = torch.constant.int 12 | |
%5840 = torch.aten.item %5838 : !torch.vtensor<[],f32> -> !torch.float | |
%5841 = torch.aten.item %5839 : !torch.vtensor<[],si8> -> !torch.int | |
%5842 = torch.aten.quantize_per_tensor %5837, %5840, %5841, %int12_1118 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%5843 = torch.aten.int_repr %5842 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%5844 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5845 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5846 = torch.aten.item %5844 : !torch.vtensor<[],f32> -> !torch.float | |
%5847 = torch.aten.item %5845 : !torch.vtensor<[],si8> -> !torch.int | |
%5848 = torch.aten._make_per_tensor_quantized_tensor %5843, %5846, %5847 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%5849 = torch.aten.dequantize.self %5848 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%5850 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5851 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1119 = torch.constant.int 12 | |
%5852 = torch.aten.item %5850 : !torch.vtensor<[],f32> -> !torch.float | |
%5853 = torch.aten.item %5851 : !torch.vtensor<[],si8> -> !torch.int | |
%5854 = torch.aten.quantize_per_tensor %138, %5852, %5853, %int12_1119 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%5855 = torch.aten.int_repr %5854 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%5856 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5857 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5858 = torch.aten.item %5856 : !torch.vtensor<[],f32> -> !torch.float | |
%5859 = torch.aten.item %5857 : !torch.vtensor<[],si8> -> !torch.int | |
%5860 = torch.aten._make_per_tensor_quantized_tensor %5855, %5858, %5859 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%5861 = torch.aten.dequantize.self %5860 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%5862 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5863 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1120 = torch.constant.int 12 | |
%5864 = torch.aten.item %5862 : !torch.vtensor<[],f32> -> !torch.float | |
%5865 = torch.aten.item %5863 : !torch.vtensor<[],si8> -> !torch.int | |
%5866 = torch.aten.quantize_per_tensor %139, %5864, %5865, %int12_1120 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5867 = torch.aten.int_repr %5866 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5868 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5869 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5870 = torch.aten.item %5868 : !torch.vtensor<[],f32> -> !torch.float | |
%5871 = torch.aten.item %5869 : !torch.vtensor<[],si8> -> !torch.int | |
%5872 = torch.aten._make_per_tensor_quantized_tensor %5867, %5870, %5871 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5873 = torch.aten.dequantize.self %5872 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1121 = torch.constant.int 1 | |
%int1_1122 = torch.constant.int 1 | |
%int1_1123 = torch.constant.int 1 | |
%int1_1124 = torch.constant.int 1 | |
%int1_1125 = torch.constant.int 1 | |
%int1_1126 = torch.constant.int 1 | |
%int0_1127 = torch.constant.int 0 | |
%5874 = torch.prim.ListConstruct %int1_1121, %int1_1122 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5875 = torch.prim.ListConstruct %int1_1123, %int1_1124 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5876 = torch.prim.ListConstruct %int1_1125, %int1_1126 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5877 = torch.prim.ListConstruct %int0_1127, %int0_1127 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1128 = torch.constant.bool false | |
%int1_1129 = torch.constant.int 1 | |
%5878 = torch.aten.convolution %5849, %5861, %5873, %5876, %5874, %5875, %false_1128, %5877, %int1_1129 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1130 = torch.constant.float 0.1015625 | |
%5879 = torch.aten.leaky_relu %5878, %float1.015630e-01_1130 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5880 = torch.prim.ListConstruct %5681, %5723, %5779, %5835, %5879 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1131 = torch.constant.int 1 | |
%5881 = torch.aten.cat %5880, %int1_1131 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%5882 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5883 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1132 = torch.constant.int 12 | |
%5884 = torch.aten.item %5882 : !torch.vtensor<[],f32> -> !torch.float | |
%5885 = torch.aten.item %5883 : !torch.vtensor<[],si8> -> !torch.int | |
%5886 = torch.aten.quantize_per_tensor %5881, %5884, %5885, %int12_1132 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%5887 = torch.aten.int_repr %5886 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%5888 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5889 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5890 = torch.aten.item %5888 : !torch.vtensor<[],f32> -> !torch.float | |
%5891 = torch.aten.item %5889 : !torch.vtensor<[],si8> -> !torch.int | |
%5892 = torch.aten._make_per_tensor_quantized_tensor %5887, %5890, %5891 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%5893 = torch.aten.dequantize.self %5892 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%5894 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5895 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1133 = torch.constant.int 12 | |
%5896 = torch.aten.item %5894 : !torch.vtensor<[],f32> -> !torch.float | |
%5897 = torch.aten.item %5895 : !torch.vtensor<[],si8> -> !torch.int | |
%5898 = torch.aten.quantize_per_tensor %140, %5896, %5897, %int12_1133 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%5899 = torch.aten.int_repr %5898 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%5900 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5901 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5902 = torch.aten.item %5900 : !torch.vtensor<[],f32> -> !torch.float | |
%5903 = torch.aten.item %5901 : !torch.vtensor<[],si8> -> !torch.int | |
%5904 = torch.aten._make_per_tensor_quantized_tensor %5899, %5902, %5903 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%5905 = torch.aten.dequantize.self %5904 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%5906 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5907 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1134 = torch.constant.int 12 | |
%5908 = torch.aten.item %5906 : !torch.vtensor<[],f32> -> !torch.float | |
%5909 = torch.aten.item %5907 : !torch.vtensor<[],si8> -> !torch.int | |
%5910 = torch.aten.quantize_per_tensor %141, %5908, %5909, %int12_1134 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%5911 = torch.aten.int_repr %5910 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%5912 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5913 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5914 = torch.aten.item %5912 : !torch.vtensor<[],f32> -> !torch.float | |
%5915 = torch.aten.item %5913 : !torch.vtensor<[],si8> -> !torch.int | |
%5916 = torch.aten._make_per_tensor_quantized_tensor %5911, %5914, %5915 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%5917 = torch.aten.dequantize.self %5916 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_1135 = torch.constant.int 1 | |
%int1_1136 = torch.constant.int 1 | |
%int1_1137 = torch.constant.int 1 | |
%int1_1138 = torch.constant.int 1 | |
%int1_1139 = torch.constant.int 1 | |
%int1_1140 = torch.constant.int 1 | |
%int0_1141 = torch.constant.int 0 | |
%5918 = torch.prim.ListConstruct %int1_1135, %int1_1136 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5919 = torch.prim.ListConstruct %int1_1137, %int1_1138 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5920 = torch.prim.ListConstruct %int1_1139, %int1_1140 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5921 = torch.prim.ListConstruct %int0_1141, %int0_1141 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1142 = torch.constant.bool false | |
%int1_1143 = torch.constant.int 1 | |
%5922 = torch.aten.convolution %5893, %5905, %5917, %5920, %5918, %5919, %false_1142, %5921, %int1_1143 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%5923 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5924 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1144 = torch.constant.int 12 | |
%5925 = torch.aten.item %5923 : !torch.vtensor<[],f32> -> !torch.float | |
%5926 = torch.aten.item %5924 : !torch.vtensor<[],si8> -> !torch.int | |
%5927 = torch.aten.quantize_per_tensor %5922, %5925, %5926, %int12_1144 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5928 = torch.aten.int_repr %5927 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5929 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5930 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5931 = torch.aten.item %5929 : !torch.vtensor<[],f32> -> !torch.float | |
%5932 = torch.aten.item %5930 : !torch.vtensor<[],si8> -> !torch.int | |
%5933 = torch.aten._make_per_tensor_quantized_tensor %5928, %5931, %5932 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5934 = torch.aten.dequantize.self %5933 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%5935 = torch.aten.mul.Tensor %5934, %935 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%5936 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5937 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1145 = torch.constant.int 12 | |
%5938 = torch.aten.item %5936 : !torch.vtensor<[],f32> -> !torch.float | |
%5939 = torch.aten.item %5937 : !torch.vtensor<[],si8> -> !torch.int | |
%5940 = torch.aten.quantize_per_tensor %5935, %5938, %5939, %int12_1145 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5941 = torch.aten.int_repr %5940 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5942 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5943 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5944 = torch.aten.item %5942 : !torch.vtensor<[],f32> -> !torch.float | |
%5945 = torch.aten.item %5943 : !torch.vtensor<[],si8> -> !torch.int | |
%5946 = torch.aten._make_per_tensor_quantized_tensor %5941, %5944, %5945 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5947 = torch.aten.dequantize.self %5946 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1146 = torch.constant.int 1 | |
%5948 = torch.aten.add.Tensor %5947, %5681, %int1_1146 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%5949 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5950 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1147 = torch.constant.int 12 | |
%5951 = torch.aten.item %5949 : !torch.vtensor<[],f32> -> !torch.float | |
%5952 = torch.aten.item %5950 : !torch.vtensor<[],si8> -> !torch.int | |
%5953 = torch.aten.quantize_per_tensor %5948, %5951, %5952, %int12_1147 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5954 = torch.aten.int_repr %5953 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%5955 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5956 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5957 = torch.aten.item %5955 : !torch.vtensor<[],f32> -> !torch.float | |
%5958 = torch.aten.item %5956 : !torch.vtensor<[],si8> -> !torch.int | |
%5959 = torch.aten._make_per_tensor_quantized_tensor %5954, %5957, %5958 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%5960 = torch.aten.dequantize.self %5959 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%5961 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5962 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1148 = torch.constant.int 12 | |
%5963 = torch.aten.item %5961 : !torch.vtensor<[],f32> -> !torch.float | |
%5964 = torch.aten.item %5962 : !torch.vtensor<[],si8> -> !torch.int | |
%5965 = torch.aten.quantize_per_tensor %142, %5963, %5964, %int12_1148 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%5966 = torch.aten.int_repr %5965 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%5967 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5968 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5969 = torch.aten.item %5967 : !torch.vtensor<[],f32> -> !torch.float | |
%5970 = torch.aten.item %5968 : !torch.vtensor<[],si8> -> !torch.int | |
%5971 = torch.aten._make_per_tensor_quantized_tensor %5966, %5969, %5970 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%5972 = torch.aten.dequantize.self %5971 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%5973 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5974 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1149 = torch.constant.int 12 | |
%5975 = torch.aten.item %5973 : !torch.vtensor<[],f32> -> !torch.float | |
%5976 = torch.aten.item %5974 : !torch.vtensor<[],si8> -> !torch.int | |
%5977 = torch.aten.quantize_per_tensor %143, %5975, %5976, %int12_1149 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5978 = torch.aten.int_repr %5977 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%5979 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5980 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5981 = torch.aten.item %5979 : !torch.vtensor<[],f32> -> !torch.float | |
%5982 = torch.aten.item %5980 : !torch.vtensor<[],si8> -> !torch.int | |
%5983 = torch.aten._make_per_tensor_quantized_tensor %5978, %5981, %5982 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%5984 = torch.aten.dequantize.self %5983 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1150 = torch.constant.int 1 | |
%int1_1151 = torch.constant.int 1 | |
%int1_1152 = torch.constant.int 1 | |
%int1_1153 = torch.constant.int 1 | |
%int1_1154 = torch.constant.int 1 | |
%int1_1155 = torch.constant.int 1 | |
%int0_1156 = torch.constant.int 0 | |
%5985 = torch.prim.ListConstruct %int1_1150, %int1_1151 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5986 = torch.prim.ListConstruct %int1_1152, %int1_1153 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5987 = torch.prim.ListConstruct %int1_1154, %int1_1155 : (!torch.int, !torch.int) -> !torch.list<int> | |
%5988 = torch.prim.ListConstruct %int0_1156, %int0_1156 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1157 = torch.constant.bool false | |
%int1_1158 = torch.constant.int 1 | |
%5989 = torch.aten.convolution %5960, %5972, %5984, %5987, %5985, %5986, %false_1157, %5988, %int1_1158 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1159 = torch.constant.float 0.1015625 | |
%5990 = torch.aten.leaky_relu %5989, %float1.015630e-01_1159 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%5991 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5992 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1160 = torch.constant.int 12 | |
%5993 = torch.aten.item %5991 : !torch.vtensor<[],f32> -> !torch.float | |
%5994 = torch.aten.item %5992 : !torch.vtensor<[],si8> -> !torch.int | |
%5995 = torch.aten.quantize_per_tensor %5990, %5993, %5994, %int12_1160 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%5996 = torch.aten.int_repr %5995 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%5997 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%5998 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%5999 = torch.aten.item %5997 : !torch.vtensor<[],f32> -> !torch.float | |
%6000 = torch.aten.item %5998 : !torch.vtensor<[],si8> -> !torch.int | |
%6001 = torch.aten._make_per_tensor_quantized_tensor %5996, %5999, %6000 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6002 = torch.aten.dequantize.self %6001 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6003 = torch.prim.ListConstruct %5960, %6002 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1161 = torch.constant.int 1 | |
%6004 = torch.aten.cat %6003, %int1_1161 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%6005 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6006 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1162 = torch.constant.int 12 | |
%6007 = torch.aten.item %6005 : !torch.vtensor<[],f32> -> !torch.float | |
%6008 = torch.aten.item %6006 : !torch.vtensor<[],si8> -> !torch.int | |
%6009 = torch.aten.quantize_per_tensor %6004, %6007, %6008, %int12_1162 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%6010 = torch.aten.int_repr %6009 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%6011 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6012 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6013 = torch.aten.item %6011 : !torch.vtensor<[],f32> -> !torch.float | |
%6014 = torch.aten.item %6012 : !torch.vtensor<[],si8> -> !torch.int | |
%6015 = torch.aten._make_per_tensor_quantized_tensor %6010, %6013, %6014 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%6016 = torch.aten.dequantize.self %6015 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%6017 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6018 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1163 = torch.constant.int 12 | |
%6019 = torch.aten.item %6017 : !torch.vtensor<[],f32> -> !torch.float | |
%6020 = torch.aten.item %6018 : !torch.vtensor<[],si8> -> !torch.int | |
%6021 = torch.aten.quantize_per_tensor %144, %6019, %6020, %int12_1163 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%6022 = torch.aten.int_repr %6021 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%6023 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6024 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6025 = torch.aten.item %6023 : !torch.vtensor<[],f32> -> !torch.float | |
%6026 = torch.aten.item %6024 : !torch.vtensor<[],si8> -> !torch.int | |
%6027 = torch.aten._make_per_tensor_quantized_tensor %6022, %6025, %6026 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%6028 = torch.aten.dequantize.self %6027 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%6029 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6030 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1164 = torch.constant.int 12 | |
%6031 = torch.aten.item %6029 : !torch.vtensor<[],f32> -> !torch.float | |
%6032 = torch.aten.item %6030 : !torch.vtensor<[],si8> -> !torch.int | |
%6033 = torch.aten.quantize_per_tensor %145, %6031, %6032, %int12_1164 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6034 = torch.aten.int_repr %6033 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6035 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6036 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6037 = torch.aten.item %6035 : !torch.vtensor<[],f32> -> !torch.float | |
%6038 = torch.aten.item %6036 : !torch.vtensor<[],si8> -> !torch.int | |
%6039 = torch.aten._make_per_tensor_quantized_tensor %6034, %6037, %6038 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6040 = torch.aten.dequantize.self %6039 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1165 = torch.constant.int 1 | |
%int1_1166 = torch.constant.int 1 | |
%int1_1167 = torch.constant.int 1 | |
%int1_1168 = torch.constant.int 1 | |
%int1_1169 = torch.constant.int 1 | |
%int1_1170 = torch.constant.int 1 | |
%int0_1171 = torch.constant.int 0 | |
%6041 = torch.prim.ListConstruct %int1_1165, %int1_1166 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6042 = torch.prim.ListConstruct %int1_1167, %int1_1168 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6043 = torch.prim.ListConstruct %int1_1169, %int1_1170 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6044 = torch.prim.ListConstruct %int0_1171, %int0_1171 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1172 = torch.constant.bool false | |
%int1_1173 = torch.constant.int 1 | |
%6045 = torch.aten.convolution %6016, %6028, %6040, %6043, %6041, %6042, %false_1172, %6044, %int1_1173 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1174 = torch.constant.float 0.1015625 | |
%6046 = torch.aten.leaky_relu %6045, %float1.015630e-01_1174 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6047 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6048 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1175 = torch.constant.int 12 | |
%6049 = torch.aten.item %6047 : !torch.vtensor<[],f32> -> !torch.float | |
%6050 = torch.aten.item %6048 : !torch.vtensor<[],si8> -> !torch.int | |
%6051 = torch.aten.quantize_per_tensor %6046, %6049, %6050, %int12_1175 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6052 = torch.aten.int_repr %6051 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6053 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6054 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6055 = torch.aten.item %6053 : !torch.vtensor<[],f32> -> !torch.float | |
%6056 = torch.aten.item %6054 : !torch.vtensor<[],si8> -> !torch.int | |
%6057 = torch.aten._make_per_tensor_quantized_tensor %6052, %6055, %6056 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6058 = torch.aten.dequantize.self %6057 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6059 = torch.prim.ListConstruct %5960, %6002, %6058 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1176 = torch.constant.int 1 | |
%6060 = torch.aten.cat %6059, %int1_1176 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%6061 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6062 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1177 = torch.constant.int 12 | |
%6063 = torch.aten.item %6061 : !torch.vtensor<[],f32> -> !torch.float | |
%6064 = torch.aten.item %6062 : !torch.vtensor<[],si8> -> !torch.int | |
%6065 = torch.aten.quantize_per_tensor %6060, %6063, %6064, %int12_1177 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%6066 = torch.aten.int_repr %6065 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%6067 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6068 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6069 = torch.aten.item %6067 : !torch.vtensor<[],f32> -> !torch.float | |
%6070 = torch.aten.item %6068 : !torch.vtensor<[],si8> -> !torch.int | |
%6071 = torch.aten._make_per_tensor_quantized_tensor %6066, %6069, %6070 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%6072 = torch.aten.dequantize.self %6071 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%6073 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6074 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1178 = torch.constant.int 12 | |
%6075 = torch.aten.item %6073 : !torch.vtensor<[],f32> -> !torch.float | |
%6076 = torch.aten.item %6074 : !torch.vtensor<[],si8> -> !torch.int | |
%6077 = torch.aten.quantize_per_tensor %146, %6075, %6076, %int12_1178 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%6078 = torch.aten.int_repr %6077 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%6079 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6080 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6081 = torch.aten.item %6079 : !torch.vtensor<[],f32> -> !torch.float | |
%6082 = torch.aten.item %6080 : !torch.vtensor<[],si8> -> !torch.int | |
%6083 = torch.aten._make_per_tensor_quantized_tensor %6078, %6081, %6082 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%6084 = torch.aten.dequantize.self %6083 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%6085 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6086 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1179 = torch.constant.int 12 | |
%6087 = torch.aten.item %6085 : !torch.vtensor<[],f32> -> !torch.float | |
%6088 = torch.aten.item %6086 : !torch.vtensor<[],si8> -> !torch.int | |
%6089 = torch.aten.quantize_per_tensor %147, %6087, %6088, %int12_1179 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6090 = torch.aten.int_repr %6089 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6091 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6092 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6093 = torch.aten.item %6091 : !torch.vtensor<[],f32> -> !torch.float | |
%6094 = torch.aten.item %6092 : !torch.vtensor<[],si8> -> !torch.int | |
%6095 = torch.aten._make_per_tensor_quantized_tensor %6090, %6093, %6094 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6096 = torch.aten.dequantize.self %6095 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1180 = torch.constant.int 1 | |
%int1_1181 = torch.constant.int 1 | |
%int1_1182 = torch.constant.int 1 | |
%int1_1183 = torch.constant.int 1 | |
%int1_1184 = torch.constant.int 1 | |
%int1_1185 = torch.constant.int 1 | |
%int0_1186 = torch.constant.int 0 | |
%6097 = torch.prim.ListConstruct %int1_1180, %int1_1181 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6098 = torch.prim.ListConstruct %int1_1182, %int1_1183 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6099 = torch.prim.ListConstruct %int1_1184, %int1_1185 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6100 = torch.prim.ListConstruct %int0_1186, %int0_1186 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1187 = torch.constant.bool false | |
%int1_1188 = torch.constant.int 1 | |
%6101 = torch.aten.convolution %6072, %6084, %6096, %6099, %6097, %6098, %false_1187, %6100, %int1_1188 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1189 = torch.constant.float 0.1015625 | |
%6102 = torch.aten.leaky_relu %6101, %float1.015630e-01_1189 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6103 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6104 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1190 = torch.constant.int 12 | |
%6105 = torch.aten.item %6103 : !torch.vtensor<[],f32> -> !torch.float | |
%6106 = torch.aten.item %6104 : !torch.vtensor<[],si8> -> !torch.int | |
%6107 = torch.aten.quantize_per_tensor %6102, %6105, %6106, %int12_1190 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6108 = torch.aten.int_repr %6107 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6109 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6110 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6111 = torch.aten.item %6109 : !torch.vtensor<[],f32> -> !torch.float | |
%6112 = torch.aten.item %6110 : !torch.vtensor<[],si8> -> !torch.int | |
%6113 = torch.aten._make_per_tensor_quantized_tensor %6108, %6111, %6112 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6114 = torch.aten.dequantize.self %6113 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6115 = torch.prim.ListConstruct %5960, %6002, %6058, %6114 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1191 = torch.constant.int 1 | |
%6116 = torch.aten.cat %6115, %int1_1191 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%6117 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6118 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1192 = torch.constant.int 12 | |
%6119 = torch.aten.item %6117 : !torch.vtensor<[],f32> -> !torch.float | |
%6120 = torch.aten.item %6118 : !torch.vtensor<[],si8> -> !torch.int | |
%6121 = torch.aten.quantize_per_tensor %6116, %6119, %6120, %int12_1192 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%6122 = torch.aten.int_repr %6121 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%6123 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6124 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6125 = torch.aten.item %6123 : !torch.vtensor<[],f32> -> !torch.float | |
%6126 = torch.aten.item %6124 : !torch.vtensor<[],si8> -> !torch.int | |
%6127 = torch.aten._make_per_tensor_quantized_tensor %6122, %6125, %6126 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%6128 = torch.aten.dequantize.self %6127 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%6129 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6130 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1193 = torch.constant.int 12 | |
%6131 = torch.aten.item %6129 : !torch.vtensor<[],f32> -> !torch.float | |
%6132 = torch.aten.item %6130 : !torch.vtensor<[],si8> -> !torch.int | |
%6133 = torch.aten.quantize_per_tensor %148, %6131, %6132, %int12_1193 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%6134 = torch.aten.int_repr %6133 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%6135 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6136 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6137 = torch.aten.item %6135 : !torch.vtensor<[],f32> -> !torch.float | |
%6138 = torch.aten.item %6136 : !torch.vtensor<[],si8> -> !torch.int | |
%6139 = torch.aten._make_per_tensor_quantized_tensor %6134, %6137, %6138 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%6140 = torch.aten.dequantize.self %6139 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%6141 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6142 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1194 = torch.constant.int 12 | |
%6143 = torch.aten.item %6141 : !torch.vtensor<[],f32> -> !torch.float | |
%6144 = torch.aten.item %6142 : !torch.vtensor<[],si8> -> !torch.int | |
%6145 = torch.aten.quantize_per_tensor %149, %6143, %6144, %int12_1194 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6146 = torch.aten.int_repr %6145 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6147 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6148 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6149 = torch.aten.item %6147 : !torch.vtensor<[],f32> -> !torch.float | |
%6150 = torch.aten.item %6148 : !torch.vtensor<[],si8> -> !torch.int | |
%6151 = torch.aten._make_per_tensor_quantized_tensor %6146, %6149, %6150 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6152 = torch.aten.dequantize.self %6151 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1195 = torch.constant.int 1 | |
%int1_1196 = torch.constant.int 1 | |
%int1_1197 = torch.constant.int 1 | |
%int1_1198 = torch.constant.int 1 | |
%int1_1199 = torch.constant.int 1 | |
%int1_1200 = torch.constant.int 1 | |
%int0_1201 = torch.constant.int 0 | |
%6153 = torch.prim.ListConstruct %int1_1195, %int1_1196 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6154 = torch.prim.ListConstruct %int1_1197, %int1_1198 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6155 = torch.prim.ListConstruct %int1_1199, %int1_1200 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6156 = torch.prim.ListConstruct %int0_1201, %int0_1201 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1202 = torch.constant.bool false | |
%int1_1203 = torch.constant.int 1 | |
%6157 = torch.aten.convolution %6128, %6140, %6152, %6155, %6153, %6154, %false_1202, %6156, %int1_1203 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1204 = torch.constant.float 0.1015625 | |
%6158 = torch.aten.leaky_relu %6157, %float1.015630e-01_1204 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6159 = torch.prim.ListConstruct %5960, %6002, %6058, %6114, %6158 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1205 = torch.constant.int 1 | |
%6160 = torch.aten.cat %6159, %int1_1205 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%6161 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6162 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1206 = torch.constant.int 12 | |
%6163 = torch.aten.item %6161 : !torch.vtensor<[],f32> -> !torch.float | |
%6164 = torch.aten.item %6162 : !torch.vtensor<[],si8> -> !torch.int | |
%6165 = torch.aten.quantize_per_tensor %6160, %6163, %6164, %int12_1206 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%6166 = torch.aten.int_repr %6165 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%6167 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6168 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6169 = torch.aten.item %6167 : !torch.vtensor<[],f32> -> !torch.float | |
%6170 = torch.aten.item %6168 : !torch.vtensor<[],si8> -> !torch.int | |
%6171 = torch.aten._make_per_tensor_quantized_tensor %6166, %6169, %6170 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%6172 = torch.aten.dequantize.self %6171 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%6173 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6174 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1207 = torch.constant.int 12 | |
%6175 = torch.aten.item %6173 : !torch.vtensor<[],f32> -> !torch.float | |
%6176 = torch.aten.item %6174 : !torch.vtensor<[],si8> -> !torch.int | |
%6177 = torch.aten.quantize_per_tensor %150, %6175, %6176, %int12_1207 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%6178 = torch.aten.int_repr %6177 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%6179 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6180 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6181 = torch.aten.item %6179 : !torch.vtensor<[],f32> -> !torch.float | |
%6182 = torch.aten.item %6180 : !torch.vtensor<[],si8> -> !torch.int | |
%6183 = torch.aten._make_per_tensor_quantized_tensor %6178, %6181, %6182 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%6184 = torch.aten.dequantize.self %6183 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%6185 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6186 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1208 = torch.constant.int 12 | |
%6187 = torch.aten.item %6185 : !torch.vtensor<[],f32> -> !torch.float | |
%6188 = torch.aten.item %6186 : !torch.vtensor<[],si8> -> !torch.int | |
%6189 = torch.aten.quantize_per_tensor %151, %6187, %6188, %int12_1208 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%6190 = torch.aten.int_repr %6189 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%6191 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6192 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6193 = torch.aten.item %6191 : !torch.vtensor<[],f32> -> !torch.float | |
%6194 = torch.aten.item %6192 : !torch.vtensor<[],si8> -> !torch.int | |
%6195 = torch.aten._make_per_tensor_quantized_tensor %6190, %6193, %6194 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%6196 = torch.aten.dequantize.self %6195 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_1209 = torch.constant.int 1 | |
%int1_1210 = torch.constant.int 1 | |
%int1_1211 = torch.constant.int 1 | |
%int1_1212 = torch.constant.int 1 | |
%int1_1213 = torch.constant.int 1 | |
%int1_1214 = torch.constant.int 1 | |
%int0_1215 = torch.constant.int 0 | |
%6197 = torch.prim.ListConstruct %int1_1209, %int1_1210 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6198 = torch.prim.ListConstruct %int1_1211, %int1_1212 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6199 = torch.prim.ListConstruct %int1_1213, %int1_1214 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6200 = torch.prim.ListConstruct %int0_1215, %int0_1215 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1216 = torch.constant.bool false | |
%int1_1217 = torch.constant.int 1 | |
%6201 = torch.aten.convolution %6172, %6184, %6196, %6199, %6197, %6198, %false_1216, %6200, %int1_1217 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%6202 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6203 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1218 = torch.constant.int 12 | |
%6204 = torch.aten.item %6202 : !torch.vtensor<[],f32> -> !torch.float | |
%6205 = torch.aten.item %6203 : !torch.vtensor<[],si8> -> !torch.int | |
%6206 = torch.aten.quantize_per_tensor %6201, %6204, %6205, %int12_1218 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6207 = torch.aten.int_repr %6206 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6208 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6209 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6210 = torch.aten.item %6208 : !torch.vtensor<[],f32> -> !torch.float | |
%6211 = torch.aten.item %6209 : !torch.vtensor<[],si8> -> !torch.int | |
%6212 = torch.aten._make_per_tensor_quantized_tensor %6207, %6210, %6211 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6213 = torch.aten.dequantize.self %6212 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%6214 = torch.aten.mul.Tensor %6213, %948 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%6215 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6216 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1219 = torch.constant.int 12 | |
%6217 = torch.aten.item %6215 : !torch.vtensor<[],f32> -> !torch.float | |
%6218 = torch.aten.item %6216 : !torch.vtensor<[],si8> -> !torch.int | |
%6219 = torch.aten.quantize_per_tensor %6214, %6217, %6218, %int12_1219 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6220 = torch.aten.int_repr %6219 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6221 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6222 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6223 = torch.aten.item %6221 : !torch.vtensor<[],f32> -> !torch.float | |
%6224 = torch.aten.item %6222 : !torch.vtensor<[],si8> -> !torch.int | |
%6225 = torch.aten._make_per_tensor_quantized_tensor %6220, %6223, %6224 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6226 = torch.aten.dequantize.self %6225 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1220 = torch.constant.int 1 | |
%6227 = torch.aten.add.Tensor %6226, %5960, %int1_1220 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%6228 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6229 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1221 = torch.constant.int 12 | |
%6230 = torch.aten.item %6228 : !torch.vtensor<[],f32> -> !torch.float | |
%6231 = torch.aten.item %6229 : !torch.vtensor<[],si8> -> !torch.int | |
%6232 = torch.aten.quantize_per_tensor %6227, %6230, %6231, %int12_1221 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6233 = torch.aten.int_repr %6232 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6234 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6235 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6236 = torch.aten.item %6234 : !torch.vtensor<[],f32> -> !torch.float | |
%6237 = torch.aten.item %6235 : !torch.vtensor<[],si8> -> !torch.int | |
%6238 = torch.aten._make_per_tensor_quantized_tensor %6233, %6236, %6237 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6239 = torch.aten.dequantize.self %6238 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%6240 = torch.aten.mul.Tensor %6239, %961 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%6241 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6242 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1222 = torch.constant.int 12 | |
%6243 = torch.aten.item %6241 : !torch.vtensor<[],f32> -> !torch.float | |
%6244 = torch.aten.item %6242 : !torch.vtensor<[],si8> -> !torch.int | |
%6245 = torch.aten.quantize_per_tensor %6240, %6243, %6244, %int12_1222 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6246 = torch.aten.int_repr %6245 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6247 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6248 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6249 = torch.aten.item %6247 : !torch.vtensor<[],f32> -> !torch.float | |
%6250 = torch.aten.item %6248 : !torch.vtensor<[],si8> -> !torch.int | |
%6251 = torch.aten._make_per_tensor_quantized_tensor %6246, %6249, %6250 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6252 = torch.aten.dequantize.self %6251 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1223 = torch.constant.int 1 | |
%6253 = torch.aten.add.Tensor %6252, %5402, %int1_1223 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%6254 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6255 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1224 = torch.constant.int 12 | |
%6256 = torch.aten.item %6254 : !torch.vtensor<[],f32> -> !torch.float | |
%6257 = torch.aten.item %6255 : !torch.vtensor<[],si8> -> !torch.int | |
%6258 = torch.aten.quantize_per_tensor %6253, %6256, %6257, %int12_1224 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6259 = torch.aten.int_repr %6258 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6260 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6261 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6262 = torch.aten.item %6260 : !torch.vtensor<[],f32> -> !torch.float | |
%6263 = torch.aten.item %6261 : !torch.vtensor<[],si8> -> !torch.int | |
%6264 = torch.aten._make_per_tensor_quantized_tensor %6259, %6262, %6263 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6265 = torch.aten.dequantize.self %6264 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%6266 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6267 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1225 = torch.constant.int 12 | |
%6268 = torch.aten.item %6266 : !torch.vtensor<[],f32> -> !torch.float | |
%6269 = torch.aten.item %6267 : !torch.vtensor<[],si8> -> !torch.int | |
%6270 = torch.aten.quantize_per_tensor %152, %6268, %6269, %int12_1225 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%6271 = torch.aten.int_repr %6270 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%6272 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6273 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6274 = torch.aten.item %6272 : !torch.vtensor<[],f32> -> !torch.float | |
%6275 = torch.aten.item %6273 : !torch.vtensor<[],si8> -> !torch.int | |
%6276 = torch.aten._make_per_tensor_quantized_tensor %6271, %6274, %6275 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%6277 = torch.aten.dequantize.self %6276 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%6278 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6279 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1226 = torch.constant.int 12 | |
%6280 = torch.aten.item %6278 : !torch.vtensor<[],f32> -> !torch.float | |
%6281 = torch.aten.item %6279 : !torch.vtensor<[],si8> -> !torch.int | |
%6282 = torch.aten.quantize_per_tensor %153, %6280, %6281, %int12_1226 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6283 = torch.aten.int_repr %6282 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6284 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6285 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6286 = torch.aten.item %6284 : !torch.vtensor<[],f32> -> !torch.float | |
%6287 = torch.aten.item %6285 : !torch.vtensor<[],si8> -> !torch.int | |
%6288 = torch.aten._make_per_tensor_quantized_tensor %6283, %6286, %6287 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6289 = torch.aten.dequantize.self %6288 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1227 = torch.constant.int 1 | |
%int1_1228 = torch.constant.int 1 | |
%int1_1229 = torch.constant.int 1 | |
%int1_1230 = torch.constant.int 1 | |
%int1_1231 = torch.constant.int 1 | |
%int1_1232 = torch.constant.int 1 | |
%int0_1233 = torch.constant.int 0 | |
%6290 = torch.prim.ListConstruct %int1_1227, %int1_1228 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6291 = torch.prim.ListConstruct %int1_1229, %int1_1230 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6292 = torch.prim.ListConstruct %int1_1231, %int1_1232 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6293 = torch.prim.ListConstruct %int0_1233, %int0_1233 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1234 = torch.constant.bool false | |
%int1_1235 = torch.constant.int 1 | |
%6294 = torch.aten.convolution %6265, %6277, %6289, %6292, %6290, %6291, %false_1234, %6293, %int1_1235 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1236 = torch.constant.float 0.1015625 | |
%6295 = torch.aten.leaky_relu %6294, %float1.015630e-01_1236 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6296 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6297 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1237 = torch.constant.int 12 | |
%6298 = torch.aten.item %6296 : !torch.vtensor<[],f32> -> !torch.float | |
%6299 = torch.aten.item %6297 : !torch.vtensor<[],si8> -> !torch.int | |
%6300 = torch.aten.quantize_per_tensor %6295, %6298, %6299, %int12_1237 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6301 = torch.aten.int_repr %6300 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6302 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6303 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6304 = torch.aten.item %6302 : !torch.vtensor<[],f32> -> !torch.float | |
%6305 = torch.aten.item %6303 : !torch.vtensor<[],si8> -> !torch.int | |
%6306 = torch.aten._make_per_tensor_quantized_tensor %6301, %6304, %6305 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6307 = torch.aten.dequantize.self %6306 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6308 = torch.prim.ListConstruct %6265, %6307 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1238 = torch.constant.int 1 | |
%6309 = torch.aten.cat %6308, %int1_1238 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%6310 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6311 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1239 = torch.constant.int 12 | |
%6312 = torch.aten.item %6310 : !torch.vtensor<[],f32> -> !torch.float | |
%6313 = torch.aten.item %6311 : !torch.vtensor<[],si8> -> !torch.int | |
%6314 = torch.aten.quantize_per_tensor %6309, %6312, %6313, %int12_1239 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%6315 = torch.aten.int_repr %6314 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%6316 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6317 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6318 = torch.aten.item %6316 : !torch.vtensor<[],f32> -> !torch.float | |
%6319 = torch.aten.item %6317 : !torch.vtensor<[],si8> -> !torch.int | |
%6320 = torch.aten._make_per_tensor_quantized_tensor %6315, %6318, %6319 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%6321 = torch.aten.dequantize.self %6320 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%6322 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6323 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1240 = torch.constant.int 12 | |
%6324 = torch.aten.item %6322 : !torch.vtensor<[],f32> -> !torch.float | |
%6325 = torch.aten.item %6323 : !torch.vtensor<[],si8> -> !torch.int | |
%6326 = torch.aten.quantize_per_tensor %154, %6324, %6325, %int12_1240 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%6327 = torch.aten.int_repr %6326 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%6328 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6329 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6330 = torch.aten.item %6328 : !torch.vtensor<[],f32> -> !torch.float | |
%6331 = torch.aten.item %6329 : !torch.vtensor<[],si8> -> !torch.int | |
%6332 = torch.aten._make_per_tensor_quantized_tensor %6327, %6330, %6331 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%6333 = torch.aten.dequantize.self %6332 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%6334 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6335 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1241 = torch.constant.int 12 | |
%6336 = torch.aten.item %6334 : !torch.vtensor<[],f32> -> !torch.float | |
%6337 = torch.aten.item %6335 : !torch.vtensor<[],si8> -> !torch.int | |
%6338 = torch.aten.quantize_per_tensor %155, %6336, %6337, %int12_1241 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6339 = torch.aten.int_repr %6338 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6340 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6341 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6342 = torch.aten.item %6340 : !torch.vtensor<[],f32> -> !torch.float | |
%6343 = torch.aten.item %6341 : !torch.vtensor<[],si8> -> !torch.int | |
%6344 = torch.aten._make_per_tensor_quantized_tensor %6339, %6342, %6343 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6345 = torch.aten.dequantize.self %6344 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1242 = torch.constant.int 1 | |
%int1_1243 = torch.constant.int 1 | |
%int1_1244 = torch.constant.int 1 | |
%int1_1245 = torch.constant.int 1 | |
%int1_1246 = torch.constant.int 1 | |
%int1_1247 = torch.constant.int 1 | |
%int0_1248 = torch.constant.int 0 | |
%6346 = torch.prim.ListConstruct %int1_1242, %int1_1243 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6347 = torch.prim.ListConstruct %int1_1244, %int1_1245 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6348 = torch.prim.ListConstruct %int1_1246, %int1_1247 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6349 = torch.prim.ListConstruct %int0_1248, %int0_1248 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1249 = torch.constant.bool false | |
%int1_1250 = torch.constant.int 1 | |
%6350 = torch.aten.convolution %6321, %6333, %6345, %6348, %6346, %6347, %false_1249, %6349, %int1_1250 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1251 = torch.constant.float 0.1015625 | |
%6351 = torch.aten.leaky_relu %6350, %float1.015630e-01_1251 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6352 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6353 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1252 = torch.constant.int 12 | |
%6354 = torch.aten.item %6352 : !torch.vtensor<[],f32> -> !torch.float | |
%6355 = torch.aten.item %6353 : !torch.vtensor<[],si8> -> !torch.int | |
%6356 = torch.aten.quantize_per_tensor %6351, %6354, %6355, %int12_1252 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6357 = torch.aten.int_repr %6356 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6358 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6359 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6360 = torch.aten.item %6358 : !torch.vtensor<[],f32> -> !torch.float | |
%6361 = torch.aten.item %6359 : !torch.vtensor<[],si8> -> !torch.int | |
%6362 = torch.aten._make_per_tensor_quantized_tensor %6357, %6360, %6361 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6363 = torch.aten.dequantize.self %6362 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6364 = torch.prim.ListConstruct %6265, %6307, %6363 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1253 = torch.constant.int 1 | |
%6365 = torch.aten.cat %6364, %int1_1253 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%6366 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6367 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1254 = torch.constant.int 12 | |
%6368 = torch.aten.item %6366 : !torch.vtensor<[],f32> -> !torch.float | |
%6369 = torch.aten.item %6367 : !torch.vtensor<[],si8> -> !torch.int | |
%6370 = torch.aten.quantize_per_tensor %6365, %6368, %6369, %int12_1254 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%6371 = torch.aten.int_repr %6370 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%6372 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6373 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6374 = torch.aten.item %6372 : !torch.vtensor<[],f32> -> !torch.float | |
%6375 = torch.aten.item %6373 : !torch.vtensor<[],si8> -> !torch.int | |
%6376 = torch.aten._make_per_tensor_quantized_tensor %6371, %6374, %6375 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%6377 = torch.aten.dequantize.self %6376 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%6378 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6379 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1255 = torch.constant.int 12 | |
%6380 = torch.aten.item %6378 : !torch.vtensor<[],f32> -> !torch.float | |
%6381 = torch.aten.item %6379 : !torch.vtensor<[],si8> -> !torch.int | |
%6382 = torch.aten.quantize_per_tensor %156, %6380, %6381, %int12_1255 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%6383 = torch.aten.int_repr %6382 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%6384 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6385 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6386 = torch.aten.item %6384 : !torch.vtensor<[],f32> -> !torch.float | |
%6387 = torch.aten.item %6385 : !torch.vtensor<[],si8> -> !torch.int | |
%6388 = torch.aten._make_per_tensor_quantized_tensor %6383, %6386, %6387 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%6389 = torch.aten.dequantize.self %6388 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%6390 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6391 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1256 = torch.constant.int 12 | |
%6392 = torch.aten.item %6390 : !torch.vtensor<[],f32> -> !torch.float | |
%6393 = torch.aten.item %6391 : !torch.vtensor<[],si8> -> !torch.int | |
%6394 = torch.aten.quantize_per_tensor %157, %6392, %6393, %int12_1256 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6395 = torch.aten.int_repr %6394 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6396 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6397 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6398 = torch.aten.item %6396 : !torch.vtensor<[],f32> -> !torch.float | |
%6399 = torch.aten.item %6397 : !torch.vtensor<[],si8> -> !torch.int | |
%6400 = torch.aten._make_per_tensor_quantized_tensor %6395, %6398, %6399 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6401 = torch.aten.dequantize.self %6400 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1257 = torch.constant.int 1 | |
%int1_1258 = torch.constant.int 1 | |
%int1_1259 = torch.constant.int 1 | |
%int1_1260 = torch.constant.int 1 | |
%int1_1261 = torch.constant.int 1 | |
%int1_1262 = torch.constant.int 1 | |
%int0_1263 = torch.constant.int 0 | |
%6402 = torch.prim.ListConstruct %int1_1257, %int1_1258 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6403 = torch.prim.ListConstruct %int1_1259, %int1_1260 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6404 = torch.prim.ListConstruct %int1_1261, %int1_1262 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6405 = torch.prim.ListConstruct %int0_1263, %int0_1263 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1264 = torch.constant.bool false | |
%int1_1265 = torch.constant.int 1 | |
%6406 = torch.aten.convolution %6377, %6389, %6401, %6404, %6402, %6403, %false_1264, %6405, %int1_1265 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1266 = torch.constant.float 0.1015625 | |
%6407 = torch.aten.leaky_relu %6406, %float1.015630e-01_1266 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6408 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6409 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1267 = torch.constant.int 12 | |
%6410 = torch.aten.item %6408 : !torch.vtensor<[],f32> -> !torch.float | |
%6411 = torch.aten.item %6409 : !torch.vtensor<[],si8> -> !torch.int | |
%6412 = torch.aten.quantize_per_tensor %6407, %6410, %6411, %int12_1267 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6413 = torch.aten.int_repr %6412 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6414 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6415 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6416 = torch.aten.item %6414 : !torch.vtensor<[],f32> -> !torch.float | |
%6417 = torch.aten.item %6415 : !torch.vtensor<[],si8> -> !torch.int | |
%6418 = torch.aten._make_per_tensor_quantized_tensor %6413, %6416, %6417 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6419 = torch.aten.dequantize.self %6418 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6420 = torch.prim.ListConstruct %6265, %6307, %6363, %6419 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1268 = torch.constant.int 1 | |
%6421 = torch.aten.cat %6420, %int1_1268 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%6422 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1269 = torch.constant.int 12 | |
%6424 = torch.aten.item %6422 : !torch.vtensor<[],f32> -> !torch.float | |
%6425 = torch.aten.item %6423 : !torch.vtensor<[],si8> -> !torch.int | |
%6426 = torch.aten.quantize_per_tensor %6421, %6424, %6425, %int12_1269 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%6427 = torch.aten.int_repr %6426 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%6428 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6430 = torch.aten.item %6428 : !torch.vtensor<[],f32> -> !torch.float | |
%6431 = torch.aten.item %6429 : !torch.vtensor<[],si8> -> !torch.int | |
%6432 = torch.aten._make_per_tensor_quantized_tensor %6427, %6430, %6431 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%6433 = torch.aten.dequantize.self %6432 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%6434 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6435 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1270 = torch.constant.int 12 | |
%6436 = torch.aten.item %6434 : !torch.vtensor<[],f32> -> !torch.float | |
%6437 = torch.aten.item %6435 : !torch.vtensor<[],si8> -> !torch.int | |
%6438 = torch.aten.quantize_per_tensor %158, %6436, %6437, %int12_1270 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%6439 = torch.aten.int_repr %6438 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%6440 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6441 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6442 = torch.aten.item %6440 : !torch.vtensor<[],f32> -> !torch.float | |
%6443 = torch.aten.item %6441 : !torch.vtensor<[],si8> -> !torch.int | |
%6444 = torch.aten._make_per_tensor_quantized_tensor %6439, %6442, %6443 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%6445 = torch.aten.dequantize.self %6444 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%6446 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6447 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1271 = torch.constant.int 12 | |
%6448 = torch.aten.item %6446 : !torch.vtensor<[],f32> -> !torch.float | |
%6449 = torch.aten.item %6447 : !torch.vtensor<[],si8> -> !torch.int | |
%6450 = torch.aten.quantize_per_tensor %159, %6448, %6449, %int12_1271 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6451 = torch.aten.int_repr %6450 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6452 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6453 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6454 = torch.aten.item %6452 : !torch.vtensor<[],f32> -> !torch.float | |
%6455 = torch.aten.item %6453 : !torch.vtensor<[],si8> -> !torch.int | |
%6456 = torch.aten._make_per_tensor_quantized_tensor %6451, %6454, %6455 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6457 = torch.aten.dequantize.self %6456 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1272 = torch.constant.int 1 | |
%int1_1273 = torch.constant.int 1 | |
%int1_1274 = torch.constant.int 1 | |
%int1_1275 = torch.constant.int 1 | |
%int1_1276 = torch.constant.int 1 | |
%int1_1277 = torch.constant.int 1 | |
%int0_1278 = torch.constant.int 0 | |
%6458 = torch.prim.ListConstruct %int1_1272, %int1_1273 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6459 = torch.prim.ListConstruct %int1_1274, %int1_1275 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6460 = torch.prim.ListConstruct %int1_1276, %int1_1277 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6461 = torch.prim.ListConstruct %int0_1278, %int0_1278 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1279 = torch.constant.bool false | |
%int1_1280 = torch.constant.int 1 | |
%6462 = torch.aten.convolution %6433, %6445, %6457, %6460, %6458, %6459, %false_1279, %6461, %int1_1280 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1281 = torch.constant.float 0.1015625 | |
%6463 = torch.aten.leaky_relu %6462, %float1.015630e-01_1281 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6464 = torch.prim.ListConstruct %6265, %6307, %6363, %6419, %6463 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1282 = torch.constant.int 1 | |
%6465 = torch.aten.cat %6464, %int1_1282 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%6466 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6467 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1283 = torch.constant.int 12 | |
%6468 = torch.aten.item %6466 : !torch.vtensor<[],f32> -> !torch.float | |
%6469 = torch.aten.item %6467 : !torch.vtensor<[],si8> -> !torch.int | |
%6470 = torch.aten.quantize_per_tensor %6465, %6468, %6469, %int12_1283 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%6471 = torch.aten.int_repr %6470 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%6472 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6473 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6474 = torch.aten.item %6472 : !torch.vtensor<[],f32> -> !torch.float | |
%6475 = torch.aten.item %6473 : !torch.vtensor<[],si8> -> !torch.int | |
%6476 = torch.aten._make_per_tensor_quantized_tensor %6471, %6474, %6475 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%6477 = torch.aten.dequantize.self %6476 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%6478 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6479 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1284 = torch.constant.int 12 | |
%6480 = torch.aten.item %6478 : !torch.vtensor<[],f32> -> !torch.float | |
%6481 = torch.aten.item %6479 : !torch.vtensor<[],si8> -> !torch.int | |
%6482 = torch.aten.quantize_per_tensor %160, %6480, %6481, %int12_1284 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%6483 = torch.aten.int_repr %6482 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%6484 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6485 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6486 = torch.aten.item %6484 : !torch.vtensor<[],f32> -> !torch.float | |
%6487 = torch.aten.item %6485 : !torch.vtensor<[],si8> -> !torch.int | |
%6488 = torch.aten._make_per_tensor_quantized_tensor %6483, %6486, %6487 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%6489 = torch.aten.dequantize.self %6488 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%6490 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6491 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1285 = torch.constant.int 12 | |
%6492 = torch.aten.item %6490 : !torch.vtensor<[],f32> -> !torch.float | |
%6493 = torch.aten.item %6491 : !torch.vtensor<[],si8> -> !torch.int | |
%6494 = torch.aten.quantize_per_tensor %161, %6492, %6493, %int12_1285 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%6495 = torch.aten.int_repr %6494 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%6496 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6497 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6498 = torch.aten.item %6496 : !torch.vtensor<[],f32> -> !torch.float | |
%6499 = torch.aten.item %6497 : !torch.vtensor<[],si8> -> !torch.int | |
%6500 = torch.aten._make_per_tensor_quantized_tensor %6495, %6498, %6499 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%6501 = torch.aten.dequantize.self %6500 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_1286 = torch.constant.int 1 | |
%int1_1287 = torch.constant.int 1 | |
%int1_1288 = torch.constant.int 1 | |
%int1_1289 = torch.constant.int 1 | |
%int1_1290 = torch.constant.int 1 | |
%int1_1291 = torch.constant.int 1 | |
%int0_1292 = torch.constant.int 0 | |
%6502 = torch.prim.ListConstruct %int1_1286, %int1_1287 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6503 = torch.prim.ListConstruct %int1_1288, %int1_1289 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6504 = torch.prim.ListConstruct %int1_1290, %int1_1291 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6505 = torch.prim.ListConstruct %int0_1292, %int0_1292 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1293 = torch.constant.bool false | |
%int1_1294 = torch.constant.int 1 | |
%6506 = torch.aten.convolution %6477, %6489, %6501, %6504, %6502, %6503, %false_1293, %6505, %int1_1294 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%6507 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6508 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1295 = torch.constant.int 12 | |
%6509 = torch.aten.item %6507 : !torch.vtensor<[],f32> -> !torch.float | |
%6510 = torch.aten.item %6508 : !torch.vtensor<[],si8> -> !torch.int | |
%6511 = torch.aten.quantize_per_tensor %6506, %6509, %6510, %int12_1295 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6512 = torch.aten.int_repr %6511 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6513 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6514 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6515 = torch.aten.item %6513 : !torch.vtensor<[],f32> -> !torch.float | |
%6516 = torch.aten.item %6514 : !torch.vtensor<[],si8> -> !torch.int | |
%6517 = torch.aten._make_per_tensor_quantized_tensor %6512, %6515, %6516 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6518 = torch.aten.dequantize.self %6517 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%6519 = torch.aten.mul.Tensor %6518, %974 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%6520 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6521 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1296 = torch.constant.int 12 | |
%6522 = torch.aten.item %6520 : !torch.vtensor<[],f32> -> !torch.float | |
%6523 = torch.aten.item %6521 : !torch.vtensor<[],si8> -> !torch.int | |
%6524 = torch.aten.quantize_per_tensor %6519, %6522, %6523, %int12_1296 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6525 = torch.aten.int_repr %6524 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6526 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6527 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6528 = torch.aten.item %6526 : !torch.vtensor<[],f32> -> !torch.float | |
%6529 = torch.aten.item %6527 : !torch.vtensor<[],si8> -> !torch.int | |
%6530 = torch.aten._make_per_tensor_quantized_tensor %6525, %6528, %6529 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6531 = torch.aten.dequantize.self %6530 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1297 = torch.constant.int 1 | |
%6532 = torch.aten.add.Tensor %6531, %6265, %int1_1297 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%6533 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6534 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1298 = torch.constant.int 12 | |
%6535 = torch.aten.item %6533 : !torch.vtensor<[],f32> -> !torch.float | |
%6536 = torch.aten.item %6534 : !torch.vtensor<[],si8> -> !torch.int | |
%6537 = torch.aten.quantize_per_tensor %6532, %6535, %6536, %int12_1298 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6538 = torch.aten.int_repr %6537 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6539 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6540 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6541 = torch.aten.item %6539 : !torch.vtensor<[],f32> -> !torch.float | |
%6542 = torch.aten.item %6540 : !torch.vtensor<[],si8> -> !torch.int | |
%6543 = torch.aten._make_per_tensor_quantized_tensor %6538, %6541, %6542 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6544 = torch.aten.dequantize.self %6543 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%6545 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6546 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1299 = torch.constant.int 12 | |
%6547 = torch.aten.item %6545 : !torch.vtensor<[],f32> -> !torch.float | |
%6548 = torch.aten.item %6546 : !torch.vtensor<[],si8> -> !torch.int | |
%6549 = torch.aten.quantize_per_tensor %162, %6547, %6548, %int12_1299 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%6550 = torch.aten.int_repr %6549 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%6551 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6552 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6553 = torch.aten.item %6551 : !torch.vtensor<[],f32> -> !torch.float | |
%6554 = torch.aten.item %6552 : !torch.vtensor<[],si8> -> !torch.int | |
%6555 = torch.aten._make_per_tensor_quantized_tensor %6550, %6553, %6554 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%6556 = torch.aten.dequantize.self %6555 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%6557 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6558 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1300 = torch.constant.int 12 | |
%6559 = torch.aten.item %6557 : !torch.vtensor<[],f32> -> !torch.float | |
%6560 = torch.aten.item %6558 : !torch.vtensor<[],si8> -> !torch.int | |
%6561 = torch.aten.quantize_per_tensor %163, %6559, %6560, %int12_1300 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6562 = torch.aten.int_repr %6561 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6563 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6564 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6565 = torch.aten.item %6563 : !torch.vtensor<[],f32> -> !torch.float | |
%6566 = torch.aten.item %6564 : !torch.vtensor<[],si8> -> !torch.int | |
%6567 = torch.aten._make_per_tensor_quantized_tensor %6562, %6565, %6566 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6568 = torch.aten.dequantize.self %6567 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1301 = torch.constant.int 1 | |
%int1_1302 = torch.constant.int 1 | |
%int1_1303 = torch.constant.int 1 | |
%int1_1304 = torch.constant.int 1 | |
%int1_1305 = torch.constant.int 1 | |
%int1_1306 = torch.constant.int 1 | |
%int0_1307 = torch.constant.int 0 | |
%6569 = torch.prim.ListConstruct %int1_1301, %int1_1302 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6570 = torch.prim.ListConstruct %int1_1303, %int1_1304 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6571 = torch.prim.ListConstruct %int1_1305, %int1_1306 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6572 = torch.prim.ListConstruct %int0_1307, %int0_1307 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1308 = torch.constant.bool false | |
%int1_1309 = torch.constant.int 1 | |
%6573 = torch.aten.convolution %6544, %6556, %6568, %6571, %6569, %6570, %false_1308, %6572, %int1_1309 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1310 = torch.constant.float 0.1015625 | |
%6574 = torch.aten.leaky_relu %6573, %float1.015630e-01_1310 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6575 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6576 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1311 = torch.constant.int 12 | |
%6577 = torch.aten.item %6575 : !torch.vtensor<[],f32> -> !torch.float | |
%6578 = torch.aten.item %6576 : !torch.vtensor<[],si8> -> !torch.int | |
%6579 = torch.aten.quantize_per_tensor %6574, %6577, %6578, %int12_1311 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6580 = torch.aten.int_repr %6579 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6581 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6582 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6583 = torch.aten.item %6581 : !torch.vtensor<[],f32> -> !torch.float | |
%6584 = torch.aten.item %6582 : !torch.vtensor<[],si8> -> !torch.int | |
%6585 = torch.aten._make_per_tensor_quantized_tensor %6580, %6583, %6584 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6586 = torch.aten.dequantize.self %6585 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6587 = torch.prim.ListConstruct %6544, %6586 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1312 = torch.constant.int 1 | |
%6588 = torch.aten.cat %6587, %int1_1312 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%6589 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6590 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1313 = torch.constant.int 12 | |
%6591 = torch.aten.item %6589 : !torch.vtensor<[],f32> -> !torch.float | |
%6592 = torch.aten.item %6590 : !torch.vtensor<[],si8> -> !torch.int | |
%6593 = torch.aten.quantize_per_tensor %6588, %6591, %6592, %int12_1313 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%6594 = torch.aten.int_repr %6593 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%6595 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6596 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6597 = torch.aten.item %6595 : !torch.vtensor<[],f32> -> !torch.float | |
%6598 = torch.aten.item %6596 : !torch.vtensor<[],si8> -> !torch.int | |
%6599 = torch.aten._make_per_tensor_quantized_tensor %6594, %6597, %6598 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%6600 = torch.aten.dequantize.self %6599 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%6601 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6602 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1314 = torch.constant.int 12 | |
%6603 = torch.aten.item %6601 : !torch.vtensor<[],f32> -> !torch.float | |
%6604 = torch.aten.item %6602 : !torch.vtensor<[],si8> -> !torch.int | |
%6605 = torch.aten.quantize_per_tensor %164, %6603, %6604, %int12_1314 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%6606 = torch.aten.int_repr %6605 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%6607 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6608 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6609 = torch.aten.item %6607 : !torch.vtensor<[],f32> -> !torch.float | |
%6610 = torch.aten.item %6608 : !torch.vtensor<[],si8> -> !torch.int | |
%6611 = torch.aten._make_per_tensor_quantized_tensor %6606, %6609, %6610 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%6612 = torch.aten.dequantize.self %6611 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%6613 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6614 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1315 = torch.constant.int 12 | |
%6615 = torch.aten.item %6613 : !torch.vtensor<[],f32> -> !torch.float | |
%6616 = torch.aten.item %6614 : !torch.vtensor<[],si8> -> !torch.int | |
%6617 = torch.aten.quantize_per_tensor %165, %6615, %6616, %int12_1315 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6618 = torch.aten.int_repr %6617 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6619 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6620 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6621 = torch.aten.item %6619 : !torch.vtensor<[],f32> -> !torch.float | |
%6622 = torch.aten.item %6620 : !torch.vtensor<[],si8> -> !torch.int | |
%6623 = torch.aten._make_per_tensor_quantized_tensor %6618, %6621, %6622 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6624 = torch.aten.dequantize.self %6623 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1316 = torch.constant.int 1 | |
%int1_1317 = torch.constant.int 1 | |
%int1_1318 = torch.constant.int 1 | |
%int1_1319 = torch.constant.int 1 | |
%int1_1320 = torch.constant.int 1 | |
%int1_1321 = torch.constant.int 1 | |
%int0_1322 = torch.constant.int 0 | |
%6625 = torch.prim.ListConstruct %int1_1316, %int1_1317 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6626 = torch.prim.ListConstruct %int1_1318, %int1_1319 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6627 = torch.prim.ListConstruct %int1_1320, %int1_1321 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6628 = torch.prim.ListConstruct %int0_1322, %int0_1322 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1323 = torch.constant.bool false | |
%int1_1324 = torch.constant.int 1 | |
%6629 = torch.aten.convolution %6600, %6612, %6624, %6627, %6625, %6626, %false_1323, %6628, %int1_1324 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1325 = torch.constant.float 0.1015625 | |
%6630 = torch.aten.leaky_relu %6629, %float1.015630e-01_1325 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6631 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6632 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1326 = torch.constant.int 12 | |
%6633 = torch.aten.item %6631 : !torch.vtensor<[],f32> -> !torch.float | |
%6634 = torch.aten.item %6632 : !torch.vtensor<[],si8> -> !torch.int | |
%6635 = torch.aten.quantize_per_tensor %6630, %6633, %6634, %int12_1326 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6636 = torch.aten.int_repr %6635 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6637 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6638 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6639 = torch.aten.item %6637 : !torch.vtensor<[],f32> -> !torch.float | |
%6640 = torch.aten.item %6638 : !torch.vtensor<[],si8> -> !torch.int | |
%6641 = torch.aten._make_per_tensor_quantized_tensor %6636, %6639, %6640 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6642 = torch.aten.dequantize.self %6641 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6643 = torch.prim.ListConstruct %6544, %6586, %6642 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1327 = torch.constant.int 1 | |
%6644 = torch.aten.cat %6643, %int1_1327 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%6645 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6646 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1328 = torch.constant.int 12 | |
%6647 = torch.aten.item %6645 : !torch.vtensor<[],f32> -> !torch.float | |
%6648 = torch.aten.item %6646 : !torch.vtensor<[],si8> -> !torch.int | |
%6649 = torch.aten.quantize_per_tensor %6644, %6647, %6648, %int12_1328 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%6650 = torch.aten.int_repr %6649 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%6651 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6652 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6653 = torch.aten.item %6651 : !torch.vtensor<[],f32> -> !torch.float | |
%6654 = torch.aten.item %6652 : !torch.vtensor<[],si8> -> !torch.int | |
%6655 = torch.aten._make_per_tensor_quantized_tensor %6650, %6653, %6654 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%6656 = torch.aten.dequantize.self %6655 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%6657 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6658 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1329 = torch.constant.int 12 | |
%6659 = torch.aten.item %6657 : !torch.vtensor<[],f32> -> !torch.float | |
%6660 = torch.aten.item %6658 : !torch.vtensor<[],si8> -> !torch.int | |
%6661 = torch.aten.quantize_per_tensor %166, %6659, %6660, %int12_1329 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%6662 = torch.aten.int_repr %6661 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%6663 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6664 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6665 = torch.aten.item %6663 : !torch.vtensor<[],f32> -> !torch.float | |
%6666 = torch.aten.item %6664 : !torch.vtensor<[],si8> -> !torch.int | |
%6667 = torch.aten._make_per_tensor_quantized_tensor %6662, %6665, %6666 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%6668 = torch.aten.dequantize.self %6667 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%6669 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6670 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1330 = torch.constant.int 12 | |
%6671 = torch.aten.item %6669 : !torch.vtensor<[],f32> -> !torch.float | |
%6672 = torch.aten.item %6670 : !torch.vtensor<[],si8> -> !torch.int | |
%6673 = torch.aten.quantize_per_tensor %167, %6671, %6672, %int12_1330 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6674 = torch.aten.int_repr %6673 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6675 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6676 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6677 = torch.aten.item %6675 : !torch.vtensor<[],f32> -> !torch.float | |
%6678 = torch.aten.item %6676 : !torch.vtensor<[],si8> -> !torch.int | |
%6679 = torch.aten._make_per_tensor_quantized_tensor %6674, %6677, %6678 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6680 = torch.aten.dequantize.self %6679 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1331 = torch.constant.int 1 | |
%int1_1332 = torch.constant.int 1 | |
%int1_1333 = torch.constant.int 1 | |
%int1_1334 = torch.constant.int 1 | |
%int1_1335 = torch.constant.int 1 | |
%int1_1336 = torch.constant.int 1 | |
%int0_1337 = torch.constant.int 0 | |
%6681 = torch.prim.ListConstruct %int1_1331, %int1_1332 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6682 = torch.prim.ListConstruct %int1_1333, %int1_1334 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6683 = torch.prim.ListConstruct %int1_1335, %int1_1336 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6684 = torch.prim.ListConstruct %int0_1337, %int0_1337 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1338 = torch.constant.bool false | |
%int1_1339 = torch.constant.int 1 | |
%6685 = torch.aten.convolution %6656, %6668, %6680, %6683, %6681, %6682, %false_1338, %6684, %int1_1339 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1340 = torch.constant.float 0.1015625 | |
%6686 = torch.aten.leaky_relu %6685, %float1.015630e-01_1340 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6687 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6688 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1341 = torch.constant.int 12 | |
%6689 = torch.aten.item %6687 : !torch.vtensor<[],f32> -> !torch.float | |
%6690 = torch.aten.item %6688 : !torch.vtensor<[],si8> -> !torch.int | |
%6691 = torch.aten.quantize_per_tensor %6686, %6689, %6690, %int12_1341 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6692 = torch.aten.int_repr %6691 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6693 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6694 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6695 = torch.aten.item %6693 : !torch.vtensor<[],f32> -> !torch.float | |
%6696 = torch.aten.item %6694 : !torch.vtensor<[],si8> -> !torch.int | |
%6697 = torch.aten._make_per_tensor_quantized_tensor %6692, %6695, %6696 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6698 = torch.aten.dequantize.self %6697 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6699 = torch.prim.ListConstruct %6544, %6586, %6642, %6698 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1342 = torch.constant.int 1 | |
%6700 = torch.aten.cat %6699, %int1_1342 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%6701 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6702 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1343 = torch.constant.int 12 | |
%6703 = torch.aten.item %6701 : !torch.vtensor<[],f32> -> !torch.float | |
%6704 = torch.aten.item %6702 : !torch.vtensor<[],si8> -> !torch.int | |
%6705 = torch.aten.quantize_per_tensor %6700, %6703, %6704, %int12_1343 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%6706 = torch.aten.int_repr %6705 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%6707 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6708 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6709 = torch.aten.item %6707 : !torch.vtensor<[],f32> -> !torch.float | |
%6710 = torch.aten.item %6708 : !torch.vtensor<[],si8> -> !torch.int | |
%6711 = torch.aten._make_per_tensor_quantized_tensor %6706, %6709, %6710 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%6712 = torch.aten.dequantize.self %6711 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%6713 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6714 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1344 = torch.constant.int 12 | |
%6715 = torch.aten.item %6713 : !torch.vtensor<[],f32> -> !torch.float | |
%6716 = torch.aten.item %6714 : !torch.vtensor<[],si8> -> !torch.int | |
%6717 = torch.aten.quantize_per_tensor %168, %6715, %6716, %int12_1344 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%6718 = torch.aten.int_repr %6717 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%6719 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6720 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6721 = torch.aten.item %6719 : !torch.vtensor<[],f32> -> !torch.float | |
%6722 = torch.aten.item %6720 : !torch.vtensor<[],si8> -> !torch.int | |
%6723 = torch.aten._make_per_tensor_quantized_tensor %6718, %6721, %6722 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%6724 = torch.aten.dequantize.self %6723 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%6725 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6726 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1345 = torch.constant.int 12 | |
%6727 = torch.aten.item %6725 : !torch.vtensor<[],f32> -> !torch.float | |
%6728 = torch.aten.item %6726 : !torch.vtensor<[],si8> -> !torch.int | |
%6729 = torch.aten.quantize_per_tensor %169, %6727, %6728, %int12_1345 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6730 = torch.aten.int_repr %6729 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6731 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6732 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6733 = torch.aten.item %6731 : !torch.vtensor<[],f32> -> !torch.float | |
%6734 = torch.aten.item %6732 : !torch.vtensor<[],si8> -> !torch.int | |
%6735 = torch.aten._make_per_tensor_quantized_tensor %6730, %6733, %6734 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6736 = torch.aten.dequantize.self %6735 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1346 = torch.constant.int 1 | |
%int1_1347 = torch.constant.int 1 | |
%int1_1348 = torch.constant.int 1 | |
%int1_1349 = torch.constant.int 1 | |
%int1_1350 = torch.constant.int 1 | |
%int1_1351 = torch.constant.int 1 | |
%int0_1352 = torch.constant.int 0 | |
%6737 = torch.prim.ListConstruct %int1_1346, %int1_1347 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6738 = torch.prim.ListConstruct %int1_1348, %int1_1349 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6739 = torch.prim.ListConstruct %int1_1350, %int1_1351 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6740 = torch.prim.ListConstruct %int0_1352, %int0_1352 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1353 = torch.constant.bool false | |
%int1_1354 = torch.constant.int 1 | |
%6741 = torch.aten.convolution %6712, %6724, %6736, %6739, %6737, %6738, %false_1353, %6740, %int1_1354 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1355 = torch.constant.float 0.1015625 | |
%6742 = torch.aten.leaky_relu %6741, %float1.015630e-01_1355 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6743 = torch.prim.ListConstruct %6544, %6586, %6642, %6698, %6742 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1356 = torch.constant.int 1 | |
%6744 = torch.aten.cat %6743, %int1_1356 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%6745 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6746 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1357 = torch.constant.int 12 | |
%6747 = torch.aten.item %6745 : !torch.vtensor<[],f32> -> !torch.float | |
%6748 = torch.aten.item %6746 : !torch.vtensor<[],si8> -> !torch.int | |
%6749 = torch.aten.quantize_per_tensor %6744, %6747, %6748, %int12_1357 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%6750 = torch.aten.int_repr %6749 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%6751 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6752 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6753 = torch.aten.item %6751 : !torch.vtensor<[],f32> -> !torch.float | |
%6754 = torch.aten.item %6752 : !torch.vtensor<[],si8> -> !torch.int | |
%6755 = torch.aten._make_per_tensor_quantized_tensor %6750, %6753, %6754 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%6756 = torch.aten.dequantize.self %6755 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%6757 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6758 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1358 = torch.constant.int 12 | |
%6759 = torch.aten.item %6757 : !torch.vtensor<[],f32> -> !torch.float | |
%6760 = torch.aten.item %6758 : !torch.vtensor<[],si8> -> !torch.int | |
%6761 = torch.aten.quantize_per_tensor %170, %6759, %6760, %int12_1358 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%6762 = torch.aten.int_repr %6761 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%6763 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6764 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6765 = torch.aten.item %6763 : !torch.vtensor<[],f32> -> !torch.float | |
%6766 = torch.aten.item %6764 : !torch.vtensor<[],si8> -> !torch.int | |
%6767 = torch.aten._make_per_tensor_quantized_tensor %6762, %6765, %6766 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%6768 = torch.aten.dequantize.self %6767 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%6769 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6770 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1359 = torch.constant.int 12 | |
%6771 = torch.aten.item %6769 : !torch.vtensor<[],f32> -> !torch.float | |
%6772 = torch.aten.item %6770 : !torch.vtensor<[],si8> -> !torch.int | |
%6773 = torch.aten.quantize_per_tensor %171, %6771, %6772, %int12_1359 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%6774 = torch.aten.int_repr %6773 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%6775 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6776 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6777 = torch.aten.item %6775 : !torch.vtensor<[],f32> -> !torch.float | |
%6778 = torch.aten.item %6776 : !torch.vtensor<[],si8> -> !torch.int | |
%6779 = torch.aten._make_per_tensor_quantized_tensor %6774, %6777, %6778 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%6780 = torch.aten.dequantize.self %6779 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_1360 = torch.constant.int 1 | |
%int1_1361 = torch.constant.int 1 | |
%int1_1362 = torch.constant.int 1 | |
%int1_1363 = torch.constant.int 1 | |
%int1_1364 = torch.constant.int 1 | |
%int1_1365 = torch.constant.int 1 | |
%int0_1366 = torch.constant.int 0 | |
%6781 = torch.prim.ListConstruct %int1_1360, %int1_1361 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6782 = torch.prim.ListConstruct %int1_1362, %int1_1363 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6783 = torch.prim.ListConstruct %int1_1364, %int1_1365 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6784 = torch.prim.ListConstruct %int0_1366, %int0_1366 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1367 = torch.constant.bool false | |
%int1_1368 = torch.constant.int 1 | |
%6785 = torch.aten.convolution %6756, %6768, %6780, %6783, %6781, %6782, %false_1367, %6784, %int1_1368 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%6786 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6787 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1369 = torch.constant.int 12 | |
%6788 = torch.aten.item %6786 : !torch.vtensor<[],f32> -> !torch.float | |
%6789 = torch.aten.item %6787 : !torch.vtensor<[],si8> -> !torch.int | |
%6790 = torch.aten.quantize_per_tensor %6785, %6788, %6789, %int12_1369 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6791 = torch.aten.int_repr %6790 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6792 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6793 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6794 = torch.aten.item %6792 : !torch.vtensor<[],f32> -> !torch.float | |
%6795 = torch.aten.item %6793 : !torch.vtensor<[],si8> -> !torch.int | |
%6796 = torch.aten._make_per_tensor_quantized_tensor %6791, %6794, %6795 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6797 = torch.aten.dequantize.self %6796 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%6798 = torch.aten.mul.Tensor %6797, %987 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%6799 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6800 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1370 = torch.constant.int 12 | |
%6801 = torch.aten.item %6799 : !torch.vtensor<[],f32> -> !torch.float | |
%6802 = torch.aten.item %6800 : !torch.vtensor<[],si8> -> !torch.int | |
%6803 = torch.aten.quantize_per_tensor %6798, %6801, %6802, %int12_1370 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6804 = torch.aten.int_repr %6803 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6805 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6806 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6807 = torch.aten.item %6805 : !torch.vtensor<[],f32> -> !torch.float | |
%6808 = torch.aten.item %6806 : !torch.vtensor<[],si8> -> !torch.int | |
%6809 = torch.aten._make_per_tensor_quantized_tensor %6804, %6807, %6808 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6810 = torch.aten.dequantize.self %6809 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1371 = torch.constant.int 1 | |
%6811 = torch.aten.add.Tensor %6810, %6544, %int1_1371 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%6812 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6813 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1372 = torch.constant.int 12 | |
%6814 = torch.aten.item %6812 : !torch.vtensor<[],f32> -> !torch.float | |
%6815 = torch.aten.item %6813 : !torch.vtensor<[],si8> -> !torch.int | |
%6816 = torch.aten.quantize_per_tensor %6811, %6814, %6815, %int12_1372 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6817 = torch.aten.int_repr %6816 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%6818 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6819 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6820 = torch.aten.item %6818 : !torch.vtensor<[],f32> -> !torch.float | |
%6821 = torch.aten.item %6819 : !torch.vtensor<[],si8> -> !torch.int | |
%6822 = torch.aten._make_per_tensor_quantized_tensor %6817, %6820, %6821 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%6823 = torch.aten.dequantize.self %6822 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%6824 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6825 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1373 = torch.constant.int 12 | |
%6826 = torch.aten.item %6824 : !torch.vtensor<[],f32> -> !torch.float | |
%6827 = torch.aten.item %6825 : !torch.vtensor<[],si8> -> !torch.int | |
%6828 = torch.aten.quantize_per_tensor %172, %6826, %6827, %int12_1373 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%6829 = torch.aten.int_repr %6828 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%6830 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6831 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6832 = torch.aten.item %6830 : !torch.vtensor<[],f32> -> !torch.float | |
%6833 = torch.aten.item %6831 : !torch.vtensor<[],si8> -> !torch.int | |
%6834 = torch.aten._make_per_tensor_quantized_tensor %6829, %6832, %6833 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%6835 = torch.aten.dequantize.self %6834 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%6836 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6837 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1374 = torch.constant.int 12 | |
%6838 = torch.aten.item %6836 : !torch.vtensor<[],f32> -> !torch.float | |
%6839 = torch.aten.item %6837 : !torch.vtensor<[],si8> -> !torch.int | |
%6840 = torch.aten.quantize_per_tensor %173, %6838, %6839, %int12_1374 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6841 = torch.aten.int_repr %6840 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6842 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6843 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6844 = torch.aten.item %6842 : !torch.vtensor<[],f32> -> !torch.float | |
%6845 = torch.aten.item %6843 : !torch.vtensor<[],si8> -> !torch.int | |
%6846 = torch.aten._make_per_tensor_quantized_tensor %6841, %6844, %6845 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6847 = torch.aten.dequantize.self %6846 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1375 = torch.constant.int 1 | |
%int1_1376 = torch.constant.int 1 | |
%int1_1377 = torch.constant.int 1 | |
%int1_1378 = torch.constant.int 1 | |
%int1_1379 = torch.constant.int 1 | |
%int1_1380 = torch.constant.int 1 | |
%int0_1381 = torch.constant.int 0 | |
%6848 = torch.prim.ListConstruct %int1_1375, %int1_1376 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6849 = torch.prim.ListConstruct %int1_1377, %int1_1378 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6850 = torch.prim.ListConstruct %int1_1379, %int1_1380 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6851 = torch.prim.ListConstruct %int0_1381, %int0_1381 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1382 = torch.constant.bool false | |
%int1_1383 = torch.constant.int 1 | |
%6852 = torch.aten.convolution %6823, %6835, %6847, %6850, %6848, %6849, %false_1382, %6851, %int1_1383 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1384 = torch.constant.float 0.1015625 | |
%6853 = torch.aten.leaky_relu %6852, %float1.015630e-01_1384 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6854 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6855 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1385 = torch.constant.int 12 | |
%6856 = torch.aten.item %6854 : !torch.vtensor<[],f32> -> !torch.float | |
%6857 = torch.aten.item %6855 : !torch.vtensor<[],si8> -> !torch.int | |
%6858 = torch.aten.quantize_per_tensor %6853, %6856, %6857, %int12_1385 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6859 = torch.aten.int_repr %6858 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6860 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6861 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6862 = torch.aten.item %6860 : !torch.vtensor<[],f32> -> !torch.float | |
%6863 = torch.aten.item %6861 : !torch.vtensor<[],si8> -> !torch.int | |
%6864 = torch.aten._make_per_tensor_quantized_tensor %6859, %6862, %6863 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6865 = torch.aten.dequantize.self %6864 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6866 = torch.prim.ListConstruct %6823, %6865 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1386 = torch.constant.int 1 | |
%6867 = torch.aten.cat %6866, %int1_1386 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%6868 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6869 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1387 = torch.constant.int 12 | |
%6870 = torch.aten.item %6868 : !torch.vtensor<[],f32> -> !torch.float | |
%6871 = torch.aten.item %6869 : !torch.vtensor<[],si8> -> !torch.int | |
%6872 = torch.aten.quantize_per_tensor %6867, %6870, %6871, %int12_1387 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%6873 = torch.aten.int_repr %6872 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%6874 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6875 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6876 = torch.aten.item %6874 : !torch.vtensor<[],f32> -> !torch.float | |
%6877 = torch.aten.item %6875 : !torch.vtensor<[],si8> -> !torch.int | |
%6878 = torch.aten._make_per_tensor_quantized_tensor %6873, %6876, %6877 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%6879 = torch.aten.dequantize.self %6878 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%6880 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6881 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1388 = torch.constant.int 12 | |
%6882 = torch.aten.item %6880 : !torch.vtensor<[],f32> -> !torch.float | |
%6883 = torch.aten.item %6881 : !torch.vtensor<[],si8> -> !torch.int | |
%6884 = torch.aten.quantize_per_tensor %174, %6882, %6883, %int12_1388 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%6885 = torch.aten.int_repr %6884 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%6886 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6887 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6888 = torch.aten.item %6886 : !torch.vtensor<[],f32> -> !torch.float | |
%6889 = torch.aten.item %6887 : !torch.vtensor<[],si8> -> !torch.int | |
%6890 = torch.aten._make_per_tensor_quantized_tensor %6885, %6888, %6889 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%6891 = torch.aten.dequantize.self %6890 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%6892 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6893 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1389 = torch.constant.int 12 | |
%6894 = torch.aten.item %6892 : !torch.vtensor<[],f32> -> !torch.float | |
%6895 = torch.aten.item %6893 : !torch.vtensor<[],si8> -> !torch.int | |
%6896 = torch.aten.quantize_per_tensor %175, %6894, %6895, %int12_1389 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6897 = torch.aten.int_repr %6896 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6898 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6899 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6900 = torch.aten.item %6898 : !torch.vtensor<[],f32> -> !torch.float | |
%6901 = torch.aten.item %6899 : !torch.vtensor<[],si8> -> !torch.int | |
%6902 = torch.aten._make_per_tensor_quantized_tensor %6897, %6900, %6901 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6903 = torch.aten.dequantize.self %6902 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1390 = torch.constant.int 1 | |
%int1_1391 = torch.constant.int 1 | |
%int1_1392 = torch.constant.int 1 | |
%int1_1393 = torch.constant.int 1 | |
%int1_1394 = torch.constant.int 1 | |
%int1_1395 = torch.constant.int 1 | |
%int0_1396 = torch.constant.int 0 | |
%6904 = torch.prim.ListConstruct %int1_1390, %int1_1391 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6905 = torch.prim.ListConstruct %int1_1392, %int1_1393 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6906 = torch.prim.ListConstruct %int1_1394, %int1_1395 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6907 = torch.prim.ListConstruct %int0_1396, %int0_1396 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1397 = torch.constant.bool false | |
%int1_1398 = torch.constant.int 1 | |
%6908 = torch.aten.convolution %6879, %6891, %6903, %6906, %6904, %6905, %false_1397, %6907, %int1_1398 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1399 = torch.constant.float 0.1015625 | |
%6909 = torch.aten.leaky_relu %6908, %float1.015630e-01_1399 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6910 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6911 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1400 = torch.constant.int 12 | |
%6912 = torch.aten.item %6910 : !torch.vtensor<[],f32> -> !torch.float | |
%6913 = torch.aten.item %6911 : !torch.vtensor<[],si8> -> !torch.int | |
%6914 = torch.aten.quantize_per_tensor %6909, %6912, %6913, %int12_1400 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6915 = torch.aten.int_repr %6914 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6916 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6917 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6918 = torch.aten.item %6916 : !torch.vtensor<[],f32> -> !torch.float | |
%6919 = torch.aten.item %6917 : !torch.vtensor<[],si8> -> !torch.int | |
%6920 = torch.aten._make_per_tensor_quantized_tensor %6915, %6918, %6919 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6921 = torch.aten.dequantize.self %6920 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6922 = torch.prim.ListConstruct %6823, %6865, %6921 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1401 = torch.constant.int 1 | |
%6923 = torch.aten.cat %6922, %int1_1401 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%6924 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6925 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1402 = torch.constant.int 12 | |
%6926 = torch.aten.item %6924 : !torch.vtensor<[],f32> -> !torch.float | |
%6927 = torch.aten.item %6925 : !torch.vtensor<[],si8> -> !torch.int | |
%6928 = torch.aten.quantize_per_tensor %6923, %6926, %6927, %int12_1402 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%6929 = torch.aten.int_repr %6928 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%6930 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6931 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6932 = torch.aten.item %6930 : !torch.vtensor<[],f32> -> !torch.float | |
%6933 = torch.aten.item %6931 : !torch.vtensor<[],si8> -> !torch.int | |
%6934 = torch.aten._make_per_tensor_quantized_tensor %6929, %6932, %6933 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%6935 = torch.aten.dequantize.self %6934 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%6936 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6937 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1403 = torch.constant.int 12 | |
%6938 = torch.aten.item %6936 : !torch.vtensor<[],f32> -> !torch.float | |
%6939 = torch.aten.item %6937 : !torch.vtensor<[],si8> -> !torch.int | |
%6940 = torch.aten.quantize_per_tensor %176, %6938, %6939, %int12_1403 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%6941 = torch.aten.int_repr %6940 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%6942 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6943 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6944 = torch.aten.item %6942 : !torch.vtensor<[],f32> -> !torch.float | |
%6945 = torch.aten.item %6943 : !torch.vtensor<[],si8> -> !torch.int | |
%6946 = torch.aten._make_per_tensor_quantized_tensor %6941, %6944, %6945 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%6947 = torch.aten.dequantize.self %6946 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%6948 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6949 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1404 = torch.constant.int 12 | |
%6950 = torch.aten.item %6948 : !torch.vtensor<[],f32> -> !torch.float | |
%6951 = torch.aten.item %6949 : !torch.vtensor<[],si8> -> !torch.int | |
%6952 = torch.aten.quantize_per_tensor %177, %6950, %6951, %int12_1404 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6953 = torch.aten.int_repr %6952 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%6954 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6955 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6956 = torch.aten.item %6954 : !torch.vtensor<[],f32> -> !torch.float | |
%6957 = torch.aten.item %6955 : !torch.vtensor<[],si8> -> !torch.int | |
%6958 = torch.aten._make_per_tensor_quantized_tensor %6953, %6956, %6957 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%6959 = torch.aten.dequantize.self %6958 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1405 = torch.constant.int 1 | |
%int1_1406 = torch.constant.int 1 | |
%int1_1407 = torch.constant.int 1 | |
%int1_1408 = torch.constant.int 1 | |
%int1_1409 = torch.constant.int 1 | |
%int1_1410 = torch.constant.int 1 | |
%int0_1411 = torch.constant.int 0 | |
%6960 = torch.prim.ListConstruct %int1_1405, %int1_1406 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6961 = torch.prim.ListConstruct %int1_1407, %int1_1408 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6962 = torch.prim.ListConstruct %int1_1409, %int1_1410 : (!torch.int, !torch.int) -> !torch.list<int> | |
%6963 = torch.prim.ListConstruct %int0_1411, %int0_1411 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1412 = torch.constant.bool false | |
%int1_1413 = torch.constant.int 1 | |
%6964 = torch.aten.convolution %6935, %6947, %6959, %6962, %6960, %6961, %false_1412, %6963, %int1_1413 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1414 = torch.constant.float 0.1015625 | |
%6965 = torch.aten.leaky_relu %6964, %float1.015630e-01_1414 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%6966 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6967 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1415 = torch.constant.int 12 | |
%6968 = torch.aten.item %6966 : !torch.vtensor<[],f32> -> !torch.float | |
%6969 = torch.aten.item %6967 : !torch.vtensor<[],si8> -> !torch.int | |
%6970 = torch.aten.quantize_per_tensor %6965, %6968, %6969, %int12_1415 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6971 = torch.aten.int_repr %6970 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%6972 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6973 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6974 = torch.aten.item %6972 : !torch.vtensor<[],f32> -> !torch.float | |
%6975 = torch.aten.item %6973 : !torch.vtensor<[],si8> -> !torch.int | |
%6976 = torch.aten._make_per_tensor_quantized_tensor %6971, %6974, %6975 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%6977 = torch.aten.dequantize.self %6976 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%6978 = torch.prim.ListConstruct %6823, %6865, %6921, %6977 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1416 = torch.constant.int 1 | |
%6979 = torch.aten.cat %6978, %int1_1416 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%6980 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6981 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1417 = torch.constant.int 12 | |
%6982 = torch.aten.item %6980 : !torch.vtensor<[],f32> -> !torch.float | |
%6983 = torch.aten.item %6981 : !torch.vtensor<[],si8> -> !torch.int | |
%6984 = torch.aten.quantize_per_tensor %6979, %6982, %6983, %int12_1417 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%6985 = torch.aten.int_repr %6984 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%6986 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6987 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%6988 = torch.aten.item %6986 : !torch.vtensor<[],f32> -> !torch.float | |
%6989 = torch.aten.item %6987 : !torch.vtensor<[],si8> -> !torch.int | |
%6990 = torch.aten._make_per_tensor_quantized_tensor %6985, %6988, %6989 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%6991 = torch.aten.dequantize.self %6990 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%6992 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6993 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1418 = torch.constant.int 12 | |
%6994 = torch.aten.item %6992 : !torch.vtensor<[],f32> -> !torch.float | |
%6995 = torch.aten.item %6993 : !torch.vtensor<[],si8> -> !torch.int | |
%6996 = torch.aten.quantize_per_tensor %178, %6994, %6995, %int12_1418 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%6997 = torch.aten.int_repr %6996 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%6998 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%6999 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7000 = torch.aten.item %6998 : !torch.vtensor<[],f32> -> !torch.float | |
%7001 = torch.aten.item %6999 : !torch.vtensor<[],si8> -> !torch.int | |
%7002 = torch.aten._make_per_tensor_quantized_tensor %6997, %7000, %7001 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%7003 = torch.aten.dequantize.self %7002 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%7004 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7005 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1419 = torch.constant.int 12 | |
%7006 = torch.aten.item %7004 : !torch.vtensor<[],f32> -> !torch.float | |
%7007 = torch.aten.item %7005 : !torch.vtensor<[],si8> -> !torch.int | |
%7008 = torch.aten.quantize_per_tensor %179, %7006, %7007, %int12_1419 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7009 = torch.aten.int_repr %7008 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7010 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7011 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7012 = torch.aten.item %7010 : !torch.vtensor<[],f32> -> !torch.float | |
%7013 = torch.aten.item %7011 : !torch.vtensor<[],si8> -> !torch.int | |
%7014 = torch.aten._make_per_tensor_quantized_tensor %7009, %7012, %7013 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7015 = torch.aten.dequantize.self %7014 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1420 = torch.constant.int 1 | |
%int1_1421 = torch.constant.int 1 | |
%int1_1422 = torch.constant.int 1 | |
%int1_1423 = torch.constant.int 1 | |
%int1_1424 = torch.constant.int 1 | |
%int1_1425 = torch.constant.int 1 | |
%int0_1426 = torch.constant.int 0 | |
%7016 = torch.prim.ListConstruct %int1_1420, %int1_1421 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7017 = torch.prim.ListConstruct %int1_1422, %int1_1423 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7018 = torch.prim.ListConstruct %int1_1424, %int1_1425 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7019 = torch.prim.ListConstruct %int0_1426, %int0_1426 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1427 = torch.constant.bool false | |
%int1_1428 = torch.constant.int 1 | |
%7020 = torch.aten.convolution %6991, %7003, %7015, %7018, %7016, %7017, %false_1427, %7019, %int1_1428 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1429 = torch.constant.float 0.1015625 | |
%7021 = torch.aten.leaky_relu %7020, %float1.015630e-01_1429 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7022 = torch.prim.ListConstruct %6823, %6865, %6921, %6977, %7021 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1430 = torch.constant.int 1 | |
%7023 = torch.aten.cat %7022, %int1_1430 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%7024 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7025 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1431 = torch.constant.int 12 | |
%7026 = torch.aten.item %7024 : !torch.vtensor<[],f32> -> !torch.float | |
%7027 = torch.aten.item %7025 : !torch.vtensor<[],si8> -> !torch.int | |
%7028 = torch.aten.quantize_per_tensor %7023, %7026, %7027, %int12_1431 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%7029 = torch.aten.int_repr %7028 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%7030 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7031 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7032 = torch.aten.item %7030 : !torch.vtensor<[],f32> -> !torch.float | |
%7033 = torch.aten.item %7031 : !torch.vtensor<[],si8> -> !torch.int | |
%7034 = torch.aten._make_per_tensor_quantized_tensor %7029, %7032, %7033 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%7035 = torch.aten.dequantize.self %7034 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%7036 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7037 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1432 = torch.constant.int 12 | |
%7038 = torch.aten.item %7036 : !torch.vtensor<[],f32> -> !torch.float | |
%7039 = torch.aten.item %7037 : !torch.vtensor<[],si8> -> !torch.int | |
%7040 = torch.aten.quantize_per_tensor %180, %7038, %7039, %int12_1432 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%7041 = torch.aten.int_repr %7040 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%7042 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7043 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7044 = torch.aten.item %7042 : !torch.vtensor<[],f32> -> !torch.float | |
%7045 = torch.aten.item %7043 : !torch.vtensor<[],si8> -> !torch.int | |
%7046 = torch.aten._make_per_tensor_quantized_tensor %7041, %7044, %7045 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%7047 = torch.aten.dequantize.self %7046 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%7048 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7049 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1433 = torch.constant.int 12 | |
%7050 = torch.aten.item %7048 : !torch.vtensor<[],f32> -> !torch.float | |
%7051 = torch.aten.item %7049 : !torch.vtensor<[],si8> -> !torch.int | |
%7052 = torch.aten.quantize_per_tensor %181, %7050, %7051, %int12_1433 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%7053 = torch.aten.int_repr %7052 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%7054 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7055 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7056 = torch.aten.item %7054 : !torch.vtensor<[],f32> -> !torch.float | |
%7057 = torch.aten.item %7055 : !torch.vtensor<[],si8> -> !torch.int | |
%7058 = torch.aten._make_per_tensor_quantized_tensor %7053, %7056, %7057 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%7059 = torch.aten.dequantize.self %7058 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_1434 = torch.constant.int 1 | |
%int1_1435 = torch.constant.int 1 | |
%int1_1436 = torch.constant.int 1 | |
%int1_1437 = torch.constant.int 1 | |
%int1_1438 = torch.constant.int 1 | |
%int1_1439 = torch.constant.int 1 | |
%int0_1440 = torch.constant.int 0 | |
%7060 = torch.prim.ListConstruct %int1_1434, %int1_1435 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7061 = torch.prim.ListConstruct %int1_1436, %int1_1437 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7062 = torch.prim.ListConstruct %int1_1438, %int1_1439 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7063 = torch.prim.ListConstruct %int0_1440, %int0_1440 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1441 = torch.constant.bool false | |
%int1_1442 = torch.constant.int 1 | |
%7064 = torch.aten.convolution %7035, %7047, %7059, %7062, %7060, %7061, %false_1441, %7063, %int1_1442 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%7065 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7066 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1443 = torch.constant.int 12 | |
%7067 = torch.aten.item %7065 : !torch.vtensor<[],f32> -> !torch.float | |
%7068 = torch.aten.item %7066 : !torch.vtensor<[],si8> -> !torch.int | |
%7069 = torch.aten.quantize_per_tensor %7064, %7067, %7068, %int12_1443 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7070 = torch.aten.int_repr %7069 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7071 = torch.vtensor.literal(dense<2.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7072 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7073 = torch.aten.item %7071 : !torch.vtensor<[],f32> -> !torch.float | |
%7074 = torch.aten.item %7072 : !torch.vtensor<[],si8> -> !torch.int | |
%7075 = torch.aten._make_per_tensor_quantized_tensor %7070, %7073, %7074 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7076 = torch.aten.dequantize.self %7075 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%7077 = torch.aten.mul.Tensor %7076, %1000 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%7078 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7079 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1444 = torch.constant.int 12 | |
%7080 = torch.aten.item %7078 : !torch.vtensor<[],f32> -> !torch.float | |
%7081 = torch.aten.item %7079 : !torch.vtensor<[],si8> -> !torch.int | |
%7082 = torch.aten.quantize_per_tensor %7077, %7080, %7081, %int12_1444 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7083 = torch.aten.int_repr %7082 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7084 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7085 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7086 = torch.aten.item %7084 : !torch.vtensor<[],f32> -> !torch.float | |
%7087 = torch.aten.item %7085 : !torch.vtensor<[],si8> -> !torch.int | |
%7088 = torch.aten._make_per_tensor_quantized_tensor %7083, %7086, %7087 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7089 = torch.aten.dequantize.self %7088 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1445 = torch.constant.int 1 | |
%7090 = torch.aten.add.Tensor %7089, %6823, %int1_1445 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%7091 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7092 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1446 = torch.constant.int 12 | |
%7093 = torch.aten.item %7091 : !torch.vtensor<[],f32> -> !torch.float | |
%7094 = torch.aten.item %7092 : !torch.vtensor<[],si8> -> !torch.int | |
%7095 = torch.aten.quantize_per_tensor %7090, %7093, %7094, %int12_1446 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7096 = torch.aten.int_repr %7095 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7097 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7098 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7099 = torch.aten.item %7097 : !torch.vtensor<[],f32> -> !torch.float | |
%7100 = torch.aten.item %7098 : !torch.vtensor<[],si8> -> !torch.int | |
%7101 = torch.aten._make_per_tensor_quantized_tensor %7096, %7099, %7100 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7102 = torch.aten.dequantize.self %7101 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%7103 = torch.aten.mul.Tensor %7102, %1013 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%7104 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7105 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1447 = torch.constant.int 12 | |
%7106 = torch.aten.item %7104 : !torch.vtensor<[],f32> -> !torch.float | |
%7107 = torch.aten.item %7105 : !torch.vtensor<[],si8> -> !torch.int | |
%7108 = torch.aten.quantize_per_tensor %7103, %7106, %7107, %int12_1447 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7109 = torch.aten.int_repr %7108 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7110 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7111 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7112 = torch.aten.item %7110 : !torch.vtensor<[],f32> -> !torch.float | |
%7113 = torch.aten.item %7111 : !torch.vtensor<[],si8> -> !torch.int | |
%7114 = torch.aten._make_per_tensor_quantized_tensor %7109, %7112, %7113 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7115 = torch.aten.dequantize.self %7114 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1448 = torch.constant.int 1 | |
%7116 = torch.aten.add.Tensor %7115, %6265, %int1_1448 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%7117 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7118 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1449 = torch.constant.int 12 | |
%7119 = torch.aten.item %7117 : !torch.vtensor<[],f32> -> !torch.float | |
%7120 = torch.aten.item %7118 : !torch.vtensor<[],si8> -> !torch.int | |
%7121 = torch.aten.quantize_per_tensor %7116, %7119, %7120, %int12_1449 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7122 = torch.aten.int_repr %7121 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7123 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7124 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7125 = torch.aten.item %7123 : !torch.vtensor<[],f32> -> !torch.float | |
%7126 = torch.aten.item %7124 : !torch.vtensor<[],si8> -> !torch.int | |
%7127 = torch.aten._make_per_tensor_quantized_tensor %7122, %7125, %7126 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7128 = torch.aten.dequantize.self %7127 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%7129 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7130 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1450 = torch.constant.int 12 | |
%7131 = torch.aten.item %7129 : !torch.vtensor<[],f32> -> !torch.float | |
%7132 = torch.aten.item %7130 : !torch.vtensor<[],si8> -> !torch.int | |
%7133 = torch.aten.quantize_per_tensor %182, %7131, %7132, %int12_1450 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%7134 = torch.aten.int_repr %7133 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%7135 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7136 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7137 = torch.aten.item %7135 : !torch.vtensor<[],f32> -> !torch.float | |
%7138 = torch.aten.item %7136 : !torch.vtensor<[],si8> -> !torch.int | |
%7139 = torch.aten._make_per_tensor_quantized_tensor %7134, %7137, %7138 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%7140 = torch.aten.dequantize.self %7139 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%7141 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7142 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1451 = torch.constant.int 12 | |
%7143 = torch.aten.item %7141 : !torch.vtensor<[],f32> -> !torch.float | |
%7144 = torch.aten.item %7142 : !torch.vtensor<[],si8> -> !torch.int | |
%7145 = torch.aten.quantize_per_tensor %183, %7143, %7144, %int12_1451 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7146 = torch.aten.int_repr %7145 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7147 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7148 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7149 = torch.aten.item %7147 : !torch.vtensor<[],f32> -> !torch.float | |
%7150 = torch.aten.item %7148 : !torch.vtensor<[],si8> -> !torch.int | |
%7151 = torch.aten._make_per_tensor_quantized_tensor %7146, %7149, %7150 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7152 = torch.aten.dequantize.self %7151 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1452 = torch.constant.int 1 | |
%int1_1453 = torch.constant.int 1 | |
%int1_1454 = torch.constant.int 1 | |
%int1_1455 = torch.constant.int 1 | |
%int1_1456 = torch.constant.int 1 | |
%int1_1457 = torch.constant.int 1 | |
%int0_1458 = torch.constant.int 0 | |
%7153 = torch.prim.ListConstruct %int1_1452, %int1_1453 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7154 = torch.prim.ListConstruct %int1_1454, %int1_1455 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7155 = torch.prim.ListConstruct %int1_1456, %int1_1457 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7156 = torch.prim.ListConstruct %int0_1458, %int0_1458 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1459 = torch.constant.bool false | |
%int1_1460 = torch.constant.int 1 | |
%7157 = torch.aten.convolution %7128, %7140, %7152, %7155, %7153, %7154, %false_1459, %7156, %int1_1460 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1461 = torch.constant.float 0.1015625 | |
%7158 = torch.aten.leaky_relu %7157, %float1.015630e-01_1461 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7159 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7160 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1462 = torch.constant.int 12 | |
%7161 = torch.aten.item %7159 : !torch.vtensor<[],f32> -> !torch.float | |
%7162 = torch.aten.item %7160 : !torch.vtensor<[],si8> -> !torch.int | |
%7163 = torch.aten.quantize_per_tensor %7158, %7161, %7162, %int12_1462 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7164 = torch.aten.int_repr %7163 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%7165 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7166 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7167 = torch.aten.item %7165 : !torch.vtensor<[],f32> -> !torch.float | |
%7168 = torch.aten.item %7166 : !torch.vtensor<[],si8> -> !torch.int | |
%7169 = torch.aten._make_per_tensor_quantized_tensor %7164, %7167, %7168 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7170 = torch.aten.dequantize.self %7169 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%7171 = torch.prim.ListConstruct %7128, %7170 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1463 = torch.constant.int 1 | |
%7172 = torch.aten.cat %7171, %int1_1463 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%7173 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7174 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1464 = torch.constant.int 12 | |
%7175 = torch.aten.item %7173 : !torch.vtensor<[],f32> -> !torch.float | |
%7176 = torch.aten.item %7174 : !torch.vtensor<[],si8> -> !torch.int | |
%7177 = torch.aten.quantize_per_tensor %7172, %7175, %7176, %int12_1464 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%7178 = torch.aten.int_repr %7177 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%7179 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7180 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7181 = torch.aten.item %7179 : !torch.vtensor<[],f32> -> !torch.float | |
%7182 = torch.aten.item %7180 : !torch.vtensor<[],si8> -> !torch.int | |
%7183 = torch.aten._make_per_tensor_quantized_tensor %7178, %7181, %7182 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%7184 = torch.aten.dequantize.self %7183 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%7185 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7186 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1465 = torch.constant.int 12 | |
%7187 = torch.aten.item %7185 : !torch.vtensor<[],f32> -> !torch.float | |
%7188 = torch.aten.item %7186 : !torch.vtensor<[],si8> -> !torch.int | |
%7189 = torch.aten.quantize_per_tensor %184, %7187, %7188, %int12_1465 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%7190 = torch.aten.int_repr %7189 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%7191 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7192 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7193 = torch.aten.item %7191 : !torch.vtensor<[],f32> -> !torch.float | |
%7194 = torch.aten.item %7192 : !torch.vtensor<[],si8> -> !torch.int | |
%7195 = torch.aten._make_per_tensor_quantized_tensor %7190, %7193, %7194 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%7196 = torch.aten.dequantize.self %7195 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%7197 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7198 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1466 = torch.constant.int 12 | |
%7199 = torch.aten.item %7197 : !torch.vtensor<[],f32> -> !torch.float | |
%7200 = torch.aten.item %7198 : !torch.vtensor<[],si8> -> !torch.int | |
%7201 = torch.aten.quantize_per_tensor %185, %7199, %7200, %int12_1466 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7202 = torch.aten.int_repr %7201 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7203 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7204 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7205 = torch.aten.item %7203 : !torch.vtensor<[],f32> -> !torch.float | |
%7206 = torch.aten.item %7204 : !torch.vtensor<[],si8> -> !torch.int | |
%7207 = torch.aten._make_per_tensor_quantized_tensor %7202, %7205, %7206 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7208 = torch.aten.dequantize.self %7207 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1467 = torch.constant.int 1 | |
%int1_1468 = torch.constant.int 1 | |
%int1_1469 = torch.constant.int 1 | |
%int1_1470 = torch.constant.int 1 | |
%int1_1471 = torch.constant.int 1 | |
%int1_1472 = torch.constant.int 1 | |
%int0_1473 = torch.constant.int 0 | |
%7209 = torch.prim.ListConstruct %int1_1467, %int1_1468 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7210 = torch.prim.ListConstruct %int1_1469, %int1_1470 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7211 = torch.prim.ListConstruct %int1_1471, %int1_1472 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7212 = torch.prim.ListConstruct %int0_1473, %int0_1473 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1474 = torch.constant.bool false | |
%int1_1475 = torch.constant.int 1 | |
%7213 = torch.aten.convolution %7184, %7196, %7208, %7211, %7209, %7210, %false_1474, %7212, %int1_1475 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1476 = torch.constant.float 0.1015625 | |
%7214 = torch.aten.leaky_relu %7213, %float1.015630e-01_1476 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7215 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7216 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1477 = torch.constant.int 12 | |
%7217 = torch.aten.item %7215 : !torch.vtensor<[],f32> -> !torch.float | |
%7218 = torch.aten.item %7216 : !torch.vtensor<[],si8> -> !torch.int | |
%7219 = torch.aten.quantize_per_tensor %7214, %7217, %7218, %int12_1477 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7220 = torch.aten.int_repr %7219 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%7221 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7222 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7223 = torch.aten.item %7221 : !torch.vtensor<[],f32> -> !torch.float | |
%7224 = torch.aten.item %7222 : !torch.vtensor<[],si8> -> !torch.int | |
%7225 = torch.aten._make_per_tensor_quantized_tensor %7220, %7223, %7224 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7226 = torch.aten.dequantize.self %7225 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%7227 = torch.prim.ListConstruct %7128, %7170, %7226 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1478 = torch.constant.int 1 | |
%7228 = torch.aten.cat %7227, %int1_1478 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%7229 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7230 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1479 = torch.constant.int 12 | |
%7231 = torch.aten.item %7229 : !torch.vtensor<[],f32> -> !torch.float | |
%7232 = torch.aten.item %7230 : !torch.vtensor<[],si8> -> !torch.int | |
%7233 = torch.aten.quantize_per_tensor %7228, %7231, %7232, %int12_1479 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%7234 = torch.aten.int_repr %7233 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%7235 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7236 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7237 = torch.aten.item %7235 : !torch.vtensor<[],f32> -> !torch.float | |
%7238 = torch.aten.item %7236 : !torch.vtensor<[],si8> -> !torch.int | |
%7239 = torch.aten._make_per_tensor_quantized_tensor %7234, %7237, %7238 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%7240 = torch.aten.dequantize.self %7239 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%7241 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7242 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1480 = torch.constant.int 12 | |
%7243 = torch.aten.item %7241 : !torch.vtensor<[],f32> -> !torch.float | |
%7244 = torch.aten.item %7242 : !torch.vtensor<[],si8> -> !torch.int | |
%7245 = torch.aten.quantize_per_tensor %186, %7243, %7244, %int12_1480 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%7246 = torch.aten.int_repr %7245 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%7247 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7248 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7249 = torch.aten.item %7247 : !torch.vtensor<[],f32> -> !torch.float | |
%7250 = torch.aten.item %7248 : !torch.vtensor<[],si8> -> !torch.int | |
%7251 = torch.aten._make_per_tensor_quantized_tensor %7246, %7249, %7250 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%7252 = torch.aten.dequantize.self %7251 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%7253 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7254 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1481 = torch.constant.int 12 | |
%7255 = torch.aten.item %7253 : !torch.vtensor<[],f32> -> !torch.float | |
%7256 = torch.aten.item %7254 : !torch.vtensor<[],si8> -> !torch.int | |
%7257 = torch.aten.quantize_per_tensor %187, %7255, %7256, %int12_1481 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7258 = torch.aten.int_repr %7257 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7259 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7260 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7261 = torch.aten.item %7259 : !torch.vtensor<[],f32> -> !torch.float | |
%7262 = torch.aten.item %7260 : !torch.vtensor<[],si8> -> !torch.int | |
%7263 = torch.aten._make_per_tensor_quantized_tensor %7258, %7261, %7262 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7264 = torch.aten.dequantize.self %7263 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1482 = torch.constant.int 1 | |
%int1_1483 = torch.constant.int 1 | |
%int1_1484 = torch.constant.int 1 | |
%int1_1485 = torch.constant.int 1 | |
%int1_1486 = torch.constant.int 1 | |
%int1_1487 = torch.constant.int 1 | |
%int0_1488 = torch.constant.int 0 | |
%7265 = torch.prim.ListConstruct %int1_1482, %int1_1483 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7266 = torch.prim.ListConstruct %int1_1484, %int1_1485 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7267 = torch.prim.ListConstruct %int1_1486, %int1_1487 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7268 = torch.prim.ListConstruct %int0_1488, %int0_1488 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1489 = torch.constant.bool false | |
%int1_1490 = torch.constant.int 1 | |
%7269 = torch.aten.convolution %7240, %7252, %7264, %7267, %7265, %7266, %false_1489, %7268, %int1_1490 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1491 = torch.constant.float 0.1015625 | |
%7270 = torch.aten.leaky_relu %7269, %float1.015630e-01_1491 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7271 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7272 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1492 = torch.constant.int 12 | |
%7273 = torch.aten.item %7271 : !torch.vtensor<[],f32> -> !torch.float | |
%7274 = torch.aten.item %7272 : !torch.vtensor<[],si8> -> !torch.int | |
%7275 = torch.aten.quantize_per_tensor %7270, %7273, %7274, %int12_1492 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7276 = torch.aten.int_repr %7275 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%7277 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7278 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7279 = torch.aten.item %7277 : !torch.vtensor<[],f32> -> !torch.float | |
%7280 = torch.aten.item %7278 : !torch.vtensor<[],si8> -> !torch.int | |
%7281 = torch.aten._make_per_tensor_quantized_tensor %7276, %7279, %7280 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7282 = torch.aten.dequantize.self %7281 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%7283 = torch.prim.ListConstruct %7128, %7170, %7226, %7282 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1493 = torch.constant.int 1 | |
%7284 = torch.aten.cat %7283, %int1_1493 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%7285 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7286 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1494 = torch.constant.int 12 | |
%7287 = torch.aten.item %7285 : !torch.vtensor<[],f32> -> !torch.float | |
%7288 = torch.aten.item %7286 : !torch.vtensor<[],si8> -> !torch.int | |
%7289 = torch.aten.quantize_per_tensor %7284, %7287, %7288, %int12_1494 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%7290 = torch.aten.int_repr %7289 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%7291 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7292 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7293 = torch.aten.item %7291 : !torch.vtensor<[],f32> -> !torch.float | |
%7294 = torch.aten.item %7292 : !torch.vtensor<[],si8> -> !torch.int | |
%7295 = torch.aten._make_per_tensor_quantized_tensor %7290, %7293, %7294 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%7296 = torch.aten.dequantize.self %7295 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%7297 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7298 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1495 = torch.constant.int 12 | |
%7299 = torch.aten.item %7297 : !torch.vtensor<[],f32> -> !torch.float | |
%7300 = torch.aten.item %7298 : !torch.vtensor<[],si8> -> !torch.int | |
%7301 = torch.aten.quantize_per_tensor %188, %7299, %7300, %int12_1495 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%7302 = torch.aten.int_repr %7301 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%7303 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7304 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7305 = torch.aten.item %7303 : !torch.vtensor<[],f32> -> !torch.float | |
%7306 = torch.aten.item %7304 : !torch.vtensor<[],si8> -> !torch.int | |
%7307 = torch.aten._make_per_tensor_quantized_tensor %7302, %7305, %7306 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%7308 = torch.aten.dequantize.self %7307 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%7309 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7310 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1496 = torch.constant.int 12 | |
%7311 = torch.aten.item %7309 : !torch.vtensor<[],f32> -> !torch.float | |
%7312 = torch.aten.item %7310 : !torch.vtensor<[],si8> -> !torch.int | |
%7313 = torch.aten.quantize_per_tensor %189, %7311, %7312, %int12_1496 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7314 = torch.aten.int_repr %7313 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7315 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7316 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7317 = torch.aten.item %7315 : !torch.vtensor<[],f32> -> !torch.float | |
%7318 = torch.aten.item %7316 : !torch.vtensor<[],si8> -> !torch.int | |
%7319 = torch.aten._make_per_tensor_quantized_tensor %7314, %7317, %7318 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7320 = torch.aten.dequantize.self %7319 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1497 = torch.constant.int 1 | |
%int1_1498 = torch.constant.int 1 | |
%int1_1499 = torch.constant.int 1 | |
%int1_1500 = torch.constant.int 1 | |
%int1_1501 = torch.constant.int 1 | |
%int1_1502 = torch.constant.int 1 | |
%int0_1503 = torch.constant.int 0 | |
%7321 = torch.prim.ListConstruct %int1_1497, %int1_1498 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7322 = torch.prim.ListConstruct %int1_1499, %int1_1500 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7323 = torch.prim.ListConstruct %int1_1501, %int1_1502 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7324 = torch.prim.ListConstruct %int0_1503, %int0_1503 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1504 = torch.constant.bool false | |
%int1_1505 = torch.constant.int 1 | |
%7325 = torch.aten.convolution %7296, %7308, %7320, %7323, %7321, %7322, %false_1504, %7324, %int1_1505 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1506 = torch.constant.float 0.1015625 | |
%7326 = torch.aten.leaky_relu %7325, %float1.015630e-01_1506 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7327 = torch.prim.ListConstruct %7128, %7170, %7226, %7282, %7326 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1507 = torch.constant.int 1 | |
%7328 = torch.aten.cat %7327, %int1_1507 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%7329 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7330 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1508 = torch.constant.int 12 | |
%7331 = torch.aten.item %7329 : !torch.vtensor<[],f32> -> !torch.float | |
%7332 = torch.aten.item %7330 : !torch.vtensor<[],si8> -> !torch.int | |
%7333 = torch.aten.quantize_per_tensor %7328, %7331, %7332, %int12_1508 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%7334 = torch.aten.int_repr %7333 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%7335 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7336 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7337 = torch.aten.item %7335 : !torch.vtensor<[],f32> -> !torch.float | |
%7338 = torch.aten.item %7336 : !torch.vtensor<[],si8> -> !torch.int | |
%7339 = torch.aten._make_per_tensor_quantized_tensor %7334, %7337, %7338 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%7340 = torch.aten.dequantize.self %7339 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%7341 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7342 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1509 = torch.constant.int 12 | |
%7343 = torch.aten.item %7341 : !torch.vtensor<[],f32> -> !torch.float | |
%7344 = torch.aten.item %7342 : !torch.vtensor<[],si8> -> !torch.int | |
%7345 = torch.aten.quantize_per_tensor %190, %7343, %7344, %int12_1509 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%7346 = torch.aten.int_repr %7345 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%7347 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7348 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7349 = torch.aten.item %7347 : !torch.vtensor<[],f32> -> !torch.float | |
%7350 = torch.aten.item %7348 : !torch.vtensor<[],si8> -> !torch.int | |
%7351 = torch.aten._make_per_tensor_quantized_tensor %7346, %7349, %7350 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%7352 = torch.aten.dequantize.self %7351 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%7353 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7354 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1510 = torch.constant.int 12 | |
%7355 = torch.aten.item %7353 : !torch.vtensor<[],f32> -> !torch.float | |
%7356 = torch.aten.item %7354 : !torch.vtensor<[],si8> -> !torch.int | |
%7357 = torch.aten.quantize_per_tensor %191, %7355, %7356, %int12_1510 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%7358 = torch.aten.int_repr %7357 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%7359 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7360 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7361 = torch.aten.item %7359 : !torch.vtensor<[],f32> -> !torch.float | |
%7362 = torch.aten.item %7360 : !torch.vtensor<[],si8> -> !torch.int | |
%7363 = torch.aten._make_per_tensor_quantized_tensor %7358, %7361, %7362 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%7364 = torch.aten.dequantize.self %7363 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_1511 = torch.constant.int 1 | |
%int1_1512 = torch.constant.int 1 | |
%int1_1513 = torch.constant.int 1 | |
%int1_1514 = torch.constant.int 1 | |
%int1_1515 = torch.constant.int 1 | |
%int1_1516 = torch.constant.int 1 | |
%int0_1517 = torch.constant.int 0 | |
%7365 = torch.prim.ListConstruct %int1_1511, %int1_1512 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7366 = torch.prim.ListConstruct %int1_1513, %int1_1514 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7367 = torch.prim.ListConstruct %int1_1515, %int1_1516 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7368 = torch.prim.ListConstruct %int0_1517, %int0_1517 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1518 = torch.constant.bool false | |
%int1_1519 = torch.constant.int 1 | |
%7369 = torch.aten.convolution %7340, %7352, %7364, %7367, %7365, %7366, %false_1518, %7368, %int1_1519 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%7370 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7371 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1520 = torch.constant.int 12 | |
%7372 = torch.aten.item %7370 : !torch.vtensor<[],f32> -> !torch.float | |
%7373 = torch.aten.item %7371 : !torch.vtensor<[],si8> -> !torch.int | |
%7374 = torch.aten.quantize_per_tensor %7369, %7372, %7373, %int12_1520 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7375 = torch.aten.int_repr %7374 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7376 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7377 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7378 = torch.aten.item %7376 : !torch.vtensor<[],f32> -> !torch.float | |
%7379 = torch.aten.item %7377 : !torch.vtensor<[],si8> -> !torch.int | |
%7380 = torch.aten._make_per_tensor_quantized_tensor %7375, %7378, %7379 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7381 = torch.aten.dequantize.self %7380 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%7382 = torch.aten.mul.Tensor %7381, %1026 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%7383 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7384 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1521 = torch.constant.int 12 | |
%7385 = torch.aten.item %7383 : !torch.vtensor<[],f32> -> !torch.float | |
%7386 = torch.aten.item %7384 : !torch.vtensor<[],si8> -> !torch.int | |
%7387 = torch.aten.quantize_per_tensor %7382, %7385, %7386, %int12_1521 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7388 = torch.aten.int_repr %7387 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7389 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7390 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7391 = torch.aten.item %7389 : !torch.vtensor<[],f32> -> !torch.float | |
%7392 = torch.aten.item %7390 : !torch.vtensor<[],si8> -> !torch.int | |
%7393 = torch.aten._make_per_tensor_quantized_tensor %7388, %7391, %7392 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7394 = torch.aten.dequantize.self %7393 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1522 = torch.constant.int 1 | |
%7395 = torch.aten.add.Tensor %7394, %7128, %int1_1522 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%7396 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7397 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1523 = torch.constant.int 12 | |
%7398 = torch.aten.item %7396 : !torch.vtensor<[],f32> -> !torch.float | |
%7399 = torch.aten.item %7397 : !torch.vtensor<[],si8> -> !torch.int | |
%7400 = torch.aten.quantize_per_tensor %7395, %7398, %7399, %int12_1523 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7401 = torch.aten.int_repr %7400 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7402 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7403 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7404 = torch.aten.item %7402 : !torch.vtensor<[],f32> -> !torch.float | |
%7405 = torch.aten.item %7403 : !torch.vtensor<[],si8> -> !torch.int | |
%7406 = torch.aten._make_per_tensor_quantized_tensor %7401, %7404, %7405 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7407 = torch.aten.dequantize.self %7406 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%7408 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7409 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1524 = torch.constant.int 12 | |
%7410 = torch.aten.item %7408 : !torch.vtensor<[],f32> -> !torch.float | |
%7411 = torch.aten.item %7409 : !torch.vtensor<[],si8> -> !torch.int | |
%7412 = torch.aten.quantize_per_tensor %192, %7410, %7411, %int12_1524 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%7413 = torch.aten.int_repr %7412 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%7414 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7415 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7416 = torch.aten.item %7414 : !torch.vtensor<[],f32> -> !torch.float | |
%7417 = torch.aten.item %7415 : !torch.vtensor<[],si8> -> !torch.int | |
%7418 = torch.aten._make_per_tensor_quantized_tensor %7413, %7416, %7417 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%7419 = torch.aten.dequantize.self %7418 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%7420 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7421 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1525 = torch.constant.int 12 | |
%7422 = torch.aten.item %7420 : !torch.vtensor<[],f32> -> !torch.float | |
%7423 = torch.aten.item %7421 : !torch.vtensor<[],si8> -> !torch.int | |
%7424 = torch.aten.quantize_per_tensor %193, %7422, %7423, %int12_1525 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7425 = torch.aten.int_repr %7424 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7426 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7427 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7428 = torch.aten.item %7426 : !torch.vtensor<[],f32> -> !torch.float | |
%7429 = torch.aten.item %7427 : !torch.vtensor<[],si8> -> !torch.int | |
%7430 = torch.aten._make_per_tensor_quantized_tensor %7425, %7428, %7429 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7431 = torch.aten.dequantize.self %7430 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1526 = torch.constant.int 1 | |
%int1_1527 = torch.constant.int 1 | |
%int1_1528 = torch.constant.int 1 | |
%int1_1529 = torch.constant.int 1 | |
%int1_1530 = torch.constant.int 1 | |
%int1_1531 = torch.constant.int 1 | |
%int0_1532 = torch.constant.int 0 | |
%7432 = torch.prim.ListConstruct %int1_1526, %int1_1527 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7433 = torch.prim.ListConstruct %int1_1528, %int1_1529 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7434 = torch.prim.ListConstruct %int1_1530, %int1_1531 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7435 = torch.prim.ListConstruct %int0_1532, %int0_1532 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1533 = torch.constant.bool false | |
%int1_1534 = torch.constant.int 1 | |
%7436 = torch.aten.convolution %7407, %7419, %7431, %7434, %7432, %7433, %false_1533, %7435, %int1_1534 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1535 = torch.constant.float 0.1015625 | |
%7437 = torch.aten.leaky_relu %7436, %float1.015630e-01_1535 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7438 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7439 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1536 = torch.constant.int 12 | |
%7440 = torch.aten.item %7438 : !torch.vtensor<[],f32> -> !torch.float | |
%7441 = torch.aten.item %7439 : !torch.vtensor<[],si8> -> !torch.int | |
%7442 = torch.aten.quantize_per_tensor %7437, %7440, %7441, %int12_1536 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7443 = torch.aten.int_repr %7442 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%7444 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7445 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7446 = torch.aten.item %7444 : !torch.vtensor<[],f32> -> !torch.float | |
%7447 = torch.aten.item %7445 : !torch.vtensor<[],si8> -> !torch.int | |
%7448 = torch.aten._make_per_tensor_quantized_tensor %7443, %7446, %7447 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7449 = torch.aten.dequantize.self %7448 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%7450 = torch.prim.ListConstruct %7407, %7449 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1537 = torch.constant.int 1 | |
%7451 = torch.aten.cat %7450, %int1_1537 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%7452 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7453 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1538 = torch.constant.int 12 | |
%7454 = torch.aten.item %7452 : !torch.vtensor<[],f32> -> !torch.float | |
%7455 = torch.aten.item %7453 : !torch.vtensor<[],si8> -> !torch.int | |
%7456 = torch.aten.quantize_per_tensor %7451, %7454, %7455, %int12_1538 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%7457 = torch.aten.int_repr %7456 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%7458 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7459 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7460 = torch.aten.item %7458 : !torch.vtensor<[],f32> -> !torch.float | |
%7461 = torch.aten.item %7459 : !torch.vtensor<[],si8> -> !torch.int | |
%7462 = torch.aten._make_per_tensor_quantized_tensor %7457, %7460, %7461 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%7463 = torch.aten.dequantize.self %7462 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%7464 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7465 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1539 = torch.constant.int 12 | |
%7466 = torch.aten.item %7464 : !torch.vtensor<[],f32> -> !torch.float | |
%7467 = torch.aten.item %7465 : !torch.vtensor<[],si8> -> !torch.int | |
%7468 = torch.aten.quantize_per_tensor %194, %7466, %7467, %int12_1539 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%7469 = torch.aten.int_repr %7468 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%7470 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7471 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7472 = torch.aten.item %7470 : !torch.vtensor<[],f32> -> !torch.float | |
%7473 = torch.aten.item %7471 : !torch.vtensor<[],si8> -> !torch.int | |
%7474 = torch.aten._make_per_tensor_quantized_tensor %7469, %7472, %7473 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%7475 = torch.aten.dequantize.self %7474 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%7476 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7477 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1540 = torch.constant.int 12 | |
%7478 = torch.aten.item %7476 : !torch.vtensor<[],f32> -> !torch.float | |
%7479 = torch.aten.item %7477 : !torch.vtensor<[],si8> -> !torch.int | |
%7480 = torch.aten.quantize_per_tensor %195, %7478, %7479, %int12_1540 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7481 = torch.aten.int_repr %7480 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7482 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7483 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7484 = torch.aten.item %7482 : !torch.vtensor<[],f32> -> !torch.float | |
%7485 = torch.aten.item %7483 : !torch.vtensor<[],si8> -> !torch.int | |
%7486 = torch.aten._make_per_tensor_quantized_tensor %7481, %7484, %7485 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7487 = torch.aten.dequantize.self %7486 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1541 = torch.constant.int 1 | |
%int1_1542 = torch.constant.int 1 | |
%int1_1543 = torch.constant.int 1 | |
%int1_1544 = torch.constant.int 1 | |
%int1_1545 = torch.constant.int 1 | |
%int1_1546 = torch.constant.int 1 | |
%int0_1547 = torch.constant.int 0 | |
%7488 = torch.prim.ListConstruct %int1_1541, %int1_1542 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7489 = torch.prim.ListConstruct %int1_1543, %int1_1544 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7490 = torch.prim.ListConstruct %int1_1545, %int1_1546 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7491 = torch.prim.ListConstruct %int0_1547, %int0_1547 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1548 = torch.constant.bool false | |
%int1_1549 = torch.constant.int 1 | |
%7492 = torch.aten.convolution %7463, %7475, %7487, %7490, %7488, %7489, %false_1548, %7491, %int1_1549 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1550 = torch.constant.float 0.1015625 | |
%7493 = torch.aten.leaky_relu %7492, %float1.015630e-01_1550 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7494 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7495 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1551 = torch.constant.int 12 | |
%7496 = torch.aten.item %7494 : !torch.vtensor<[],f32> -> !torch.float | |
%7497 = torch.aten.item %7495 : !torch.vtensor<[],si8> -> !torch.int | |
%7498 = torch.aten.quantize_per_tensor %7493, %7496, %7497, %int12_1551 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7499 = torch.aten.int_repr %7498 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%7500 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7501 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7502 = torch.aten.item %7500 : !torch.vtensor<[],f32> -> !torch.float | |
%7503 = torch.aten.item %7501 : !torch.vtensor<[],si8> -> !torch.int | |
%7504 = torch.aten._make_per_tensor_quantized_tensor %7499, %7502, %7503 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7505 = torch.aten.dequantize.self %7504 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%7506 = torch.prim.ListConstruct %7407, %7449, %7505 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1552 = torch.constant.int 1 | |
%7507 = torch.aten.cat %7506, %int1_1552 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%7508 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7509 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1553 = torch.constant.int 12 | |
%7510 = torch.aten.item %7508 : !torch.vtensor<[],f32> -> !torch.float | |
%7511 = torch.aten.item %7509 : !torch.vtensor<[],si8> -> !torch.int | |
%7512 = torch.aten.quantize_per_tensor %7507, %7510, %7511, %int12_1553 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%7513 = torch.aten.int_repr %7512 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],si8> | |
%7514 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7515 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7516 = torch.aten.item %7514 : !torch.vtensor<[],f32> -> !torch.float | |
%7517 = torch.aten.item %7515 : !torch.vtensor<[],si8> -> !torch.int | |
%7518 = torch.aten._make_per_tensor_quantized_tensor %7513, %7516, %7517 : !torch.vtensor<[1,128,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%7519 = torch.aten.dequantize.self %7518 : !torch.vtensor<[1,128,224,224],!torch.qint8> -> !torch.vtensor<[1,128,224,224],f32> | |
%7520 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7521 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1554 = torch.constant.int 12 | |
%7522 = torch.aten.item %7520 : !torch.vtensor<[],f32> -> !torch.float | |
%7523 = torch.aten.item %7521 : !torch.vtensor<[],si8> -> !torch.int | |
%7524 = torch.aten.quantize_per_tensor %196, %7522, %7523, %int12_1554 : !torch.vtensor<[32,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%7525 = torch.aten.int_repr %7524 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],si8> | |
%7526 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7527 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7528 = torch.aten.item %7526 : !torch.vtensor<[],f32> -> !torch.float | |
%7529 = torch.aten.item %7527 : !torch.vtensor<[],si8> -> !torch.int | |
%7530 = torch.aten._make_per_tensor_quantized_tensor %7525, %7528, %7529 : !torch.vtensor<[32,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,128,3,3],!torch.qint8> | |
%7531 = torch.aten.dequantize.self %7530 : !torch.vtensor<[32,128,3,3],!torch.qint8> -> !torch.vtensor<[32,128,3,3],f32> | |
%7532 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7533 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1555 = torch.constant.int 12 | |
%7534 = torch.aten.item %7532 : !torch.vtensor<[],f32> -> !torch.float | |
%7535 = torch.aten.item %7533 : !torch.vtensor<[],si8> -> !torch.int | |
%7536 = torch.aten.quantize_per_tensor %197, %7534, %7535, %int12_1555 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7537 = torch.aten.int_repr %7536 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7538 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7539 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7540 = torch.aten.item %7538 : !torch.vtensor<[],f32> -> !torch.float | |
%7541 = torch.aten.item %7539 : !torch.vtensor<[],si8> -> !torch.int | |
%7542 = torch.aten._make_per_tensor_quantized_tensor %7537, %7540, %7541 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7543 = torch.aten.dequantize.self %7542 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1556 = torch.constant.int 1 | |
%int1_1557 = torch.constant.int 1 | |
%int1_1558 = torch.constant.int 1 | |
%int1_1559 = torch.constant.int 1 | |
%int1_1560 = torch.constant.int 1 | |
%int1_1561 = torch.constant.int 1 | |
%int0_1562 = torch.constant.int 0 | |
%7544 = torch.prim.ListConstruct %int1_1556, %int1_1557 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7545 = torch.prim.ListConstruct %int1_1558, %int1_1559 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7546 = torch.prim.ListConstruct %int1_1560, %int1_1561 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7547 = torch.prim.ListConstruct %int0_1562, %int0_1562 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1563 = torch.constant.bool false | |
%int1_1564 = torch.constant.int 1 | |
%7548 = torch.aten.convolution %7519, %7531, %7543, %7546, %7544, %7545, %false_1563, %7547, %int1_1564 : !torch.vtensor<[1,128,224,224],f32>, !torch.vtensor<[32,128,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1565 = torch.constant.float 0.1015625 | |
%7549 = torch.aten.leaky_relu %7548, %float1.015630e-01_1565 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7550 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7551 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1566 = torch.constant.int 12 | |
%7552 = torch.aten.item %7550 : !torch.vtensor<[],f32> -> !torch.float | |
%7553 = torch.aten.item %7551 : !torch.vtensor<[],si8> -> !torch.int | |
%7554 = torch.aten.quantize_per_tensor %7549, %7552, %7553, %int12_1566 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7555 = torch.aten.int_repr %7554 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%7556 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7557 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7558 = torch.aten.item %7556 : !torch.vtensor<[],f32> -> !torch.float | |
%7559 = torch.aten.item %7557 : !torch.vtensor<[],si8> -> !torch.int | |
%7560 = torch.aten._make_per_tensor_quantized_tensor %7555, %7558, %7559 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7561 = torch.aten.dequantize.self %7560 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%7562 = torch.prim.ListConstruct %7407, %7449, %7505, %7561 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1567 = torch.constant.int 1 | |
%7563 = torch.aten.cat %7562, %int1_1567 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,160,224,224],f32> | |
%7564 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7565 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1568 = torch.constant.int 12 | |
%7566 = torch.aten.item %7564 : !torch.vtensor<[],f32> -> !torch.float | |
%7567 = torch.aten.item %7565 : !torch.vtensor<[],si8> -> !torch.int | |
%7568 = torch.aten.quantize_per_tensor %7563, %7566, %7567, %int12_1568 : !torch.vtensor<[1,160,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%7569 = torch.aten.int_repr %7568 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],si8> | |
%7570 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7571 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7572 = torch.aten.item %7570 : !torch.vtensor<[],f32> -> !torch.float | |
%7573 = torch.aten.item %7571 : !torch.vtensor<[],si8> -> !torch.int | |
%7574 = torch.aten._make_per_tensor_quantized_tensor %7569, %7572, %7573 : !torch.vtensor<[1,160,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,224,224],!torch.qint8> | |
%7575 = torch.aten.dequantize.self %7574 : !torch.vtensor<[1,160,224,224],!torch.qint8> -> !torch.vtensor<[1,160,224,224],f32> | |
%7576 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7577 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1569 = torch.constant.int 12 | |
%7578 = torch.aten.item %7576 : !torch.vtensor<[],f32> -> !torch.float | |
%7579 = torch.aten.item %7577 : !torch.vtensor<[],si8> -> !torch.int | |
%7580 = torch.aten.quantize_per_tensor %198, %7578, %7579, %int12_1569 : !torch.vtensor<[32,160,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%7581 = torch.aten.int_repr %7580 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],si8> | |
%7582 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7583 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7584 = torch.aten.item %7582 : !torch.vtensor<[],f32> -> !torch.float | |
%7585 = torch.aten.item %7583 : !torch.vtensor<[],si8> -> !torch.int | |
%7586 = torch.aten._make_per_tensor_quantized_tensor %7581, %7584, %7585 : !torch.vtensor<[32,160,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,3,3],!torch.qint8> | |
%7587 = torch.aten.dequantize.self %7586 : !torch.vtensor<[32,160,3,3],!torch.qint8> -> !torch.vtensor<[32,160,3,3],f32> | |
%7588 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7589 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1570 = torch.constant.int 12 | |
%7590 = torch.aten.item %7588 : !torch.vtensor<[],f32> -> !torch.float | |
%7591 = torch.aten.item %7589 : !torch.vtensor<[],si8> -> !torch.int | |
%7592 = torch.aten.quantize_per_tensor %199, %7590, %7591, %int12_1570 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7593 = torch.aten.int_repr %7592 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7594 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7595 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7596 = torch.aten.item %7594 : !torch.vtensor<[],f32> -> !torch.float | |
%7597 = torch.aten.item %7595 : !torch.vtensor<[],si8> -> !torch.int | |
%7598 = torch.aten._make_per_tensor_quantized_tensor %7593, %7596, %7597 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7599 = torch.aten.dequantize.self %7598 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1571 = torch.constant.int 1 | |
%int1_1572 = torch.constant.int 1 | |
%int1_1573 = torch.constant.int 1 | |
%int1_1574 = torch.constant.int 1 | |
%int1_1575 = torch.constant.int 1 | |
%int1_1576 = torch.constant.int 1 | |
%int0_1577 = torch.constant.int 0 | |
%7600 = torch.prim.ListConstruct %int1_1571, %int1_1572 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7601 = torch.prim.ListConstruct %int1_1573, %int1_1574 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7602 = torch.prim.ListConstruct %int1_1575, %int1_1576 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7603 = torch.prim.ListConstruct %int0_1577, %int0_1577 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1578 = torch.constant.bool false | |
%int1_1579 = torch.constant.int 1 | |
%7604 = torch.aten.convolution %7575, %7587, %7599, %7602, %7600, %7601, %false_1578, %7603, %int1_1579 : !torch.vtensor<[1,160,224,224],f32>, !torch.vtensor<[32,160,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1580 = torch.constant.float 0.1015625 | |
%7605 = torch.aten.leaky_relu %7604, %float1.015630e-01_1580 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7606 = torch.prim.ListConstruct %7407, %7449, %7505, %7561, %7605 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1581 = torch.constant.int 1 | |
%7607 = torch.aten.cat %7606, %int1_1581 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,192,224,224],f32> | |
%7608 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7609 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1582 = torch.constant.int 12 | |
%7610 = torch.aten.item %7608 : !torch.vtensor<[],f32> -> !torch.float | |
%7611 = torch.aten.item %7609 : !torch.vtensor<[],si8> -> !torch.int | |
%7612 = torch.aten.quantize_per_tensor %7607, %7610, %7611, %int12_1582 : !torch.vtensor<[1,192,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%7613 = torch.aten.int_repr %7612 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],si8> | |
%7614 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7615 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7616 = torch.aten.item %7614 : !torch.vtensor<[],f32> -> !torch.float | |
%7617 = torch.aten.item %7615 : !torch.vtensor<[],si8> -> !torch.int | |
%7618 = torch.aten._make_per_tensor_quantized_tensor %7613, %7616, %7617 : !torch.vtensor<[1,192,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,192,224,224],!torch.qint8> | |
%7619 = torch.aten.dequantize.self %7618 : !torch.vtensor<[1,192,224,224],!torch.qint8> -> !torch.vtensor<[1,192,224,224],f32> | |
%7620 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7621 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1583 = torch.constant.int 12 | |
%7622 = torch.aten.item %7620 : !torch.vtensor<[],f32> -> !torch.float | |
%7623 = torch.aten.item %7621 : !torch.vtensor<[],si8> -> !torch.int | |
%7624 = torch.aten.quantize_per_tensor %200, %7622, %7623, %int12_1583 : !torch.vtensor<[64,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%7625 = torch.aten.int_repr %7624 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],si8> | |
%7626 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7627 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7628 = torch.aten.item %7626 : !torch.vtensor<[],f32> -> !torch.float | |
%7629 = torch.aten.item %7627 : !torch.vtensor<[],si8> -> !torch.int | |
%7630 = torch.aten._make_per_tensor_quantized_tensor %7625, %7628, %7629 : !torch.vtensor<[64,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,192,3,3],!torch.qint8> | |
%7631 = torch.aten.dequantize.self %7630 : !torch.vtensor<[64,192,3,3],!torch.qint8> -> !torch.vtensor<[64,192,3,3],f32> | |
%7632 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7633 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1584 = torch.constant.int 12 | |
%7634 = torch.aten.item %7632 : !torch.vtensor<[],f32> -> !torch.float | |
%7635 = torch.aten.item %7633 : !torch.vtensor<[],si8> -> !torch.int | |
%7636 = torch.aten.quantize_per_tensor %201, %7634, %7635, %int12_1584 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%7637 = torch.aten.int_repr %7636 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
%7638 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7639 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7640 = torch.aten.item %7638 : !torch.vtensor<[],f32> -> !torch.float | |
%7641 = torch.aten.item %7639 : !torch.vtensor<[],si8> -> !torch.int | |
%7642 = torch.aten._make_per_tensor_quantized_tensor %7637, %7640, %7641 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
%7643 = torch.aten.dequantize.self %7642 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
%int1_1585 = torch.constant.int 1 | |
%int1_1586 = torch.constant.int 1 | |
%int1_1587 = torch.constant.int 1 | |
%int1_1588 = torch.constant.int 1 | |
%int1_1589 = torch.constant.int 1 | |
%int1_1590 = torch.constant.int 1 | |
%int0_1591 = torch.constant.int 0 | |
%7644 = torch.prim.ListConstruct %int1_1585, %int1_1586 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7645 = torch.prim.ListConstruct %int1_1587, %int1_1588 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7646 = torch.prim.ListConstruct %int1_1589, %int1_1590 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7647 = torch.prim.ListConstruct %int0_1591, %int0_1591 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1592 = torch.constant.bool false | |
%int1_1593 = torch.constant.int 1 | |
%7648 = torch.aten.convolution %7619, %7631, %7643, %7646, %7644, %7645, %false_1592, %7647, %int1_1593 : !torch.vtensor<[1,192,224,224],f32>, !torch.vtensor<[64,192,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%7649 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7650 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1594 = torch.constant.int 12 | |
%7651 = torch.aten.item %7649 : !torch.vtensor<[],f32> -> !torch.float | |
%7652 = torch.aten.item %7650 : !torch.vtensor<[],si8> -> !torch.int | |
%7653 = torch.aten.quantize_per_tensor %7648, %7651, %7652, %int12_1594 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7654 = torch.aten.int_repr %7653 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7655 = torch.vtensor.literal(dense<1.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7656 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7657 = torch.aten.item %7655 : !torch.vtensor<[],f32> -> !torch.float | |
%7658 = torch.aten.item %7656 : !torch.vtensor<[],si8> -> !torch.int | |
%7659 = torch.aten._make_per_tensor_quantized_tensor %7654, %7657, %7658 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7660 = torch.aten.dequantize.self %7659 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%7661 = torch.aten.mul.Tensor %7660, %1039 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,64,224,224],f32> | |
%7662 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7663 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1595 = torch.constant.int 12 | |
%7664 = torch.aten.item %7662 : !torch.vtensor<[],f32> -> !torch.float | |
%7665 = torch.aten.item %7663 : !torch.vtensor<[],si8> -> !torch.int | |
%7666 = torch.aten.quantize_per_tensor %7661, %7664, %7665, %int12_1595 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7667 = torch.aten.int_repr %7666 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7668 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7669 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7670 = torch.aten.item %7668 : !torch.vtensor<[],f32> -> !torch.float | |
%7671 = torch.aten.item %7669 : !torch.vtensor<[],si8> -> !torch.int | |
%7672 = torch.aten._make_per_tensor_quantized_tensor %7667, %7670, %7671 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7673 = torch.aten.dequantize.self %7672 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%int1_1596 = torch.constant.int 1 | |
%7674 = torch.aten.add.Tensor %7673, %7407, %int1_1596 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,64,224,224],f32>, !torch.int -> !torch.vtensor<[1,64,224,224],f32> | |
%7675 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7676 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1597 = torch.constant.int 12 | |
%7677 = torch.aten.item %7675 : !torch.vtensor<[],f32> -> !torch.float | |
%7678 = torch.aten.item %7676 : !torch.vtensor<[],si8> -> !torch.int | |
%7679 = torch.aten.quantize_per_tensor %7674, %7677, %7678, %int12_1597 : !torch.vtensor<[1,64,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7680 = torch.aten.int_repr %7679 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],si8> | |
%7681 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7682 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7683 = torch.aten.item %7681 : !torch.vtensor<[],f32> -> !torch.float | |
%7684 = torch.aten.item %7682 : !torch.vtensor<[],si8> -> !torch.int | |
%7685 = torch.aten._make_per_tensor_quantized_tensor %7680, %7683, %7684 : !torch.vtensor<[1,64,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,224,224],!torch.qint8> | |
%7686 = torch.aten.dequantize.self %7685 : !torch.vtensor<[1,64,224,224],!torch.qint8> -> !torch.vtensor<[1,64,224,224],f32> | |
%7687 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7688 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1598 = torch.constant.int 12 | |
%7689 = torch.aten.item %7687 : !torch.vtensor<[],f32> -> !torch.float | |
%7690 = torch.aten.item %7688 : !torch.vtensor<[],si8> -> !torch.int | |
%7691 = torch.aten.quantize_per_tensor %202, %7689, %7690, %int12_1598 : !torch.vtensor<[32,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%7692 = torch.aten.int_repr %7691 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],si8> | |
%7693 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7694 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7695 = torch.aten.item %7693 : !torch.vtensor<[],f32> -> !torch.float | |
%7696 = torch.aten.item %7694 : !torch.vtensor<[],si8> -> !torch.int | |
%7697 = torch.aten._make_per_tensor_quantized_tensor %7692, %7695, %7696 : !torch.vtensor<[32,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,3,3],!torch.qint8> | |
%7698 = torch.aten.dequantize.self %7697 : !torch.vtensor<[32,64,3,3],!torch.qint8> -> !torch.vtensor<[32,64,3,3],f32> | |
%7699 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7700 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1599 = torch.constant.int 12 | |
%7701 = torch.aten.item %7699 : !torch.vtensor<[],f32> -> !torch.float | |
%7702 = torch.aten.item %7700 : !torch.vtensor<[],si8> -> !torch.int | |
%7703 = torch.aten.quantize_per_tensor %203, %7701, %7702, %int12_1599 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7704 = torch.aten.int_repr %7703 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7705 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7706 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7707 = torch.aten.item %7705 : !torch.vtensor<[],f32> -> !torch.float | |
%7708 = torch.aten.item %7706 : !torch.vtensor<[],si8> -> !torch.int | |
%7709 = torch.aten._make_per_tensor_quantized_tensor %7704, %7707, %7708 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7710 = torch.aten.dequantize.self %7709 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1600 = torch.constant.int 1 | |
%int1_1601 = torch.constant.int 1 | |
%int1_1602 = torch.constant.int 1 | |
%int1_1603 = torch.constant.int 1 | |
%int1_1604 = torch.constant.int 1 | |
%int1_1605 = torch.constant.int 1 | |
%int0_1606 = torch.constant.int 0 | |
%7711 = torch.prim.ListConstruct %int1_1600, %int1_1601 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7712 = torch.prim.ListConstruct %int1_1602, %int1_1603 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7713 = torch.prim.ListConstruct %int1_1604, %int1_1605 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7714 = torch.prim.ListConstruct %int0_1606, %int0_1606 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1607 = torch.constant.bool false | |
%int1_1608 = torch.constant.int 1 | |
%7715 = torch.aten.convolution %7686, %7698, %7710, %7713, %7711, %7712, %false_1607, %7714, %int1_1608 : !torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[32,64,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1609 = torch.constant.float 0.1015625 | |
%7716 = torch.aten.leaky_relu %7715, %float1.015630e-01_1609 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7717 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7718 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1610 = torch.constant.int 12 | |
%7719 = torch.aten.item %7717 : !torch.vtensor<[],f32> -> !torch.float | |
%7720 = torch.aten.item %7718 : !torch.vtensor<[],si8> -> !torch.int | |
%7721 = torch.aten.quantize_per_tensor %7716, %7719, %7720, %int12_1610 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7722 = torch.aten.int_repr %7721 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%7723 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7724 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7725 = torch.aten.item %7723 : !torch.vtensor<[],f32> -> !torch.float | |
%7726 = torch.aten.item %7724 : !torch.vtensor<[],si8> -> !torch.int | |
%7727 = torch.aten._make_per_tensor_quantized_tensor %7722, %7725, %7726 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7728 = torch.aten.dequantize.self %7727 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%7729 = torch.prim.ListConstruct %7686, %7728 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1611 = torch.constant.int 1 | |
%7730 = torch.aten.cat %7729, %int1_1611 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,96,224,224],f32> | |
%7731 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7732 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1612 = torch.constant.int 12 | |
%7733 = torch.aten.item %7731 : !torch.vtensor<[],f32> -> !torch.float | |
%7734 = torch.aten.item %7732 : !torch.vtensor<[],si8> -> !torch.int | |
%7735 = torch.aten.quantize_per_tensor %7730, %7733, %7734, %int12_1612 : !torch.vtensor<[1,96,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%7736 = torch.aten.int_repr %7735 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],si8> | |
%7737 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7738 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7739 = torch.aten.item %7737 : !torch.vtensor<[],f32> -> !torch.float | |
%7740 = torch.aten.item %7738 : !torch.vtensor<[],si8> -> !torch.int | |
%7741 = torch.aten._make_per_tensor_quantized_tensor %7736, %7739, %7740 : !torch.vtensor<[1,96,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,96,224,224],!torch.qint8> | |
%7742 = torch.aten.dequantize.self %7741 : !torch.vtensor<[1,96,224,224],!torch.qint8> -> !torch.vtensor<[1,96,224,224],f32> | |
%7743 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7744 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1613 = torch.constant.int 12 | |
%7745 = torch.aten.item %7743 : !torch.vtensor<[],f32> -> !torch.float | |
%7746 = torch.aten.item %7744 : !torch.vtensor<[],si8> -> !torch.int | |
%7747 = torch.aten.quantize_per_tensor %204, %7745, %7746, %int12_1613 : !torch.vtensor<[32,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%7748 = torch.aten.int_repr %7747 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],si8> | |
%7749 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7750 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7751 = torch.aten.item %7749 : !torch.vtensor<[],f32> -> !torch.float | |
%7752 = torch.aten.item %7750 : !torch.vtensor<[],si8> -> !torch.int | |
%7753 = torch.aten._make_per_tensor_quantized_tensor %7748, %7751, %7752 : !torch.vtensor<[32,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,3,3],!torch.qint8> | |
%7754 = torch.aten.dequantize.self %7753 : !torch.vtensor<[32,96,3,3],!torch.qint8> -> !torch.vtensor<[32,96,3,3],f32> | |
%7755 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7756 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1614 = torch.constant.int 12 | |
%7757 = torch.aten.item %7755 : !torch.vtensor<[],f32> -> !torch.float | |
%7758 = torch.aten.item %7756 : !torch.vtensor<[],si8> -> !torch.int | |
%7759 = torch.aten.quantize_per_tensor %205, %7757, %7758, %int12_1614 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7760 = torch.aten.int_repr %7759 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8> | |
%7761 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7762 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7763 = torch.aten.item %7761 : !torch.vtensor<[],f32> -> !torch.float | |
%7764 = torch.aten.item %7762 : !torch.vtensor<[],si8> -> !torch.int | |
%7765 = torch.aten._make_per_tensor_quantized_tensor %7760, %7763, %7764 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8> | |
%7766 = torch.aten.dequantize.self %7765 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32> | |
%int1_1615 = torch.constant.int 1 | |
%int1_1616 = torch.constant.int 1 | |
%int1_1617 = torch.constant.int 1 | |
%int1_1618 = torch.constant.int 1 | |
%int1_1619 = torch.constant.int 1 | |
%int1_1620 = torch.constant.int 1 | |
%int0_1621 = torch.constant.int 0 | |
%7767 = torch.prim.ListConstruct %int1_1615, %int1_1616 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7768 = torch.prim.ListConstruct %int1_1617, %int1_1618 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7769 = torch.prim.ListConstruct %int1_1619, %int1_1620 : (!torch.int, !torch.int) -> !torch.list<int> | |
%7770 = torch.prim.ListConstruct %int0_1621, %int0_1621 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1622 = torch.constant.bool false | |
%int1_1623 = torch.constant.int 1 | |
%7771 = torch.aten.convolution %7742, %7754, %7766, %7769, %7767, %7768, %false_1622, %7770, %int1_1623 : !torch.vtensor<[1,96,224,224],f32>, !torch.vtensor<[32,96,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,224,224],f32> | |
%float1.015630e-01_1624 = torch.constant.float 0.1015625 | |
%7772 = torch.aten.leaky_relu %7771, %float1.015630e-01_1624 : !torch.vtensor<[1,32,224,224],f32>, !torch.float -> !torch.vtensor<[1,32,224,224],f32> | |
%7773 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7774 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1625 = torch.constant.int 12 | |
%7775 = torch.aten.item %7773 : !torch.vtensor<[],f32> -> !torch.float | |
%7776 = torch.aten.item %7774 : !torch.vtensor<[],si8> -> !torch.int | |
%7777 = torch.aten.quantize_per_tensor %7772, %7775, %7776, %int12_1625 : !torch.vtensor<[1,32,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7778 = torch.aten.int_repr %7777 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],si8> | |
%7779 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7780 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%7781 = torch.aten.item %7779 : !torch.vtensor<[],f32> -> !torch.float | |
%7782 = torch.aten.item %7780 : !torch.vtensor<[],si8> -> !torch.int | |
%7783 = torch.aten._make_per_tensor_quantized_tensor %7778, %7781, %7782 : !torch.vtensor<[1,32,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,224,224],!torch.qint8> | |
%7784 = torch.aten.dequantize.self %7783 : !torch.vtensor<[1,32,224,224],!torch.qint8> -> !torch.vtensor<[1,32,224,224],f32> | |
%7785 = torch.prim.ListConstruct %7686, %7728, %7784 : (!torch.vtensor<[1,64,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>, !torch.vtensor<[1,32,224,224],f32>) -> !torch.list<vtensor> | |
%int1_1626 = torch.constant.int 1 | |
%7786 = torch.aten.cat %7785, %int1_1626 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[1,128,224,224],f32> | |
%7787 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32> | |
%7788 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8> | |
%int12_1627 = torch.constant.int 12 | |
%7789 = torch.aten.item %7787 : !torch.vtensor<[],f32> -> !torch.float | |
%7790 = torch.aten.item %7788 : !torch.vtensor<[],si8> -> !torch.int | |
%7791 = torch.aten.quantize_per_tensor %7786, %7789, %7790, %int12_1627 : !torch.vtensor<[1,128,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,224,224],!torch.qint8> | |
%7792 = torch.aten.int_repr %7791 : !torch.vtensor<[1,128,224,2 |
// NOTE(review): the preceding line is truncated mid-operation — this capture of the
// gist is incomplete (GitHub "file has been truncated" view). The four lines that
// followed here ("Sign up for free … Sign in to comment") were GitHub page chrome,
// not MLIR content, and have been removed. Re-download the raw gist to obtain the
// full module before attempting to parse or lower this file.