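// Gist: AmosLewis/16a84001c502eaa91426ff7fa5c66275
// MLIR torch-dialect IR imported from an ONNX model (opset 17, produced by the
// PyTorch 1.13.1 exporter; see the torch.onnx_meta attributes on the function).
// The weight shapes (7x7/64 stem, bottleneck stages of 3/4/6/3 blocks, final
// 1000x2048 FC) match a ResNet-50 image classifier in QDQ (quantize/dequantize)
// form: every weight and activation is fake-quantized to int8 and immediately
// dequantized, so all compute ops still execute in f32.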
module {
func.func @torch_jit(%arg0: !torch.vtensor<[1,3,224,224],f32>) -> !torch.vtensor<[1,1000],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.13.1"} {
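// %0-%107: the network's convolution weights and biases plus the final FC
// weight %106 and bias %107, captured as dense_resource literals whose bytes
// are elided from this dump.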
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x3x7x7xf32>) : !torch.vtensor<[64,3,7,7],f32>
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x1x1xf32>) : !torch.vtensor<[64,64,1,1],f32>
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32>
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32>
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x256x1x1xf32>) : !torch.vtensor<[64,256,1,1],f32>
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32>
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x256x1x1xf32>) : !torch.vtensor<[64,256,1,1],f32>
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x64x1x1xf32>) : !torch.vtensor<[256,64,1,1],f32>
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x256x1x1xf32>) : !torch.vtensor<[128,256,1,1],f32>
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x128x1x1xf32>) : !torch.vtensor<[512,128,1,1],f32>
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x256x1x1xf32>) : !torch.vtensor<[512,256,1,1],f32>
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x512x1x1xf32>) : !torch.vtensor<[128,512,1,1],f32>
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x128x1x1xf32>) : !torch.vtensor<[512,128,1,1],f32>
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x512x1x1xf32>) : !torch.vtensor<[128,512,1,1],f32>
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x128x1x1xf32>) : !torch.vtensor<[512,128,1,1],f32>
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x512x1x1xf32>) : !torch.vtensor<[128,512,1,1],f32>
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x128x3x3xf32>) : !torch.vtensor<[128,128,3,3],f32>
%45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x128x1x1xf32>) : !torch.vtensor<[512,128,1,1],f32>
%47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x1x1xf32>) : !torch.vtensor<[256,512,1,1],f32>
%49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x512x1x1xf32>) : !torch.vtensor<[1024,512,1,1],f32>
%55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x3x3xf32>) : !torch.vtensor<[256,256,3,3],f32>
%83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x256x1x1xf32>) : !torch.vtensor<[1024,256,1,1],f32>
%85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32>
%86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x1024x1x1xf32>) : !torch.vtensor<[512,1024,1,1],f32>
%87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512x1x1xf32>) : !torch.vtensor<[2048,512,1,1],f32>
%91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048xf32>) : !torch.vtensor<[2048],f32>
%92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x1024x1x1xf32>) : !torch.vtensor<[2048,1024,1,1],f32>
%93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048xf32>) : !torch.vtensor<[2048],f32>
%94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048x1x1xf32>) : !torch.vtensor<[512,2048,1,1],f32>
%95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512x1x1xf32>) : !torch.vtensor<[2048,512,1,1],f32>
%99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048xf32>) : !torch.vtensor<[2048],f32>
%100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x2048x1x1xf32>) : !torch.vtensor<[512,2048,1,1],f32>
%101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x512x3x3xf32>) : !torch.vtensor<[512,512,3,3],f32>
%103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048x512x1x1xf32>) : !torch.vtensor<[2048,512,1,1],f32>
%105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2048xf32>) : !torch.vtensor<[2048],f32>
%106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1000x2048xf32>) : !torch.vtensor<[1000,2048],f32>
%107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1000xf32>) : !torch.vtensor<[1000],f32>
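// Input fake-quantization. The pattern below (quantize_per_tensor -> int_repr
// -> _make_per_tensor_quantized_tensor -> dequantize.self) is the torch-level
// encoding of an ONNX QuantizeLinear/DequantizeLinear pair; here: scale 0.0625
// (2^-4), zero point 0, dtype code 12 = torch.qint8.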
%none = torch.constant.none
%108 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%109 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12 = torch.constant.int 12
%110 = torch.aten.item %108 : !torch.vtensor<[],f32> -> !torch.float
%111 = torch.aten.item %109 : !torch.vtensor<[],si8> -> !torch.int
%112 = torch.aten.quantize_per_tensor %arg0, %110, %111, %int12 : !torch.vtensor<[1,3,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,3,224,224],!torch.qint8>
%113 = torch.aten.int_repr %112 : !torch.vtensor<[1,3,224,224],!torch.qint8> -> !torch.vtensor<[1,3,224,224],si8>
%114 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%115 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%116 = torch.aten.item %114 : !torch.vtensor<[],f32> -> !torch.float
%117 = torch.aten.item %115 : !torch.vtensor<[],si8> -> !torch.int
%118 = torch.aten._make_per_tensor_quantized_tensor %113, %116, %117 : !torch.vtensor<[1,3,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,3,224,224],!torch.qint8>
%119 = torch.aten.dequantize.self %118 : !torch.vtensor<[1,3,224,224],!torch.qint8> -> !torch.vtensor<[1,3,224,224],f32>
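// The same per-tensor QDQ round trip is applied to the stem conv weight %0
// (scale 2^-8) and its bias %1 (scale 2^-7). Every scale appearing in this
// dump is a power of two with zero point 0, i.e. symmetric int8 quantization.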
%120 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%121 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_0 = torch.constant.int 12
%122 = torch.aten.item %120 : !torch.vtensor<[],f32> -> !torch.float
%123 = torch.aten.item %121 : !torch.vtensor<[],si8> -> !torch.int
%124 = torch.aten.quantize_per_tensor %0, %122, %123, %int12_0 : !torch.vtensor<[64,3,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,3,7,7],!torch.qint8>
%125 = torch.aten.int_repr %124 : !torch.vtensor<[64,3,7,7],!torch.qint8> -> !torch.vtensor<[64,3,7,7],si8>
%126 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%127 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%128 = torch.aten.item %126 : !torch.vtensor<[],f32> -> !torch.float
%129 = torch.aten.item %127 : !torch.vtensor<[],si8> -> !torch.int
%130 = torch.aten._make_per_tensor_quantized_tensor %125, %128, %129 : !torch.vtensor<[64,3,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,3,7,7],!torch.qint8>
%131 = torch.aten.dequantize.self %130 : !torch.vtensor<[64,3,7,7],!torch.qint8> -> !torch.vtensor<[64,3,7,7],f32>
%132 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%133 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1 = torch.constant.int 12
%134 = torch.aten.item %132 : !torch.vtensor<[],f32> -> !torch.float
%135 = torch.aten.item %133 : !torch.vtensor<[],si8> -> !torch.int
%136 = torch.aten.quantize_per_tensor %1, %134, %135, %int12_1 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%137 = torch.aten.int_repr %136 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%138 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%139 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%140 = torch.aten.item %138 : !torch.vtensor<[],f32> -> !torch.float
%141 = torch.aten.item %139 : !torch.vtensor<[],si8> -> !torch.int
%142 = torch.aten._make_per_tensor_quantized_tensor %137, %140, %141 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%143 = torch.aten.dequantize.self %142 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
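// Stem convolution: 7x7 kernel, stride 2, padding 3, dilation 1, groups 1
// (aten.convolution operands: input, weight, bias, stride, padding, dilation,
// transposed, output_padding, groups), then ReLU: 1x3x224x224 -> 1x64x112x112.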
%int3 = torch.constant.int 3
%int3_2 = torch.constant.int 3
%int1 = torch.constant.int 1
%int1_3 = torch.constant.int 1
%int2 = torch.constant.int 2
%int2_4 = torch.constant.int 2
%int0 = torch.constant.int 0
%144 = torch.prim.ListConstruct %int3, %int3_2 : (!torch.int, !torch.int) -> !torch.list<int>
%145 = torch.prim.ListConstruct %int1, %int1_3 : (!torch.int, !torch.int) -> !torch.list<int>
%146 = torch.prim.ListConstruct %int2, %int2_4 : (!torch.int, !torch.int) -> !torch.list<int>
%147 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%false = torch.constant.bool false
%int1_5 = torch.constant.int 1
%148 = torch.aten.convolution %119, %131, %143, %146, %144, %145, %false, %147, %int1_5 : !torch.vtensor<[1,3,224,224],f32>, !torch.vtensor<[64,3,7,7],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,112,112],f32>
%149 = torch.aten.relu %148 : !torch.vtensor<[1,64,112,112],f32> -> !torch.vtensor<[1,64,112,112],f32>
%150 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%151 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_6 = torch.constant.int 12
%152 = torch.aten.item %150 : !torch.vtensor<[],f32> -> !torch.float
%153 = torch.aten.item %151 : !torch.vtensor<[],si8> -> !torch.int
%154 = torch.aten.quantize_per_tensor %149, %152, %153, %int12_6 : !torch.vtensor<[1,64,112,112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,112,112],!torch.qint8>
%155 = torch.aten.int_repr %154 : !torch.vtensor<[1,64,112,112],!torch.qint8> -> !torch.vtensor<[1,64,112,112],si8>
%156 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%157 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%158 = torch.aten.item %156 : !torch.vtensor<[],f32> -> !torch.float
%159 = torch.aten.item %157 : !torch.vtensor<[],si8> -> !torch.int
%160 = torch.aten._make_per_tensor_quantized_tensor %155, %158, %159 : !torch.vtensor<[1,64,112,112],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,112,112],!torch.qint8>
%161 = torch.aten.dequantize.self %160 : !torch.vtensor<[1,64,112,112],!torch.qint8> -> !torch.vtensor<[1,64,112,112],f32>
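// Stem max pooling: 3x3 kernel, stride 2, padding 1, ceil_mode false:
// 1x64x112x112 -> 1x64x56x56, followed by another activation QDQ round trip.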
%int3_7 = torch.constant.int 3
%int3_8 = torch.constant.int 3
%162 = torch.prim.ListConstruct %int3_7, %int3_8 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_9 = torch.constant.int 1
%int1_10 = torch.constant.int 1
%163 = torch.prim.ListConstruct %int1_9, %int1_10 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_11 = torch.constant.int 2
%int2_12 = torch.constant.int 2
%164 = torch.prim.ListConstruct %int2_11, %int2_12 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_13 = torch.constant.int 1
%int1_14 = torch.constant.int 1
%165 = torch.prim.ListConstruct %int1_13, %int1_14 : (!torch.int, !torch.int) -> !torch.list<int>
%false_15 = torch.constant.bool false
%166 = torch.aten.max_pool2d %161, %162, %164, %163, %165, %false_15 : !torch.vtensor<[1,64,112,112],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[1,64,56,56],f32>
%167 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%168 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_16 = torch.constant.int 12
%169 = torch.aten.item %167 : !torch.vtensor<[],f32> -> !torch.float
%170 = torch.aten.item %168 : !torch.vtensor<[],si8> -> !torch.int
%171 = torch.aten.quantize_per_tensor %166, %169, %170, %int12_16 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%172 = torch.aten.int_repr %171 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%173 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%174 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%175 = torch.aten.item %173 : !torch.vtensor<[],f32> -> !torch.float
%176 = torch.aten.item %174 : !torch.vtensor<[],si8> -> !torch.int
%177 = torch.aten._make_per_tensor_quantized_tensor %172, %175, %176 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%178 = torch.aten.dequantize.self %177 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
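// First bottleneck block: 1x1 (64) -> 3x3 (64) -> 1x1 (256) convolutions on
// %178, each conv preceded by weight/bias QDQ and followed by activation QDQ,
// plus a 1x1 projection shortcut (further below) computed from the same %178.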
%179 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%180 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_17 = torch.constant.int 12
%181 = torch.aten.item %179 : !torch.vtensor<[],f32> -> !torch.float
%182 = torch.aten.item %180 : !torch.vtensor<[],si8> -> !torch.int
%183 = torch.aten.quantize_per_tensor %2, %181, %182, %int12_17 : !torch.vtensor<[64,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,1,1],!torch.qint8>
%184 = torch.aten.int_repr %183 : !torch.vtensor<[64,64,1,1],!torch.qint8> -> !torch.vtensor<[64,64,1,1],si8>
%185 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%186 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%187 = torch.aten.item %185 : !torch.vtensor<[],f32> -> !torch.float
%188 = torch.aten.item %186 : !torch.vtensor<[],si8> -> !torch.int
%189 = torch.aten._make_per_tensor_quantized_tensor %184, %187, %188 : !torch.vtensor<[64,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,1,1],!torch.qint8>
%190 = torch.aten.dequantize.self %189 : !torch.vtensor<[64,64,1,1],!torch.qint8> -> !torch.vtensor<[64,64,1,1],f32>
%191 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%192 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_18 = torch.constant.int 12
%193 = torch.aten.item %191 : !torch.vtensor<[],f32> -> !torch.float
%194 = torch.aten.item %192 : !torch.vtensor<[],si8> -> !torch.int
%195 = torch.aten.quantize_per_tensor %3, %193, %194, %int12_18 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%196 = torch.aten.int_repr %195 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%197 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%198 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%199 = torch.aten.item %197 : !torch.vtensor<[],f32> -> !torch.float
%200 = torch.aten.item %198 : !torch.vtensor<[],si8> -> !torch.int
%201 = torch.aten._make_per_tensor_quantized_tensor %196, %199, %200 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%202 = torch.aten.dequantize.self %201 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int0_19 = torch.constant.int 0
%int0_20 = torch.constant.int 0
%int1_21 = torch.constant.int 1
%int1_22 = torch.constant.int 1
%int1_23 = torch.constant.int 1
%int1_24 = torch.constant.int 1
%int0_25 = torch.constant.int 0
%203 = torch.prim.ListConstruct %int0_19, %int0_20 : (!torch.int, !torch.int) -> !torch.list<int>
%204 = torch.prim.ListConstruct %int1_21, %int1_22 : (!torch.int, !torch.int) -> !torch.list<int>
%205 = torch.prim.ListConstruct %int1_23, %int1_24 : (!torch.int, !torch.int) -> !torch.list<int>
%206 = torch.prim.ListConstruct %int0_25, %int0_25 : (!torch.int, !torch.int) -> !torch.list<int>
%false_26 = torch.constant.bool false
%int1_27 = torch.constant.int 1
%207 = torch.aten.convolution %178, %190, %202, %205, %203, %204, %false_26, %206, %int1_27 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[64,64,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%208 = torch.aten.relu %207 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%209 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%210 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_28 = torch.constant.int 12
%211 = torch.aten.item %209 : !torch.vtensor<[],f32> -> !torch.float
%212 = torch.aten.item %210 : !torch.vtensor<[],si8> -> !torch.int
%213 = torch.aten.quantize_per_tensor %208, %211, %212, %int12_28 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%214 = torch.aten.int_repr %213 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%215 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%216 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%217 = torch.aten.item %215 : !torch.vtensor<[],f32> -> !torch.float
%218 = torch.aten.item %216 : !torch.vtensor<[],si8> -> !torch.int
%219 = torch.aten._make_per_tensor_quantized_tensor %214, %217, %218 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%220 = torch.aten.dequantize.self %219 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%221 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%222 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_29 = torch.constant.int 12
%223 = torch.aten.item %221 : !torch.vtensor<[],f32> -> !torch.float
%224 = torch.aten.item %222 : !torch.vtensor<[],si8> -> !torch.int
%225 = torch.aten.quantize_per_tensor %4, %223, %224, %int12_29 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%226 = torch.aten.int_repr %225 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%227 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%228 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%229 = torch.aten.item %227 : !torch.vtensor<[],f32> -> !torch.float
%230 = torch.aten.item %228 : !torch.vtensor<[],si8> -> !torch.int
%231 = torch.aten._make_per_tensor_quantized_tensor %226, %229, %230 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%232 = torch.aten.dequantize.self %231 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%233 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%234 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_30 = torch.constant.int 12
%235 = torch.aten.item %233 : !torch.vtensor<[],f32> -> !torch.float
%236 = torch.aten.item %234 : !torch.vtensor<[],si8> -> !torch.int
%237 = torch.aten.quantize_per_tensor %5, %235, %236, %int12_30 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%238 = torch.aten.int_repr %237 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%239 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%240 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%241 = torch.aten.item %239 : !torch.vtensor<[],f32> -> !torch.float
%242 = torch.aten.item %240 : !torch.vtensor<[],si8> -> !torch.int
%243 = torch.aten._make_per_tensor_quantized_tensor %238, %241, %242 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%244 = torch.aten.dequantize.self %243 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_31 = torch.constant.int 1
%int1_32 = torch.constant.int 1
%int1_33 = torch.constant.int 1
%int1_34 = torch.constant.int 1
%int1_35 = torch.constant.int 1
%int1_36 = torch.constant.int 1
%int0_37 = torch.constant.int 0
%245 = torch.prim.ListConstruct %int1_31, %int1_32 : (!torch.int, !torch.int) -> !torch.list<int>
%246 = torch.prim.ListConstruct %int1_33, %int1_34 : (!torch.int, !torch.int) -> !torch.list<int>
%247 = torch.prim.ListConstruct %int1_35, %int1_36 : (!torch.int, !torch.int) -> !torch.list<int>
%248 = torch.prim.ListConstruct %int0_37, %int0_37 : (!torch.int, !torch.int) -> !torch.list<int>
%false_38 = torch.constant.bool false
%int1_39 = torch.constant.int 1
%249 = torch.aten.convolution %220, %232, %244, %247, %245, %246, %false_38, %248, %int1_39 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%250 = torch.aten.relu %249 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%251 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%252 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_40 = torch.constant.int 12
%253 = torch.aten.item %251 : !torch.vtensor<[],f32> -> !torch.float
%254 = torch.aten.item %252 : !torch.vtensor<[],si8> -> !torch.int
%255 = torch.aten.quantize_per_tensor %250, %253, %254, %int12_40 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%256 = torch.aten.int_repr %255 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%257 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%258 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%259 = torch.aten.item %257 : !torch.vtensor<[],f32> -> !torch.float
%260 = torch.aten.item %258 : !torch.vtensor<[],si8> -> !torch.int
%261 = torch.aten._make_per_tensor_quantized_tensor %256, %259, %260 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%262 = torch.aten.dequantize.self %261 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%263 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%264 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_41 = torch.constant.int 12
%265 = torch.aten.item %263 : !torch.vtensor<[],f32> -> !torch.float
%266 = torch.aten.item %264 : !torch.vtensor<[],si8> -> !torch.int
%267 = torch.aten.quantize_per_tensor %6, %265, %266, %int12_41 : !torch.vtensor<[256,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%268 = torch.aten.int_repr %267 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],si8>
%269 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%270 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%271 = torch.aten.item %269 : !torch.vtensor<[],f32> -> !torch.float
%272 = torch.aten.item %270 : !torch.vtensor<[],si8> -> !torch.int
%273 = torch.aten._make_per_tensor_quantized_tensor %268, %271, %272 : !torch.vtensor<[256,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%274 = torch.aten.dequantize.self %273 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],f32>
%275 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%276 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_42 = torch.constant.int 12
%277 = torch.aten.item %275 : !torch.vtensor<[],f32> -> !torch.float
%278 = torch.aten.item %276 : !torch.vtensor<[],si8> -> !torch.int
%279 = torch.aten.quantize_per_tensor %7, %277, %278, %int12_42 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%280 = torch.aten.int_repr %279 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%281 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%282 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%283 = torch.aten.item %281 : !torch.vtensor<[],f32> -> !torch.float
%284 = torch.aten.item %282 : !torch.vtensor<[],si8> -> !torch.int
%285 = torch.aten._make_per_tensor_quantized_tensor %280, %283, %284 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%286 = torch.aten.dequantize.self %285 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_43 = torch.constant.int 0
%int0_44 = torch.constant.int 0
%int1_45 = torch.constant.int 1
%int1_46 = torch.constant.int 1
%int1_47 = torch.constant.int 1
%int1_48 = torch.constant.int 1
%int0_49 = torch.constant.int 0
%287 = torch.prim.ListConstruct %int0_43, %int0_44 : (!torch.int, !torch.int) -> !torch.list<int>
%288 = torch.prim.ListConstruct %int1_45, %int1_46 : (!torch.int, !torch.int) -> !torch.list<int>
%289 = torch.prim.ListConstruct %int1_47, %int1_48 : (!torch.int, !torch.int) -> !torch.list<int>
%290 = torch.prim.ListConstruct %int0_49, %int0_49 : (!torch.int, !torch.int) -> !torch.list<int>
%false_50 = torch.constant.bool false
%int1_51 = torch.constant.int 1
%291 = torch.aten.convolution %262, %274, %286, %289, %287, %288, %false_50, %290, %int1_51 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[256,64,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%292 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%293 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_52 = torch.constant.int 12
%294 = torch.aten.item %292 : !torch.vtensor<[],f32> -> !torch.float
%295 = torch.aten.item %293 : !torch.vtensor<[],si8> -> !torch.int
%296 = torch.aten.quantize_per_tensor %291, %294, %295, %int12_52 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%297 = torch.aten.int_repr %296 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%298 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%299 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%300 = torch.aten.item %298 : !torch.vtensor<[],f32> -> !torch.float
%301 = torch.aten.item %299 : !torch.vtensor<[],si8> -> !torch.int
%302 = torch.aten._make_per_tensor_quantized_tensor %297, %300, %301 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%303 = torch.aten.dequantize.self %302 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
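// Projection shortcut of the first block: fake-quantize %8/%9, then a 1x1
// convolution on the block input %178 widens it to 256 channels so it can be
// added to the main path.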
%304 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%305 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_53 = torch.constant.int 12
%306 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float
%307 = torch.aten.item %305 : !torch.vtensor<[],si8> -> !torch.int
%308 = torch.aten.quantize_per_tensor %8, %306, %307, %int12_53 : !torch.vtensor<[256,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%309 = torch.aten.int_repr %308 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],si8>
%310 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%311 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%312 = torch.aten.item %310 : !torch.vtensor<[],f32> -> !torch.float
%313 = torch.aten.item %311 : !torch.vtensor<[],si8> -> !torch.int
%314 = torch.aten._make_per_tensor_quantized_tensor %309, %312, %313 : !torch.vtensor<[256,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%315 = torch.aten.dequantize.self %314 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],f32>
%316 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%317 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_54 = torch.constant.int 12
%318 = torch.aten.item %316 : !torch.vtensor<[],f32> -> !torch.float
%319 = torch.aten.item %317 : !torch.vtensor<[],si8> -> !torch.int
%320 = torch.aten.quantize_per_tensor %9, %318, %319, %int12_54 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%321 = torch.aten.int_repr %320 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%322 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%323 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%324 = torch.aten.item %322 : !torch.vtensor<[],f32> -> !torch.float
%325 = torch.aten.item %323 : !torch.vtensor<[],si8> -> !torch.int
%326 = torch.aten._make_per_tensor_quantized_tensor %321, %324, %325 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%327 = torch.aten.dequantize.self %326 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_55 = torch.constant.int 0
%int0_56 = torch.constant.int 0
%int1_57 = torch.constant.int 1
%int1_58 = torch.constant.int 1
%int1_59 = torch.constant.int 1
%int1_60 = torch.constant.int 1
%int0_61 = torch.constant.int 0
%328 = torch.prim.ListConstruct %int0_55, %int0_56 : (!torch.int, !torch.int) -> !torch.list<int>
%329 = torch.prim.ListConstruct %int1_57, %int1_58 : (!torch.int, !torch.int) -> !torch.list<int>
%330 = torch.prim.ListConstruct %int1_59, %int1_60 : (!torch.int, !torch.int) -> !torch.list<int>
%331 = torch.prim.ListConstruct %int0_61, %int0_61 : (!torch.int, !torch.int) -> !torch.list<int>
%false_62 = torch.constant.bool false
%int1_63 = torch.constant.int 1
%332 = torch.aten.convolution %178, %315, %327, %330, %328, %329, %false_62, %331, %int1_63 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[256,64,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%333 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%334 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_64 = torch.constant.int 12
%335 = torch.aten.item %333 : !torch.vtensor<[],f32> -> !torch.float
%336 = torch.aten.item %334 : !torch.vtensor<[],si8> -> !torch.int
%337 = torch.aten.quantize_per_tensor %332, %335, %336, %int12_64 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%338 = torch.aten.int_repr %337 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%339 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%340 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%341 = torch.aten.item %339 : !torch.vtensor<[],f32> -> !torch.float
%342 = torch.aten.item %340 : !torch.vtensor<[],si8> -> !torch.int
%343 = torch.aten._make_per_tensor_quantized_tensor %338, %341, %342 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%344 = torch.aten.dequantize.self %343 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
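// Residual join: main path %303 + projection shortcut %344 (alpha = 1), then
// ReLU and requantization of the block output.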
%int1_65 = torch.constant.int 1
%345 = torch.aten.add.Tensor %303, %344, %int1_65 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[1,256,56,56],f32>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%346 = torch.aten.relu %345 : !torch.vtensor<[1,256,56,56],f32> -> !torch.vtensor<[1,256,56,56],f32>
%347 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%348 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_66 = torch.constant.int 12
%349 = torch.aten.item %347 : !torch.vtensor<[],f32> -> !torch.float
%350 = torch.aten.item %348 : !torch.vtensor<[],si8> -> !torch.int
%351 = torch.aten.quantize_per_tensor %346, %349, %350, %int12_66 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%352 = torch.aten.int_repr %351 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%353 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%354 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%355 = torch.aten.item %353 : !torch.vtensor<[],f32> -> !torch.float
%356 = torch.aten.item %354 : !torch.vtensor<[],si8> -> !torch.int
%357 = torch.aten._make_per_tensor_quantized_tensor %352, %355, %356 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%358 = torch.aten.dequantize.self %357 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
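// Second bottleneck block: same 1x1 -> 3x3 -> 1x1 structure, but with an
// identity shortcut; the block input %358 is reused directly in the add below.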
%359 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%360 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_67 = torch.constant.int 12
%361 = torch.aten.item %359 : !torch.vtensor<[],f32> -> !torch.float
%362 = torch.aten.item %360 : !torch.vtensor<[],si8> -> !torch.int
%363 = torch.aten.quantize_per_tensor %10, %361, %362, %int12_67 : !torch.vtensor<[64,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,256,1,1],!torch.qint8>
%364 = torch.aten.int_repr %363 : !torch.vtensor<[64,256,1,1],!torch.qint8> -> !torch.vtensor<[64,256,1,1],si8>
%365 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%366 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%367 = torch.aten.item %365 : !torch.vtensor<[],f32> -> !torch.float
%368 = torch.aten.item %366 : !torch.vtensor<[],si8> -> !torch.int
%369 = torch.aten._make_per_tensor_quantized_tensor %364, %367, %368 : !torch.vtensor<[64,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,256,1,1],!torch.qint8>
%370 = torch.aten.dequantize.self %369 : !torch.vtensor<[64,256,1,1],!torch.qint8> -> !torch.vtensor<[64,256,1,1],f32>
%371 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%372 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_68 = torch.constant.int 12
%373 = torch.aten.item %371 : !torch.vtensor<[],f32> -> !torch.float
%374 = torch.aten.item %372 : !torch.vtensor<[],si8> -> !torch.int
%375 = torch.aten.quantize_per_tensor %11, %373, %374, %int12_68 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%376 = torch.aten.int_repr %375 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%377 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%378 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%379 = torch.aten.item %377 : !torch.vtensor<[],f32> -> !torch.float
%380 = torch.aten.item %378 : !torch.vtensor<[],si8> -> !torch.int
%381 = torch.aten._make_per_tensor_quantized_tensor %376, %379, %380 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%382 = torch.aten.dequantize.self %381 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int0_69 = torch.constant.int 0
%int0_70 = torch.constant.int 0
%int1_71 = torch.constant.int 1
%int1_72 = torch.constant.int 1
%int1_73 = torch.constant.int 1
%int1_74 = torch.constant.int 1
%int0_75 = torch.constant.int 0
%383 = torch.prim.ListConstruct %int0_69, %int0_70 : (!torch.int, !torch.int) -> !torch.list<int>
%384 = torch.prim.ListConstruct %int1_71, %int1_72 : (!torch.int, !torch.int) -> !torch.list<int>
%385 = torch.prim.ListConstruct %int1_73, %int1_74 : (!torch.int, !torch.int) -> !torch.list<int>
%386 = torch.prim.ListConstruct %int0_75, %int0_75 : (!torch.int, !torch.int) -> !torch.list<int>
%false_76 = torch.constant.bool false
%int1_77 = torch.constant.int 1
%387 = torch.aten.convolution %358, %370, %382, %385, %383, %384, %false_76, %386, %int1_77 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[64,256,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%388 = torch.aten.relu %387 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%389 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%390 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_78 = torch.constant.int 12
%391 = torch.aten.item %389 : !torch.vtensor<[],f32> -> !torch.float
%392 = torch.aten.item %390 : !torch.vtensor<[],si8> -> !torch.int
%393 = torch.aten.quantize_per_tensor %388, %391, %392, %int12_78 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%394 = torch.aten.int_repr %393 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%395 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%396 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%397 = torch.aten.item %395 : !torch.vtensor<[],f32> -> !torch.float
%398 = torch.aten.item %396 : !torch.vtensor<[],si8> -> !torch.int
%399 = torch.aten._make_per_tensor_quantized_tensor %394, %397, %398 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%400 = torch.aten.dequantize.self %399 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%401 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%402 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_79 = torch.constant.int 12
%403 = torch.aten.item %401 : !torch.vtensor<[],f32> -> !torch.float
%404 = torch.aten.item %402 : !torch.vtensor<[],si8> -> !torch.int
%405 = torch.aten.quantize_per_tensor %12, %403, %404, %int12_79 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%406 = torch.aten.int_repr %405 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%407 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%408 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%409 = torch.aten.item %407 : !torch.vtensor<[],f32> -> !torch.float
%410 = torch.aten.item %408 : !torch.vtensor<[],si8> -> !torch.int
%411 = torch.aten._make_per_tensor_quantized_tensor %406, %409, %410 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%412 = torch.aten.dequantize.self %411 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%413 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%414 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_80 = torch.constant.int 12
%415 = torch.aten.item %413 : !torch.vtensor<[],f32> -> !torch.float
%416 = torch.aten.item %414 : !torch.vtensor<[],si8> -> !torch.int
%417 = torch.aten.quantize_per_tensor %13, %415, %416, %int12_80 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%418 = torch.aten.int_repr %417 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%419 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%420 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%421 = torch.aten.item %419 : !torch.vtensor<[],f32> -> !torch.float
%422 = torch.aten.item %420 : !torch.vtensor<[],si8> -> !torch.int
%423 = torch.aten._make_per_tensor_quantized_tensor %418, %421, %422 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%424 = torch.aten.dequantize.self %423 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_81 = torch.constant.int 1
%int1_82 = torch.constant.int 1
%int1_83 = torch.constant.int 1
%int1_84 = torch.constant.int 1
%int1_85 = torch.constant.int 1
%int1_86 = torch.constant.int 1
%int0_87 = torch.constant.int 0
%425 = torch.prim.ListConstruct %int1_81, %int1_82 : (!torch.int, !torch.int) -> !torch.list<int>
%426 = torch.prim.ListConstruct %int1_83, %int1_84 : (!torch.int, !torch.int) -> !torch.list<int>
%427 = torch.prim.ListConstruct %int1_85, %int1_86 : (!torch.int, !torch.int) -> !torch.list<int>
%428 = torch.prim.ListConstruct %int0_87, %int0_87 : (!torch.int, !torch.int) -> !torch.list<int>
%false_88 = torch.constant.bool false
%int1_89 = torch.constant.int 1
%429 = torch.aten.convolution %400, %412, %424, %427, %425, %426, %false_88, %428, %int1_89 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%430 = torch.aten.relu %429 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%431 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%432 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_90 = torch.constant.int 12
%433 = torch.aten.item %431 : !torch.vtensor<[],f32> -> !torch.float
%434 = torch.aten.item %432 : !torch.vtensor<[],si8> -> !torch.int
%435 = torch.aten.quantize_per_tensor %430, %433, %434, %int12_90 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%436 = torch.aten.int_repr %435 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%437 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%438 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%439 = torch.aten.item %437 : !torch.vtensor<[],f32> -> !torch.float
%440 = torch.aten.item %438 : !torch.vtensor<[],si8> -> !torch.int
%441 = torch.aten._make_per_tensor_quantized_tensor %436, %439, %440 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%442 = torch.aten.dequantize.self %441 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%443 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%444 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_91 = torch.constant.int 12
%445 = torch.aten.item %443 : !torch.vtensor<[],f32> -> !torch.float
%446 = torch.aten.item %444 : !torch.vtensor<[],si8> -> !torch.int
%447 = torch.aten.quantize_per_tensor %14, %445, %446, %int12_91 : !torch.vtensor<[256,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%448 = torch.aten.int_repr %447 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],si8>
%449 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%450 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%451 = torch.aten.item %449 : !torch.vtensor<[],f32> -> !torch.float
%452 = torch.aten.item %450 : !torch.vtensor<[],si8> -> !torch.int
%453 = torch.aten._make_per_tensor_quantized_tensor %448, %451, %452 : !torch.vtensor<[256,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%454 = torch.aten.dequantize.self %453 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],f32>
%455 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%456 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_92 = torch.constant.int 12
%457 = torch.aten.item %455 : !torch.vtensor<[],f32> -> !torch.float
%458 = torch.aten.item %456 : !torch.vtensor<[],si8> -> !torch.int
%459 = torch.aten.quantize_per_tensor %15, %457, %458, %int12_92 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%460 = torch.aten.int_repr %459 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%461 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%462 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%463 = torch.aten.item %461 : !torch.vtensor<[],f32> -> !torch.float
%464 = torch.aten.item %462 : !torch.vtensor<[],si8> -> !torch.int
%465 = torch.aten._make_per_tensor_quantized_tensor %460, %463, %464 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%466 = torch.aten.dequantize.self %465 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_93 = torch.constant.int 0
%int0_94 = torch.constant.int 0
%int1_95 = torch.constant.int 1
%int1_96 = torch.constant.int 1
%int1_97 = torch.constant.int 1
%int1_98 = torch.constant.int 1
%int0_99 = torch.constant.int 0
%467 = torch.prim.ListConstruct %int0_93, %int0_94 : (!torch.int, !torch.int) -> !torch.list<int>
%468 = torch.prim.ListConstruct %int1_95, %int1_96 : (!torch.int, !torch.int) -> !torch.list<int>
%469 = torch.prim.ListConstruct %int1_97, %int1_98 : (!torch.int, !torch.int) -> !torch.list<int>
%470 = torch.prim.ListConstruct %int0_99, %int0_99 : (!torch.int, !torch.int) -> !torch.list<int>
%false_100 = torch.constant.bool false
%int1_101 = torch.constant.int 1
%471 = torch.aten.convolution %442, %454, %466, %469, %467, %468, %false_100, %470, %int1_101 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[256,64,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%472 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%473 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_102 = torch.constant.int 12
%474 = torch.aten.item %472 : !torch.vtensor<[],f32> -> !torch.float
%475 = torch.aten.item %473 : !torch.vtensor<[],si8> -> !torch.int
%476 = torch.aten.quantize_per_tensor %471, %474, %475, %int12_102 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%477 = torch.aten.int_repr %476 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%478 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%479 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%480 = torch.aten.item %478 : !torch.vtensor<[],f32> -> !torch.float
%481 = torch.aten.item %479 : !torch.vtensor<[],si8> -> !torch.int
%482 = torch.aten._make_per_tensor_quantized_tensor %477, %480, %481 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%483 = torch.aten.dequantize.self %482 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
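// Residual join with identity shortcut: %358 is the unmodified block input.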
%int1_103 = torch.constant.int 1
%484 = torch.aten.add.Tensor %483, %358, %int1_103 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[1,256,56,56],f32>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%485 = torch.aten.relu %484 : !torch.vtensor<[1,256,56,56],f32> -> !torch.vtensor<[1,256,56,56],f32>
%486 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%487 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_104 = torch.constant.int 12
%488 = torch.aten.item %486 : !torch.vtensor<[],f32> -> !torch.float
%489 = torch.aten.item %487 : !torch.vtensor<[],si8> -> !torch.int
%490 = torch.aten.quantize_per_tensor %485, %488, %489, %int12_104 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%491 = torch.aten.int_repr %490 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%492 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%493 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%494 = torch.aten.item %492 : !torch.vtensor<[],f32> -> !torch.float
%495 = torch.aten.item %493 : !torch.vtensor<[],si8> -> !torch.int
%496 = torch.aten._make_per_tensor_quantized_tensor %491, %494, %495 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%497 = torch.aten.dequantize.self %496 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
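// Third bottleneck block: same pattern, identity shortcut again.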
%498 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%499 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_105 = torch.constant.int 12
%500 = torch.aten.item %498 : !torch.vtensor<[],f32> -> !torch.float
%501 = torch.aten.item %499 : !torch.vtensor<[],si8> -> !torch.int
%502 = torch.aten.quantize_per_tensor %16, %500, %501, %int12_105 : !torch.vtensor<[64,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,256,1,1],!torch.qint8>
%503 = torch.aten.int_repr %502 : !torch.vtensor<[64,256,1,1],!torch.qint8> -> !torch.vtensor<[64,256,1,1],si8>
%504 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%505 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%506 = torch.aten.item %504 : !torch.vtensor<[],f32> -> !torch.float
%507 = torch.aten.item %505 : !torch.vtensor<[],si8> -> !torch.int
%508 = torch.aten._make_per_tensor_quantized_tensor %503, %506, %507 : !torch.vtensor<[64,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,256,1,1],!torch.qint8>
%509 = torch.aten.dequantize.self %508 : !torch.vtensor<[64,256,1,1],!torch.qint8> -> !torch.vtensor<[64,256,1,1],f32>
%510 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%511 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_106 = torch.constant.int 12
%512 = torch.aten.item %510 : !torch.vtensor<[],f32> -> !torch.float
%513 = torch.aten.item %511 : !torch.vtensor<[],si8> -> !torch.int
%514 = torch.aten.quantize_per_tensor %17, %512, %513, %int12_106 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%515 = torch.aten.int_repr %514 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%516 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%517 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%518 = torch.aten.item %516 : !torch.vtensor<[],f32> -> !torch.float
%519 = torch.aten.item %517 : !torch.vtensor<[],si8> -> !torch.int
%520 = torch.aten._make_per_tensor_quantized_tensor %515, %518, %519 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%521 = torch.aten.dequantize.self %520 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
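    // Convolution hyperparameters. torch.aten.convolution takes (input, weight, bias,
    // stride, padding, dilation, transposed, output_padding, groups); below, %524 is
    // stride [1,1], %522 padding [0,0], %523 dilation [1,1], and groups = 1,
    // i.e. a plain 1x1 convolution.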
%int0_107 = torch.constant.int 0
%int0_108 = torch.constant.int 0
%int1_109 = torch.constant.int 1
%int1_110 = torch.constant.int 1
%int1_111 = torch.constant.int 1
%int1_112 = torch.constant.int 1
%int0_113 = torch.constant.int 0
%522 = torch.prim.ListConstruct %int0_107, %int0_108 : (!torch.int, !torch.int) -> !torch.list<int>
%523 = torch.prim.ListConstruct %int1_109, %int1_110 : (!torch.int, !torch.int) -> !torch.list<int>
%524 = torch.prim.ListConstruct %int1_111, %int1_112 : (!torch.int, !torch.int) -> !torch.list<int>
%525 = torch.prim.ListConstruct %int0_113, %int0_113 : (!torch.int, !torch.int) -> !torch.list<int>
%false_114 = torch.constant.bool false
%int1_115 = torch.constant.int 1
%526 = torch.aten.convolution %497, %509, %521, %524, %522, %523, %false_114, %525, %int1_115 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[64,256,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%527 = torch.aten.relu %526 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%528 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%529 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_116 = torch.constant.int 12
%530 = torch.aten.item %528 : !torch.vtensor<[],f32> -> !torch.float
%531 = torch.aten.item %529 : !torch.vtensor<[],si8> -> !torch.int
%532 = torch.aten.quantize_per_tensor %527, %530, %531, %int12_116 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%533 = torch.aten.int_repr %532 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%534 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%535 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%536 = torch.aten.item %534 : !torch.vtensor<[],f32> -> !torch.float
%537 = torch.aten.item %535 : !torch.vtensor<[],si8> -> !torch.int
%538 = torch.aten._make_per_tensor_quantized_tensor %533, %536, %537 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%539 = torch.aten.dequantize.self %538 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
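    // 3x3 bottleneck conv: weight %18 (64x64x3x3), padding [1,1], stride [1,1],
    // so the 56x56 spatial size is preserved.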
%540 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%541 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_117 = torch.constant.int 12
%542 = torch.aten.item %540 : !torch.vtensor<[],f32> -> !torch.float
%543 = torch.aten.item %541 : !torch.vtensor<[],si8> -> !torch.int
%544 = torch.aten.quantize_per_tensor %18, %542, %543, %int12_117 : !torch.vtensor<[64,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%545 = torch.aten.int_repr %544 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],si8>
%546 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%547 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%548 = torch.aten.item %546 : !torch.vtensor<[],f32> -> !torch.float
%549 = torch.aten.item %547 : !torch.vtensor<[],si8> -> !torch.int
%550 = torch.aten._make_per_tensor_quantized_tensor %545, %548, %549 : !torch.vtensor<[64,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,3,3],!torch.qint8>
%551 = torch.aten.dequantize.self %550 : !torch.vtensor<[64,64,3,3],!torch.qint8> -> !torch.vtensor<[64,64,3,3],f32>
%552 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%553 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_118 = torch.constant.int 12
%554 = torch.aten.item %552 : !torch.vtensor<[],f32> -> !torch.float
%555 = torch.aten.item %553 : !torch.vtensor<[],si8> -> !torch.int
%556 = torch.aten.quantize_per_tensor %19, %554, %555, %int12_118 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%557 = torch.aten.int_repr %556 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%558 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%559 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%560 = torch.aten.item %558 : !torch.vtensor<[],f32> -> !torch.float
%561 = torch.aten.item %559 : !torch.vtensor<[],si8> -> !torch.int
%562 = torch.aten._make_per_tensor_quantized_tensor %557, %560, %561 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%563 = torch.aten.dequantize.self %562 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_119 = torch.constant.int 1
%int1_120 = torch.constant.int 1
%int1_121 = torch.constant.int 1
%int1_122 = torch.constant.int 1
%int1_123 = torch.constant.int 1
%int1_124 = torch.constant.int 1
%int0_125 = torch.constant.int 0
%564 = torch.prim.ListConstruct %int1_119, %int1_120 : (!torch.int, !torch.int) -> !torch.list<int>
%565 = torch.prim.ListConstruct %int1_121, %int1_122 : (!torch.int, !torch.int) -> !torch.list<int>
%566 = torch.prim.ListConstruct %int1_123, %int1_124 : (!torch.int, !torch.int) -> !torch.list<int>
%567 = torch.prim.ListConstruct %int0_125, %int0_125 : (!torch.int, !torch.int) -> !torch.list<int>
%false_126 = torch.constant.bool false
%int1_127 = torch.constant.int 1
%568 = torch.aten.convolution %539, %551, %563, %566, %564, %565, %false_126, %567, %int1_127 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%569 = torch.aten.relu %568 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%570 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%571 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_128 = torch.constant.int 12
%572 = torch.aten.item %570 : !torch.vtensor<[],f32> -> !torch.float
%573 = torch.aten.item %571 : !torch.vtensor<[],si8> -> !torch.int
%574 = torch.aten.quantize_per_tensor %569, %572, %573, %int12_128 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%575 = torch.aten.int_repr %574 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%576 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%577 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%578 = torch.aten.item %576 : !torch.vtensor<[],f32> -> !torch.float
%579 = torch.aten.item %577 : !torch.vtensor<[],si8> -> !torch.int
%580 = torch.aten._make_per_tensor_quantized_tensor %575, %578, %579 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%581 = torch.aten.dequantize.self %580 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
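    // 1x1 expansion conv back to 256 channels: weight %20 (256x64x1x1), bias %21.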
%582 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%583 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_129 = torch.constant.int 12
%584 = torch.aten.item %582 : !torch.vtensor<[],f32> -> !torch.float
%585 = torch.aten.item %583 : !torch.vtensor<[],si8> -> !torch.int
%586 = torch.aten.quantize_per_tensor %20, %584, %585, %int12_129 : !torch.vtensor<[256,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%587 = torch.aten.int_repr %586 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],si8>
%588 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%589 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%590 = torch.aten.item %588 : !torch.vtensor<[],f32> -> !torch.float
%591 = torch.aten.item %589 : !torch.vtensor<[],si8> -> !torch.int
%592 = torch.aten._make_per_tensor_quantized_tensor %587, %590, %591 : !torch.vtensor<[256,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,64,1,1],!torch.qint8>
%593 = torch.aten.dequantize.self %592 : !torch.vtensor<[256,64,1,1],!torch.qint8> -> !torch.vtensor<[256,64,1,1],f32>
%594 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%595 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_130 = torch.constant.int 12
%596 = torch.aten.item %594 : !torch.vtensor<[],f32> -> !torch.float
%597 = torch.aten.item %595 : !torch.vtensor<[],si8> -> !torch.int
%598 = torch.aten.quantize_per_tensor %21, %596, %597, %int12_130 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%599 = torch.aten.int_repr %598 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%600 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%601 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%602 = torch.aten.item %600 : !torch.vtensor<[],f32> -> !torch.float
%603 = torch.aten.item %601 : !torch.vtensor<[],si8> -> !torch.int
%604 = torch.aten._make_per_tensor_quantized_tensor %599, %602, %603 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%605 = torch.aten.dequantize.self %604 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_131 = torch.constant.int 0
%int0_132 = torch.constant.int 0
%int1_133 = torch.constant.int 1
%int1_134 = torch.constant.int 1
%int1_135 = torch.constant.int 1
%int1_136 = torch.constant.int 1
%int0_137 = torch.constant.int 0
%606 = torch.prim.ListConstruct %int0_131, %int0_132 : (!torch.int, !torch.int) -> !torch.list<int>
%607 = torch.prim.ListConstruct %int1_133, %int1_134 : (!torch.int, !torch.int) -> !torch.list<int>
%608 = torch.prim.ListConstruct %int1_135, %int1_136 : (!torch.int, !torch.int) -> !torch.list<int>
%609 = torch.prim.ListConstruct %int0_137, %int0_137 : (!torch.int, !torch.int) -> !torch.list<int>
%false_138 = torch.constant.bool false
%int1_139 = torch.constant.int 1
%610 = torch.aten.convolution %581, %593, %605, %608, %606, %607, %false_138, %609, %int1_139 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[256,64,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%611 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%612 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_140 = torch.constant.int 12
%613 = torch.aten.item %611 : !torch.vtensor<[],f32> -> !torch.float
%614 = torch.aten.item %612 : !torch.vtensor<[],si8> -> !torch.int
%615 = torch.aten.quantize_per_tensor %610, %613, %614, %int12_140 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%616 = torch.aten.int_repr %615 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%617 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%618 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%619 = torch.aten.item %617 : !torch.vtensor<[],f32> -> !torch.float
%620 = torch.aten.item %618 : !torch.vtensor<[],si8> -> !torch.int
%621 = torch.aten._make_per_tensor_quantized_tensor %616, %619, %620 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%622 = torch.aten.dequantize.self %621 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
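    // Residual add with %497, the dequantized input of this bottleneck, then ReLU.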
%int1_141 = torch.constant.int 1
%623 = torch.aten.add.Tensor %622, %497, %int1_141 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[1,256,56,56],f32>, !torch.int -> !torch.vtensor<[1,256,56,56],f32>
%624 = torch.aten.relu %623 : !torch.vtensor<[1,256,56,56],f32> -> !torch.vtensor<[1,256,56,56],f32>
%625 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%626 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_142 = torch.constant.int 12
%627 = torch.aten.item %625 : !torch.vtensor<[],f32> -> !torch.float
%628 = torch.aten.item %626 : !torch.vtensor<[],si8> -> !torch.int
%629 = torch.aten.quantize_per_tensor %624, %627, %628, %int12_142 : !torch.vtensor<[1,256,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%630 = torch.aten.int_repr %629 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],si8>
%631 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%632 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%633 = torch.aten.item %631 : !torch.vtensor<[],f32> -> !torch.float
%634 = torch.aten.item %632 : !torch.vtensor<[],si8> -> !torch.int
%635 = torch.aten._make_per_tensor_quantized_tensor %630, %633, %634 : !torch.vtensor<[1,256,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,56,56],!torch.qint8>
%636 = torch.aten.dequantize.self %635 : !torch.vtensor<[1,256,56,56],!torch.qint8> -> !torch.vtensor<[1,256,56,56],f32>
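    // New stage: 1x1 reduction to 128 channels (weight %22, 128x256x1x1). The
    // stride-2 downsampling happens in this block's 3x3 conv, and the skip path
    // gets its own projection conv further below.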
%637 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%638 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_143 = torch.constant.int 12
%639 = torch.aten.item %637 : !torch.vtensor<[],f32> -> !torch.float
%640 = torch.aten.item %638 : !torch.vtensor<[],si8> -> !torch.int
%641 = torch.aten.quantize_per_tensor %22, %639, %640, %int12_143 : !torch.vtensor<[128,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,256,1,1],!torch.qint8>
%642 = torch.aten.int_repr %641 : !torch.vtensor<[128,256,1,1],!torch.qint8> -> !torch.vtensor<[128,256,1,1],si8>
%643 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%644 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%645 = torch.aten.item %643 : !torch.vtensor<[],f32> -> !torch.float
%646 = torch.aten.item %644 : !torch.vtensor<[],si8> -> !torch.int
%647 = torch.aten._make_per_tensor_quantized_tensor %642, %645, %646 : !torch.vtensor<[128,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,256,1,1],!torch.qint8>
%648 = torch.aten.dequantize.self %647 : !torch.vtensor<[128,256,1,1],!torch.qint8> -> !torch.vtensor<[128,256,1,1],f32>
%649 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%650 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_144 = torch.constant.int 12
%651 = torch.aten.item %649 : !torch.vtensor<[],f32> -> !torch.float
%652 = torch.aten.item %650 : !torch.vtensor<[],si8> -> !torch.int
%653 = torch.aten.quantize_per_tensor %23, %651, %652, %int12_144 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%654 = torch.aten.int_repr %653 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%655 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%656 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%657 = torch.aten.item %655 : !torch.vtensor<[],f32> -> !torch.float
%658 = torch.aten.item %656 : !torch.vtensor<[],si8> -> !torch.int
%659 = torch.aten._make_per_tensor_quantized_tensor %654, %657, %658 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%660 = torch.aten.dequantize.self %659 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int0_145 = torch.constant.int 0
%int0_146 = torch.constant.int 0
%int1_147 = torch.constant.int 1
%int1_148 = torch.constant.int 1
%int1_149 = torch.constant.int 1
%int1_150 = torch.constant.int 1
%int0_151 = torch.constant.int 0
%661 = torch.prim.ListConstruct %int0_145, %int0_146 : (!torch.int, !torch.int) -> !torch.list<int>
%662 = torch.prim.ListConstruct %int1_147, %int1_148 : (!torch.int, !torch.int) -> !torch.list<int>
%663 = torch.prim.ListConstruct %int1_149, %int1_150 : (!torch.int, !torch.int) -> !torch.list<int>
%664 = torch.prim.ListConstruct %int0_151, %int0_151 : (!torch.int, !torch.int) -> !torch.list<int>
%false_152 = torch.constant.bool false
%int1_153 = torch.constant.int 1
%665 = torch.aten.convolution %636, %648, %660, %663, %661, %662, %false_152, %664, %int1_153 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[128,256,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,56,56],f32>
%666 = torch.aten.relu %665 : !torch.vtensor<[1,128,56,56],f32> -> !torch.vtensor<[1,128,56,56],f32>
%667 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%668 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_154 = torch.constant.int 12
%669 = torch.aten.item %667 : !torch.vtensor<[],f32> -> !torch.float
%670 = torch.aten.item %668 : !torch.vtensor<[],si8> -> !torch.int
%671 = torch.aten.quantize_per_tensor %666, %669, %670, %int12_154 : !torch.vtensor<[1,128,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,56,56],!torch.qint8>
%672 = torch.aten.int_repr %671 : !torch.vtensor<[1,128,56,56],!torch.qint8> -> !torch.vtensor<[1,128,56,56],si8>
%673 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%674 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%675 = torch.aten.item %673 : !torch.vtensor<[],f32> -> !torch.float
%676 = torch.aten.item %674 : !torch.vtensor<[],si8> -> !torch.int
%677 = torch.aten._make_per_tensor_quantized_tensor %672, %675, %676 : !torch.vtensor<[1,128,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,56,56],!torch.qint8>
%678 = torch.aten.dequantize.self %677 : !torch.vtensor<[1,128,56,56],!torch.qint8> -> !torch.vtensor<[1,128,56,56],f32>
%679 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%680 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_155 = torch.constant.int 12
%681 = torch.aten.item %679 : !torch.vtensor<[],f32> -> !torch.float
%682 = torch.aten.item %680 : !torch.vtensor<[],si8> -> !torch.int
%683 = torch.aten.quantize_per_tensor %24, %681, %682, %int12_155 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%684 = torch.aten.int_repr %683 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%685 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%686 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%687 = torch.aten.item %685 : !torch.vtensor<[],f32> -> !torch.float
%688 = torch.aten.item %686 : !torch.vtensor<[],si8> -> !torch.int
%689 = torch.aten._make_per_tensor_quantized_tensor %684, %687, %688 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%690 = torch.aten.dequantize.self %689 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%691 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%692 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_156 = torch.constant.int 12
%693 = torch.aten.item %691 : !torch.vtensor<[],f32> -> !torch.float
%694 = torch.aten.item %692 : !torch.vtensor<[],si8> -> !torch.int
%695 = torch.aten.quantize_per_tensor %25, %693, %694, %int12_156 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%696 = torch.aten.int_repr %695 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%697 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%698 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%699 = torch.aten.item %697 : !torch.vtensor<[],f32> -> !torch.float
%700 = torch.aten.item %698 : !torch.vtensor<[],si8> -> !torch.int
%701 = torch.aten._make_per_tensor_quantized_tensor %696, %699, %700 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%702 = torch.aten.dequantize.self %701 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
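    // 3x3 conv with stride [2,2] (%705): spatial size drops from 56x56 to 28x28.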
%int1_157 = torch.constant.int 1
%int1_158 = torch.constant.int 1
%int1_159 = torch.constant.int 1
%int1_160 = torch.constant.int 1
%int2_161 = torch.constant.int 2
%int2_162 = torch.constant.int 2
%int0_163 = torch.constant.int 0
%703 = torch.prim.ListConstruct %int1_157, %int1_158 : (!torch.int, !torch.int) -> !torch.list<int>
%704 = torch.prim.ListConstruct %int1_159, %int1_160 : (!torch.int, !torch.int) -> !torch.list<int>
%705 = torch.prim.ListConstruct %int2_161, %int2_162 : (!torch.int, !torch.int) -> !torch.list<int>
%706 = torch.prim.ListConstruct %int0_163, %int0_163 : (!torch.int, !torch.int) -> !torch.list<int>
%false_164 = torch.constant.bool false
%int1_165 = torch.constant.int 1
%707 = torch.aten.convolution %678, %690, %702, %705, %703, %704, %false_164, %706, %int1_165 : !torch.vtensor<[1,128,56,56],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%708 = torch.aten.relu %707 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%709 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%710 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_166 = torch.constant.int 12
%711 = torch.aten.item %709 : !torch.vtensor<[],f32> -> !torch.float
%712 = torch.aten.item %710 : !torch.vtensor<[],si8> -> !torch.int
%713 = torch.aten.quantize_per_tensor %708, %711, %712, %int12_166 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%714 = torch.aten.int_repr %713 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%715 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%716 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%717 = torch.aten.item %715 : !torch.vtensor<[],f32> -> !torch.float
%718 = torch.aten.item %716 : !torch.vtensor<[],si8> -> !torch.int
%719 = torch.aten._make_per_tensor_quantized_tensor %714, %717, %718 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%720 = torch.aten.dequantize.self %719 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%721 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%722 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_167 = torch.constant.int 12
%723 = torch.aten.item %721 : !torch.vtensor<[],f32> -> !torch.float
%724 = torch.aten.item %722 : !torch.vtensor<[],si8> -> !torch.int
%725 = torch.aten.quantize_per_tensor %26, %723, %724, %int12_167 : !torch.vtensor<[512,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%726 = torch.aten.int_repr %725 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],si8>
%727 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%728 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%729 = torch.aten.item %727 : !torch.vtensor<[],f32> -> !torch.float
%730 = torch.aten.item %728 : !torch.vtensor<[],si8> -> !torch.int
%731 = torch.aten._make_per_tensor_quantized_tensor %726, %729, %730 : !torch.vtensor<[512,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%732 = torch.aten.dequantize.self %731 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],f32>
%733 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%734 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_168 = torch.constant.int 12
%735 = torch.aten.item %733 : !torch.vtensor<[],f32> -> !torch.float
%736 = torch.aten.item %734 : !torch.vtensor<[],si8> -> !torch.int
%737 = torch.aten.quantize_per_tensor %27, %735, %736, %int12_168 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%738 = torch.aten.int_repr %737 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%739 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%740 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%741 = torch.aten.item %739 : !torch.vtensor<[],f32> -> !torch.float
%742 = torch.aten.item %740 : !torch.vtensor<[],si8> -> !torch.int
%743 = torch.aten._make_per_tensor_quantized_tensor %738, %741, %742 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%744 = torch.aten.dequantize.self %743 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
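    // 1x1 expansion to 512 channels: weight %26 (512x128x1x1), output [1,512,28,28].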
%int0_169 = torch.constant.int 0
%int0_170 = torch.constant.int 0
%int1_171 = torch.constant.int 1
%int1_172 = torch.constant.int 1
%int1_173 = torch.constant.int 1
%int1_174 = torch.constant.int 1
%int0_175 = torch.constant.int 0
%745 = torch.prim.ListConstruct %int0_169, %int0_170 : (!torch.int, !torch.int) -> !torch.list<int>
%746 = torch.prim.ListConstruct %int1_171, %int1_172 : (!torch.int, !torch.int) -> !torch.list<int>
%747 = torch.prim.ListConstruct %int1_173, %int1_174 : (!torch.int, !torch.int) -> !torch.list<int>
%748 = torch.prim.ListConstruct %int0_175, %int0_175 : (!torch.int, !torch.int) -> !torch.list<int>
%false_176 = torch.constant.bool false
%int1_177 = torch.constant.int 1
%749 = torch.aten.convolution %720, %732, %744, %747, %745, %746, %false_176, %748, %int1_177 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[512,128,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%750 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%751 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_178 = torch.constant.int 12
%752 = torch.aten.item %750 : !torch.vtensor<[],f32> -> !torch.float
%753 = torch.aten.item %751 : !torch.vtensor<[],si8> -> !torch.int
%754 = torch.aten.quantize_per_tensor %749, %752, %753, %int12_178 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%755 = torch.aten.int_repr %754 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%756 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%757 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%758 = torch.aten.item %756 : !torch.vtensor<[],f32> -> !torch.float
%759 = torch.aten.item %757 : !torch.vtensor<[],si8> -> !torch.int
%760 = torch.aten._make_per_tensor_quantized_tensor %755, %758, %759 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%761 = torch.aten.dequantize.self %760 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
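    // Projection shortcut: weight %28 (512x256x1x1) with stride [2,2] is applied to
    // %636, the input of this stage, so the skip path matches the main path's
    // [1,512,28,28] shape before the residual add.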
%762 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%763 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_179 = torch.constant.int 12
%764 = torch.aten.item %762 : !torch.vtensor<[],f32> -> !torch.float
%765 = torch.aten.item %763 : !torch.vtensor<[],si8> -> !torch.int
%766 = torch.aten.quantize_per_tensor %28, %764, %765, %int12_179 : !torch.vtensor<[512,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,256,1,1],!torch.qint8>
%767 = torch.aten.int_repr %766 : !torch.vtensor<[512,256,1,1],!torch.qint8> -> !torch.vtensor<[512,256,1,1],si8>
%768 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%769 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%770 = torch.aten.item %768 : !torch.vtensor<[],f32> -> !torch.float
%771 = torch.aten.item %769 : !torch.vtensor<[],si8> -> !torch.int
%772 = torch.aten._make_per_tensor_quantized_tensor %767, %770, %771 : !torch.vtensor<[512,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,256,1,1],!torch.qint8>
%773 = torch.aten.dequantize.self %772 : !torch.vtensor<[512,256,1,1],!torch.qint8> -> !torch.vtensor<[512,256,1,1],f32>
%774 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%775 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_180 = torch.constant.int 12
%776 = torch.aten.item %774 : !torch.vtensor<[],f32> -> !torch.float
%777 = torch.aten.item %775 : !torch.vtensor<[],si8> -> !torch.int
%778 = torch.aten.quantize_per_tensor %29, %776, %777, %int12_180 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%779 = torch.aten.int_repr %778 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%780 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%781 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%782 = torch.aten.item %780 : !torch.vtensor<[],f32> -> !torch.float
%783 = torch.aten.item %781 : !torch.vtensor<[],si8> -> !torch.int
%784 = torch.aten._make_per_tensor_quantized_tensor %779, %782, %783 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%785 = torch.aten.dequantize.self %784 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_181 = torch.constant.int 0
%int0_182 = torch.constant.int 0
%int1_183 = torch.constant.int 1
%int1_184 = torch.constant.int 1
%int2_185 = torch.constant.int 2
%int2_186 = torch.constant.int 2
%int0_187 = torch.constant.int 0
%786 = torch.prim.ListConstruct %int0_181, %int0_182 : (!torch.int, !torch.int) -> !torch.list<int>
%787 = torch.prim.ListConstruct %int1_183, %int1_184 : (!torch.int, !torch.int) -> !torch.list<int>
%788 = torch.prim.ListConstruct %int2_185, %int2_186 : (!torch.int, !torch.int) -> !torch.list<int>
%789 = torch.prim.ListConstruct %int0_187, %int0_187 : (!torch.int, !torch.int) -> !torch.list<int>
%false_188 = torch.constant.bool false
%int1_189 = torch.constant.int 1
%790 = torch.aten.convolution %636, %773, %785, %788, %786, %787, %false_188, %789, %int1_189 : !torch.vtensor<[1,256,56,56],f32>, !torch.vtensor<[512,256,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%791 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%792 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_190 = torch.constant.int 12
%793 = torch.aten.item %791 : !torch.vtensor<[],f32> -> !torch.float
%794 = torch.aten.item %792 : !torch.vtensor<[],si8> -> !torch.int
%795 = torch.aten.quantize_per_tensor %790, %793, %794, %int12_190 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%796 = torch.aten.int_repr %795 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%797 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%798 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%799 = torch.aten.item %797 : !torch.vtensor<[],f32> -> !torch.float
%800 = torch.aten.item %798 : !torch.vtensor<[],si8> -> !torch.int
%801 = torch.aten._make_per_tensor_quantized_tensor %796, %799, %800 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%802 = torch.aten.dequantize.self %801 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
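    // Residual add of the main path (%761) and the projected shortcut (%802), then ReLU.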
%int1_191 = torch.constant.int 1
%803 = torch.aten.add.Tensor %761, %802, %int1_191 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1,512,28,28],f32>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%804 = torch.aten.relu %803 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%805 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%806 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_192 = torch.constant.int 12
%807 = torch.aten.item %805 : !torch.vtensor<[],f32> -> !torch.float
%808 = torch.aten.item %806 : !torch.vtensor<[],si8> -> !torch.int
%809 = torch.aten.quantize_per_tensor %804, %807, %808, %int12_192 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%810 = torch.aten.int_repr %809 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%811 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%812 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%813 = torch.aten.item %811 : !torch.vtensor<[],f32> -> !torch.float
%814 = torch.aten.item %812 : !torch.vtensor<[],si8> -> !torch.int
%815 = torch.aten._make_per_tensor_quantized_tensor %810, %813, %814 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%816 = torch.aten.dequantize.self %815 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
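    // Second bottleneck of the stage: 1x1 reduction weight %30 (128x512x1x1).
    // Note the finer weight scale 0.001953125 = 1/512 on the following line.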
%817 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%818 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_193 = torch.constant.int 12
%819 = torch.aten.item %817 : !torch.vtensor<[],f32> -> !torch.float
%820 = torch.aten.item %818 : !torch.vtensor<[],si8> -> !torch.int
%821 = torch.aten.quantize_per_tensor %30, %819, %820, %int12_193 : !torch.vtensor<[128,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%822 = torch.aten.int_repr %821 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],si8>
%823 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%824 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%825 = torch.aten.item %823 : !torch.vtensor<[],f32> -> !torch.float
%826 = torch.aten.item %824 : !torch.vtensor<[],si8> -> !torch.int
%827 = torch.aten._make_per_tensor_quantized_tensor %822, %825, %826 : !torch.vtensor<[128,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%828 = torch.aten.dequantize.self %827 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],f32>
%829 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%830 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_194 = torch.constant.int 12
%831 = torch.aten.item %829 : !torch.vtensor<[],f32> -> !torch.float
%832 = torch.aten.item %830 : !torch.vtensor<[],si8> -> !torch.int
%833 = torch.aten.quantize_per_tensor %31, %831, %832, %int12_194 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%834 = torch.aten.int_repr %833 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%835 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%836 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%837 = torch.aten.item %835 : !torch.vtensor<[],f32> -> !torch.float
%838 = torch.aten.item %836 : !torch.vtensor<[],si8> -> !torch.int
%839 = torch.aten._make_per_tensor_quantized_tensor %834, %837, %838 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%840 = torch.aten.dequantize.self %839 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int0_195 = torch.constant.int 0
%int0_196 = torch.constant.int 0
%int1_197 = torch.constant.int 1
%int1_198 = torch.constant.int 1
%int1_199 = torch.constant.int 1
%int1_200 = torch.constant.int 1
%int0_201 = torch.constant.int 0
%841 = torch.prim.ListConstruct %int0_195, %int0_196 : (!torch.int, !torch.int) -> !torch.list<int>
%842 = torch.prim.ListConstruct %int1_197, %int1_198 : (!torch.int, !torch.int) -> !torch.list<int>
%843 = torch.prim.ListConstruct %int1_199, %int1_200 : (!torch.int, !torch.int) -> !torch.list<int>
%844 = torch.prim.ListConstruct %int0_201, %int0_201 : (!torch.int, !torch.int) -> !torch.list<int>
%false_202 = torch.constant.bool false
%int1_203 = torch.constant.int 1
%845 = torch.aten.convolution %816, %828, %840, %843, %841, %842, %false_202, %844, %int1_203 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[128,512,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%846 = torch.aten.relu %845 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%847 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%848 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_204 = torch.constant.int 12
%849 = torch.aten.item %847 : !torch.vtensor<[],f32> -> !torch.float
%850 = torch.aten.item %848 : !torch.vtensor<[],si8> -> !torch.int
%851 = torch.aten.quantize_per_tensor %846, %849, %850, %int12_204 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%852 = torch.aten.int_repr %851 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%853 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%854 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%855 = torch.aten.item %853 : !torch.vtensor<[],f32> -> !torch.float
%856 = torch.aten.item %854 : !torch.vtensor<[],si8> -> !torch.int
%857 = torch.aten._make_per_tensor_quantized_tensor %852, %855, %856 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%858 = torch.aten.dequantize.self %857 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%859 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%860 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_205 = torch.constant.int 12
%861 = torch.aten.item %859 : !torch.vtensor<[],f32> -> !torch.float
%862 = torch.aten.item %860 : !torch.vtensor<[],si8> -> !torch.int
%863 = torch.aten.quantize_per_tensor %32, %861, %862, %int12_205 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%864 = torch.aten.int_repr %863 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%865 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%866 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%867 = torch.aten.item %865 : !torch.vtensor<[],f32> -> !torch.float
%868 = torch.aten.item %866 : !torch.vtensor<[],si8> -> !torch.int
%869 = torch.aten._make_per_tensor_quantized_tensor %864, %867, %868 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%870 = torch.aten.dequantize.self %869 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%871 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%872 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_206 = torch.constant.int 12
%873 = torch.aten.item %871 : !torch.vtensor<[],f32> -> !torch.float
%874 = torch.aten.item %872 : !torch.vtensor<[],si8> -> !torch.int
%875 = torch.aten.quantize_per_tensor %33, %873, %874, %int12_206 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%876 = torch.aten.int_repr %875 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%877 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%878 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%879 = torch.aten.item %877 : !torch.vtensor<[],f32> -> !torch.float
%880 = torch.aten.item %878 : !torch.vtensor<[],si8> -> !torch.int
%881 = torch.aten._make_per_tensor_quantized_tensor %876, %879, %880 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%882 = torch.aten.dequantize.self %881 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_207 = torch.constant.int 1
%int1_208 = torch.constant.int 1
%int1_209 = torch.constant.int 1
%int1_210 = torch.constant.int 1
%int1_211 = torch.constant.int 1
%int1_212 = torch.constant.int 1
%int0_213 = torch.constant.int 0
%883 = torch.prim.ListConstruct %int1_207, %int1_208 : (!torch.int, !torch.int) -> !torch.list<int>
%884 = torch.prim.ListConstruct %int1_209, %int1_210 : (!torch.int, !torch.int) -> !torch.list<int>
%885 = torch.prim.ListConstruct %int1_211, %int1_212 : (!torch.int, !torch.int) -> !torch.list<int>
%886 = torch.prim.ListConstruct %int0_213, %int0_213 : (!torch.int, !torch.int) -> !torch.list<int>
%false_214 = torch.constant.bool false
%int1_215 = torch.constant.int 1
%887 = torch.aten.convolution %858, %870, %882, %885, %883, %884, %false_214, %886, %int1_215 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%888 = torch.aten.relu %887 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%889 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%890 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_216 = torch.constant.int 12
%891 = torch.aten.item %889 : !torch.vtensor<[],f32> -> !torch.float
%892 = torch.aten.item %890 : !torch.vtensor<[],si8> -> !torch.int
%893 = torch.aten.quantize_per_tensor %888, %891, %892, %int12_216 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%894 = torch.aten.int_repr %893 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%895 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%896 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%897 = torch.aten.item %895 : !torch.vtensor<[],f32> -> !torch.float
%898 = torch.aten.item %896 : !torch.vtensor<[],si8> -> !torch.int
%899 = torch.aten._make_per_tensor_quantized_tensor %894, %897, %898 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%900 = torch.aten.dequantize.self %899 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%901 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%902 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_217 = torch.constant.int 12
%903 = torch.aten.item %901 : !torch.vtensor<[],f32> -> !torch.float
%904 = torch.aten.item %902 : !torch.vtensor<[],si8> -> !torch.int
%905 = torch.aten.quantize_per_tensor %34, %903, %904, %int12_217 : !torch.vtensor<[512,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%906 = torch.aten.int_repr %905 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],si8>
%907 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%908 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%909 = torch.aten.item %907 : !torch.vtensor<[],f32> -> !torch.float
%910 = torch.aten.item %908 : !torch.vtensor<[],si8> -> !torch.int
%911 = torch.aten._make_per_tensor_quantized_tensor %906, %909, %910 : !torch.vtensor<[512,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%912 = torch.aten.dequantize.self %911 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],f32>
%913 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%914 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_218 = torch.constant.int 12
%915 = torch.aten.item %913 : !torch.vtensor<[],f32> -> !torch.float
%916 = torch.aten.item %914 : !torch.vtensor<[],si8> -> !torch.int
%917 = torch.aten.quantize_per_tensor %35, %915, %916, %int12_218 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%918 = torch.aten.int_repr %917 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%919 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%920 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%921 = torch.aten.item %919 : !torch.vtensor<[],f32> -> !torch.float
%922 = torch.aten.item %920 : !torch.vtensor<[],si8> -> !torch.int
%923 = torch.aten._make_per_tensor_quantized_tensor %918, %921, %922 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%924 = torch.aten.dequantize.self %923 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_219 = torch.constant.int 0
%int0_220 = torch.constant.int 0
%int1_221 = torch.constant.int 1
%int1_222 = torch.constant.int 1
%int1_223 = torch.constant.int 1
%int1_224 = torch.constant.int 1
%int0_225 = torch.constant.int 0
%925 = torch.prim.ListConstruct %int0_219, %int0_220 : (!torch.int, !torch.int) -> !torch.list<int>
%926 = torch.prim.ListConstruct %int1_221, %int1_222 : (!torch.int, !torch.int) -> !torch.list<int>
%927 = torch.prim.ListConstruct %int1_223, %int1_224 : (!torch.int, !torch.int) -> !torch.list<int>
%928 = torch.prim.ListConstruct %int0_225, %int0_225 : (!torch.int, !torch.int) -> !torch.list<int>
%false_226 = torch.constant.bool false
%int1_227 = torch.constant.int 1
%929 = torch.aten.convolution %900, %912, %924, %927, %925, %926, %false_226, %928, %int1_227 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[512,128,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%930 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%931 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_228 = torch.constant.int 12
%932 = torch.aten.item %930 : !torch.vtensor<[],f32> -> !torch.float
%933 = torch.aten.item %931 : !torch.vtensor<[],si8> -> !torch.int
%934 = torch.aten.quantize_per_tensor %929, %932, %933, %int12_228 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%935 = torch.aten.int_repr %934 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%936 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%937 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%938 = torch.aten.item %936 : !torch.vtensor<[],f32> -> !torch.float
%939 = torch.aten.item %937 : !torch.vtensor<[],si8> -> !torch.int
%940 = torch.aten._make_per_tensor_quantized_tensor %935, %938, %939 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%941 = torch.aten.dequantize.self %940 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
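    // Identity shortcut: add %816 (the stage's previous output) and ReLU; no
    // projection conv is needed since both operands are already [1,512,28,28].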
%int1_229 = torch.constant.int 1
%942 = torch.aten.add.Tensor %941, %816, %int1_229 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1,512,28,28],f32>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%943 = torch.aten.relu %942 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%944 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%945 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_230 = torch.constant.int 12
%946 = torch.aten.item %944 : !torch.vtensor<[],f32> -> !torch.float
%947 = torch.aten.item %945 : !torch.vtensor<[],si8> -> !torch.int
%948 = torch.aten.quantize_per_tensor %943, %946, %947, %int12_230 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%949 = torch.aten.int_repr %948 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%950 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%951 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%952 = torch.aten.item %950 : !torch.vtensor<[],f32> -> !torch.float
%953 = torch.aten.item %951 : !torch.vtensor<[],si8> -> !torch.int
%954 = torch.aten._make_per_tensor_quantized_tensor %949, %952, %953 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%955 = torch.aten.dequantize.self %954 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
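    // Third bottleneck of the stage: 1x1 reduction weight %36 (128x512x1x1),
    // followed by the same QDQ / convolution / ReLU sequence as above.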
%956 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%957 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_231 = torch.constant.int 12
%958 = torch.aten.item %956 : !torch.vtensor<[],f32> -> !torch.float
%959 = torch.aten.item %957 : !torch.vtensor<[],si8> -> !torch.int
%960 = torch.aten.quantize_per_tensor %36, %958, %959, %int12_231 : !torch.vtensor<[128,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%961 = torch.aten.int_repr %960 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],si8>
%962 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%963 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%964 = torch.aten.item %962 : !torch.vtensor<[],f32> -> !torch.float
%965 = torch.aten.item %963 : !torch.vtensor<[],si8> -> !torch.int
%966 = torch.aten._make_per_tensor_quantized_tensor %961, %964, %965 : !torch.vtensor<[128,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%967 = torch.aten.dequantize.self %966 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],f32>
%968 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%969 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_232 = torch.constant.int 12
%970 = torch.aten.item %968 : !torch.vtensor<[],f32> -> !torch.float
%971 = torch.aten.item %969 : !torch.vtensor<[],si8> -> !torch.int
%972 = torch.aten.quantize_per_tensor %37, %970, %971, %int12_232 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%973 = torch.aten.int_repr %972 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%974 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%975 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%976 = torch.aten.item %974 : !torch.vtensor<[],f32> -> !torch.float
%977 = torch.aten.item %975 : !torch.vtensor<[],si8> -> !torch.int
%978 = torch.aten._make_per_tensor_quantized_tensor %973, %976, %977 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%979 = torch.aten.dequantize.self %978 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int0_233 = torch.constant.int 0
%int0_234 = torch.constant.int 0
%int1_235 = torch.constant.int 1
%int1_236 = torch.constant.int 1
%int1_237 = torch.constant.int 1
%int1_238 = torch.constant.int 1
%int0_239 = torch.constant.int 0
%980 = torch.prim.ListConstruct %int0_233, %int0_234 : (!torch.int, !torch.int) -> !torch.list<int>
%981 = torch.prim.ListConstruct %int1_235, %int1_236 : (!torch.int, !torch.int) -> !torch.list<int>
%982 = torch.prim.ListConstruct %int1_237, %int1_238 : (!torch.int, !torch.int) -> !torch.list<int>
%983 = torch.prim.ListConstruct %int0_239, %int0_239 : (!torch.int, !torch.int) -> !torch.list<int>
%false_240 = torch.constant.bool false
%int1_241 = torch.constant.int 1
%984 = torch.aten.convolution %955, %967, %979, %982, %980, %981, %false_240, %983, %int1_241 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[128,512,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%985 = torch.aten.relu %984 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%986 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%987 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_242 = torch.constant.int 12
%988 = torch.aten.item %986 : !torch.vtensor<[],f32> -> !torch.float
%989 = torch.aten.item %987 : !torch.vtensor<[],si8> -> !torch.int
%990 = torch.aten.quantize_per_tensor %985, %988, %989, %int12_242 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%991 = torch.aten.int_repr %990 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%992 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%993 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%994 = torch.aten.item %992 : !torch.vtensor<[],f32> -> !torch.float
%995 = torch.aten.item %993 : !torch.vtensor<[],si8> -> !torch.int
%996 = torch.aten._make_per_tensor_quantized_tensor %991, %994, %995 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%997 = torch.aten.dequantize.self %996 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%998 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%999 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_243 = torch.constant.int 12
%1000 = torch.aten.item %998 : !torch.vtensor<[],f32> -> !torch.float
%1001 = torch.aten.item %999 : !torch.vtensor<[],si8> -> !torch.int
%1002 = torch.aten.quantize_per_tensor %38, %1000, %1001, %int12_243 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%1003 = torch.aten.int_repr %1002 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%1004 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1005 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1006 = torch.aten.item %1004 : !torch.vtensor<[],f32> -> !torch.float
%1007 = torch.aten.item %1005 : !torch.vtensor<[],si8> -> !torch.int
%1008 = torch.aten._make_per_tensor_quantized_tensor %1003, %1006, %1007 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%1009 = torch.aten.dequantize.self %1008 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%1010 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1011 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_244 = torch.constant.int 12
%1012 = torch.aten.item %1010 : !torch.vtensor<[],f32> -> !torch.float
%1013 = torch.aten.item %1011 : !torch.vtensor<[],si8> -> !torch.int
%1014 = torch.aten.quantize_per_tensor %39, %1012, %1013, %int12_244 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1015 = torch.aten.int_repr %1014 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%1016 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1017 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1018 = torch.aten.item %1016 : !torch.vtensor<[],f32> -> !torch.float
%1019 = torch.aten.item %1017 : !torch.vtensor<[],si8> -> !torch.int
%1020 = torch.aten._make_per_tensor_quantized_tensor %1015, %1018, %1019 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1021 = torch.aten.dequantize.self %1020 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_245 = torch.constant.int 1
%int1_246 = torch.constant.int 1
%int1_247 = torch.constant.int 1
%int1_248 = torch.constant.int 1
%int1_249 = torch.constant.int 1
%int1_250 = torch.constant.int 1
%int0_251 = torch.constant.int 0
%1022 = torch.prim.ListConstruct %int1_245, %int1_246 : (!torch.int, !torch.int) -> !torch.list<int>
%1023 = torch.prim.ListConstruct %int1_247, %int1_248 : (!torch.int, !torch.int) -> !torch.list<int>
%1024 = torch.prim.ListConstruct %int1_249, %int1_250 : (!torch.int, !torch.int) -> !torch.list<int>
%1025 = torch.prim.ListConstruct %int0_251, %int0_251 : (!torch.int, !torch.int) -> !torch.list<int>
%false_252 = torch.constant.bool false
%int1_253 = torch.constant.int 1
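    // 3x3 conv: stride = %1024 = [1,1], padding = %1022 = [1,1], dilation = %1023 = [1,1],
    // so the 28x28 spatial dims are preserved.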
%1026 = torch.aten.convolution %997, %1009, %1021, %1024, %1022, %1023, %false_252, %1025, %int1_253 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%1027 = torch.aten.relu %1026 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%1028 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1029 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_254 = torch.constant.int 12
%1030 = torch.aten.item %1028 : !torch.vtensor<[],f32> -> !torch.float
%1031 = torch.aten.item %1029 : !torch.vtensor<[],si8> -> !torch.int
%1032 = torch.aten.quantize_per_tensor %1027, %1030, %1031, %int12_254 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1033 = torch.aten.int_repr %1032 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%1034 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1035 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1036 = torch.aten.item %1034 : !torch.vtensor<[],f32> -> !torch.float
%1037 = torch.aten.item %1035 : !torch.vtensor<[],si8> -> !torch.int
%1038 = torch.aten._make_per_tensor_quantized_tensor %1033, %1036, %1037 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1039 = torch.aten.dequantize.self %1038 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%1040 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1041 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_255 = torch.constant.int 12
%1042 = torch.aten.item %1040 : !torch.vtensor<[],f32> -> !torch.float
%1043 = torch.aten.item %1041 : !torch.vtensor<[],si8> -> !torch.int
%1044 = torch.aten.quantize_per_tensor %40, %1042, %1043, %int12_255 : !torch.vtensor<[512,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%1045 = torch.aten.int_repr %1044 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],si8>
%1046 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1047 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1048 = torch.aten.item %1046 : !torch.vtensor<[],f32> -> !torch.float
%1049 = torch.aten.item %1047 : !torch.vtensor<[],si8> -> !torch.int
%1050 = torch.aten._make_per_tensor_quantized_tensor %1045, %1048, %1049 : !torch.vtensor<[512,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%1051 = torch.aten.dequantize.self %1050 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],f32>
%1052 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1053 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_256 = torch.constant.int 12
%1054 = torch.aten.item %1052 : !torch.vtensor<[],f32> -> !torch.float
%1055 = torch.aten.item %1053 : !torch.vtensor<[],si8> -> !torch.int
%1056 = torch.aten.quantize_per_tensor %41, %1054, %1055, %int12_256 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%1057 = torch.aten.int_repr %1056 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%1058 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1059 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1060 = torch.aten.item %1058 : !torch.vtensor<[],f32> -> !torch.float
%1061 = torch.aten.item %1059 : !torch.vtensor<[],si8> -> !torch.int
%1062 = torch.aten._make_per_tensor_quantized_tensor %1057, %1060, %1061 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%1063 = torch.aten.dequantize.self %1062 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_257 = torch.constant.int 0
%int0_258 = torch.constant.int 0
%int1_259 = torch.constant.int 1
%int1_260 = torch.constant.int 1
%int1_261 = torch.constant.int 1
%int1_262 = torch.constant.int 1
%int0_263 = torch.constant.int 0
%1064 = torch.prim.ListConstruct %int0_257, %int0_258 : (!torch.int, !torch.int) -> !torch.list<int>
%1065 = torch.prim.ListConstruct %int1_259, %int1_260 : (!torch.int, !torch.int) -> !torch.list<int>
%1066 = torch.prim.ListConstruct %int1_261, %int1_262 : (!torch.int, !torch.int) -> !torch.list<int>
%1067 = torch.prim.ListConstruct %int0_263, %int0_263 : (!torch.int, !torch.int) -> !torch.list<int>
%false_264 = torch.constant.bool false
%int1_265 = torch.constant.int 1
%1068 = torch.aten.convolution %1039, %1051, %1063, %1066, %1064, %1065, %false_264, %1067, %int1_265 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[512,128,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%1069 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1070 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_266 = torch.constant.int 12
%1071 = torch.aten.item %1069 : !torch.vtensor<[],f32> -> !torch.float
%1072 = torch.aten.item %1070 : !torch.vtensor<[],si8> -> !torch.int
%1073 = torch.aten.quantize_per_tensor %1068, %1071, %1072, %int12_266 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1074 = torch.aten.int_repr %1073 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%1075 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1076 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1077 = torch.aten.item %1075 : !torch.vtensor<[],f32> -> !torch.float
%1078 = torch.aten.item %1076 : !torch.vtensor<[],si8> -> !torch.int
%1079 = torch.aten._make_per_tensor_quantized_tensor %1074, %1077, %1078 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1080 = torch.aten.dequantize.self %1079 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
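    // Residual connection: the dequantized 1x1-expansion output (%1080) is added to the
    // block input (%955), then ReLU'd. The 1x1 -> 3x3 -> 1x1 structure with a skip add is
    // consistent with a ResNet-style bottleneck unit.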
%int1_267 = torch.constant.int 1
%1081 = torch.aten.add.Tensor %1080, %955, %int1_267 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1,512,28,28],f32>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%1082 = torch.aten.relu %1081 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%1083 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1084 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_268 = torch.constant.int 12
%1085 = torch.aten.item %1083 : !torch.vtensor<[],f32> -> !torch.float
%1086 = torch.aten.item %1084 : !torch.vtensor<[],si8> -> !torch.int
%1087 = torch.aten.quantize_per_tensor %1082, %1085, %1086, %int12_268 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1088 = torch.aten.int_repr %1087 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%1089 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1090 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1091 = torch.aten.item %1089 : !torch.vtensor<[],f32> -> !torch.float
%1092 = torch.aten.item %1090 : !torch.vtensor<[],si8> -> !torch.int
%1093 = torch.aten._make_per_tensor_quantized_tensor %1088, %1091, %1092 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1094 = torch.aten.dequantize.self %1093 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
%1095 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1096 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_269 = torch.constant.int 12
%1097 = torch.aten.item %1095 : !torch.vtensor<[],f32> -> !torch.float
%1098 = torch.aten.item %1096 : !torch.vtensor<[],si8> -> !torch.int
%1099 = torch.aten.quantize_per_tensor %42, %1097, %1098, %int12_269 : !torch.vtensor<[128,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%1100 = torch.aten.int_repr %1099 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],si8>
%1101 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1102 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1103 = torch.aten.item %1101 : !torch.vtensor<[],f32> -> !torch.float
%1104 = torch.aten.item %1102 : !torch.vtensor<[],si8> -> !torch.int
%1105 = torch.aten._make_per_tensor_quantized_tensor %1100, %1103, %1104 : !torch.vtensor<[128,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,512,1,1],!torch.qint8>
%1106 = torch.aten.dequantize.self %1105 : !torch.vtensor<[128,512,1,1],!torch.qint8> -> !torch.vtensor<[128,512,1,1],f32>
%1107 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1108 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_270 = torch.constant.int 12
%1109 = torch.aten.item %1107 : !torch.vtensor<[],f32> -> !torch.float
%1110 = torch.aten.item %1108 : !torch.vtensor<[],si8> -> !torch.int
%1111 = torch.aten.quantize_per_tensor %43, %1109, %1110, %int12_270 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1112 = torch.aten.int_repr %1111 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%1113 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1114 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1115 = torch.aten.item %1113 : !torch.vtensor<[],f32> -> !torch.float
%1116 = torch.aten.item %1114 : !torch.vtensor<[],si8> -> !torch.int
%1117 = torch.aten._make_per_tensor_quantized_tensor %1112, %1115, %1116 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1118 = torch.aten.dequantize.self %1117 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int0_271 = torch.constant.int 0
%int0_272 = torch.constant.int 0
%int1_273 = torch.constant.int 1
%int1_274 = torch.constant.int 1
%int1_275 = torch.constant.int 1
%int1_276 = torch.constant.int 1
%int0_277 = torch.constant.int 0
%1119 = torch.prim.ListConstruct %int0_271, %int0_272 : (!torch.int, !torch.int) -> !torch.list<int>
%1120 = torch.prim.ListConstruct %int1_273, %int1_274 : (!torch.int, !torch.int) -> !torch.list<int>
%1121 = torch.prim.ListConstruct %int1_275, %int1_276 : (!torch.int, !torch.int) -> !torch.list<int>
%1122 = torch.prim.ListConstruct %int0_277, %int0_277 : (!torch.int, !torch.int) -> !torch.list<int>
%false_278 = torch.constant.bool false
%int1_279 = torch.constant.int 1
%1123 = torch.aten.convolution %1094, %1106, %1118, %1121, %1119, %1120, %false_278, %1122, %int1_279 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[128,512,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%1124 = torch.aten.relu %1123 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%1125 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1126 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_280 = torch.constant.int 12
%1127 = torch.aten.item %1125 : !torch.vtensor<[],f32> -> !torch.float
%1128 = torch.aten.item %1126 : !torch.vtensor<[],si8> -> !torch.int
%1129 = torch.aten.quantize_per_tensor %1124, %1127, %1128, %int12_280 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1130 = torch.aten.int_repr %1129 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%1131 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1132 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1133 = torch.aten.item %1131 : !torch.vtensor<[],f32> -> !torch.float
%1134 = torch.aten.item %1132 : !torch.vtensor<[],si8> -> !torch.int
%1135 = torch.aten._make_per_tensor_quantized_tensor %1130, %1133, %1134 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1136 = torch.aten.dequantize.self %1135 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%1137 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1138 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_281 = torch.constant.int 12
%1139 = torch.aten.item %1137 : !torch.vtensor<[],f32> -> !torch.float
%1140 = torch.aten.item %1138 : !torch.vtensor<[],si8> -> !torch.int
%1141 = torch.aten.quantize_per_tensor %44, %1139, %1140, %int12_281 : !torch.vtensor<[128,128,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%1142 = torch.aten.int_repr %1141 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],si8>
%1143 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1144 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1145 = torch.aten.item %1143 : !torch.vtensor<[],f32> -> !torch.float
%1146 = torch.aten.item %1144 : !torch.vtensor<[],si8> -> !torch.int
%1147 = torch.aten._make_per_tensor_quantized_tensor %1142, %1145, %1146 : !torch.vtensor<[128,128,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,128,3,3],!torch.qint8>
%1148 = torch.aten.dequantize.self %1147 : !torch.vtensor<[128,128,3,3],!torch.qint8> -> !torch.vtensor<[128,128,3,3],f32>
%1149 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1150 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_282 = torch.constant.int 12
%1151 = torch.aten.item %1149 : !torch.vtensor<[],f32> -> !torch.float
%1152 = torch.aten.item %1150 : !torch.vtensor<[],si8> -> !torch.int
%1153 = torch.aten.quantize_per_tensor %45, %1151, %1152, %int12_282 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1154 = torch.aten.int_repr %1153 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%1155 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1156 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1157 = torch.aten.item %1155 : !torch.vtensor<[],f32> -> !torch.float
%1158 = torch.aten.item %1156 : !torch.vtensor<[],si8> -> !torch.int
%1159 = torch.aten._make_per_tensor_quantized_tensor %1154, %1157, %1158 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%1160 = torch.aten.dequantize.self %1159 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int1_283 = torch.constant.int 1
%int1_284 = torch.constant.int 1
%int1_285 = torch.constant.int 1
%int1_286 = torch.constant.int 1
%int1_287 = torch.constant.int 1
%int1_288 = torch.constant.int 1
%int0_289 = torch.constant.int 0
%1161 = torch.prim.ListConstruct %int1_283, %int1_284 : (!torch.int, !torch.int) -> !torch.list<int>
%1162 = torch.prim.ListConstruct %int1_285, %int1_286 : (!torch.int, !torch.int) -> !torch.list<int>
%1163 = torch.prim.ListConstruct %int1_287, %int1_288 : (!torch.int, !torch.int) -> !torch.list<int>
%1164 = torch.prim.ListConstruct %int0_289, %int0_289 : (!torch.int, !torch.int) -> !torch.list<int>
%false_290 = torch.constant.bool false
%int1_291 = torch.constant.int 1
%1165 = torch.aten.convolution %1136, %1148, %1160, %1163, %1161, %1162, %false_290, %1164, %int1_291 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[128,128,3,3],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,28,28],f32>
%1166 = torch.aten.relu %1165 : !torch.vtensor<[1,128,28,28],f32> -> !torch.vtensor<[1,128,28,28],f32>
%1167 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1168 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_292 = torch.constant.int 12
%1169 = torch.aten.item %1167 : !torch.vtensor<[],f32> -> !torch.float
%1170 = torch.aten.item %1168 : !torch.vtensor<[],si8> -> !torch.int
%1171 = torch.aten.quantize_per_tensor %1166, %1169, %1170, %int12_292 : !torch.vtensor<[1,128,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1172 = torch.aten.int_repr %1171 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],si8>
%1173 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1174 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1175 = torch.aten.item %1173 : !torch.vtensor<[],f32> -> !torch.float
%1176 = torch.aten.item %1174 : !torch.vtensor<[],si8> -> !torch.int
%1177 = torch.aten._make_per_tensor_quantized_tensor %1172, %1175, %1176 : !torch.vtensor<[1,128,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,28,28],!torch.qint8>
%1178 = torch.aten.dequantize.self %1177 : !torch.vtensor<[1,128,28,28],!torch.qint8> -> !torch.vtensor<[1,128,28,28],f32>
%1179 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1180 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_293 = torch.constant.int 12
%1181 = torch.aten.item %1179 : !torch.vtensor<[],f32> -> !torch.float
%1182 = torch.aten.item %1180 : !torch.vtensor<[],si8> -> !torch.int
%1183 = torch.aten.quantize_per_tensor %46, %1181, %1182, %int12_293 : !torch.vtensor<[512,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%1184 = torch.aten.int_repr %1183 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],si8>
%1185 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1186 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1187 = torch.aten.item %1185 : !torch.vtensor<[],f32> -> !torch.float
%1188 = torch.aten.item %1186 : !torch.vtensor<[],si8> -> !torch.int
%1189 = torch.aten._make_per_tensor_quantized_tensor %1184, %1187, %1188 : !torch.vtensor<[512,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,128,1,1],!torch.qint8>
%1190 = torch.aten.dequantize.self %1189 : !torch.vtensor<[512,128,1,1],!torch.qint8> -> !torch.vtensor<[512,128,1,1],f32>
%1191 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1192 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_294 = torch.constant.int 12
%1193 = torch.aten.item %1191 : !torch.vtensor<[],f32> -> !torch.float
%1194 = torch.aten.item %1192 : !torch.vtensor<[],si8> -> !torch.int
%1195 = torch.aten.quantize_per_tensor %47, %1193, %1194, %int12_294 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%1196 = torch.aten.int_repr %1195 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%1197 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1198 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1199 = torch.aten.item %1197 : !torch.vtensor<[],f32> -> !torch.float
%1200 = torch.aten.item %1198 : !torch.vtensor<[],si8> -> !torch.int
%1201 = torch.aten._make_per_tensor_quantized_tensor %1196, %1199, %1200 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%1202 = torch.aten.dequantize.self %1201 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_295 = torch.constant.int 0
%int0_296 = torch.constant.int 0
%int1_297 = torch.constant.int 1
%int1_298 = torch.constant.int 1
%int1_299 = torch.constant.int 1
%int1_300 = torch.constant.int 1
%int0_301 = torch.constant.int 0
%1203 = torch.prim.ListConstruct %int0_295, %int0_296 : (!torch.int, !torch.int) -> !torch.list<int>
%1204 = torch.prim.ListConstruct %int1_297, %int1_298 : (!torch.int, !torch.int) -> !torch.list<int>
%1205 = torch.prim.ListConstruct %int1_299, %int1_300 : (!torch.int, !torch.int) -> !torch.list<int>
%1206 = torch.prim.ListConstruct %int0_301, %int0_301 : (!torch.int, !torch.int) -> !torch.list<int>
%false_302 = torch.constant.bool false
%int1_303 = torch.constant.int 1
%1207 = torch.aten.convolution %1178, %1190, %1202, %1205, %1203, %1204, %false_302, %1206, %int1_303 : !torch.vtensor<[1,128,28,28],f32>, !torch.vtensor<[512,128,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%1208 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1209 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_304 = torch.constant.int 12
%1210 = torch.aten.item %1208 : !torch.vtensor<[],f32> -> !torch.float
%1211 = torch.aten.item %1209 : !torch.vtensor<[],si8> -> !torch.int
%1212 = torch.aten.quantize_per_tensor %1207, %1210, %1211, %int12_304 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1213 = torch.aten.int_repr %1212 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%1214 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1215 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1216 = torch.aten.item %1214 : !torch.vtensor<[],f32> -> !torch.float
%1217 = torch.aten.item %1215 : !torch.vtensor<[],si8> -> !torch.int
%1218 = torch.aten._make_per_tensor_quantized_tensor %1213, %1216, %1217 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1219 = torch.aten.dequantize.self %1218 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
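    // End of the next bottleneck unit: the skip add below joins %1094 (this unit's
    // dequantized input) with the main path before the ReLU.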
%int1_305 = torch.constant.int 1
%1220 = torch.aten.add.Tensor %1219, %1094, %int1_305 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1,512,28,28],f32>, !torch.int -> !torch.vtensor<[1,512,28,28],f32>
%1221 = torch.aten.relu %1220 : !torch.vtensor<[1,512,28,28],f32> -> !torch.vtensor<[1,512,28,28],f32>
%1222 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1223 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_306 = torch.constant.int 12
%1224 = torch.aten.item %1222 : !torch.vtensor<[],f32> -> !torch.float
%1225 = torch.aten.item %1223 : !torch.vtensor<[],si8> -> !torch.int
%1226 = torch.aten.quantize_per_tensor %1221, %1224, %1225, %int12_306 : !torch.vtensor<[1,512,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1227 = torch.aten.int_repr %1226 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],si8>
%1228 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1229 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1230 = torch.aten.item %1228 : !torch.vtensor<[],f32> -> !torch.float
%1231 = torch.aten.item %1229 : !torch.vtensor<[],si8> -> !torch.int
%1232 = torch.aten._make_per_tensor_quantized_tensor %1227, %1230, %1231 : !torch.vtensor<[1,512,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,28,28],!torch.qint8>
%1233 = torch.aten.dequantize.self %1232 : !torch.vtensor<[1,512,28,28],!torch.qint8> -> !torch.vtensor<[1,512,28,28],f32>
%1234 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1235 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_307 = torch.constant.int 12
%1236 = torch.aten.item %1234 : !torch.vtensor<[],f32> -> !torch.float
%1237 = torch.aten.item %1235 : !torch.vtensor<[],si8> -> !torch.int
%1238 = torch.aten.quantize_per_tensor %48, %1236, %1237, %int12_307 : !torch.vtensor<[256,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,1,1],!torch.qint8>
%1239 = torch.aten.int_repr %1238 : !torch.vtensor<[256,512,1,1],!torch.qint8> -> !torch.vtensor<[256,512,1,1],si8>
%1240 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1241 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1242 = torch.aten.item %1240 : !torch.vtensor<[],f32> -> !torch.float
%1243 = torch.aten.item %1241 : !torch.vtensor<[],si8> -> !torch.int
%1244 = torch.aten._make_per_tensor_quantized_tensor %1239, %1242, %1243 : !torch.vtensor<[256,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,1,1],!torch.qint8>
%1245 = torch.aten.dequantize.self %1244 : !torch.vtensor<[256,512,1,1],!torch.qint8> -> !torch.vtensor<[256,512,1,1],f32>
%1246 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1247 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_308 = torch.constant.int 12
%1248 = torch.aten.item %1246 : !torch.vtensor<[],f32> -> !torch.float
%1249 = torch.aten.item %1247 : !torch.vtensor<[],si8> -> !torch.int
%1250 = torch.aten.quantize_per_tensor %49, %1248, %1249, %int12_308 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1251 = torch.aten.int_repr %1250 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1252 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1253 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1254 = torch.aten.item %1252 : !torch.vtensor<[],f32> -> !torch.float
%1255 = torch.aten.item %1253 : !torch.vtensor<[],si8> -> !torch.int
%1256 = torch.aten._make_per_tensor_quantized_tensor %1251, %1254, %1255 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1257 = torch.aten.dequantize.self %1256 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_309 = torch.constant.int 0
%int0_310 = torch.constant.int 0
%int1_311 = torch.constant.int 1
%int1_312 = torch.constant.int 1
%int1_313 = torch.constant.int 1
%int1_314 = torch.constant.int 1
%int0_315 = torch.constant.int 0
%1258 = torch.prim.ListConstruct %int0_309, %int0_310 : (!torch.int, !torch.int) -> !torch.list<int>
%1259 = torch.prim.ListConstruct %int1_311, %int1_312 : (!torch.int, !torch.int) -> !torch.list<int>
%1260 = torch.prim.ListConstruct %int1_313, %int1_314 : (!torch.int, !torch.int) -> !torch.list<int>
%1261 = torch.prim.ListConstruct %int0_315, %int0_315 : (!torch.int, !torch.int) -> !torch.list<int>
%false_316 = torch.constant.bool false
%int1_317 = torch.constant.int 1
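    // First unit of what appears to be the next stage: a 1x1 conv maps
    // [1,512,28,28] -> [1,256,28,28], doubling the bottleneck width to 256 channels
    // while still at 28x28.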
%1262 = torch.aten.convolution %1233, %1245, %1257, %1260, %1258, %1259, %false_316, %1261, %int1_317 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[256,512,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,28,28],f32>
%1263 = torch.aten.relu %1262 : !torch.vtensor<[1,256,28,28],f32> -> !torch.vtensor<[1,256,28,28],f32>
%1264 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1265 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_318 = torch.constant.int 12
%1266 = torch.aten.item %1264 : !torch.vtensor<[],f32> -> !torch.float
%1267 = torch.aten.item %1265 : !torch.vtensor<[],si8> -> !torch.int
%1268 = torch.aten.quantize_per_tensor %1263, %1266, %1267, %int12_318 : !torch.vtensor<[1,256,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1269 = torch.aten.int_repr %1268 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],si8>
%1270 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1271 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1272 = torch.aten.item %1270 : !torch.vtensor<[],f32> -> !torch.float
%1273 = torch.aten.item %1271 : !torch.vtensor<[],si8> -> !torch.int
%1274 = torch.aten._make_per_tensor_quantized_tensor %1269, %1272, %1273 : !torch.vtensor<[1,256,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,28,28],!torch.qint8>
%1275 = torch.aten.dequantize.self %1274 : !torch.vtensor<[1,256,28,28],!torch.qint8> -> !torch.vtensor<[1,256,28,28],f32>
%1276 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1277 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_319 = torch.constant.int 12
%1278 = torch.aten.item %1276 : !torch.vtensor<[],f32> -> !torch.float
%1279 = torch.aten.item %1277 : !torch.vtensor<[],si8> -> !torch.int
%1280 = torch.aten.quantize_per_tensor %50, %1278, %1279, %int12_319 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1281 = torch.aten.int_repr %1280 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1282 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1283 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1284 = torch.aten.item %1282 : !torch.vtensor<[],f32> -> !torch.float
%1285 = torch.aten.item %1283 : !torch.vtensor<[],si8> -> !torch.int
%1286 = torch.aten._make_per_tensor_quantized_tensor %1281, %1284, %1285 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1287 = torch.aten.dequantize.self %1286 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1288 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1289 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_320 = torch.constant.int 12
%1290 = torch.aten.item %1288 : !torch.vtensor<[],f32> -> !torch.float
%1291 = torch.aten.item %1289 : !torch.vtensor<[],si8> -> !torch.int
%1292 = torch.aten.quantize_per_tensor %51, %1290, %1291, %int12_320 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1293 = torch.aten.int_repr %1292 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1294 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1295 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1296 = torch.aten.item %1294 : !torch.vtensor<[],f32> -> !torch.float
%1297 = torch.aten.item %1295 : !torch.vtensor<[],si8> -> !torch.int
%1298 = torch.aten._make_per_tensor_quantized_tensor %1293, %1296, %1297 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1299 = torch.aten.dequantize.self %1298 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_321 = torch.constant.int 1
%int1_322 = torch.constant.int 1
%int1_323 = torch.constant.int 1
%int1_324 = torch.constant.int 1
%int2_325 = torch.constant.int 2
%int2_326 = torch.constant.int 2
%int0_327 = torch.constant.int 0
%1300 = torch.prim.ListConstruct %int1_321, %int1_322 : (!torch.int, !torch.int) -> !torch.list<int>
%1301 = torch.prim.ListConstruct %int1_323, %int1_324 : (!torch.int, !torch.int) -> !torch.list<int>
%1302 = torch.prim.ListConstruct %int2_325, %int2_326 : (!torch.int, !torch.int) -> !torch.list<int>
%1303 = torch.prim.ListConstruct %int0_327, %int0_327 : (!torch.int, !torch.int) -> !torch.list<int>
%false_328 = torch.constant.bool false
%int1_329 = torch.constant.int 1
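    // Downsampling 3x3 conv: stride = %1302 = [2,2] with padding = %1300 = [1,1] halves
    // the spatial dims, 28x28 -> 14x14.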
%1304 = torch.aten.convolution %1275, %1287, %1299, %1302, %1300, %1301, %false_328, %1303, %int1_329 : !torch.vtensor<[1,256,28,28],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1305 = torch.aten.relu %1304 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%1306 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1307 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_330 = torch.constant.int 12
%1308 = torch.aten.item %1306 : !torch.vtensor<[],f32> -> !torch.float
%1309 = torch.aten.item %1307 : !torch.vtensor<[],si8> -> !torch.int
%1310 = torch.aten.quantize_per_tensor %1305, %1308, %1309, %int12_330 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1311 = torch.aten.int_repr %1310 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%1312 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1313 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1314 = torch.aten.item %1312 : !torch.vtensor<[],f32> -> !torch.float
%1315 = torch.aten.item %1313 : !torch.vtensor<[],si8> -> !torch.int
%1316 = torch.aten._make_per_tensor_quantized_tensor %1311, %1314, %1315 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1317 = torch.aten.dequantize.self %1316 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
%1318 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1319 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_331 = torch.constant.int 12
%1320 = torch.aten.item %1318 : !torch.vtensor<[],f32> -> !torch.float
%1321 = torch.aten.item %1319 : !torch.vtensor<[],si8> -> !torch.int
%1322 = torch.aten.quantize_per_tensor %52, %1320, %1321, %int12_331 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1323 = torch.aten.int_repr %1322 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1324 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1325 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1326 = torch.aten.item %1324 : !torch.vtensor<[],f32> -> !torch.float
%1327 = torch.aten.item %1325 : !torch.vtensor<[],si8> -> !torch.int
%1328 = torch.aten._make_per_tensor_quantized_tensor %1323, %1326, %1327 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1329 = torch.aten.dequantize.self %1328 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%1330 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1331 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_332 = torch.constant.int 12
%1332 = torch.aten.item %1330 : !torch.vtensor<[],f32> -> !torch.float
%1333 = torch.aten.item %1331 : !torch.vtensor<[],si8> -> !torch.int
%1334 = torch.aten.quantize_per_tensor %53, %1332, %1333, %int12_332 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1335 = torch.aten.int_repr %1334 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1336 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1337 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1338 = torch.aten.item %1336 : !torch.vtensor<[],f32> -> !torch.float
%1339 = torch.aten.item %1337 : !torch.vtensor<[],si8> -> !torch.int
%1340 = torch.aten._make_per_tensor_quantized_tensor %1335, %1338, %1339 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1341 = torch.aten.dequantize.self %1340 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
%int0_333 = torch.constant.int 0
%int0_334 = torch.constant.int 0
%int1_335 = torch.constant.int 1
%int1_336 = torch.constant.int 1
%int1_337 = torch.constant.int 1
%int1_338 = torch.constant.int 1
%int0_339 = torch.constant.int 0
%1342 = torch.prim.ListConstruct %int0_333, %int0_334 : (!torch.int, !torch.int) -> !torch.list<int>
%1343 = torch.prim.ListConstruct %int1_335, %int1_336 : (!torch.int, !torch.int) -> !torch.list<int>
%1344 = torch.prim.ListConstruct %int1_337, %int1_338 : (!torch.int, !torch.int) -> !torch.list<int>
%1345 = torch.prim.ListConstruct %int0_339, %int0_339 : (!torch.int, !torch.int) -> !torch.list<int>
%false_340 = torch.constant.bool false
%int1_341 = torch.constant.int 1
%1346 = torch.aten.convolution %1317, %1329, %1341, %1344, %1342, %1343, %false_340, %1345, %int1_341 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1347 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1348 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_342 = torch.constant.int 12
%1349 = torch.aten.item %1347 : !torch.vtensor<[],f32> -> !torch.float
%1350 = torch.aten.item %1348 : !torch.vtensor<[],si8> -> !torch.int
%1351 = torch.aten.quantize_per_tensor %1346, %1349, %1350, %int12_342 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1352 = torch.aten.int_repr %1351 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1353 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1354 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1355 = torch.aten.item %1353 : !torch.vtensor<[],f32> -> !torch.float
%1356 = torch.aten.item %1354 : !torch.vtensor<[],si8> -> !torch.int
%1357 = torch.aten._make_per_tensor_quantized_tensor %1352, %1355, %1356 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1358 = torch.aten.dequantize.self %1357 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
%1359 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1360 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_343 = torch.constant.int 12
%1361 = torch.aten.item %1359 : !torch.vtensor<[],f32> -> !torch.float
%1362 = torch.aten.item %1360 : !torch.vtensor<[],si8> -> !torch.int
%1363 = torch.aten.quantize_per_tensor %54, %1361, %1362, %int12_343 : !torch.vtensor<[1024,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,512,1,1],!torch.qint8>
%1364 = torch.aten.int_repr %1363 : !torch.vtensor<[1024,512,1,1],!torch.qint8> -> !torch.vtensor<[1024,512,1,1],si8>
%1365 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1366 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1367 = torch.aten.item %1365 : !torch.vtensor<[],f32> -> !torch.float
%1368 = torch.aten.item %1366 : !torch.vtensor<[],si8> -> !torch.int
%1369 = torch.aten._make_per_tensor_quantized_tensor %1364, %1367, %1368 : !torch.vtensor<[1024,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,512,1,1],!torch.qint8>
%1370 = torch.aten.dequantize.self %1369 : !torch.vtensor<[1024,512,1,1],!torch.qint8> -> !torch.vtensor<[1024,512,1,1],f32>
%1371 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1372 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_344 = torch.constant.int 12
%1373 = torch.aten.item %1371 : !torch.vtensor<[],f32> -> !torch.float
%1374 = torch.aten.item %1372 : !torch.vtensor<[],si8> -> !torch.int
%1375 = torch.aten.quantize_per_tensor %55, %1373, %1374, %int12_344 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1376 = torch.aten.int_repr %1375 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1377 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1378 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1379 = torch.aten.item %1377 : !torch.vtensor<[],f32> -> !torch.float
%1380 = torch.aten.item %1378 : !torch.vtensor<[],si8> -> !torch.int
%1381 = torch.aten._make_per_tensor_quantized_tensor %1376, %1379, %1380 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1382 = torch.aten.dequantize.self %1381 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
%int0_345 = torch.constant.int 0
%int0_346 = torch.constant.int 0
%int1_347 = torch.constant.int 1
%int1_348 = torch.constant.int 1
%int2_349 = torch.constant.int 2
%int2_350 = torch.constant.int 2
%int0_351 = torch.constant.int 0
%1383 = torch.prim.ListConstruct %int0_345, %int0_346 : (!torch.int, !torch.int) -> !torch.list<int>
%1384 = torch.prim.ListConstruct %int1_347, %int1_348 : (!torch.int, !torch.int) -> !torch.list<int>
%1385 = torch.prim.ListConstruct %int2_349, %int2_350 : (!torch.int, !torch.int) -> !torch.list<int>
%1386 = torch.prim.ListConstruct %int0_351, %int0_351 : (!torch.int, !torch.int) -> !torch.list<int>
%false_352 = torch.constant.bool false
%int1_353 = torch.constant.int 1
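    // Projection shortcut: a 1x1 stride-2 conv applied to the stage input %1233
    // ([1,512,28,28] -> [1,1024,14,14]) so the skip path matches the main path's shape.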
%1387 = torch.aten.convolution %1233, %1370, %1382, %1385, %1383, %1384, %false_352, %1386, %int1_353 : !torch.vtensor<[1,512,28,28],f32>, !torch.vtensor<[1024,512,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1388 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1389 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_354 = torch.constant.int 12
%1390 = torch.aten.item %1388 : !torch.vtensor<[],f32> -> !torch.float
%1391 = torch.aten.item %1389 : !torch.vtensor<[],si8> -> !torch.int
%1392 = torch.aten.quantize_per_tensor %1387, %1390, %1391, %int12_354 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1393 = torch.aten.int_repr %1392 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1394 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1395 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1396 = torch.aten.item %1394 : !torch.vtensor<[],f32> -> !torch.float
%1397 = torch.aten.item %1395 : !torch.vtensor<[],si8> -> !torch.int
%1398 = torch.aten._make_per_tensor_quantized_tensor %1393, %1396, %1397 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1399 = torch.aten.dequantize.self %1398 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
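    // Main path (%1358) and projection shortcut (%1399) are summed and ReLU'd; note there
    // is no ReLU on the shortcut conv itself, matching the usual residual layout.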
%int1_355 = torch.constant.int 1
%1400 = torch.aten.add.Tensor %1358, %1399, %int1_355 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[1,1024,14,14],f32>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1401 = torch.aten.relu %1400 : !torch.vtensor<[1,1024,14,14],f32> -> !torch.vtensor<[1,1024,14,14],f32>
%1402 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1403 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_356 = torch.constant.int 12
%1404 = torch.aten.item %1402 : !torch.vtensor<[],f32> -> !torch.float
%1405 = torch.aten.item %1403 : !torch.vtensor<[],si8> -> !torch.int
%1406 = torch.aten.quantize_per_tensor %1401, %1404, %1405, %int12_356 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1407 = torch.aten.int_repr %1406 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1408 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1409 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1410 = torch.aten.item %1408 : !torch.vtensor<[],f32> -> !torch.float
%1411 = torch.aten.item %1409 : !torch.vtensor<[],si8> -> !torch.int
%1412 = torch.aten._make_per_tensor_quantized_tensor %1407, %1410, %1411 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1413 = torch.aten.dequantize.self %1412 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
%1414 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1415 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_357 = torch.constant.int 12
%1416 = torch.aten.item %1414 : !torch.vtensor<[],f32> -> !torch.float
%1417 = torch.aten.item %1415 : !torch.vtensor<[],si8> -> !torch.int
%1418 = torch.aten.quantize_per_tensor %56, %1416, %1417, %int12_357 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1419 = torch.aten.int_repr %1418 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
%1420 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1421 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1422 = torch.aten.item %1420 : !torch.vtensor<[],f32> -> !torch.float
%1423 = torch.aten.item %1421 : !torch.vtensor<[],si8> -> !torch.int
%1424 = torch.aten._make_per_tensor_quantized_tensor %1419, %1422, %1423 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1425 = torch.aten.dequantize.self %1424 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
%1426 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1427 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_358 = torch.constant.int 12
%1428 = torch.aten.item %1426 : !torch.vtensor<[],f32> -> !torch.float
%1429 = torch.aten.item %1427 : !torch.vtensor<[],si8> -> !torch.int
%1430 = torch.aten.quantize_per_tensor %57, %1428, %1429, %int12_358 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1431 = torch.aten.int_repr %1430 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1432 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1433 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1434 = torch.aten.item %1432 : !torch.vtensor<[],f32> -> !torch.float
%1435 = torch.aten.item %1433 : !torch.vtensor<[],si8> -> !torch.int
%1436 = torch.aten._make_per_tensor_quantized_tensor %1431, %1434, %1435 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1437 = torch.aten.dequantize.self %1436 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_359 = torch.constant.int 0
%int0_360 = torch.constant.int 0
%int1_361 = torch.constant.int 1
%int1_362 = torch.constant.int 1
%int1_363 = torch.constant.int 1
%int1_364 = torch.constant.int 1
%int0_365 = torch.constant.int 0
%1438 = torch.prim.ListConstruct %int0_359, %int0_360 : (!torch.int, !torch.int) -> !torch.list<int>
%1439 = torch.prim.ListConstruct %int1_361, %int1_362 : (!torch.int, !torch.int) -> !torch.list<int>
%1440 = torch.prim.ListConstruct %int1_363, %int1_364 : (!torch.int, !torch.int) -> !torch.list<int>
%1441 = torch.prim.ListConstruct %int0_365, %int0_365 : (!torch.int, !torch.int) -> !torch.list<int>
%false_366 = torch.constant.bool false
%int1_367 = torch.constant.int 1
%1442 = torch.aten.convolution %1413, %1425, %1437, %1440, %1438, %1439, %false_366, %1441, %int1_367 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1443 = torch.aten.relu %1442 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%1444 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1445 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_368 = torch.constant.int 12
%1446 = torch.aten.item %1444 : !torch.vtensor<[],f32> -> !torch.float
%1447 = torch.aten.item %1445 : !torch.vtensor<[],si8> -> !torch.int
%1448 = torch.aten.quantize_per_tensor %1443, %1446, %1447, %int12_368 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1449 = torch.aten.int_repr %1448 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%1450 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1451 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1452 = torch.aten.item %1450 : !torch.vtensor<[],f32> -> !torch.float
%1453 = torch.aten.item %1451 : !torch.vtensor<[],si8> -> !torch.int
%1454 = torch.aten._make_per_tensor_quantized_tensor %1449, %1452, %1453 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1455 = torch.aten.dequantize.self %1454 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
%1456 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1457 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_369 = torch.constant.int 12
%1458 = torch.aten.item %1456 : !torch.vtensor<[],f32> -> !torch.float
%1459 = torch.aten.item %1457 : !torch.vtensor<[],si8> -> !torch.int
%1460 = torch.aten.quantize_per_tensor %58, %1458, %1459, %int12_369 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1461 = torch.aten.int_repr %1460 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1462 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1463 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1464 = torch.aten.item %1462 : !torch.vtensor<[],f32> -> !torch.float
%1465 = torch.aten.item %1463 : !torch.vtensor<[],si8> -> !torch.int
%1466 = torch.aten._make_per_tensor_quantized_tensor %1461, %1464, %1465 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1467 = torch.aten.dequantize.self %1466 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1468 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1469 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_370 = torch.constant.int 12
%1470 = torch.aten.item %1468 : !torch.vtensor<[],f32> -> !torch.float
%1471 = torch.aten.item %1469 : !torch.vtensor<[],si8> -> !torch.int
%1472 = torch.aten.quantize_per_tensor %59, %1470, %1471, %int12_370 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1473 = torch.aten.int_repr %1472 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1474 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1475 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1476 = torch.aten.item %1474 : !torch.vtensor<[],f32> -> !torch.float
%1477 = torch.aten.item %1475 : !torch.vtensor<[],si8> -> !torch.int
%1478 = torch.aten._make_per_tensor_quantized_tensor %1473, %1476, %1477 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1479 = torch.aten.dequantize.self %1478 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_371 = torch.constant.int 1
%int1_372 = torch.constant.int 1
%int1_373 = torch.constant.int 1
%int1_374 = torch.constant.int 1
%int1_375 = torch.constant.int 1
%int1_376 = torch.constant.int 1
%int0_377 = torch.constant.int 0
%1480 = torch.prim.ListConstruct %int1_371, %int1_372 : (!torch.int, !torch.int) -> !torch.list<int>
%1481 = torch.prim.ListConstruct %int1_373, %int1_374 : (!torch.int, !torch.int) -> !torch.list<int>
%1482 = torch.prim.ListConstruct %int1_375, %int1_376 : (!torch.int, !torch.int) -> !torch.list<int>
%1483 = torch.prim.ListConstruct %int0_377, %int0_377 : (!torch.int, !torch.int) -> !torch.list<int>
%false_378 = torch.constant.bool false
%int1_379 = torch.constant.int 1
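    // 3x3 conv of the following bottleneck unit, now at 14x14: stride = %1482 = [1,1],
    // padding = %1480 = [1,1], so spatial dims are unchanged.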
%1484 = torch.aten.convolution %1455, %1467, %1479, %1482, %1480, %1481, %false_378, %1483, %int1_379 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1485 = torch.aten.relu %1484 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%1486 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1487 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_380 = torch.constant.int 12
%1488 = torch.aten.item %1486 : !torch.vtensor<[],f32> -> !torch.float
%1489 = torch.aten.item %1487 : !torch.vtensor<[],si8> -> !torch.int
%1490 = torch.aten.quantize_per_tensor %1485, %1488, %1489, %int12_380 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1491 = torch.aten.int_repr %1490 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%1492 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1493 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1494 = torch.aten.item %1492 : !torch.vtensor<[],f32> -> !torch.float
%1495 = torch.aten.item %1493 : !torch.vtensor<[],si8> -> !torch.int
%1496 = torch.aten._make_per_tensor_quantized_tensor %1491, %1494, %1495 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1497 = torch.aten.dequantize.self %1496 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
%1498 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1499 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_381 = torch.constant.int 12
%1500 = torch.aten.item %1498 : !torch.vtensor<[],f32> -> !torch.float
%1501 = torch.aten.item %1499 : !torch.vtensor<[],si8> -> !torch.int
%1502 = torch.aten.quantize_per_tensor %60, %1500, %1501, %int12_381 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1503 = torch.aten.int_repr %1502 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1504 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1505 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1506 = torch.aten.item %1504 : !torch.vtensor<[],f32> -> !torch.float
%1507 = torch.aten.item %1505 : !torch.vtensor<[],si8> -> !torch.int
%1508 = torch.aten._make_per_tensor_quantized_tensor %1503, %1506, %1507 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1509 = torch.aten.dequantize.self %1508 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%1510 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1511 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_382 = torch.constant.int 12
%1512 = torch.aten.item %1510 : !torch.vtensor<[],f32> -> !torch.float
%1513 = torch.aten.item %1511 : !torch.vtensor<[],si8> -> !torch.int
%1514 = torch.aten.quantize_per_tensor %61, %1512, %1513, %int12_382 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1515 = torch.aten.int_repr %1514 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1516 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1517 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1518 = torch.aten.item %1516 : !torch.vtensor<[],f32> -> !torch.float
%1519 = torch.aten.item %1517 : !torch.vtensor<[],si8> -> !torch.int
%1520 = torch.aten._make_per_tensor_quantized_tensor %1515, %1518, %1519 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1521 = torch.aten.dequantize.self %1520 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
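    // 1x1 projection conv, 256 -> 1024 channels (stride 1, no padding): the expansion
    // stage of what appears to be a ResNet-style bottleneck on the 14x14 feature map.
    // Weight %60 was fake-quantized above at 1/256 and bias %61 at 1/128.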
%int0_383 = torch.constant.int 0
%int0_384 = torch.constant.int 0
%int1_385 = torch.constant.int 1
%int1_386 = torch.constant.int 1
%int1_387 = torch.constant.int 1
%int1_388 = torch.constant.int 1
%int0_389 = torch.constant.int 0
%1522 = torch.prim.ListConstruct %int0_383, %int0_384 : (!torch.int, !torch.int) -> !torch.list<int>
%1523 = torch.prim.ListConstruct %int1_385, %int1_386 : (!torch.int, !torch.int) -> !torch.list<int>
%1524 = torch.prim.ListConstruct %int1_387, %int1_388 : (!torch.int, !torch.int) -> !torch.list<int>
%1525 = torch.prim.ListConstruct %int0_389, %int0_389 : (!torch.int, !torch.int) -> !torch.list<int>
%false_390 = torch.constant.bool false
%int1_391 = torch.constant.int 1
%1526 = torch.aten.convolution %1497, %1509, %1521, %1524, %1522, %1523, %false_390, %1525, %int1_391 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1527 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1528 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_392 = torch.constant.int 12
%1529 = torch.aten.item %1527 : !torch.vtensor<[],f32> -> !torch.float
%1530 = torch.aten.item %1528 : !torch.vtensor<[],si8> -> !torch.int
%1531 = torch.aten.quantize_per_tensor %1526, %1529, %1530, %int12_392 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1532 = torch.aten.int_repr %1531 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1533 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1534 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1535 = torch.aten.item %1533 : !torch.vtensor<[],f32> -> !torch.float
%1536 = torch.aten.item %1534 : !torch.vtensor<[],si8> -> !torch.int
%1537 = torch.aten._make_per_tensor_quantized_tensor %1532, %1535, %1536 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1538 = torch.aten.dequantize.self %1537 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
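    // Residual connection: add the skip-path activation %1413, apply ReLU, and
    // fake-quantize the result at 1/64, closing this bottleneck unit.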
%int1_393 = torch.constant.int 1
%1539 = torch.aten.add.Tensor %1538, %1413, %int1_393 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[1,1024,14,14],f32>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1540 = torch.aten.relu %1539 : !torch.vtensor<[1,1024,14,14],f32> -> !torch.vtensor<[1,1024,14,14],f32>
%1541 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1542 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_394 = torch.constant.int 12
%1543 = torch.aten.item %1541 : !torch.vtensor<[],f32> -> !torch.float
%1544 = torch.aten.item %1542 : !torch.vtensor<[],si8> -> !torch.int
%1545 = torch.aten.quantize_per_tensor %1540, %1543, %1544, %int12_394 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1546 = torch.aten.int_repr %1545 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1547 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1548 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1549 = torch.aten.item %1547 : !torch.vtensor<[],f32> -> !torch.float
%1550 = torch.aten.item %1548 : !torch.vtensor<[],si8> -> !torch.int
%1551 = torch.aten._make_per_tensor_quantized_tensor %1546, %1549, %1550 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1552 = torch.aten.dequantize.self %1551 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
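    // Next bottleneck unit: 1x1 reduce (1024 -> 256, weight %62), 3x3 conv (weight %64),
    // 1x1 expand (256 -> 1024, weight %66), each weight and bias passing through the
    // same QDQ round trip as above.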
%1553 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1554 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_395 = torch.constant.int 12
%1555 = torch.aten.item %1553 : !torch.vtensor<[],f32> -> !torch.float
%1556 = torch.aten.item %1554 : !torch.vtensor<[],si8> -> !torch.int
%1557 = torch.aten.quantize_per_tensor %62, %1555, %1556, %int12_395 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1558 = torch.aten.int_repr %1557 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
%1559 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1560 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1561 = torch.aten.item %1559 : !torch.vtensor<[],f32> -> !torch.float
%1562 = torch.aten.item %1560 : !torch.vtensor<[],si8> -> !torch.int
%1563 = torch.aten._make_per_tensor_quantized_tensor %1558, %1561, %1562 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1564 = torch.aten.dequantize.self %1563 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
%1565 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1566 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_396 = torch.constant.int 12
%1567 = torch.aten.item %1565 : !torch.vtensor<[],f32> -> !torch.float
%1568 = torch.aten.item %1566 : !torch.vtensor<[],si8> -> !torch.int
%1569 = torch.aten.quantize_per_tensor %63, %1567, %1568, %int12_396 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1570 = torch.aten.int_repr %1569 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1571 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1572 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1573 = torch.aten.item %1571 : !torch.vtensor<[],f32> -> !torch.float
%1574 = torch.aten.item %1572 : !torch.vtensor<[],si8> -> !torch.int
%1575 = torch.aten._make_per_tensor_quantized_tensor %1570, %1573, %1574 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1576 = torch.aten.dequantize.self %1575 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_397 = torch.constant.int 0
%int0_398 = torch.constant.int 0
%int1_399 = torch.constant.int 1
%int1_400 = torch.constant.int 1
%int1_401 = torch.constant.int 1
%int1_402 = torch.constant.int 1
%int0_403 = torch.constant.int 0
%1577 = torch.prim.ListConstruct %int0_397, %int0_398 : (!torch.int, !torch.int) -> !torch.list<int>
%1578 = torch.prim.ListConstruct %int1_399, %int1_400 : (!torch.int, !torch.int) -> !torch.list<int>
%1579 = torch.prim.ListConstruct %int1_401, %int1_402 : (!torch.int, !torch.int) -> !torch.list<int>
%1580 = torch.prim.ListConstruct %int0_403, %int0_403 : (!torch.int, !torch.int) -> !torch.list<int>
%false_404 = torch.constant.bool false
%int1_405 = torch.constant.int 1
%1581 = torch.aten.convolution %1552, %1564, %1576, %1579, %1577, %1578, %false_404, %1580, %int1_405 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1582 = torch.aten.relu %1581 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%1583 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1584 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_406 = torch.constant.int 12
%1585 = torch.aten.item %1583 : !torch.vtensor<[],f32> -> !torch.float
%1586 = torch.aten.item %1584 : !torch.vtensor<[],si8> -> !torch.int
%1587 = torch.aten.quantize_per_tensor %1582, %1585, %1586, %int12_406 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1588 = torch.aten.int_repr %1587 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%1589 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1590 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1591 = torch.aten.item %1589 : !torch.vtensor<[],f32> -> !torch.float
%1592 = torch.aten.item %1590 : !torch.vtensor<[],si8> -> !torch.int
%1593 = torch.aten._make_per_tensor_quantized_tensor %1588, %1591, %1592 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1594 = torch.aten.dequantize.self %1593 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
%1595 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1596 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_407 = torch.constant.int 12
%1597 = torch.aten.item %1595 : !torch.vtensor<[],f32> -> !torch.float
%1598 = torch.aten.item %1596 : !torch.vtensor<[],si8> -> !torch.int
%1599 = torch.aten.quantize_per_tensor %64, %1597, %1598, %int12_407 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1600 = torch.aten.int_repr %1599 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1601 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1602 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1603 = torch.aten.item %1601 : !torch.vtensor<[],f32> -> !torch.float
%1604 = torch.aten.item %1602 : !torch.vtensor<[],si8> -> !torch.int
%1605 = torch.aten._make_per_tensor_quantized_tensor %1600, %1603, %1604 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1606 = torch.aten.dequantize.self %1605 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1607 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1608 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_408 = torch.constant.int 12
%1609 = torch.aten.item %1607 : !torch.vtensor<[],f32> -> !torch.float
%1610 = torch.aten.item %1608 : !torch.vtensor<[],si8> -> !torch.int
%1611 = torch.aten.quantize_per_tensor %65, %1609, %1610, %int12_408 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1612 = torch.aten.int_repr %1611 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1613 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1614 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1615 = torch.aten.item %1613 : !torch.vtensor<[],f32> -> !torch.float
%1616 = torch.aten.item %1614 : !torch.vtensor<[],si8> -> !torch.int
%1617 = torch.aten._make_per_tensor_quantized_tensor %1612, %1615, %1616 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1618 = torch.aten.dequantize.self %1617 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_409 = torch.constant.int 1
%int1_410 = torch.constant.int 1
%int1_411 = torch.constant.int 1
%int1_412 = torch.constant.int 1
%int1_413 = torch.constant.int 1
%int1_414 = torch.constant.int 1
%int0_415 = torch.constant.int 0
%1619 = torch.prim.ListConstruct %int1_409, %int1_410 : (!torch.int, !torch.int) -> !torch.list<int>
%1620 = torch.prim.ListConstruct %int1_411, %int1_412 : (!torch.int, !torch.int) -> !torch.list<int>
%1621 = torch.prim.ListConstruct %int1_413, %int1_414 : (!torch.int, !torch.int) -> !torch.list<int>
%1622 = torch.prim.ListConstruct %int0_415, %int0_415 : (!torch.int, !torch.int) -> !torch.list<int>
%false_416 = torch.constant.bool false
%int1_417 = torch.constant.int 1
%1623 = torch.aten.convolution %1594, %1606, %1618, %1621, %1619, %1620, %false_416, %1622, %int1_417 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1624 = torch.aten.relu %1623 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%1625 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1626 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_418 = torch.constant.int 12
%1627 = torch.aten.item %1625 : !torch.vtensor<[],f32> -> !torch.float
%1628 = torch.aten.item %1626 : !torch.vtensor<[],si8> -> !torch.int
%1629 = torch.aten.quantize_per_tensor %1624, %1627, %1628, %int12_418 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1630 = torch.aten.int_repr %1629 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%1631 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1632 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1633 = torch.aten.item %1631 : !torch.vtensor<[],f32> -> !torch.float
%1634 = torch.aten.item %1632 : !torch.vtensor<[],si8> -> !torch.int
%1635 = torch.aten._make_per_tensor_quantized_tensor %1630, %1633, %1634 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1636 = torch.aten.dequantize.self %1635 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
%1637 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1638 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_419 = torch.constant.int 12
%1639 = torch.aten.item %1637 : !torch.vtensor<[],f32> -> !torch.float
%1640 = torch.aten.item %1638 : !torch.vtensor<[],si8> -> !torch.int
%1641 = torch.aten.quantize_per_tensor %66, %1639, %1640, %int12_419 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1642 = torch.aten.int_repr %1641 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1643 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1644 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1645 = torch.aten.item %1643 : !torch.vtensor<[],f32> -> !torch.float
%1646 = torch.aten.item %1644 : !torch.vtensor<[],si8> -> !torch.int
%1647 = torch.aten._make_per_tensor_quantized_tensor %1642, %1645, %1646 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1648 = torch.aten.dequantize.self %1647 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%1649 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1650 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_420 = torch.constant.int 12
%1651 = torch.aten.item %1649 : !torch.vtensor<[],f32> -> !torch.float
%1652 = torch.aten.item %1650 : !torch.vtensor<[],si8> -> !torch.int
%1653 = torch.aten.quantize_per_tensor %67, %1651, %1652, %int12_420 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1654 = torch.aten.int_repr %1653 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1655 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1656 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1657 = torch.aten.item %1655 : !torch.vtensor<[],f32> -> !torch.float
%1658 = torch.aten.item %1656 : !torch.vtensor<[],si8> -> !torch.int
%1659 = torch.aten._make_per_tensor_quantized_tensor %1654, %1657, %1658 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1660 = torch.aten.dequantize.self %1659 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
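    // 1x1 expansion conv back to 1024 channels; its output is fake-quantized at 1/256
    // before the residual add below.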
%int0_421 = torch.constant.int 0
%int0_422 = torch.constant.int 0
%int1_423 = torch.constant.int 1
%int1_424 = torch.constant.int 1
%int1_425 = torch.constant.int 1
%int1_426 = torch.constant.int 1
%int0_427 = torch.constant.int 0
%1661 = torch.prim.ListConstruct %int0_421, %int0_422 : (!torch.int, !torch.int) -> !torch.list<int>
%1662 = torch.prim.ListConstruct %int1_423, %int1_424 : (!torch.int, !torch.int) -> !torch.list<int>
%1663 = torch.prim.ListConstruct %int1_425, %int1_426 : (!torch.int, !torch.int) -> !torch.list<int>
%1664 = torch.prim.ListConstruct %int0_427, %int0_427 : (!torch.int, !torch.int) -> !torch.list<int>
%false_428 = torch.constant.bool false
%int1_429 = torch.constant.int 1
%1665 = torch.aten.convolution %1636, %1648, %1660, %1663, %1661, %1662, %false_428, %1664, %int1_429 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1666 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1667 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_430 = torch.constant.int 12
%1668 = torch.aten.item %1666 : !torch.vtensor<[],f32> -> !torch.float
%1669 = torch.aten.item %1667 : !torch.vtensor<[],si8> -> !torch.int
%1670 = torch.aten.quantize_per_tensor %1665, %1668, %1669, %int12_430 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1671 = torch.aten.int_repr %1670 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1672 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1673 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1674 = torch.aten.item %1672 : !torch.vtensor<[],f32> -> !torch.float
%1675 = torch.aten.item %1673 : !torch.vtensor<[],si8> -> !torch.int
%1676 = torch.aten._make_per_tensor_quantized_tensor %1671, %1674, %1675 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1677 = torch.aten.dequantize.self %1676 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
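    // Residual add with %1552 (this unit's input) and ReLU; the sum is requantized at 1/64.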
%int1_431 = torch.constant.int 1
%1678 = torch.aten.add.Tensor %1677, %1552, %int1_431 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[1,1024,14,14],f32>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1679 = torch.aten.relu %1678 : !torch.vtensor<[1,1024,14,14],f32> -> !torch.vtensor<[1,1024,14,14],f32>
%1680 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1681 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_432 = torch.constant.int 12
%1682 = torch.aten.item %1680 : !torch.vtensor<[],f32> -> !torch.float
%1683 = torch.aten.item %1681 : !torch.vtensor<[],si8> -> !torch.int
%1684 = torch.aten.quantize_per_tensor %1679, %1682, %1683, %int12_432 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1685 = torch.aten.int_repr %1684 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1686 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1687 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1688 = torch.aten.item %1686 : !torch.vtensor<[],f32> -> !torch.float
%1689 = torch.aten.item %1687 : !torch.vtensor<[],si8> -> !torch.int
%1690 = torch.aten._make_per_tensor_quantized_tensor %1685, %1688, %1689 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1691 = torch.aten.dequantize.self %1690 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
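    // The next bottleneck unit: the same reduce / 3x3 / expand structure with
    // weights %68, %70, %72 and biases %69, %71, %73.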
%1692 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1693 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_433 = torch.constant.int 12
%1694 = torch.aten.item %1692 : !torch.vtensor<[],f32> -> !torch.float
%1695 = torch.aten.item %1693 : !torch.vtensor<[],si8> -> !torch.int
%1696 = torch.aten.quantize_per_tensor %68, %1694, %1695, %int12_433 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1697 = torch.aten.int_repr %1696 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
%1698 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1699 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1700 = torch.aten.item %1698 : !torch.vtensor<[],f32> -> !torch.float
%1701 = torch.aten.item %1699 : !torch.vtensor<[],si8> -> !torch.int
%1702 = torch.aten._make_per_tensor_quantized_tensor %1697, %1700, %1701 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1703 = torch.aten.dequantize.self %1702 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
%1704 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1705 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_434 = torch.constant.int 12
%1706 = torch.aten.item %1704 : !torch.vtensor<[],f32> -> !torch.float
%1707 = torch.aten.item %1705 : !torch.vtensor<[],si8> -> !torch.int
%1708 = torch.aten.quantize_per_tensor %69, %1706, %1707, %int12_434 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1709 = torch.aten.int_repr %1708 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1710 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1711 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1712 = torch.aten.item %1710 : !torch.vtensor<[],f32> -> !torch.float
%1713 = torch.aten.item %1711 : !torch.vtensor<[],si8> -> !torch.int
%1714 = torch.aten._make_per_tensor_quantized_tensor %1709, %1712, %1713 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1715 = torch.aten.dequantize.self %1714 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_435 = torch.constant.int 0
%int0_436 = torch.constant.int 0
%int1_437 = torch.constant.int 1
%int1_438 = torch.constant.int 1
%int1_439 = torch.constant.int 1
%int1_440 = torch.constant.int 1
%int0_441 = torch.constant.int 0
%1716 = torch.prim.ListConstruct %int0_435, %int0_436 : (!torch.int, !torch.int) -> !torch.list<int>
%1717 = torch.prim.ListConstruct %int1_437, %int1_438 : (!torch.int, !torch.int) -> !torch.list<int>
%1718 = torch.prim.ListConstruct %int1_439, %int1_440 : (!torch.int, !torch.int) -> !torch.list<int>
%1719 = torch.prim.ListConstruct %int0_441, %int0_441 : (!torch.int, !torch.int) -> !torch.list<int>
%false_442 = torch.constant.bool false
%int1_443 = torch.constant.int 1
%1720 = torch.aten.convolution %1691, %1703, %1715, %1718, %1716, %1717, %false_442, %1719, %int1_443 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1721 = torch.aten.relu %1720 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%1722 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1723 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_444 = torch.constant.int 12
%1724 = torch.aten.item %1722 : !torch.vtensor<[],f32> -> !torch.float
%1725 = torch.aten.item %1723 : !torch.vtensor<[],si8> -> !torch.int
%1726 = torch.aten.quantize_per_tensor %1721, %1724, %1725, %int12_444 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1727 = torch.aten.int_repr %1726 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%1728 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1729 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1730 = torch.aten.item %1728 : !torch.vtensor<[],f32> -> !torch.float
%1731 = torch.aten.item %1729 : !torch.vtensor<[],si8> -> !torch.int
%1732 = torch.aten._make_per_tensor_quantized_tensor %1727, %1730, %1731 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1733 = torch.aten.dequantize.self %1732 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
%1734 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1735 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_445 = torch.constant.int 12
%1736 = torch.aten.item %1734 : !torch.vtensor<[],f32> -> !torch.float
%1737 = torch.aten.item %1735 : !torch.vtensor<[],si8> -> !torch.int
%1738 = torch.aten.quantize_per_tensor %70, %1736, %1737, %int12_445 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1739 = torch.aten.int_repr %1738 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1740 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1741 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1742 = torch.aten.item %1740 : !torch.vtensor<[],f32> -> !torch.float
%1743 = torch.aten.item %1741 : !torch.vtensor<[],si8> -> !torch.int
%1744 = torch.aten._make_per_tensor_quantized_tensor %1739, %1742, %1743 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1745 = torch.aten.dequantize.self %1744 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1746 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1747 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_446 = torch.constant.int 12
%1748 = torch.aten.item %1746 : !torch.vtensor<[],f32> -> !torch.float
%1749 = torch.aten.item %1747 : !torch.vtensor<[],si8> -> !torch.int
%1750 = torch.aten.quantize_per_tensor %71, %1748, %1749, %int12_446 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1751 = torch.aten.int_repr %1750 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1752 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1753 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1754 = torch.aten.item %1752 : !torch.vtensor<[],f32> -> !torch.float
%1755 = torch.aten.item %1753 : !torch.vtensor<[],si8> -> !torch.int
%1756 = torch.aten._make_per_tensor_quantized_tensor %1751, %1754, %1755 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1757 = torch.aten.dequantize.self %1756 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_447 = torch.constant.int 1
%int1_448 = torch.constant.int 1
%int1_449 = torch.constant.int 1
%int1_450 = torch.constant.int 1
%int1_451 = torch.constant.int 1
%int1_452 = torch.constant.int 1
%int0_453 = torch.constant.int 0
%1758 = torch.prim.ListConstruct %int1_447, %int1_448 : (!torch.int, !torch.int) -> !torch.list<int>
%1759 = torch.prim.ListConstruct %int1_449, %int1_450 : (!torch.int, !torch.int) -> !torch.list<int>
%1760 = torch.prim.ListConstruct %int1_451, %int1_452 : (!torch.int, !torch.int) -> !torch.list<int>
%1761 = torch.prim.ListConstruct %int0_453, %int0_453 : (!torch.int, !torch.int) -> !torch.list<int>
%false_454 = torch.constant.bool false
%int1_455 = torch.constant.int 1
%1762 = torch.aten.convolution %1733, %1745, %1757, %1760, %1758, %1759, %false_454, %1761, %int1_455 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1763 = torch.aten.relu %1762 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%1764 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1765 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_456 = torch.constant.int 12
%1766 = torch.aten.item %1764 : !torch.vtensor<[],f32> -> !torch.float
%1767 = torch.aten.item %1765 : !torch.vtensor<[],si8> -> !torch.int
%1768 = torch.aten.quantize_per_tensor %1763, %1766, %1767, %int12_456 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1769 = torch.aten.int_repr %1768 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%1770 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1771 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1772 = torch.aten.item %1770 : !torch.vtensor<[],f32> -> !torch.float
%1773 = torch.aten.item %1771 : !torch.vtensor<[],si8> -> !torch.int
%1774 = torch.aten._make_per_tensor_quantized_tensor %1769, %1772, %1773 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1775 = torch.aten.dequantize.self %1774 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
%1776 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1777 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_457 = torch.constant.int 12
%1778 = torch.aten.item %1776 : !torch.vtensor<[],f32> -> !torch.float
%1779 = torch.aten.item %1777 : !torch.vtensor<[],si8> -> !torch.int
%1780 = torch.aten.quantize_per_tensor %72, %1778, %1779, %int12_457 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1781 = torch.aten.int_repr %1780 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1782 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1783 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1784 = torch.aten.item %1782 : !torch.vtensor<[],f32> -> !torch.float
%1785 = torch.aten.item %1783 : !torch.vtensor<[],si8> -> !torch.int
%1786 = torch.aten._make_per_tensor_quantized_tensor %1781, %1784, %1785 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1787 = torch.aten.dequantize.self %1786 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%1788 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1789 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_458 = torch.constant.int 12
%1790 = torch.aten.item %1788 : !torch.vtensor<[],f32> -> !torch.float
%1791 = torch.aten.item %1789 : !torch.vtensor<[],si8> -> !torch.int
%1792 = torch.aten.quantize_per_tensor %73, %1790, %1791, %int12_458 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1793 = torch.aten.int_repr %1792 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1794 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1795 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1796 = torch.aten.item %1794 : !torch.vtensor<[],f32> -> !torch.float
%1797 = torch.aten.item %1795 : !torch.vtensor<[],si8> -> !torch.int
%1798 = torch.aten._make_per_tensor_quantized_tensor %1793, %1796, %1797 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1799 = torch.aten.dequantize.self %1798 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
%int0_459 = torch.constant.int 0
%int0_460 = torch.constant.int 0
%int1_461 = torch.constant.int 1
%int1_462 = torch.constant.int 1
%int1_463 = torch.constant.int 1
%int1_464 = torch.constant.int 1
%int0_465 = torch.constant.int 0
%1800 = torch.prim.ListConstruct %int0_459, %int0_460 : (!torch.int, !torch.int) -> !torch.list<int>
%1801 = torch.prim.ListConstruct %int1_461, %int1_462 : (!torch.int, !torch.int) -> !torch.list<int>
%1802 = torch.prim.ListConstruct %int1_463, %int1_464 : (!torch.int, !torch.int) -> !torch.list<int>
%1803 = torch.prim.ListConstruct %int0_465, %int0_465 : (!torch.int, !torch.int) -> !torch.list<int>
%false_466 = torch.constant.bool false
%int1_467 = torch.constant.int 1
%1804 = torch.aten.convolution %1775, %1787, %1799, %1802, %1800, %1801, %false_466, %1803, %int1_467 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1805 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1806 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_468 = torch.constant.int 12
%1807 = torch.aten.item %1805 : !torch.vtensor<[],f32> -> !torch.float
%1808 = torch.aten.item %1806 : !torch.vtensor<[],si8> -> !torch.int
%1809 = torch.aten.quantize_per_tensor %1804, %1807, %1808, %int12_468 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1810 = torch.aten.int_repr %1809 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1811 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1812 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1813 = torch.aten.item %1811 : !torch.vtensor<[],f32> -> !torch.float
%1814 = torch.aten.item %1812 : !torch.vtensor<[],si8> -> !torch.int
%1815 = torch.aten._make_per_tensor_quantized_tensor %1810, %1813, %1814 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1816 = torch.aten.dequantize.self %1815 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
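    // Residual add with %1691 and ReLU, ending this unit; the sum is requantized at 1/64.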
%int1_469 = torch.constant.int 1
%1817 = torch.aten.add.Tensor %1816, %1691, %int1_469 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[1,1024,14,14],f32>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1818 = torch.aten.relu %1817 : !torch.vtensor<[1,1024,14,14],f32> -> !torch.vtensor<[1,1024,14,14],f32>
%1819 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1820 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_470 = torch.constant.int 12
%1821 = torch.aten.item %1819 : !torch.vtensor<[],f32> -> !torch.float
%1822 = torch.aten.item %1820 : !torch.vtensor<[],si8> -> !torch.int
%1823 = torch.aten.quantize_per_tensor %1818, %1821, %1822, %int12_470 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1824 = torch.aten.int_repr %1823 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1825 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1826 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1827 = torch.aten.item %1825 : !torch.vtensor<[],f32> -> !torch.float
%1828 = torch.aten.item %1826 : !torch.vtensor<[],si8> -> !torch.int
%1829 = torch.aten._make_per_tensor_quantized_tensor %1824, %1827, %1828 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1830 = torch.aten.dequantize.self %1829 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
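    // Another bottleneck unit: weights %74 (1x1 reduce), %76 (3x3), %78 (1x1 expand),
    // biases %75, %77, %79, all fake-quantized as before.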
%1831 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1832 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_471 = torch.constant.int 12
%1833 = torch.aten.item %1831 : !torch.vtensor<[],f32> -> !torch.float
%1834 = torch.aten.item %1832 : !torch.vtensor<[],si8> -> !torch.int
%1835 = torch.aten.quantize_per_tensor %74, %1833, %1834, %int12_471 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1836 = torch.aten.int_repr %1835 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
%1837 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1838 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1839 = torch.aten.item %1837 : !torch.vtensor<[],f32> -> !torch.float
%1840 = torch.aten.item %1838 : !torch.vtensor<[],si8> -> !torch.int
%1841 = torch.aten._make_per_tensor_quantized_tensor %1836, %1839, %1840 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1842 = torch.aten.dequantize.self %1841 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
%1843 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1844 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_472 = torch.constant.int 12
%1845 = torch.aten.item %1843 : !torch.vtensor<[],f32> -> !torch.float
%1846 = torch.aten.item %1844 : !torch.vtensor<[],si8> -> !torch.int
%1847 = torch.aten.quantize_per_tensor %75, %1845, %1846, %int12_472 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1848 = torch.aten.int_repr %1847 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1849 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1850 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1851 = torch.aten.item %1849 : !torch.vtensor<[],f32> -> !torch.float
%1852 = torch.aten.item %1850 : !torch.vtensor<[],si8> -> !torch.int
%1853 = torch.aten._make_per_tensor_quantized_tensor %1848, %1851, %1852 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1854 = torch.aten.dequantize.self %1853 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int0_473 = torch.constant.int 0
%int0_474 = torch.constant.int 0
%int1_475 = torch.constant.int 1
%int1_476 = torch.constant.int 1
%int1_477 = torch.constant.int 1
%int1_478 = torch.constant.int 1
%int0_479 = torch.constant.int 0
%1855 = torch.prim.ListConstruct %int0_473, %int0_474 : (!torch.int, !torch.int) -> !torch.list<int>
%1856 = torch.prim.ListConstruct %int1_475, %int1_476 : (!torch.int, !torch.int) -> !torch.list<int>
%1857 = torch.prim.ListConstruct %int1_477, %int1_478 : (!torch.int, !torch.int) -> !torch.list<int>
%1858 = torch.prim.ListConstruct %int0_479, %int0_479 : (!torch.int, !torch.int) -> !torch.list<int>
%false_480 = torch.constant.bool false
%int1_481 = torch.constant.int 1
%1859 = torch.aten.convolution %1830, %1842, %1854, %1857, %1855, %1856, %false_480, %1858, %int1_481 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1860 = torch.aten.relu %1859 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%1861 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1862 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_482 = torch.constant.int 12
%1863 = torch.aten.item %1861 : !torch.vtensor<[],f32> -> !torch.float
%1864 = torch.aten.item %1862 : !torch.vtensor<[],si8> -> !torch.int
%1865 = torch.aten.quantize_per_tensor %1860, %1863, %1864, %int12_482 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1866 = torch.aten.int_repr %1865 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%1867 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1868 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1869 = torch.aten.item %1867 : !torch.vtensor<[],f32> -> !torch.float
%1870 = torch.aten.item %1868 : !torch.vtensor<[],si8> -> !torch.int
%1871 = torch.aten._make_per_tensor_quantized_tensor %1866, %1869, %1870 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1872 = torch.aten.dequantize.self %1871 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
%1873 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1874 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_483 = torch.constant.int 12
%1875 = torch.aten.item %1873 : !torch.vtensor<[],f32> -> !torch.float
%1876 = torch.aten.item %1874 : !torch.vtensor<[],si8> -> !torch.int
%1877 = torch.aten.quantize_per_tensor %76, %1875, %1876, %int12_483 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1878 = torch.aten.int_repr %1877 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%1879 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1880 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1881 = torch.aten.item %1879 : !torch.vtensor<[],f32> -> !torch.float
%1882 = torch.aten.item %1880 : !torch.vtensor<[],si8> -> !torch.int
%1883 = torch.aten._make_per_tensor_quantized_tensor %1878, %1881, %1882 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%1884 = torch.aten.dequantize.self %1883 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%1885 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1886 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_484 = torch.constant.int 12
%1887 = torch.aten.item %1885 : !torch.vtensor<[],f32> -> !torch.float
%1888 = torch.aten.item %1886 : !torch.vtensor<[],si8> -> !torch.int
%1889 = torch.aten.quantize_per_tensor %77, %1887, %1888, %int12_484 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1890 = torch.aten.int_repr %1889 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1891 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1892 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1893 = torch.aten.item %1891 : !torch.vtensor<[],f32> -> !torch.float
%1894 = torch.aten.item %1892 : !torch.vtensor<[],si8> -> !torch.int
%1895 = torch.aten._make_per_tensor_quantized_tensor %1890, %1893, %1894 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1896 = torch.aten.dequantize.self %1895 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_485 = torch.constant.int 1
%int1_486 = torch.constant.int 1
%int1_487 = torch.constant.int 1
%int1_488 = torch.constant.int 1
%int1_489 = torch.constant.int 1
%int1_490 = torch.constant.int 1
%int0_491 = torch.constant.int 0
%1897 = torch.prim.ListConstruct %int1_485, %int1_486 : (!torch.int, !torch.int) -> !torch.list<int>
%1898 = torch.prim.ListConstruct %int1_487, %int1_488 : (!torch.int, !torch.int) -> !torch.list<int>
%1899 = torch.prim.ListConstruct %int1_489, %int1_490 : (!torch.int, !torch.int) -> !torch.list<int>
%1900 = torch.prim.ListConstruct %int0_491, %int0_491 : (!torch.int, !torch.int) -> !torch.list<int>
%false_492 = torch.constant.bool false
%int1_493 = torch.constant.int 1
%1901 = torch.aten.convolution %1872, %1884, %1896, %1899, %1897, %1898, %false_492, %1900, %int1_493 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1902 = torch.aten.relu %1901 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%1903 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1904 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_494 = torch.constant.int 12
%1905 = torch.aten.item %1903 : !torch.vtensor<[],f32> -> !torch.float
%1906 = torch.aten.item %1904 : !torch.vtensor<[],si8> -> !torch.int
%1907 = torch.aten.quantize_per_tensor %1902, %1905, %1906, %int12_494 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1908 = torch.aten.int_repr %1907 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%1909 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1910 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1911 = torch.aten.item %1909 : !torch.vtensor<[],f32> -> !torch.float
%1912 = torch.aten.item %1910 : !torch.vtensor<[],si8> -> !torch.int
%1913 = torch.aten._make_per_tensor_quantized_tensor %1908, %1911, %1912 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%1914 = torch.aten.dequantize.self %1913 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
%1915 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1916 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_495 = torch.constant.int 12
%1917 = torch.aten.item %1915 : !torch.vtensor<[],f32> -> !torch.float
%1918 = torch.aten.item %1916 : !torch.vtensor<[],si8> -> !torch.int
%1919 = torch.aten.quantize_per_tensor %78, %1917, %1918, %int12_495 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1920 = torch.aten.int_repr %1919 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%1921 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1922 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1923 = torch.aten.item %1921 : !torch.vtensor<[],f32> -> !torch.float
%1924 = torch.aten.item %1922 : !torch.vtensor<[],si8> -> !torch.int
%1925 = torch.aten._make_per_tensor_quantized_tensor %1920, %1923, %1924 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%1926 = torch.aten.dequantize.self %1925 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%1927 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1928 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_496 = torch.constant.int 12
%1929 = torch.aten.item %1927 : !torch.vtensor<[],f32> -> !torch.float
%1930 = torch.aten.item %1928 : !torch.vtensor<[],si8> -> !torch.int
%1931 = torch.aten.quantize_per_tensor %79, %1929, %1930, %int12_496 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1932 = torch.aten.int_repr %1931 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%1933 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1934 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1935 = torch.aten.item %1933 : !torch.vtensor<[],f32> -> !torch.float
%1936 = torch.aten.item %1934 : !torch.vtensor<[],si8> -> !torch.int
%1937 = torch.aten._make_per_tensor_quantized_tensor %1932, %1935, %1936 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%1938 = torch.aten.dequantize.self %1937 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
%int0_497 = torch.constant.int 0
%int0_498 = torch.constant.int 0
%int1_499 = torch.constant.int 1
%int1_500 = torch.constant.int 1
%int1_501 = torch.constant.int 1
%int1_502 = torch.constant.int 1
%int0_503 = torch.constant.int 0
%1939 = torch.prim.ListConstruct %int0_497, %int0_498 : (!torch.int, !torch.int) -> !torch.list<int>
%1940 = torch.prim.ListConstruct %int1_499, %int1_500 : (!torch.int, !torch.int) -> !torch.list<int>
%1941 = torch.prim.ListConstruct %int1_501, %int1_502 : (!torch.int, !torch.int) -> !torch.list<int>
%1942 = torch.prim.ListConstruct %int0_503, %int0_503 : (!torch.int, !torch.int) -> !torch.list<int>
%false_504 = torch.constant.bool false
%int1_505 = torch.constant.int 1
%1943 = torch.aten.convolution %1914, %1926, %1938, %1941, %1939, %1940, %false_504, %1942, %int1_505 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1944 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1945 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_506 = torch.constant.int 12
%1946 = torch.aten.item %1944 : !torch.vtensor<[],f32> -> !torch.float
%1947 = torch.aten.item %1945 : !torch.vtensor<[],si8> -> !torch.int
%1948 = torch.aten.quantize_per_tensor %1943, %1946, %1947, %int12_506 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1949 = torch.aten.int_repr %1948 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1950 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1951 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1952 = torch.aten.item %1950 : !torch.vtensor<[],f32> -> !torch.float
%1953 = torch.aten.item %1951 : !torch.vtensor<[],si8> -> !torch.int
%1954 = torch.aten._make_per_tensor_quantized_tensor %1949, %1952, %1953 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1955 = torch.aten.dequantize.self %1954 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
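    // Residual add with %1830 and ReLU, then requantization at 1/64.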
%int1_507 = torch.constant.int 1
%1956 = torch.aten.add.Tensor %1955, %1830, %int1_507 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[1,1024,14,14],f32>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%1957 = torch.aten.relu %1956 : !torch.vtensor<[1,1024,14,14],f32> -> !torch.vtensor<[1,1024,14,14],f32>
%1958 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1959 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_508 = torch.constant.int 12
%1960 = torch.aten.item %1958 : !torch.vtensor<[],f32> -> !torch.float
%1961 = torch.aten.item %1959 : !torch.vtensor<[],si8> -> !torch.int
%1962 = torch.aten.quantize_per_tensor %1957, %1960, %1961, %int12_508 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1963 = torch.aten.int_repr %1962 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%1964 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1965 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1966 = torch.aten.item %1964 : !torch.vtensor<[],f32> -> !torch.float
%1967 = torch.aten.item %1965 : !torch.vtensor<[],si8> -> !torch.int
%1968 = torch.aten._make_per_tensor_quantized_tensor %1963, %1966, %1967 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%1969 = torch.aten.dequantize.self %1968 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
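    // Next bottleneck unit: 1x1 reduce weight %80 and bias %81 are fake-quantized
    // at 1/512, followed by the 3x3 weight %82.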
%1970 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1971 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_509 = torch.constant.int 12
%1972 = torch.aten.item %1970 : !torch.vtensor<[],f32> -> !torch.float
%1973 = torch.aten.item %1971 : !torch.vtensor<[],si8> -> !torch.int
%1974 = torch.aten.quantize_per_tensor %80, %1972, %1973, %int12_509 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1975 = torch.aten.int_repr %1974 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8>
%1976 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1977 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1978 = torch.aten.item %1976 : !torch.vtensor<[],f32> -> !torch.float
%1979 = torch.aten.item %1977 : !torch.vtensor<[],si8> -> !torch.int
%1980 = torch.aten._make_per_tensor_quantized_tensor %1975, %1978, %1979 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8>
%1981 = torch.aten.dequantize.self %1980 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32>
%1982 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1983 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_510 = torch.constant.int 12
%1984 = torch.aten.item %1982 : !torch.vtensor<[],f32> -> !torch.float
%1985 = torch.aten.item %1983 : !torch.vtensor<[],si8> -> !torch.int
%1986 = torch.aten.quantize_per_tensor %81, %1984, %1985, %int12_510 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1987 = torch.aten.int_repr %1986 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%1988 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1989 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1990 = torch.aten.item %1988 : !torch.vtensor<[],f32> -> !torch.float
%1991 = torch.aten.item %1989 : !torch.vtensor<[],si8> -> !torch.int
%1992 = torch.aten._make_per_tensor_quantized_tensor %1987, %1990, %1991 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%1993 = torch.aten.dequantize.self %1992 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
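// aten.convolution operands assembled below: padding %1994=[0,0], dilation %1995=[1,1],
// stride %1996=[1,1], output_padding %1997=[0,0], transposed=false, groups=1.
// The 1x1 conv maps [1,1024,14,14] -> [1,256,14,14] and is followed by ReLU.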
%int0_511 = torch.constant.int 0
%int0_512 = torch.constant.int 0
%int1_513 = torch.constant.int 1
%int1_514 = torch.constant.int 1
%int1_515 = torch.constant.int 1
%int1_516 = torch.constant.int 1
%int0_517 = torch.constant.int 0
%1994 = torch.prim.ListConstruct %int0_511, %int0_512 : (!torch.int, !torch.int) -> !torch.list<int>
%1995 = torch.prim.ListConstruct %int1_513, %int1_514 : (!torch.int, !torch.int) -> !torch.list<int>
%1996 = torch.prim.ListConstruct %int1_515, %int1_516 : (!torch.int, !torch.int) -> !torch.list<int>
%1997 = torch.prim.ListConstruct %int0_517, %int0_517 : (!torch.int, !torch.int) -> !torch.list<int>
%false_518 = torch.constant.bool false
%int1_519 = torch.constant.int 1
%1998 = torch.aten.convolution %1969, %1981, %1993, %1996, %1994, %1995, %false_518, %1997, %int1_519 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%1999 = torch.aten.relu %1998 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%2000 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2001 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_520 = torch.constant.int 12
%2002 = torch.aten.item %2000 : !torch.vtensor<[],f32> -> !torch.float
%2003 = torch.aten.item %2001 : !torch.vtensor<[],si8> -> !torch.int
%2004 = torch.aten.quantize_per_tensor %1999, %2002, %2003, %int12_520 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%2005 = torch.aten.int_repr %2004 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%2006 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2007 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2008 = torch.aten.item %2006 : !torch.vtensor<[],f32> -> !torch.float
%2009 = torch.aten.item %2007 : !torch.vtensor<[],si8> -> !torch.int
%2010 = torch.aten._make_per_tensor_quantized_tensor %2005, %2008, %2009 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%2011 = torch.aten.dequantize.self %2010 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
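// QDQ for the bottleneck's 3x3 conv weight %82 ([256,256,3,3]) and bias %83 ([256]).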
%2012 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2013 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_521 = torch.constant.int 12
%2014 = torch.aten.item %2012 : !torch.vtensor<[],f32> -> !torch.float
%2015 = torch.aten.item %2013 : !torch.vtensor<[],si8> -> !torch.int
%2016 = torch.aten.quantize_per_tensor %82, %2014, %2015, %int12_521 : !torch.vtensor<[256,256,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%2017 = torch.aten.int_repr %2016 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],si8>
%2018 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2019 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2020 = torch.aten.item %2018 : !torch.vtensor<[],f32> -> !torch.float
%2021 = torch.aten.item %2019 : !torch.vtensor<[],si8> -> !torch.int
%2022 = torch.aten._make_per_tensor_quantized_tensor %2017, %2020, %2021 : !torch.vtensor<[256,256,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,3,3],!torch.qint8>
%2023 = torch.aten.dequantize.self %2022 : !torch.vtensor<[256,256,3,3],!torch.qint8> -> !torch.vtensor<[256,256,3,3],f32>
%2024 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2025 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_522 = torch.constant.int 12
%2026 = torch.aten.item %2024 : !torch.vtensor<[],f32> -> !torch.float
%2027 = torch.aten.item %2025 : !torch.vtensor<[],si8> -> !torch.int
%2028 = torch.aten.quantize_per_tensor %83, %2026, %2027, %int12_522 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2029 = torch.aten.int_repr %2028 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8>
%2030 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2031 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2032 = torch.aten.item %2030 : !torch.vtensor<[],f32> -> !torch.float
%2033 = torch.aten.item %2031 : !torch.vtensor<[],si8> -> !torch.int
%2034 = torch.aten._make_per_tensor_quantized_tensor %2029, %2032, %2033 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8>
%2035 = torch.aten.dequantize.self %2034 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32>
%int1_523 = torch.constant.int 1
%int1_524 = torch.constant.int 1
%int1_525 = torch.constant.int 1
%int1_526 = torch.constant.int 1
%int1_527 = torch.constant.int 1
%int1_528 = torch.constant.int 1
%int0_529 = torch.constant.int 0
%2036 = torch.prim.ListConstruct %int1_523, %int1_524 : (!torch.int, !torch.int) -> !torch.list<int>
%2037 = torch.prim.ListConstruct %int1_525, %int1_526 : (!torch.int, !torch.int) -> !torch.list<int>
%2038 = torch.prim.ListConstruct %int1_527, %int1_528 : (!torch.int, !torch.int) -> !torch.list<int>
%2039 = torch.prim.ListConstruct %int0_529, %int0_529 : (!torch.int, !torch.int) -> !torch.list<int>
%false_530 = torch.constant.bool false
%int1_531 = torch.constant.int 1
%2040 = torch.aten.convolution %2011, %2023, %2035, %2038, %2036, %2037, %false_530, %2039, %int1_531 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[256,256,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,256,14,14],f32>
%2041 = torch.aten.relu %2040 : !torch.vtensor<[1,256,14,14],f32> -> !torch.vtensor<[1,256,14,14],f32>
%2042 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2043 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_532 = torch.constant.int 12
%2044 = torch.aten.item %2042 : !torch.vtensor<[],f32> -> !torch.float
%2045 = torch.aten.item %2043 : !torch.vtensor<[],si8> -> !torch.int
%2046 = torch.aten.quantize_per_tensor %2041, %2044, %2045, %int12_532 : !torch.vtensor<[1,256,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%2047 = torch.aten.int_repr %2046 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],si8>
%2048 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2049 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2050 = torch.aten.item %2048 : !torch.vtensor<[],f32> -> !torch.float
%2051 = torch.aten.item %2049 : !torch.vtensor<[],si8> -> !torch.int
%2052 = torch.aten._make_per_tensor_quantized_tensor %2047, %2050, %2051 : !torch.vtensor<[1,256,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,256,14,14],!torch.qint8>
%2053 = torch.aten.dequantize.self %2052 : !torch.vtensor<[1,256,14,14],!torch.qint8> -> !torch.vtensor<[1,256,14,14],f32>
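// QDQ for the 1x1 expansion conv weight %84 ([1024,256,1,1]) and bias %85 ([1024]).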
%2054 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2055 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_533 = torch.constant.int 12
%2056 = torch.aten.item %2054 : !torch.vtensor<[],f32> -> !torch.float
%2057 = torch.aten.item %2055 : !torch.vtensor<[],si8> -> !torch.int
%2058 = torch.aten.quantize_per_tensor %84, %2056, %2057, %int12_533 : !torch.vtensor<[1024,256,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%2059 = torch.aten.int_repr %2058 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],si8>
%2060 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2061 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2062 = torch.aten.item %2060 : !torch.vtensor<[],f32> -> !torch.float
%2063 = torch.aten.item %2061 : !torch.vtensor<[],si8> -> !torch.int
%2064 = torch.aten._make_per_tensor_quantized_tensor %2059, %2062, %2063 : !torch.vtensor<[1024,256,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024,256,1,1],!torch.qint8>
%2065 = torch.aten.dequantize.self %2064 : !torch.vtensor<[1024,256,1,1],!torch.qint8> -> !torch.vtensor<[1024,256,1,1],f32>
%2066 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2067 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_534 = torch.constant.int 12
%2068 = torch.aten.item %2066 : !torch.vtensor<[],f32> -> !torch.float
%2069 = torch.aten.item %2067 : !torch.vtensor<[],si8> -> !torch.int
%2070 = torch.aten.quantize_per_tensor %85, %2068, %2069, %int12_534 : !torch.vtensor<[1024],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%2071 = torch.aten.int_repr %2070 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],si8>
%2072 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2073 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2074 = torch.aten.item %2072 : !torch.vtensor<[],f32> -> !torch.float
%2075 = torch.aten.item %2073 : !torch.vtensor<[],si8> -> !torch.int
%2076 = torch.aten._make_per_tensor_quantized_tensor %2071, %2074, %2075 : !torch.vtensor<[1024],si8>, !torch.float, !torch.int -> !torch.vtensor<[1024],!torch.qint8>
%2077 = torch.aten.dequantize.self %2076 : !torch.vtensor<[1024],!torch.qint8> -> !torch.vtensor<[1024],f32>
%int0_535 = torch.constant.int 0
%int0_536 = torch.constant.int 0
%int1_537 = torch.constant.int 1
%int1_538 = torch.constant.int 1
%int1_539 = torch.constant.int 1
%int1_540 = torch.constant.int 1
%int0_541 = torch.constant.int 0
%2078 = torch.prim.ListConstruct %int0_535, %int0_536 : (!torch.int, !torch.int) -> !torch.list<int>
%2079 = torch.prim.ListConstruct %int1_537, %int1_538 : (!torch.int, !torch.int) -> !torch.list<int>
%2080 = torch.prim.ListConstruct %int1_539, %int1_540 : (!torch.int, !torch.int) -> !torch.list<int>
%2081 = torch.prim.ListConstruct %int0_541, %int0_541 : (!torch.int, !torch.int) -> !torch.list<int>
%false_542 = torch.constant.bool false
%int1_543 = torch.constant.int 1
%2082 = torch.aten.convolution %2053, %2065, %2077, %2080, %2078, %2079, %false_542, %2081, %int1_543 : !torch.vtensor<[1,256,14,14],f32>, !torch.vtensor<[1024,256,1,1],f32>, !torch.vtensor<[1024],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%2083 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2084 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_544 = torch.constant.int 12
%2085 = torch.aten.item %2083 : !torch.vtensor<[],f32> -> !torch.float
%2086 = torch.aten.item %2084 : !torch.vtensor<[],si8> -> !torch.int
%2087 = torch.aten.quantize_per_tensor %2082, %2085, %2086, %int12_544 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%2088 = torch.aten.int_repr %2087 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%2089 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2090 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2091 = torch.aten.item %2089 : !torch.vtensor<[],f32> -> !torch.float
%2092 = torch.aten.item %2090 : !torch.vtensor<[],si8> -> !torch.int
%2093 = torch.aten._make_per_tensor_quantized_tensor %2088, %2091, %2092 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%2094 = torch.aten.dequantize.self %2093 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
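// Identity shortcut: the block input %1969 is added back (shapes unchanged at
// [1,1024,14,14]), closing the last 14x14 bottleneck before the stride-2 stage.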
%int1_545 = torch.constant.int 1
%2095 = torch.aten.add.Tensor %2094, %1969, %int1_545 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[1,1024,14,14],f32>, !torch.int -> !torch.vtensor<[1,1024,14,14],f32>
%2096 = torch.aten.relu %2095 : !torch.vtensor<[1,1024,14,14],f32> -> !torch.vtensor<[1,1024,14,14],f32>
%2097 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2098 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_546 = torch.constant.int 12
%2099 = torch.aten.item %2097 : !torch.vtensor<[],f32> -> !torch.float
%2100 = torch.aten.item %2098 : !torch.vtensor<[],si8> -> !torch.int
%2101 = torch.aten.quantize_per_tensor %2096, %2099, %2100, %int12_546 : !torch.vtensor<[1,1024,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%2102 = torch.aten.int_repr %2101 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],si8>
%2103 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2104 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2105 = torch.aten.item %2103 : !torch.vtensor<[],f32> -> !torch.float
%2106 = torch.aten.item %2104 : !torch.vtensor<[],si8> -> !torch.int
%2107 = torch.aten._make_per_tensor_quantized_tensor %2102, %2105, %2106 : !torch.vtensor<[1,1024,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1024,14,14],!torch.qint8>
%2108 = torch.aten.dequantize.self %2107 : !torch.vtensor<[1,1024,14,14],!torch.qint8> -> !torch.vtensor<[1,1024,14,14],f32>
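// Next stage begins (7x7 feature maps; matches ResNet-50 layer4, block 1):
// 1x1 reduction weight %86 ([512,1024,1,1]) and bias %87 ([512]) are quantized below.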
%2109 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2110 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_547 = torch.constant.int 12
%2111 = torch.aten.item %2109 : !torch.vtensor<[],f32> -> !torch.float
%2112 = torch.aten.item %2110 : !torch.vtensor<[],si8> -> !torch.int
%2113 = torch.aten.quantize_per_tensor %86, %2111, %2112, %int12_547 : !torch.vtensor<[512,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,1024,1,1],!torch.qint8>
%2114 = torch.aten.int_repr %2113 : !torch.vtensor<[512,1024,1,1],!torch.qint8> -> !torch.vtensor<[512,1024,1,1],si8>
%2115 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2116 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2117 = torch.aten.item %2115 : !torch.vtensor<[],f32> -> !torch.float
%2118 = torch.aten.item %2116 : !torch.vtensor<[],si8> -> !torch.int
%2119 = torch.aten._make_per_tensor_quantized_tensor %2114, %2117, %2118 : !torch.vtensor<[512,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,1024,1,1],!torch.qint8>
%2120 = torch.aten.dequantize.self %2119 : !torch.vtensor<[512,1024,1,1],!torch.qint8> -> !torch.vtensor<[512,1024,1,1],f32>
%2121 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2122 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_548 = torch.constant.int 12
%2123 = torch.aten.item %2121 : !torch.vtensor<[],f32> -> !torch.float
%2124 = torch.aten.item %2122 : !torch.vtensor<[],si8> -> !torch.int
%2125 = torch.aten.quantize_per_tensor %87, %2123, %2124, %int12_548 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2126 = torch.aten.int_repr %2125 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2127 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2128 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2129 = torch.aten.item %2127 : !torch.vtensor<[],f32> -> !torch.float
%2130 = torch.aten.item %2128 : !torch.vtensor<[],si8> -> !torch.int
%2131 = torch.aten._make_per_tensor_quantized_tensor %2126, %2129, %2130 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2132 = torch.aten.dequantize.self %2131 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_549 = torch.constant.int 0
%int0_550 = torch.constant.int 0
%int1_551 = torch.constant.int 1
%int1_552 = torch.constant.int 1
%int1_553 = torch.constant.int 1
%int1_554 = torch.constant.int 1
%int0_555 = torch.constant.int 0
%2133 = torch.prim.ListConstruct %int0_549, %int0_550 : (!torch.int, !torch.int) -> !torch.list<int>
%2134 = torch.prim.ListConstruct %int1_551, %int1_552 : (!torch.int, !torch.int) -> !torch.list<int>
%2135 = torch.prim.ListConstruct %int1_553, %int1_554 : (!torch.int, !torch.int) -> !torch.list<int>
%2136 = torch.prim.ListConstruct %int0_555, %int0_555 : (!torch.int, !torch.int) -> !torch.list<int>
%false_556 = torch.constant.bool false
%int1_557 = torch.constant.int 1
%2137 = torch.aten.convolution %2108, %2120, %2132, %2135, %2133, %2134, %false_556, %2136, %int1_557 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[512,1024,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,14,14],f32>
%2138 = torch.aten.relu %2137 : !torch.vtensor<[1,512,14,14],f32> -> !torch.vtensor<[1,512,14,14],f32>
%2139 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2140 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_558 = torch.constant.int 12
%2141 = torch.aten.item %2139 : !torch.vtensor<[],f32> -> !torch.float
%2142 = torch.aten.item %2140 : !torch.vtensor<[],si8> -> !torch.int
%2143 = torch.aten.quantize_per_tensor %2138, %2141, %2142, %int12_558 : !torch.vtensor<[1,512,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,14,14],!torch.qint8>
%2144 = torch.aten.int_repr %2143 : !torch.vtensor<[1,512,14,14],!torch.qint8> -> !torch.vtensor<[1,512,14,14],si8>
%2145 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2146 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2147 = torch.aten.item %2145 : !torch.vtensor<[],f32> -> !torch.float
%2148 = torch.aten.item %2146 : !torch.vtensor<[],si8> -> !torch.int
%2149 = torch.aten._make_per_tensor_quantized_tensor %2144, %2147, %2148 : !torch.vtensor<[1,512,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,14,14],!torch.qint8>
%2150 = torch.aten.dequantize.self %2149 : !torch.vtensor<[1,512,14,14],!torch.qint8> -> !torch.vtensor<[1,512,14,14],f32>
%2151 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_559 = torch.constant.int 12
%2153 = torch.aten.item %2151 : !torch.vtensor<[],f32> -> !torch.float
%2154 = torch.aten.item %2152 : !torch.vtensor<[],si8> -> !torch.int
%2155 = torch.aten.quantize_per_tensor %88, %2153, %2154, %int12_559 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2156 = torch.aten.int_repr %2155 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%2157 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2158 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2159 = torch.aten.item %2157 : !torch.vtensor<[],f32> -> !torch.float
%2160 = torch.aten.item %2158 : !torch.vtensor<[],si8> -> !torch.int
%2161 = torch.aten._make_per_tensor_quantized_tensor %2156, %2159, %2160 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2162 = torch.aten.dequantize.self %2161 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%2163 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2164 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_560 = torch.constant.int 12
%2165 = torch.aten.item %2163 : !torch.vtensor<[],f32> -> !torch.float
%2166 = torch.aten.item %2164 : !torch.vtensor<[],si8> -> !torch.int
%2167 = torch.aten.quantize_per_tensor %89, %2165, %2166, %int12_560 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2168 = torch.aten.int_repr %2167 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2169 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2170 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2171 = torch.aten.item %2169 : !torch.vtensor<[],f32> -> !torch.float
%2172 = torch.aten.item %2170 : !torch.vtensor<[],si8> -> !torch.int
%2173 = torch.aten._make_per_tensor_quantized_tensor %2168, %2171, %2172 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2174 = torch.aten.dequantize.self %2173 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
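// 3x3 conv with stride %2177=[2,2]: spatial downsampling [1,512,14,14] -> [1,512,7,7].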
%int1_561 = torch.constant.int 1
%int1_562 = torch.constant.int 1
%int1_563 = torch.constant.int 1
%int1_564 = torch.constant.int 1
%int2_565 = torch.constant.int 2
%int2_566 = torch.constant.int 2
%int0_567 = torch.constant.int 0
%2175 = torch.prim.ListConstruct %int1_561, %int1_562 : (!torch.int, !torch.int) -> !torch.list<int>
%2176 = torch.prim.ListConstruct %int1_563, %int1_564 : (!torch.int, !torch.int) -> !torch.list<int>
%2177 = torch.prim.ListConstruct %int2_565, %int2_566 : (!torch.int, !torch.int) -> !torch.list<int>
%2178 = torch.prim.ListConstruct %int0_567, %int0_567 : (!torch.int, !torch.int) -> !torch.list<int>
%false_568 = torch.constant.bool false
%int1_569 = torch.constant.int 1
%2179 = torch.aten.convolution %2150, %2162, %2174, %2177, %2175, %2176, %false_568, %2178, %int1_569 : !torch.vtensor<[1,512,14,14],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,7,7],f32>
%2180 = torch.aten.relu %2179 : !torch.vtensor<[1,512,7,7],f32> -> !torch.vtensor<[1,512,7,7],f32>
%2181 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2182 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_570 = torch.constant.int 12
%2183 = torch.aten.item %2181 : !torch.vtensor<[],f32> -> !torch.float
%2184 = torch.aten.item %2182 : !torch.vtensor<[],si8> -> !torch.int
%2185 = torch.aten.quantize_per_tensor %2180, %2183, %2184, %int12_570 : !torch.vtensor<[1,512,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2186 = torch.aten.int_repr %2185 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],si8>
%2187 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2188 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2189 = torch.aten.item %2187 : !torch.vtensor<[],f32> -> !torch.float
%2190 = torch.aten.item %2188 : !torch.vtensor<[],si8> -> !torch.int
%2191 = torch.aten._make_per_tensor_quantized_tensor %2186, %2189, %2190 : !torch.vtensor<[1,512,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2192 = torch.aten.dequantize.self %2191 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],f32>
%2193 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2194 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_571 = torch.constant.int 12
%2195 = torch.aten.item %2193 : !torch.vtensor<[],f32> -> !torch.float
%2196 = torch.aten.item %2194 : !torch.vtensor<[],si8> -> !torch.int
%2197 = torch.aten.quantize_per_tensor %90, %2195, %2196, %int12_571 : !torch.vtensor<[2048,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2198 = torch.aten.int_repr %2197 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],si8>
%2199 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2200 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2201 = torch.aten.item %2199 : !torch.vtensor<[],f32> -> !torch.float
%2202 = torch.aten.item %2200 : !torch.vtensor<[],si8> -> !torch.int
%2203 = torch.aten._make_per_tensor_quantized_tensor %2198, %2201, %2202 : !torch.vtensor<[2048,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2204 = torch.aten.dequantize.self %2203 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],f32>
%2205 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2206 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_572 = torch.constant.int 12
%2207 = torch.aten.item %2205 : !torch.vtensor<[],f32> -> !torch.float
%2208 = torch.aten.item %2206 : !torch.vtensor<[],si8> -> !torch.int
%2209 = torch.aten.quantize_per_tensor %91, %2207, %2208, %int12_572 : !torch.vtensor<[2048],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2210 = torch.aten.int_repr %2209 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],si8>
%2211 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2212 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2213 = torch.aten.item %2211 : !torch.vtensor<[],f32> -> !torch.float
%2214 = torch.aten.item %2212 : !torch.vtensor<[],si8> -> !torch.int
%2215 = torch.aten._make_per_tensor_quantized_tensor %2210, %2213, %2214 : !torch.vtensor<[2048],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2216 = torch.aten.dequantize.self %2215 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],f32>
%int0_573 = torch.constant.int 0
%int0_574 = torch.constant.int 0
%int1_575 = torch.constant.int 1
%int1_576 = torch.constant.int 1
%int1_577 = torch.constant.int 1
%int1_578 = torch.constant.int 1
%int0_579 = torch.constant.int 0
%2217 = torch.prim.ListConstruct %int0_573, %int0_574 : (!torch.int, !torch.int) -> !torch.list<int>
%2218 = torch.prim.ListConstruct %int1_575, %int1_576 : (!torch.int, !torch.int) -> !torch.list<int>
%2219 = torch.prim.ListConstruct %int1_577, %int1_578 : (!torch.int, !torch.int) -> !torch.list<int>
%2220 = torch.prim.ListConstruct %int0_579, %int0_579 : (!torch.int, !torch.int) -> !torch.list<int>
%false_580 = torch.constant.bool false
%int1_581 = torch.constant.int 1
%2221 = torch.aten.convolution %2192, %2204, %2216, %2219, %2217, %2218, %false_580, %2220, %int1_581 : !torch.vtensor<[1,512,7,7],f32>, !torch.vtensor<[2048,512,1,1],f32>, !torch.vtensor<[2048],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2048,7,7],f32>
%2222 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2223 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_582 = torch.constant.int 12
%2224 = torch.aten.item %2222 : !torch.vtensor<[],f32> -> !torch.float
%2225 = torch.aten.item %2223 : !torch.vtensor<[],si8> -> !torch.int
%2226 = torch.aten.quantize_per_tensor %2221, %2224, %2225, %int12_582 : !torch.vtensor<[1,2048,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2227 = torch.aten.int_repr %2226 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],si8>
%2228 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2229 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2230 = torch.aten.item %2228 : !torch.vtensor<[],f32> -> !torch.float
%2231 = torch.aten.item %2229 : !torch.vtensor<[],si8> -> !torch.int
%2232 = torch.aten._make_per_tensor_quantized_tensor %2227, %2230, %2231 : !torch.vtensor<[1,2048,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2233 = torch.aten.dequantize.self %2232 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],f32>
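// Projection shortcut for the downsampling block: weight %92 ([2048,1024,1,1]) and
// bias %93 are applied to the block input %2108 with stride [2,2], so the skip path
// also maps [1,1024,14,14] -> [1,2048,7,7] to match the main branch.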
%2234 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2235 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_583 = torch.constant.int 12
%2236 = torch.aten.item %2234 : !torch.vtensor<[],f32> -> !torch.float
%2237 = torch.aten.item %2235 : !torch.vtensor<[],si8> -> !torch.int
%2238 = torch.aten.quantize_per_tensor %92, %2236, %2237, %int12_583 : !torch.vtensor<[2048,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048,1024,1,1],!torch.qint8>
%2239 = torch.aten.int_repr %2238 : !torch.vtensor<[2048,1024,1,1],!torch.qint8> -> !torch.vtensor<[2048,1024,1,1],si8>
%2240 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2241 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2242 = torch.aten.item %2240 : !torch.vtensor<[],f32> -> !torch.float
%2243 = torch.aten.item %2241 : !torch.vtensor<[],si8> -> !torch.int
%2244 = torch.aten._make_per_tensor_quantized_tensor %2239, %2242, %2243 : !torch.vtensor<[2048,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048,1024,1,1],!torch.qint8>
%2245 = torch.aten.dequantize.self %2244 : !torch.vtensor<[2048,1024,1,1],!torch.qint8> -> !torch.vtensor<[2048,1024,1,1],f32>
%2246 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2247 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_584 = torch.constant.int 12
%2248 = torch.aten.item %2246 : !torch.vtensor<[],f32> -> !torch.float
%2249 = torch.aten.item %2247 : !torch.vtensor<[],si8> -> !torch.int
%2250 = torch.aten.quantize_per_tensor %93, %2248, %2249, %int12_584 : !torch.vtensor<[2048],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2251 = torch.aten.int_repr %2250 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],si8>
%2252 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2253 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2254 = torch.aten.item %2252 : !torch.vtensor<[],f32> -> !torch.float
%2255 = torch.aten.item %2253 : !torch.vtensor<[],si8> -> !torch.int
%2256 = torch.aten._make_per_tensor_quantized_tensor %2251, %2254, %2255 : !torch.vtensor<[2048],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2257 = torch.aten.dequantize.self %2256 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],f32>
%int0_585 = torch.constant.int 0
%int0_586 = torch.constant.int 0
%int1_587 = torch.constant.int 1
%int1_588 = torch.constant.int 1
%int2_589 = torch.constant.int 2
%int2_590 = torch.constant.int 2
%int0_591 = torch.constant.int 0
%2258 = torch.prim.ListConstruct %int0_585, %int0_586 : (!torch.int, !torch.int) -> !torch.list<int>
%2259 = torch.prim.ListConstruct %int1_587, %int1_588 : (!torch.int, !torch.int) -> !torch.list<int>
%2260 = torch.prim.ListConstruct %int2_589, %int2_590 : (!torch.int, !torch.int) -> !torch.list<int>
%2261 = torch.prim.ListConstruct %int0_591, %int0_591 : (!torch.int, !torch.int) -> !torch.list<int>
%false_592 = torch.constant.bool false
%int1_593 = torch.constant.int 1
%2262 = torch.aten.convolution %2108, %2245, %2257, %2260, %2258, %2259, %false_592, %2261, %int1_593 : !torch.vtensor<[1,1024,14,14],f32>, !torch.vtensor<[2048,1024,1,1],f32>, !torch.vtensor<[2048],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2048,7,7],f32>
%2263 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2264 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_594 = torch.constant.int 12
%2265 = torch.aten.item %2263 : !torch.vtensor<[],f32> -> !torch.float
%2266 = torch.aten.item %2264 : !torch.vtensor<[],si8> -> !torch.int
%2267 = torch.aten.quantize_per_tensor %2262, %2265, %2266, %int12_594 : !torch.vtensor<[1,2048,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2268 = torch.aten.int_repr %2267 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],si8>
%2269 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2270 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2271 = torch.aten.item %2269 : !torch.vtensor<[],f32> -> !torch.float
%2272 = torch.aten.item %2270 : !torch.vtensor<[],si8> -> !torch.int
%2273 = torch.aten._make_per_tensor_quantized_tensor %2268, %2271, %2272 : !torch.vtensor<[1,2048,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2274 = torch.aten.dequantize.self %2273 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],f32>
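// Merge: main branch %2233 + projection branch %2274, then ReLU ([1,2048,7,7]).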
%int1_595 = torch.constant.int 1
%2275 = torch.aten.add.Tensor %2233, %2274, %int1_595 : !torch.vtensor<[1,2048,7,7],f32>, !torch.vtensor<[1,2048,7,7],f32>, !torch.int -> !torch.vtensor<[1,2048,7,7],f32>
%2276 = torch.aten.relu %2275 : !torch.vtensor<[1,2048,7,7],f32> -> !torch.vtensor<[1,2048,7,7],f32>
%2277 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2278 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_596 = torch.constant.int 12
%2279 = torch.aten.item %2277 : !torch.vtensor<[],f32> -> !torch.float
%2280 = torch.aten.item %2278 : !torch.vtensor<[],si8> -> !torch.int
%2281 = torch.aten.quantize_per_tensor %2276, %2279, %2280, %int12_596 : !torch.vtensor<[1,2048,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2282 = torch.aten.int_repr %2281 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],si8>
%2283 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2284 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2285 = torch.aten.item %2283 : !torch.vtensor<[],f32> -> !torch.float
%2286 = torch.aten.item %2284 : !torch.vtensor<[],si8> -> !torch.int
%2287 = torch.aten._make_per_tensor_quantized_tensor %2282, %2285, %2286 : !torch.vtensor<[1,2048,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2288 = torch.aten.dequantize.self %2287 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],f32>
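// Second 7x7 bottleneck: 1x1 reduction weight %94 ([512,2048,1,1]) and bias %95.
// Note the finer weight scale 4.8828125E-4 = 2^-11 for this layer.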
%2289 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2290 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_597 = torch.constant.int 12
%2291 = torch.aten.item %2289 : !torch.vtensor<[],f32> -> !torch.float
%2292 = torch.aten.item %2290 : !torch.vtensor<[],si8> -> !torch.int
%2293 = torch.aten.quantize_per_tensor %94, %2291, %2292, %int12_597 : !torch.vtensor<[512,2048,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,2048,1,1],!torch.qint8>
%2294 = torch.aten.int_repr %2293 : !torch.vtensor<[512,2048,1,1],!torch.qint8> -> !torch.vtensor<[512,2048,1,1],si8>
%2295 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2296 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2297 = torch.aten.item %2295 : !torch.vtensor<[],f32> -> !torch.float
%2298 = torch.aten.item %2296 : !torch.vtensor<[],si8> -> !torch.int
%2299 = torch.aten._make_per_tensor_quantized_tensor %2294, %2297, %2298 : !torch.vtensor<[512,2048,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,2048,1,1],!torch.qint8>
%2300 = torch.aten.dequantize.self %2299 : !torch.vtensor<[512,2048,1,1],!torch.qint8> -> !torch.vtensor<[512,2048,1,1],f32>
%2301 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2302 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_598 = torch.constant.int 12
%2303 = torch.aten.item %2301 : !torch.vtensor<[],f32> -> !torch.float
%2304 = torch.aten.item %2302 : !torch.vtensor<[],si8> -> !torch.int
%2305 = torch.aten.quantize_per_tensor %95, %2303, %2304, %int12_598 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2306 = torch.aten.int_repr %2305 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2307 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2308 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2309 = torch.aten.item %2307 : !torch.vtensor<[],f32> -> !torch.float
%2310 = torch.aten.item %2308 : !torch.vtensor<[],si8> -> !torch.int
%2311 = torch.aten._make_per_tensor_quantized_tensor %2306, %2309, %2310 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2312 = torch.aten.dequantize.self %2311 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_599 = torch.constant.int 0
%int0_600 = torch.constant.int 0
%int1_601 = torch.constant.int 1
%int1_602 = torch.constant.int 1
%int1_603 = torch.constant.int 1
%int1_604 = torch.constant.int 1
%int0_605 = torch.constant.int 0
%2313 = torch.prim.ListConstruct %int0_599, %int0_600 : (!torch.int, !torch.int) -> !torch.list<int>
%2314 = torch.prim.ListConstruct %int1_601, %int1_602 : (!torch.int, !torch.int) -> !torch.list<int>
%2315 = torch.prim.ListConstruct %int1_603, %int1_604 : (!torch.int, !torch.int) -> !torch.list<int>
%2316 = torch.prim.ListConstruct %int0_605, %int0_605 : (!torch.int, !torch.int) -> !torch.list<int>
%false_606 = torch.constant.bool false
%int1_607 = torch.constant.int 1
%2317 = torch.aten.convolution %2288, %2300, %2312, %2315, %2313, %2314, %false_606, %2316, %int1_607 : !torch.vtensor<[1,2048,7,7],f32>, !torch.vtensor<[512,2048,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,7,7],f32>
%2318 = torch.aten.relu %2317 : !torch.vtensor<[1,512,7,7],f32> -> !torch.vtensor<[1,512,7,7],f32>
%2319 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2320 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_608 = torch.constant.int 12
%2321 = torch.aten.item %2319 : !torch.vtensor<[],f32> -> !torch.float
%2322 = torch.aten.item %2320 : !torch.vtensor<[],si8> -> !torch.int
%2323 = torch.aten.quantize_per_tensor %2318, %2321, %2322, %int12_608 : !torch.vtensor<[1,512,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2324 = torch.aten.int_repr %2323 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],si8>
%2325 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2326 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2327 = torch.aten.item %2325 : !torch.vtensor<[],f32> -> !torch.float
%2328 = torch.aten.item %2326 : !torch.vtensor<[],si8> -> !torch.int
%2329 = torch.aten._make_per_tensor_quantized_tensor %2324, %2327, %2328 : !torch.vtensor<[1,512,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2330 = torch.aten.dequantize.self %2329 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],f32>
%2331 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2332 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_609 = torch.constant.int 12
%2333 = torch.aten.item %2331 : !torch.vtensor<[],f32> -> !torch.float
%2334 = torch.aten.item %2332 : !torch.vtensor<[],si8> -> !torch.int
%2335 = torch.aten.quantize_per_tensor %96, %2333, %2334, %int12_609 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2336 = torch.aten.int_repr %2335 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%2337 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2338 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2339 = torch.aten.item %2337 : !torch.vtensor<[],f32> -> !torch.float
%2340 = torch.aten.item %2338 : !torch.vtensor<[],si8> -> !torch.int
%2341 = torch.aten._make_per_tensor_quantized_tensor %2336, %2339, %2340 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2342 = torch.aten.dequantize.self %2341 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%2343 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2344 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_610 = torch.constant.int 12
%2345 = torch.aten.item %2343 : !torch.vtensor<[],f32> -> !torch.float
%2346 = torch.aten.item %2344 : !torch.vtensor<[],si8> -> !torch.int
%2347 = torch.aten.quantize_per_tensor %97, %2345, %2346, %int12_610 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2348 = torch.aten.int_repr %2347 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2349 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2350 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2351 = torch.aten.item %2349 : !torch.vtensor<[],f32> -> !torch.float
%2352 = torch.aten.item %2350 : !torch.vtensor<[],si8> -> !torch.int
%2353 = torch.aten._make_per_tensor_quantized_tensor %2348, %2351, %2352 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2354 = torch.aten.dequantize.self %2353 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int1_611 = torch.constant.int 1
%int1_612 = torch.constant.int 1
%int1_613 = torch.constant.int 1
%int1_614 = torch.constant.int 1
%int1_615 = torch.constant.int 1
%int1_616 = torch.constant.int 1
%int0_617 = torch.constant.int 0
%2355 = torch.prim.ListConstruct %int1_611, %int1_612 : (!torch.int, !torch.int) -> !torch.list<int>
%2356 = torch.prim.ListConstruct %int1_613, %int1_614 : (!torch.int, !torch.int) -> !torch.list<int>
%2357 = torch.prim.ListConstruct %int1_615, %int1_616 : (!torch.int, !torch.int) -> !torch.list<int>
%2358 = torch.prim.ListConstruct %int0_617, %int0_617 : (!torch.int, !torch.int) -> !torch.list<int>
%false_618 = torch.constant.bool false
%int1_619 = torch.constant.int 1
%2359 = torch.aten.convolution %2330, %2342, %2354, %2357, %2355, %2356, %false_618, %2358, %int1_619 : !torch.vtensor<[1,512,7,7],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,7,7],f32>
%2360 = torch.aten.relu %2359 : !torch.vtensor<[1,512,7,7],f32> -> !torch.vtensor<[1,512,7,7],f32>
%2361 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2362 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_620 = torch.constant.int 12
%2363 = torch.aten.item %2361 : !torch.vtensor<[],f32> -> !torch.float
%2364 = torch.aten.item %2362 : !torch.vtensor<[],si8> -> !torch.int
%2365 = torch.aten.quantize_per_tensor %2360, %2363, %2364, %int12_620 : !torch.vtensor<[1,512,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2366 = torch.aten.int_repr %2365 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],si8>
%2367 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2368 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2369 = torch.aten.item %2367 : !torch.vtensor<[],f32> -> !torch.float
%2370 = torch.aten.item %2368 : !torch.vtensor<[],si8> -> !torch.int
%2371 = torch.aten._make_per_tensor_quantized_tensor %2366, %2369, %2370 : !torch.vtensor<[1,512,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2372 = torch.aten.dequantize.self %2371 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],f32>
%2373 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2374 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_621 = torch.constant.int 12
%2375 = torch.aten.item %2373 : !torch.vtensor<[],f32> -> !torch.float
%2376 = torch.aten.item %2374 : !torch.vtensor<[],si8> -> !torch.int
%2377 = torch.aten.quantize_per_tensor %98, %2375, %2376, %int12_621 : !torch.vtensor<[2048,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2378 = torch.aten.int_repr %2377 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],si8>
%2379 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2380 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2381 = torch.aten.item %2379 : !torch.vtensor<[],f32> -> !torch.float
%2382 = torch.aten.item %2380 : !torch.vtensor<[],si8> -> !torch.int
%2383 = torch.aten._make_per_tensor_quantized_tensor %2378, %2381, %2382 : !torch.vtensor<[2048,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2384 = torch.aten.dequantize.self %2383 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],f32>
%2385 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2386 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_622 = torch.constant.int 12
%2387 = torch.aten.item %2385 : !torch.vtensor<[],f32> -> !torch.float
%2388 = torch.aten.item %2386 : !torch.vtensor<[],si8> -> !torch.int
%2389 = torch.aten.quantize_per_tensor %99, %2387, %2388, %int12_622 : !torch.vtensor<[2048],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2390 = torch.aten.int_repr %2389 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],si8>
%2391 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2392 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2393 = torch.aten.item %2391 : !torch.vtensor<[],f32> -> !torch.float
%2394 = torch.aten.item %2392 : !torch.vtensor<[],si8> -> !torch.int
%2395 = torch.aten._make_per_tensor_quantized_tensor %2390, %2393, %2394 : !torch.vtensor<[2048],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2396 = torch.aten.dequantize.self %2395 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],f32>
%int0_623 = torch.constant.int 0
%int0_624 = torch.constant.int 0
%int1_625 = torch.constant.int 1
%int1_626 = torch.constant.int 1
%int1_627 = torch.constant.int 1
%int1_628 = torch.constant.int 1
%int0_629 = torch.constant.int 0
%2397 = torch.prim.ListConstruct %int0_623, %int0_624 : (!torch.int, !torch.int) -> !torch.list<int>
%2398 = torch.prim.ListConstruct %int1_625, %int1_626 : (!torch.int, !torch.int) -> !torch.list<int>
%2399 = torch.prim.ListConstruct %int1_627, %int1_628 : (!torch.int, !torch.int) -> !torch.list<int>
%2400 = torch.prim.ListConstruct %int0_629, %int0_629 : (!torch.int, !torch.int) -> !torch.list<int>
%false_630 = torch.constant.bool false
%int1_631 = torch.constant.int 1
%2401 = torch.aten.convolution %2372, %2384, %2396, %2399, %2397, %2398, %false_630, %2400, %int1_631 : !torch.vtensor<[1,512,7,7],f32>, !torch.vtensor<[2048,512,1,1],f32>, !torch.vtensor<[2048],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2048,7,7],f32>
%2402 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2403 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_632 = torch.constant.int 12
%2404 = torch.aten.item %2402 : !torch.vtensor<[],f32> -> !torch.float
%2405 = torch.aten.item %2403 : !torch.vtensor<[],si8> -> !torch.int
%2406 = torch.aten.quantize_per_tensor %2401, %2404, %2405, %int12_632 : !torch.vtensor<[1,2048,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2407 = torch.aten.int_repr %2406 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],si8>
%2408 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2409 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2410 = torch.aten.item %2408 : !torch.vtensor<[],f32> -> !torch.float
%2411 = torch.aten.item %2409 : !torch.vtensor<[],si8> -> !torch.int
%2412 = torch.aten._make_per_tensor_quantized_tensor %2407, %2410, %2411 : !torch.vtensor<[1,2048,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2413 = torch.aten.dequantize.self %2412 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],f32>
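// Identity shortcut: the block input %2288 is added back and ReLU applied,
// closing the second 7x7 bottleneck.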
%int1_633 = torch.constant.int 1
%2414 = torch.aten.add.Tensor %2413, %2288, %int1_633 : !torch.vtensor<[1,2048,7,7],f32>, !torch.vtensor<[1,2048,7,7],f32>, !torch.int -> !torch.vtensor<[1,2048,7,7],f32>
%2415 = torch.aten.relu %2414 : !torch.vtensor<[1,2048,7,7],f32> -> !torch.vtensor<[1,2048,7,7],f32>
%2416 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2417 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_634 = torch.constant.int 12
%2418 = torch.aten.item %2416 : !torch.vtensor<[],f32> -> !torch.float
%2419 = torch.aten.item %2417 : !torch.vtensor<[],si8> -> !torch.int
%2420 = torch.aten.quantize_per_tensor %2415, %2418, %2419, %int12_634 : !torch.vtensor<[1,2048,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2421 = torch.aten.int_repr %2420 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],si8>
%2422 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2424 = torch.aten.item %2422 : !torch.vtensor<[],f32> -> !torch.float
%2425 = torch.aten.item %2423 : !torch.vtensor<[],si8> -> !torch.int
%2426 = torch.aten._make_per_tensor_quantized_tensor %2421, %2424, %2425 : !torch.vtensor<[1,2048,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2427 = torch.aten.dequantize.self %2426 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],f32>
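// Third 7x7 bottleneck begins (likely the final block, if this is ResNet-50 layer4):
// 1x1 reduction weight %100 ([512,2048,1,1]) and bias %101 are quantized below.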
%2428 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_635 = torch.constant.int 12
%2430 = torch.aten.item %2428 : !torch.vtensor<[],f32> -> !torch.float
%2431 = torch.aten.item %2429 : !torch.vtensor<[],si8> -> !torch.int
%2432 = torch.aten.quantize_per_tensor %100, %2430, %2431, %int12_635 : !torch.vtensor<[512,2048,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,2048,1,1],!torch.qint8>
%2433 = torch.aten.int_repr %2432 : !torch.vtensor<[512,2048,1,1],!torch.qint8> -> !torch.vtensor<[512,2048,1,1],si8>
%2434 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2435 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2436 = torch.aten.item %2434 : !torch.vtensor<[],f32> -> !torch.float
%2437 = torch.aten.item %2435 : !torch.vtensor<[],si8> -> !torch.int
%2438 = torch.aten._make_per_tensor_quantized_tensor %2433, %2436, %2437 : !torch.vtensor<[512,2048,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,2048,1,1],!torch.qint8>
%2439 = torch.aten.dequantize.self %2438 : !torch.vtensor<[512,2048,1,1],!torch.qint8> -> !torch.vtensor<[512,2048,1,1],f32>
%2440 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2441 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_636 = torch.constant.int 12
%2442 = torch.aten.item %2440 : !torch.vtensor<[],f32> -> !torch.float
%2443 = torch.aten.item %2441 : !torch.vtensor<[],si8> -> !torch.int
%2444 = torch.aten.quantize_per_tensor %101, %2442, %2443, %int12_636 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2445 = torch.aten.int_repr %2444 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2446 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2447 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2448 = torch.aten.item %2446 : !torch.vtensor<[],f32> -> !torch.float
%2449 = torch.aten.item %2447 : !torch.vtensor<[],si8> -> !torch.int
%2450 = torch.aten._make_per_tensor_quantized_tensor %2445, %2448, %2449 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2451 = torch.aten.dequantize.self %2450 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
%int0_637 = torch.constant.int 0
%int0_638 = torch.constant.int 0
%int1_639 = torch.constant.int 1
%int1_640 = torch.constant.int 1
%int1_641 = torch.constant.int 1
%int1_642 = torch.constant.int 1
%int0_643 = torch.constant.int 0
%2452 = torch.prim.ListConstruct %int0_637, %int0_638 : (!torch.int, !torch.int) -> !torch.list<int>
%2453 = torch.prim.ListConstruct %int1_639, %int1_640 : (!torch.int, !torch.int) -> !torch.list<int>
%2454 = torch.prim.ListConstruct %int1_641, %int1_642 : (!torch.int, !torch.int) -> !torch.list<int>
%2455 = torch.prim.ListConstruct %int0_643, %int0_643 : (!torch.int, !torch.int) -> !torch.list<int>
%false_644 = torch.constant.bool false
%int1_645 = torch.constant.int 1
%2456 = torch.aten.convolution %2427, %2439, %2451, %2454, %2452, %2453, %false_644, %2455, %int1_645 : !torch.vtensor<[1,2048,7,7],f32>, !torch.vtensor<[512,2048,1,1],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,7,7],f32>
%2457 = torch.aten.relu %2456 : !torch.vtensor<[1,512,7,7],f32> -> !torch.vtensor<[1,512,7,7],f32>
%2458 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2459 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_646 = torch.constant.int 12
%2460 = torch.aten.item %2458 : !torch.vtensor<[],f32> -> !torch.float
%2461 = torch.aten.item %2459 : !torch.vtensor<[],si8> -> !torch.int
%2462 = torch.aten.quantize_per_tensor %2457, %2460, %2461, %int12_646 : !torch.vtensor<[1,512,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2463 = torch.aten.int_repr %2462 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],si8>
%2464 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2465 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2466 = torch.aten.item %2464 : !torch.vtensor<[],f32> -> !torch.float
%2467 = torch.aten.item %2465 : !torch.vtensor<[],si8> -> !torch.int
%2468 = torch.aten._make_per_tensor_quantized_tensor %2463, %2466, %2467 : !torch.vtensor<[1,512,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2469 = torch.aten.dequantize.self %2468 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],f32>
%2470 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2471 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_647 = torch.constant.int 12
%2472 = torch.aten.item %2470 : !torch.vtensor<[],f32> -> !torch.float
%2473 = torch.aten.item %2471 : !torch.vtensor<[],si8> -> !torch.int
%2474 = torch.aten.quantize_per_tensor %102, %2472, %2473, %int12_647 : !torch.vtensor<[512,512,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2475 = torch.aten.int_repr %2474 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],si8>
%2476 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2477 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2478 = torch.aten.item %2476 : !torch.vtensor<[],f32> -> !torch.float
%2479 = torch.aten.item %2477 : !torch.vtensor<[],si8> -> !torch.int
%2480 = torch.aten._make_per_tensor_quantized_tensor %2475, %2478, %2479 : !torch.vtensor<[512,512,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,512,3,3],!torch.qint8>
%2481 = torch.aten.dequantize.self %2480 : !torch.vtensor<[512,512,3,3],!torch.qint8> -> !torch.vtensor<[512,512,3,3],f32>
%2482 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2483 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_648 = torch.constant.int 12
%2484 = torch.aten.item %2482 : !torch.vtensor<[],f32> -> !torch.float
%2485 = torch.aten.item %2483 : !torch.vtensor<[],si8> -> !torch.int
%2486 = torch.aten.quantize_per_tensor %103, %2484, %2485, %int12_648 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2487 = torch.aten.int_repr %2486 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8>
%2488 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2489 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2490 = torch.aten.item %2488 : !torch.vtensor<[],f32> -> !torch.float
%2491 = torch.aten.item %2489 : !torch.vtensor<[],si8> -> !torch.int
%2492 = torch.aten._make_per_tensor_quantized_tensor %2487, %2490, %2491 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8>
%2493 = torch.aten.dequantize.self %2492 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32>
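    // Conv 2: 3x3 convolution, 512 -> 512 channels, stride 1, pad 1, followed by ReLU.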
%int1_649 = torch.constant.int 1
%int1_650 = torch.constant.int 1
%int1_651 = torch.constant.int 1
%int1_652 = torch.constant.int 1
%int1_653 = torch.constant.int 1
%int1_654 = torch.constant.int 1
%int0_655 = torch.constant.int 0
%2494 = torch.prim.ListConstruct %int1_649, %int1_650 : (!torch.int, !torch.int) -> !torch.list<int>
%2495 = torch.prim.ListConstruct %int1_651, %int1_652 : (!torch.int, !torch.int) -> !torch.list<int>
%2496 = torch.prim.ListConstruct %int1_653, %int1_654 : (!torch.int, !torch.int) -> !torch.list<int>
%2497 = torch.prim.ListConstruct %int0_655, %int0_655 : (!torch.int, !torch.int) -> !torch.list<int>
%false_656 = torch.constant.bool false
%int1_657 = torch.constant.int 1
%2498 = torch.aten.convolution %2469, %2481, %2493, %2496, %2494, %2495, %false_656, %2497, %int1_657 : !torch.vtensor<[1,512,7,7],f32>, !torch.vtensor<[512,512,3,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,512,7,7],f32>
%2499 = torch.aten.relu %2498 : !torch.vtensor<[1,512,7,7],f32> -> !torch.vtensor<[1,512,7,7],f32>
%2500 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2501 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_658 = torch.constant.int 12
%2502 = torch.aten.item %2500 : !torch.vtensor<[],f32> -> !torch.float
%2503 = torch.aten.item %2501 : !torch.vtensor<[],si8> -> !torch.int
%2504 = torch.aten.quantize_per_tensor %2499, %2502, %2503, %int12_658 : !torch.vtensor<[1,512,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2505 = torch.aten.int_repr %2504 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],si8>
%2506 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2507 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2508 = torch.aten.item %2506 : !torch.vtensor<[],f32> -> !torch.float
%2509 = torch.aten.item %2507 : !torch.vtensor<[],si8> -> !torch.int
%2510 = torch.aten._make_per_tensor_quantized_tensor %2505, %2508, %2509 : !torch.vtensor<[1,512,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,512,7,7],!torch.qint8>
%2511 = torch.aten.dequantize.self %2510 : !torch.vtensor<[1,512,7,7],!torch.qint8> -> !torch.vtensor<[1,512,7,7],f32>
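    // Fake-quantize the 1x1 expansion weights (%104, 512 -> 2048) and bias (%105), both at scale 1/128.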
%2512 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2513 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_659 = torch.constant.int 12
%2514 = torch.aten.item %2512 : !torch.vtensor<[],f32> -> !torch.float
%2515 = torch.aten.item %2513 : !torch.vtensor<[],si8> -> !torch.int
%2516 = torch.aten.quantize_per_tensor %104, %2514, %2515, %int12_659 : !torch.vtensor<[2048,512,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2517 = torch.aten.int_repr %2516 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],si8>
%2518 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2519 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2520 = torch.aten.item %2518 : !torch.vtensor<[],f32> -> !torch.float
%2521 = torch.aten.item %2519 : !torch.vtensor<[],si8> -> !torch.int
%2522 = torch.aten._make_per_tensor_quantized_tensor %2517, %2520, %2521 : !torch.vtensor<[2048,512,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048,512,1,1],!torch.qint8>
%2523 = torch.aten.dequantize.self %2522 : !torch.vtensor<[2048,512,1,1],!torch.qint8> -> !torch.vtensor<[2048,512,1,1],f32>
%2524 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2525 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_660 = torch.constant.int 12
%2526 = torch.aten.item %2524 : !torch.vtensor<[],f32> -> !torch.float
%2527 = torch.aten.item %2525 : !torch.vtensor<[],si8> -> !torch.int
%2528 = torch.aten.quantize_per_tensor %105, %2526, %2527, %int12_660 : !torch.vtensor<[2048],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2529 = torch.aten.int_repr %2528 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],si8>
%2530 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2531 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2532 = torch.aten.item %2530 : !torch.vtensor<[],f32> -> !torch.float
%2533 = torch.aten.item %2531 : !torch.vtensor<[],si8> -> !torch.int
%2534 = torch.aten._make_per_tensor_quantized_tensor %2529, %2532, %2533 : !torch.vtensor<[2048],si8>, !torch.float, !torch.int -> !torch.vtensor<[2048],!torch.qint8>
%2535 = torch.aten.dequantize.self %2534 : !torch.vtensor<[2048],!torch.qint8> -> !torch.vtensor<[2048],f32>
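    // Conv 3: 1x1 expansion, 512 -> 2048 channels, stride 1, pad 0; no ReLU until after the residual add.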
%int0_661 = torch.constant.int 0
%int0_662 = torch.constant.int 0
%int1_663 = torch.constant.int 1
%int1_664 = torch.constant.int 1
%int1_665 = torch.constant.int 1
%int1_666 = torch.constant.int 1
%int0_667 = torch.constant.int 0
%2536 = torch.prim.ListConstruct %int0_661, %int0_662 : (!torch.int, !torch.int) -> !torch.list<int>
%2537 = torch.prim.ListConstruct %int1_663, %int1_664 : (!torch.int, !torch.int) -> !torch.list<int>
%2538 = torch.prim.ListConstruct %int1_665, %int1_666 : (!torch.int, !torch.int) -> !torch.list<int>
%2539 = torch.prim.ListConstruct %int0_667, %int0_667 : (!torch.int, !torch.int) -> !torch.list<int>
%false_668 = torch.constant.bool false
%int1_669 = torch.constant.int 1
%2540 = torch.aten.convolution %2511, %2523, %2535, %2538, %2536, %2537, %false_668, %2539, %int1_669 : !torch.vtensor<[1,512,7,7],f32>, !torch.vtensor<[2048,512,1,1],f32>, !torch.vtensor<[2048],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,2048,7,7],f32>
%2541 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2542 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_670 = torch.constant.int 12
%2543 = torch.aten.item %2541 : !torch.vtensor<[],f32> -> !torch.float
%2544 = torch.aten.item %2542 : !torch.vtensor<[],si8> -> !torch.int
%2545 = torch.aten.quantize_per_tensor %2540, %2543, %2544, %int12_670 : !torch.vtensor<[1,2048,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2546 = torch.aten.int_repr %2545 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],si8>
%2547 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2548 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2549 = torch.aten.item %2547 : !torch.vtensor<[],f32> -> !torch.float
%2550 = torch.aten.item %2548 : !torch.vtensor<[],si8> -> !torch.int
%2551 = torch.aten._make_per_tensor_quantized_tensor %2546, %2549, %2550 : !torch.vtensor<[1,2048,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2552 = torch.aten.dequantize.self %2551 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],f32>
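    // Residual connection: add the block input (%2427) to the conv output, then apply ReLU.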
%int1_671 = torch.constant.int 1
%2553 = torch.aten.add.Tensor %2552, %2427, %int1_671 : !torch.vtensor<[1,2048,7,7],f32>, !torch.vtensor<[1,2048,7,7],f32>, !torch.int -> !torch.vtensor<[1,2048,7,7],f32>
%2554 = torch.aten.relu %2553 : !torch.vtensor<[1,2048,7,7],f32> -> !torch.vtensor<[1,2048,7,7],f32>
%2555 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2556 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_672 = torch.constant.int 12
%2557 = torch.aten.item %2555 : !torch.vtensor<[],f32> -> !torch.float
%2558 = torch.aten.item %2556 : !torch.vtensor<[],si8> -> !torch.int
%2559 = torch.aten.quantize_per_tensor %2554, %2557, %2558, %int12_672 : !torch.vtensor<[1,2048,7,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2560 = torch.aten.int_repr %2559 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],si8>
%2561 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2562 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2563 = torch.aten.item %2561 : !torch.vtensor<[],f32> -> !torch.float
%2564 = torch.aten.item %2562 : !torch.vtensor<[],si8> -> !torch.int
%2565 = torch.aten._make_per_tensor_quantized_tensor %2560, %2563, %2564 : !torch.vtensor<[1,2048,7,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,7,7],!torch.qint8>
%2566 = torch.aten.dequantize.self %2565 : !torch.vtensor<[1,2048,7,7],!torch.qint8> -> !torch.vtensor<[1,2048,7,7],f32>
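    // Global average pooling: 7x7 avg_pool2d over the 7x7 map reduces [1,2048,7,7] to [1,2048,1,1].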
%int0_673 = torch.constant.int 0
%int1_674 = torch.constant.int 1
%int7 = torch.constant.int 7
%int7_675 = torch.constant.int 7
%2567 = torch.prim.ListConstruct %int7, %int7_675 : (!torch.int, !torch.int) -> !torch.list<int>
%2568 = torch.prim.ListConstruct %int0_673, %int0_673 : (!torch.int, !torch.int) -> !torch.list<int>
%2569 = torch.prim.ListConstruct %int1_674, %int1_674 : (!torch.int, !torch.int) -> !torch.list<int>
%false_676 = torch.constant.bool false
%none_677 = torch.constant.none
%2570 = torch.aten.avg_pool2d %2566, %2567, %2569, %2568, %false_676, %false_676, %none_677 : !torch.vtensor<[1,2048,7,7],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,2048,1,1],f32>
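    // Scalar rescale by ~1.00488 (exactly 1029/1024); presumably a scale-correction factor emitted by the
    // quantized-GlobalAveragePool import rather than part of the original float model.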
%2571 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%2572 = torch.aten.mul.Tensor %2570, %2571 : !torch.vtensor<[1,2048,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,2048,1,1],f32>
%2573 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2574 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_678 = torch.constant.int 12
%2575 = torch.aten.item %2573 : !torch.vtensor<[],f32> -> !torch.float
%2576 = torch.aten.item %2574 : !torch.vtensor<[],si8> -> !torch.int
%2577 = torch.aten.quantize_per_tensor %2572, %2575, %2576, %int12_678 : !torch.vtensor<[1,2048,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,2048,1,1],!torch.qint8>
%2578 = torch.aten.int_repr %2577 : !torch.vtensor<[1,2048,1,1],!torch.qint8> -> !torch.vtensor<[1,2048,1,1],si8>
%2579 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2580 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2581 = torch.aten.item %2579 : !torch.vtensor<[],f32> -> !torch.float
%2582 = torch.aten.item %2580 : !torch.vtensor<[],si8> -> !torch.int
%2583 = torch.aten._make_per_tensor_quantized_tensor %2578, %2581, %2582 : !torch.vtensor<[1,2048,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,2048,1,1],!torch.qint8>
%2584 = torch.aten.dequantize.self %2583 : !torch.vtensor<[1,2048,1,1],!torch.qint8> -> !torch.vtensor<[1,2048,1,1],f32>
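    // Flatten [1,2048,1,1] -> [1,2048]: collapse dims 1..3, then a no-op collapse of dim 0.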
%int1_679 = torch.constant.int 1
%int3_680 = torch.constant.int 3
%2585 = torch.prims.collapse %2584, %int1_679, %int3_680 : !torch.vtensor<[1,2048,1,1],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,2048],f32>
%int0_681 = torch.constant.int 0
%int0_682 = torch.constant.int 0
%2586 = torch.prims.collapse %2585, %int0_681, %int0_682 : !torch.vtensor<[1,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,2048],f32>
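    // Fake-quantize the classifier weights (%106, [1000,2048], scale 1/256) and bias (%107, scale 1/2048).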
%2587 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2588 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_683 = torch.constant.int 12
%2589 = torch.aten.item %2587 : !torch.vtensor<[],f32> -> !torch.float
%2590 = torch.aten.item %2588 : !torch.vtensor<[],si8> -> !torch.int
%2591 = torch.aten.quantize_per_tensor %106, %2589, %2590, %int12_683 : !torch.vtensor<[1000,2048],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1000,2048],!torch.qint8>
%2592 = torch.aten.int_repr %2591 : !torch.vtensor<[1000,2048],!torch.qint8> -> !torch.vtensor<[1000,2048],si8>
%2593 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2594 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2595 = torch.aten.item %2593 : !torch.vtensor<[],f32> -> !torch.float
%2596 = torch.aten.item %2594 : !torch.vtensor<[],si8> -> !torch.int
%2597 = torch.aten._make_per_tensor_quantized_tensor %2592, %2595, %2596 : !torch.vtensor<[1000,2048],si8>, !torch.float, !torch.int -> !torch.vtensor<[1000,2048],!torch.qint8>
%2598 = torch.aten.dequantize.self %2597 : !torch.vtensor<[1000,2048],!torch.qint8> -> !torch.vtensor<[1000,2048],f32>
%2599 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2600 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_684 = torch.constant.int 12
%2601 = torch.aten.item %2599 : !torch.vtensor<[],f32> -> !torch.float
%2602 = torch.aten.item %2600 : !torch.vtensor<[],si8> -> !torch.int
%2603 = torch.aten.quantize_per_tensor %107, %2601, %2602, %int12_684 : !torch.vtensor<[1000],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1000],!torch.qint8>
%2604 = torch.aten.int_repr %2603 : !torch.vtensor<[1000],!torch.qint8> -> !torch.vtensor<[1000],si8>
%2605 = torch.vtensor.literal(dense<4.8828125E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%2606 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2607 = torch.aten.item %2605 : !torch.vtensor<[],f32> -> !torch.float
%2608 = torch.aten.item %2606 : !torch.vtensor<[],si8> -> !torch.int
%2609 = torch.aten._make_per_tensor_quantized_tensor %2604, %2607, %2608 : !torch.vtensor<[1000],si8>, !torch.float, !torch.int -> !torch.vtensor<[1000],!torch.qint8>
%2610 = torch.aten.dequantize.self %2609 : !torch.vtensor<[1000],!torch.qint8> -> !torch.vtensor<[1000],f32>
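    // Fully connected layer: logits = x * W^T + b, lowered as transpose + mm + add (matching an ONNX Gemm lowering).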
%int0_685 = torch.constant.int 0
%int1_686 = torch.constant.int 1
%2611 = torch.aten.transpose.int %2598, %int0_685, %int1_686 : !torch.vtensor<[1000,2048],f32>, !torch.int, !torch.int -> !torch.vtensor<[2048,1000],f32>
%2612 = torch.aten.mm %2586, %2611 : !torch.vtensor<[1,2048],f32>, !torch.vtensor<[2048,1000],f32> -> !torch.vtensor<[1,1000],f32>
%2613 = torch.aten.add.Tensor %2612, %2610, %int1_686 : !torch.vtensor<[1,1000],f32>, !torch.vtensor<[1000],f32>, !torch.int -> !torch.vtensor<[1,1000],f32>
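    // Final QDQ of the logits (scale 1/16) before returning the dequantized [1,1000] result.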
%2614 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2615 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_687 = torch.constant.int 12
%2616 = torch.aten.item %2614 : !torch.vtensor<[],f32> -> !torch.float
%2617 = torch.aten.item %2615 : !torch.vtensor<[],si8> -> !torch.int
%2618 = torch.aten.quantize_per_tensor %2613, %2616, %2617, %int12_687 : !torch.vtensor<[1,1000],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,1000],!torch.qint8>
%2619 = torch.aten.int_repr %2618 : !torch.vtensor<[1,1000],!torch.qint8> -> !torch.vtensor<[1,1000],si8>
%2620 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2621 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2622 = torch.aten.item %2620 : !torch.vtensor<[],f32> -> !torch.float
%2623 = torch.aten.item %2621 : !torch.vtensor<[],si8> -> !torch.int
%2624 = torch.aten._make_per_tensor_quantized_tensor %2619, %2622, %2623 : !torch.vtensor<[1,1000],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,1000],!torch.qint8>
%2625 = torch.aten.dequantize.self %2624 : !torch.vtensor<[1,1000],!torch.qint8> -> !torch.vtensor<[1,1000],f32>
return %2625 : !torch.vtensor<[1,1000],f32>
}
}