Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save AmosLewis/eb8849f517b442b92d241ca3bca7baf2 to your computer and use it in GitHub Desktop.
module {
func.func @torch_jit(%arg0: !torch.vtensor<[1,3,224,224],f32>) -> !torch.vtensor<[1,21,224,224],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.13.1"} {
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x3x3x3xf32>) : !torch.vtensor<[16,3,3,3],f32>
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x1x3x3xf32>) : !torch.vtensor<[16,1,3,3],f32>
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16x16x1x1xf32>) : !torch.vtensor<[16,16,1,1],f32>
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<16xf32>) : !torch.vtensor<[16],f32>
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x16x1x1xf32>) : !torch.vtensor<[64,16,1,1],f32>
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x1x3x3xf32>) : !torch.vtensor<[64,1,3,3],f32>
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<24x64x1x1xf32>) : !torch.vtensor<[24,64,1,1],f32>
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<24xf32>) : !torch.vtensor<[24],f32>
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72x24x1x1xf32>) : !torch.vtensor<[72,24,1,1],f32>
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72xf32>) : !torch.vtensor<[72],f32>
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72x1x3x3xf32>) : !torch.vtensor<[72,1,3,3],f32>
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72xf32>) : !torch.vtensor<[72],f32>
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<24x72x1x1xf32>) : !torch.vtensor<[24,72,1,1],f32>
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<24xf32>) : !torch.vtensor<[24],f32>
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72x24x1x1xf32>) : !torch.vtensor<[72,24,1,1],f32>
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72xf32>) : !torch.vtensor<[72],f32>
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72x1x5x5xf32>) : !torch.vtensor<[72,1,5,5],f32>
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72xf32>) : !torch.vtensor<[72],f32>
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<24x72x1x1xf32>) : !torch.vtensor<[24,72,1,1],f32>
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<24xf32>) : !torch.vtensor<[24],f32>
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72x24x1x1xf32>) : !torch.vtensor<[72,24,1,1],f32>
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<72xf32>) : !torch.vtensor<[72],f32>
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<40x72x1x1xf32>) : !torch.vtensor<[40,72,1,1],f32>
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<40xf32>) : !torch.vtensor<[40],f32>
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120x40x1x1xf32>) : !torch.vtensor<[120,40,1,1],f32>
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120xf32>) : !torch.vtensor<[120],f32>
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120x1x5x5xf32>) : !torch.vtensor<[120,1,5,5],f32>
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120xf32>) : !torch.vtensor<[120],f32>
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x120x1x1xf32>) : !torch.vtensor<[32,120,1,1],f32>
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120x32x1x1xf32>) : !torch.vtensor<[120,32,1,1],f32>
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120xf32>) : !torch.vtensor<[120],f32>
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<40x120x1x1xf32>) : !torch.vtensor<[40,120,1,1],f32>
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<40xf32>) : !torch.vtensor<[40],f32>
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120x40x1x1xf32>) : !torch.vtensor<[120,40,1,1],f32>
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120xf32>) : !torch.vtensor<[120],f32>
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120x1x5x5xf32>) : !torch.vtensor<[120,1,5,5],f32>
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120xf32>) : !torch.vtensor<[120],f32>
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x120x1x1xf32>) : !torch.vtensor<[32,120,1,1],f32>
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120x32x1x1xf32>) : !torch.vtensor<[120,32,1,1],f32>
%45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120xf32>) : !torch.vtensor<[120],f32>
%46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<40x120x1x1xf32>) : !torch.vtensor<[40,120,1,1],f32>
%47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<40xf32>) : !torch.vtensor<[40],f32>
%48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<240x40x1x1xf32>) : !torch.vtensor<[240,40,1,1],f32>
%49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<240xf32>) : !torch.vtensor<[240],f32>
%50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<240x1x3x3xf32>) : !torch.vtensor<[240,1,3,3],f32>
%51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<240xf32>) : !torch.vtensor<[240],f32>
%52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<80x240x1x1xf32>) : !torch.vtensor<[80,240,1,1],f32>
%53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<80xf32>) : !torch.vtensor<[80],f32>
%54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<200x80x1x1xf32>) : !torch.vtensor<[200,80,1,1],f32>
%55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<200xf32>) : !torch.vtensor<[200],f32>
%56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<200x1x3x3xf32>) : !torch.vtensor<[200,1,3,3],f32>
%57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<200xf32>) : !torch.vtensor<[200],f32>
%58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<80x200x1x1xf32>) : !torch.vtensor<[80,200,1,1],f32>
%59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<80xf32>) : !torch.vtensor<[80],f32>
%60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<184x80x1x1xf32>) : !torch.vtensor<[184,80,1,1],f32>
%61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<184xf32>) : !torch.vtensor<[184],f32>
%62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<184x1x3x3xf32>) : !torch.vtensor<[184,1,3,3],f32>
%63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<184xf32>) : !torch.vtensor<[184],f32>
%64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<80x184x1x1xf32>) : !torch.vtensor<[80,184,1,1],f32>
%65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<80xf32>) : !torch.vtensor<[80],f32>
%66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<184x80x1x1xf32>) : !torch.vtensor<[184,80,1,1],f32>
%67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<184xf32>) : !torch.vtensor<[184],f32>
%68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<184x1x3x3xf32>) : !torch.vtensor<[184,1,3,3],f32>
%69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<184xf32>) : !torch.vtensor<[184],f32>
%70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<80x184x1x1xf32>) : !torch.vtensor<[80,184,1,1],f32>
%71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<80xf32>) : !torch.vtensor<[80],f32>
%72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<480x80x1x1xf32>) : !torch.vtensor<[480,80,1,1],f32>
%73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<480xf32>) : !torch.vtensor<[480],f32>
%74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<480x1x3x3xf32>) : !torch.vtensor<[480,1,3,3],f32>
%75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<480xf32>) : !torch.vtensor<[480],f32>
%76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120x480x1x1xf32>) : !torch.vtensor<[120,480,1,1],f32>
%77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<120xf32>) : !torch.vtensor<[120],f32>
%78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<480x120x1x1xf32>) : !torch.vtensor<[480,120,1,1],f32>
%79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<480xf32>) : !torch.vtensor<[480],f32>
%80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<112x480x1x1xf32>) : !torch.vtensor<[112,480,1,1],f32>
%81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<112xf32>) : !torch.vtensor<[112],f32>
%82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672x112x1x1xf32>) : !torch.vtensor<[672,112,1,1],f32>
%83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672xf32>) : !torch.vtensor<[672],f32>
%84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672x1x3x3xf32>) : !torch.vtensor<[672,1,3,3],f32>
%85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672xf32>) : !torch.vtensor<[672],f32>
%86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<168x672x1x1xf32>) : !torch.vtensor<[168,672,1,1],f32>
%87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<168xf32>) : !torch.vtensor<[168],f32>
%88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672x168x1x1xf32>) : !torch.vtensor<[672,168,1,1],f32>
%89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672xf32>) : !torch.vtensor<[672],f32>
%90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<112x672x1x1xf32>) : !torch.vtensor<[112,672,1,1],f32>
%91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<112xf32>) : !torch.vtensor<[112],f32>
%92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672x112x1x1xf32>) : !torch.vtensor<[672,112,1,1],f32>
%93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672xf32>) : !torch.vtensor<[672],f32>
%94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672x1x5x5xf32>) : !torch.vtensor<[672,1,5,5],f32>
%95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672xf32>) : !torch.vtensor<[672],f32>
%96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<168x672x1x1xf32>) : !torch.vtensor<[168,672,1,1],f32>
%97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<168xf32>) : !torch.vtensor<[168],f32>
%98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672x168x1x1xf32>) : !torch.vtensor<[672,168,1,1],f32>
%99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<672xf32>) : !torch.vtensor<[672],f32>
%100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<160x672x1x1xf32>) : !torch.vtensor<[160,672,1,1],f32>
%101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<160xf32>) : !torch.vtensor<[160],f32>
%102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960x160x1x1xf32>) : !torch.vtensor<[960,160,1,1],f32>
%103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960xf32>) : !torch.vtensor<[960],f32>
%104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960x1x5x5xf32>) : !torch.vtensor<[960,1,5,5],f32>
%105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960xf32>) : !torch.vtensor<[960],f32>
%106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<240x960x1x1xf32>) : !torch.vtensor<[240,960,1,1],f32>
%107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<240xf32>) : !torch.vtensor<[240],f32>
%108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960x240x1x1xf32>) : !torch.vtensor<[960,240,1,1],f32>
%109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960xf32>) : !torch.vtensor<[960],f32>
%110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<160x960x1x1xf32>) : !torch.vtensor<[160,960,1,1],f32>
%111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<160xf32>) : !torch.vtensor<[160],f32>
%112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960x160x1x1xf32>) : !torch.vtensor<[960,160,1,1],f32>
%113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960xf32>) : !torch.vtensor<[960],f32>
%114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960x1x5x5xf32>) : !torch.vtensor<[960,1,5,5],f32>
%115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960xf32>) : !torch.vtensor<[960],f32>
%116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<240x960x1x1xf32>) : !torch.vtensor<[240,960,1,1],f32>
%117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<240xf32>) : !torch.vtensor<[240],f32>
%118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960x240x1x1xf32>) : !torch.vtensor<[960,240,1,1],f32>
%119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960xf32>) : !torch.vtensor<[960],f32>
%120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<160x960x1x1xf32>) : !torch.vtensor<[160,960,1,1],f32>
%121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<160xf32>) : !torch.vtensor<[160],f32>
%122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960x160x1x1xf32>) : !torch.vtensor<[960,160,1,1],f32>
%123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<960xf32>) : !torch.vtensor<[960],f32>
%124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x960x1x1xf32>) : !torch.vtensor<[128,960,1,1],f32>
%125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x960x1x1xf32>) : !torch.vtensor<[128,960,1,1],f32>
%127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<21x40x1x1xf32>) : !torch.vtensor<[21,40,1,1],f32>
%128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<21xf32>) : !torch.vtensor<[21],f32>
%129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<21x128x1x1xf32>) : !torch.vtensor<[21,128,1,1],f32>
%130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<21xf32>) : !torch.vtensor<[21],f32>
%none = torch.constant.none
%131 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%132 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12 = torch.constant.int 12
%133 = torch.aten.item %131 : !torch.vtensor<[],f32> -> !torch.float
%134 = torch.aten.item %132 : !torch.vtensor<[],si8> -> !torch.int
%135 = torch.aten.quantize_per_tensor %arg0, %133, %134, %int12 : !torch.vtensor<[1,3,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,3,224,224],!torch.qint8>
%136 = torch.aten.int_repr %135 : !torch.vtensor<[1,3,224,224],!torch.qint8> -> !torch.vtensor<[1,3,224,224],si8>
%137 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%138 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%139 = torch.aten.item %137 : !torch.vtensor<[],f32> -> !torch.float
%140 = torch.aten.item %138 : !torch.vtensor<[],si8> -> !torch.int
%141 = torch.aten._make_per_tensor_quantized_tensor %136, %139, %140 : !torch.vtensor<[1,3,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,3,224,224],!torch.qint8>
%142 = torch.aten.dequantize.self %141 : !torch.vtensor<[1,3,224,224],!torch.qint8> -> !torch.vtensor<[1,3,224,224],f32>
%143 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%144 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_0 = torch.constant.int 12
%145 = torch.aten.item %143 : !torch.vtensor<[],f32> -> !torch.float
%146 = torch.aten.item %144 : !torch.vtensor<[],si8> -> !torch.int
%147 = torch.aten.quantize_per_tensor %0, %145, %146, %int12_0 : !torch.vtensor<[16,3,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,3,3,3],!torch.qint8>
%148 = torch.aten.int_repr %147 : !torch.vtensor<[16,3,3,3],!torch.qint8> -> !torch.vtensor<[16,3,3,3],si8>
%149 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%150 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%151 = torch.aten.item %149 : !torch.vtensor<[],f32> -> !torch.float
%152 = torch.aten.item %150 : !torch.vtensor<[],si8> -> !torch.int
%153 = torch.aten._make_per_tensor_quantized_tensor %148, %151, %152 : !torch.vtensor<[16,3,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,3,3,3],!torch.qint8>
%154 = torch.aten.dequantize.self %153 : !torch.vtensor<[16,3,3,3],!torch.qint8> -> !torch.vtensor<[16,3,3,3],f32>
%155 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%156 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1 = torch.constant.int 12
%157 = torch.aten.item %155 : !torch.vtensor<[],f32> -> !torch.float
%158 = torch.aten.item %156 : !torch.vtensor<[],si8> -> !torch.int
%159 = torch.aten.quantize_per_tensor %1, %157, %158, %int12_1 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%160 = torch.aten.int_repr %159 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%161 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%162 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%163 = torch.aten.item %161 : !torch.vtensor<[],f32> -> !torch.float
%164 = torch.aten.item %162 : !torch.vtensor<[],si8> -> !torch.int
%165 = torch.aten._make_per_tensor_quantized_tensor %160, %163, %164 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%166 = torch.aten.dequantize.self %165 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1 = torch.constant.int 1
%int1_2 = torch.constant.int 1
%int1_3 = torch.constant.int 1
%int1_4 = torch.constant.int 1
%int2 = torch.constant.int 2
%int2_5 = torch.constant.int 2
%int0 = torch.constant.int 0
%167 = torch.prim.ListConstruct %int1, %int1_2 : (!torch.int, !torch.int) -> !torch.list<int>
%168 = torch.prim.ListConstruct %int1_3, %int1_4 : (!torch.int, !torch.int) -> !torch.list<int>
%169 = torch.prim.ListConstruct %int2, %int2_5 : (!torch.int, !torch.int) -> !torch.list<int>
%170 = torch.prim.ListConstruct %int0, %int0 : (!torch.int, !torch.int) -> !torch.list<int>
%false = torch.constant.bool false
%int1_6 = torch.constant.int 1
%171 = torch.aten.convolution %142, %154, %166, %169, %167, %168, %false, %170, %int1_6 : !torch.vtensor<[1,3,224,224],f32>, !torch.vtensor<[16,3,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,112,112],f32>
%172 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%173 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_7 = torch.constant.int 12
%174 = torch.aten.item %172 : !torch.vtensor<[],f32> -> !torch.float
%175 = torch.aten.item %173 : !torch.vtensor<[],si8> -> !torch.int
%176 = torch.aten.quantize_per_tensor %171, %174, %175, %int12_7 : !torch.vtensor<[1,16,112,112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%177 = torch.aten.int_repr %176 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],si8>
%178 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%179 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%180 = torch.aten.item %178 : !torch.vtensor<[],f32> -> !torch.float
%181 = torch.aten.item %179 : !torch.vtensor<[],si8> -> !torch.int
%182 = torch.aten._make_per_tensor_quantized_tensor %177, %180, %181 : !torch.vtensor<[1,16,112,112],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%183 = torch.aten.dequantize.self %182 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],f32>
%184 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_8 = torch.constant.int 1
%185 = torch.aten.add.Tensor %183, %184, %int1_8 : !torch.vtensor<[1,16,112,112],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,16,112,112],f32>
%186 = torch.aten.relu %185 : !torch.vtensor<[1,16,112,112],f32> -> !torch.vtensor<[1,16,112,112],f32>
%187 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%188 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6 = torch.constant.int 6
%none_9 = torch.constant.none
%false_10 = torch.constant.bool false
%189 = torch.aten.to.dtype %187, %int6, %false_10, %false_10, %none_9 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_11 = torch.constant.int 6
%none_12 = torch.constant.none
%false_13 = torch.constant.bool false
%190 = torch.aten.to.dtype %188, %int6_11, %false_13, %false_13, %none_12 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%191 = torch.aten.clamp.Tensor %186, %189, %190 : !torch.vtensor<[1,16,112,112],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,112,112],f32>
%192 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%193 = torch.aten.mul.Tensor %191, %192 : !torch.vtensor<[1,16,112,112],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,112,112],f32>
%194 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%195 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_14 = torch.constant.int 12
%196 = torch.aten.item %194 : !torch.vtensor<[],f32> -> !torch.float
%197 = torch.aten.item %195 : !torch.vtensor<[],si8> -> !torch.int
%198 = torch.aten.quantize_per_tensor %193, %196, %197, %int12_14 : !torch.vtensor<[1,16,112,112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%199 = torch.aten.int_repr %198 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],si8>
%200 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%201 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%202 = torch.aten.item %200 : !torch.vtensor<[],f32> -> !torch.float
%203 = torch.aten.item %201 : !torch.vtensor<[],si8> -> !torch.int
%204 = torch.aten._make_per_tensor_quantized_tensor %199, %202, %203 : !torch.vtensor<[1,16,112,112],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%205 = torch.aten.dequantize.self %204 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],f32>
%206 = torch.aten.mul.Tensor %183, %205 : !torch.vtensor<[1,16,112,112],f32>, !torch.vtensor<[1,16,112,112],f32> -> !torch.vtensor<[1,16,112,112],f32>
%207 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%208 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_15 = torch.constant.int 12
%209 = torch.aten.item %207 : !torch.vtensor<[],f32> -> !torch.float
%210 = torch.aten.item %208 : !torch.vtensor<[],si8> -> !torch.int
%211 = torch.aten.quantize_per_tensor %206, %209, %210, %int12_15 : !torch.vtensor<[1,16,112,112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%212 = torch.aten.int_repr %211 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],si8>
%213 = torch.vtensor.literal(dense<5.000000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%214 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%215 = torch.aten.item %213 : !torch.vtensor<[],f32> -> !torch.float
%216 = torch.aten.item %214 : !torch.vtensor<[],si8> -> !torch.int
%217 = torch.aten._make_per_tensor_quantized_tensor %212, %215, %216 : !torch.vtensor<[1,16,112,112],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%218 = torch.aten.dequantize.self %217 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],f32>
%219 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%220 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_16 = torch.constant.int 12
%221 = torch.aten.item %219 : !torch.vtensor<[],f32> -> !torch.float
%222 = torch.aten.item %220 : !torch.vtensor<[],si8> -> !torch.int
%223 = torch.aten.quantize_per_tensor %2, %221, %222, %int12_16 : !torch.vtensor<[16,1,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,1,3,3],!torch.qint8>
%224 = torch.aten.int_repr %223 : !torch.vtensor<[16,1,3,3],!torch.qint8> -> !torch.vtensor<[16,1,3,3],si8>
%225 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%226 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%227 = torch.aten.item %225 : !torch.vtensor<[],f32> -> !torch.float
%228 = torch.aten.item %226 : !torch.vtensor<[],si8> -> !torch.int
%229 = torch.aten._make_per_tensor_quantized_tensor %224, %227, %228 : !torch.vtensor<[16,1,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,1,3,3],!torch.qint8>
%230 = torch.aten.dequantize.self %229 : !torch.vtensor<[16,1,3,3],!torch.qint8> -> !torch.vtensor<[16,1,3,3],f32>
%231 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%232 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_17 = torch.constant.int 12
%233 = torch.aten.item %231 : !torch.vtensor<[],f32> -> !torch.float
%234 = torch.aten.item %232 : !torch.vtensor<[],si8> -> !torch.int
%235 = torch.aten.quantize_per_tensor %3, %233, %234, %int12_17 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%236 = torch.aten.int_repr %235 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%237 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%238 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%239 = torch.aten.item %237 : !torch.vtensor<[],f32> -> !torch.float
%240 = torch.aten.item %238 : !torch.vtensor<[],si8> -> !torch.int
%241 = torch.aten._make_per_tensor_quantized_tensor %236, %239, %240 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%242 = torch.aten.dequantize.self %241 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int1_18 = torch.constant.int 1
%int1_19 = torch.constant.int 1
%int1_20 = torch.constant.int 1
%int1_21 = torch.constant.int 1
%int1_22 = torch.constant.int 1
%int1_23 = torch.constant.int 1
%int0_24 = torch.constant.int 0
%243 = torch.prim.ListConstruct %int1_18, %int1_19 : (!torch.int, !torch.int) -> !torch.list<int>
%244 = torch.prim.ListConstruct %int1_20, %int1_21 : (!torch.int, !torch.int) -> !torch.list<int>
%245 = torch.prim.ListConstruct %int1_22, %int1_23 : (!torch.int, !torch.int) -> !torch.list<int>
%246 = torch.prim.ListConstruct %int0_24, %int0_24 : (!torch.int, !torch.int) -> !torch.list<int>
%false_25 = torch.constant.bool false
%int16 = torch.constant.int 16
%247 = torch.aten.convolution %218, %230, %242, %245, %243, %244, %false_25, %246, %int16 : !torch.vtensor<[1,16,112,112],f32>, !torch.vtensor<[16,1,3,3],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,112,112],f32>
%248 = torch.aten.relu %247 : !torch.vtensor<[1,16,112,112],f32> -> !torch.vtensor<[1,16,112,112],f32>
%249 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%250 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_26 = torch.constant.int 12
%251 = torch.aten.item %249 : !torch.vtensor<[],f32> -> !torch.float
%252 = torch.aten.item %250 : !torch.vtensor<[],si8> -> !torch.int
%253 = torch.aten.quantize_per_tensor %248, %251, %252, %int12_26 : !torch.vtensor<[1,16,112,112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%254 = torch.aten.int_repr %253 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],si8>
%255 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%256 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%257 = torch.aten.item %255 : !torch.vtensor<[],f32> -> !torch.float
%258 = torch.aten.item %256 : !torch.vtensor<[],si8> -> !torch.int
%259 = torch.aten._make_per_tensor_quantized_tensor %254, %257, %258 : !torch.vtensor<[1,16,112,112],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%260 = torch.aten.dequantize.self %259 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],f32>
%261 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%262 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_27 = torch.constant.int 12
%263 = torch.aten.item %261 : !torch.vtensor<[],f32> -> !torch.float
%264 = torch.aten.item %262 : !torch.vtensor<[],si8> -> !torch.int
%265 = torch.aten.quantize_per_tensor %4, %263, %264, %int12_27 : !torch.vtensor<[16,16,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16,16,1,1],!torch.qint8>
%266 = torch.aten.int_repr %265 : !torch.vtensor<[16,16,1,1],!torch.qint8> -> !torch.vtensor<[16,16,1,1],si8>
%267 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%268 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%269 = torch.aten.item %267 : !torch.vtensor<[],f32> -> !torch.float
%270 = torch.aten.item %268 : !torch.vtensor<[],si8> -> !torch.int
%271 = torch.aten._make_per_tensor_quantized_tensor %266, %269, %270 : !torch.vtensor<[16,16,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[16,16,1,1],!torch.qint8>
%272 = torch.aten.dequantize.self %271 : !torch.vtensor<[16,16,1,1],!torch.qint8> -> !torch.vtensor<[16,16,1,1],f32>
%273 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%274 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_28 = torch.constant.int 12
%275 = torch.aten.item %273 : !torch.vtensor<[],f32> -> !torch.float
%276 = torch.aten.item %274 : !torch.vtensor<[],si8> -> !torch.int
%277 = torch.aten.quantize_per_tensor %5, %275, %276, %int12_28 : !torch.vtensor<[16],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%278 = torch.aten.int_repr %277 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],si8>
%279 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%280 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%281 = torch.aten.item %279 : !torch.vtensor<[],f32> -> !torch.float
%282 = torch.aten.item %280 : !torch.vtensor<[],si8> -> !torch.int
%283 = torch.aten._make_per_tensor_quantized_tensor %278, %281, %282 : !torch.vtensor<[16],si8>, !torch.float, !torch.int -> !torch.vtensor<[16],!torch.qint8>
%284 = torch.aten.dequantize.self %283 : !torch.vtensor<[16],!torch.qint8> -> !torch.vtensor<[16],f32>
%int0_29 = torch.constant.int 0
%int0_30 = torch.constant.int 0
%int1_31 = torch.constant.int 1
%int1_32 = torch.constant.int 1
%int1_33 = torch.constant.int 1
%int1_34 = torch.constant.int 1
%int0_35 = torch.constant.int 0
%285 = torch.prim.ListConstruct %int0_29, %int0_30 : (!torch.int, !torch.int) -> !torch.list<int>
%286 = torch.prim.ListConstruct %int1_31, %int1_32 : (!torch.int, !torch.int) -> !torch.list<int>
%287 = torch.prim.ListConstruct %int1_33, %int1_34 : (!torch.int, !torch.int) -> !torch.list<int>
%288 = torch.prim.ListConstruct %int0_35, %int0_35 : (!torch.int, !torch.int) -> !torch.list<int>
%false_36 = torch.constant.bool false
%int1_37 = torch.constant.int 1
%289 = torch.aten.convolution %260, %272, %284, %287, %285, %286, %false_36, %288, %int1_37 : !torch.vtensor<[1,16,112,112],f32>, !torch.vtensor<[16,16,1,1],f32>, !torch.vtensor<[16],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,16,112,112],f32>
%290 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%291 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_38 = torch.constant.int 12
%292 = torch.aten.item %290 : !torch.vtensor<[],f32> -> !torch.float
%293 = torch.aten.item %291 : !torch.vtensor<[],si8> -> !torch.int
%294 = torch.aten.quantize_per_tensor %289, %292, %293, %int12_38 : !torch.vtensor<[1,16,112,112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%295 = torch.aten.int_repr %294 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],si8>
%296 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%297 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%298 = torch.aten.item %296 : !torch.vtensor<[],f32> -> !torch.float
%299 = torch.aten.item %297 : !torch.vtensor<[],si8> -> !torch.int
%300 = torch.aten._make_per_tensor_quantized_tensor %295, %298, %299 : !torch.vtensor<[1,16,112,112],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%301 = torch.aten.dequantize.self %300 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],f32>
%int1_39 = torch.constant.int 1
%302 = torch.aten.add.Tensor %301, %218, %int1_39 : !torch.vtensor<[1,16,112,112],f32>, !torch.vtensor<[1,16,112,112],f32>, !torch.int -> !torch.vtensor<[1,16,112,112],f32>
%303 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%304 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_40 = torch.constant.int 12
%305 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%306 = torch.aten.item %304 : !torch.vtensor<[],si8> -> !torch.int
%307 = torch.aten.quantize_per_tensor %302, %305, %306, %int12_40 : !torch.vtensor<[1,16,112,112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%308 = torch.aten.int_repr %307 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],si8>
%309 = torch.vtensor.literal(dense<2.500000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%310 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%311 = torch.aten.item %309 : !torch.vtensor<[],f32> -> !torch.float
%312 = torch.aten.item %310 : !torch.vtensor<[],si8> -> !torch.int
%313 = torch.aten._make_per_tensor_quantized_tensor %308, %311, %312 : !torch.vtensor<[1,16,112,112],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,16,112,112],!torch.qint8>
%314 = torch.aten.dequantize.self %313 : !torch.vtensor<[1,16,112,112],!torch.qint8> -> !torch.vtensor<[1,16,112,112],f32>
%315 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%316 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_41 = torch.constant.int 12
%317 = torch.aten.item %315 : !torch.vtensor<[],f32> -> !torch.float
%318 = torch.aten.item %316 : !torch.vtensor<[],si8> -> !torch.int
%319 = torch.aten.quantize_per_tensor %6, %317, %318, %int12_41 : !torch.vtensor<[64,16,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,16,1,1],!torch.qint8>
%320 = torch.aten.int_repr %319 : !torch.vtensor<[64,16,1,1],!torch.qint8> -> !torch.vtensor<[64,16,1,1],si8>
%321 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%322 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%323 = torch.aten.item %321 : !torch.vtensor<[],f32> -> !torch.float
%324 = torch.aten.item %322 : !torch.vtensor<[],si8> -> !torch.int
%325 = torch.aten._make_per_tensor_quantized_tensor %320, %323, %324 : !torch.vtensor<[64,16,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,16,1,1],!torch.qint8>
%326 = torch.aten.dequantize.self %325 : !torch.vtensor<[64,16,1,1],!torch.qint8> -> !torch.vtensor<[64,16,1,1],f32>
%327 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%328 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_42 = torch.constant.int 12
%329 = torch.aten.item %327 : !torch.vtensor<[],f32> -> !torch.float
%330 = torch.aten.item %328 : !torch.vtensor<[],si8> -> !torch.int
%331 = torch.aten.quantize_per_tensor %7, %329, %330, %int12_42 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%332 = torch.aten.int_repr %331 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%333 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%334 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%335 = torch.aten.item %333 : !torch.vtensor<[],f32> -> !torch.float
%336 = torch.aten.item %334 : !torch.vtensor<[],si8> -> !torch.int
%337 = torch.aten._make_per_tensor_quantized_tensor %332, %335, %336 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%338 = torch.aten.dequantize.self %337 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int0_43 = torch.constant.int 0
%int0_44 = torch.constant.int 0
%int1_45 = torch.constant.int 1
%int1_46 = torch.constant.int 1
%int1_47 = torch.constant.int 1
%int1_48 = torch.constant.int 1
%int0_49 = torch.constant.int 0
%339 = torch.prim.ListConstruct %int0_43, %int0_44 : (!torch.int, !torch.int) -> !torch.list<int>
%340 = torch.prim.ListConstruct %int1_45, %int1_46 : (!torch.int, !torch.int) -> !torch.list<int>
%341 = torch.prim.ListConstruct %int1_47, %int1_48 : (!torch.int, !torch.int) -> !torch.list<int>
%342 = torch.prim.ListConstruct %int0_49, %int0_49 : (!torch.int, !torch.int) -> !torch.list<int>
%false_50 = torch.constant.bool false
%int1_51 = torch.constant.int 1
%343 = torch.aten.convolution %314, %326, %338, %341, %339, %340, %false_50, %342, %int1_51 : !torch.vtensor<[1,16,112,112],f32>, !torch.vtensor<[64,16,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,112,112],f32>
%344 = torch.aten.relu %343 : !torch.vtensor<[1,64,112,112],f32> -> !torch.vtensor<[1,64,112,112],f32>
%345 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%346 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_52 = torch.constant.int 12
%347 = torch.aten.item %345 : !torch.vtensor<[],f32> -> !torch.float
%348 = torch.aten.item %346 : !torch.vtensor<[],si8> -> !torch.int
%349 = torch.aten.quantize_per_tensor %344, %347, %348, %int12_52 : !torch.vtensor<[1,64,112,112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,112,112],!torch.qint8>
%350 = torch.aten.int_repr %349 : !torch.vtensor<[1,64,112,112],!torch.qint8> -> !torch.vtensor<[1,64,112,112],si8>
%351 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%352 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%353 = torch.aten.item %351 : !torch.vtensor<[],f32> -> !torch.float
%354 = torch.aten.item %352 : !torch.vtensor<[],si8> -> !torch.int
%355 = torch.aten._make_per_tensor_quantized_tensor %350, %353, %354 : !torch.vtensor<[1,64,112,112],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,112,112],!torch.qint8>
%356 = torch.aten.dequantize.self %355 : !torch.vtensor<[1,64,112,112],!torch.qint8> -> !torch.vtensor<[1,64,112,112],f32>
%357 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%358 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_53 = torch.constant.int 12
%359 = torch.aten.item %357 : !torch.vtensor<[],f32> -> !torch.float
%360 = torch.aten.item %358 : !torch.vtensor<[],si8> -> !torch.int
%361 = torch.aten.quantize_per_tensor %8, %359, %360, %int12_53 : !torch.vtensor<[64,1,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,1,3,3],!torch.qint8>
%362 = torch.aten.int_repr %361 : !torch.vtensor<[64,1,3,3],!torch.qint8> -> !torch.vtensor<[64,1,3,3],si8>
%363 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%364 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%365 = torch.aten.item %363 : !torch.vtensor<[],f32> -> !torch.float
%366 = torch.aten.item %364 : !torch.vtensor<[],si8> -> !torch.int
%367 = torch.aten._make_per_tensor_quantized_tensor %362, %365, %366 : !torch.vtensor<[64,1,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,1,3,3],!torch.qint8>
%368 = torch.aten.dequantize.self %367 : !torch.vtensor<[64,1,3,3],!torch.qint8> -> !torch.vtensor<[64,1,3,3],f32>
%369 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%370 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_54 = torch.constant.int 12
%371 = torch.aten.item %369 : !torch.vtensor<[],f32> -> !torch.float
%372 = torch.aten.item %370 : !torch.vtensor<[],si8> -> !torch.int
%373 = torch.aten.quantize_per_tensor %9, %371, %372, %int12_54 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%374 = torch.aten.int_repr %373 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%375 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%376 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%377 = torch.aten.item %375 : !torch.vtensor<[],f32> -> !torch.float
%378 = torch.aten.item %376 : !torch.vtensor<[],si8> -> !torch.int
%379 = torch.aten._make_per_tensor_quantized_tensor %374, %377, %378 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%380 = torch.aten.dequantize.self %379 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_55 = torch.constant.int 1
%int1_56 = torch.constant.int 1
%int1_57 = torch.constant.int 1
%int1_58 = torch.constant.int 1
%int2_59 = torch.constant.int 2
%int2_60 = torch.constant.int 2
%int0_61 = torch.constant.int 0
%381 = torch.prim.ListConstruct %int1_55, %int1_56 : (!torch.int, !torch.int) -> !torch.list<int>
%382 = torch.prim.ListConstruct %int1_57, %int1_58 : (!torch.int, !torch.int) -> !torch.list<int>
%383 = torch.prim.ListConstruct %int2_59, %int2_60 : (!torch.int, !torch.int) -> !torch.list<int>
%384 = torch.prim.ListConstruct %int0_61, %int0_61 : (!torch.int, !torch.int) -> !torch.list<int>
%false_62 = torch.constant.bool false
%int64 = torch.constant.int 64
%385 = torch.aten.convolution %356, %368, %380, %383, %381, %382, %false_62, %384, %int64 : !torch.vtensor<[1,64,112,112],f32>, !torch.vtensor<[64,1,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,56,56],f32>
%386 = torch.aten.relu %385 : !torch.vtensor<[1,64,56,56],f32> -> !torch.vtensor<[1,64,56,56],f32>
%387 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%388 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_63 = torch.constant.int 12
%389 = torch.aten.item %387 : !torch.vtensor<[],f32> -> !torch.float
%390 = torch.aten.item %388 : !torch.vtensor<[],si8> -> !torch.int
%391 = torch.aten.quantize_per_tensor %386, %389, %390, %int12_63 : !torch.vtensor<[1,64,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%392 = torch.aten.int_repr %391 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],si8>
%393 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%394 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%395 = torch.aten.item %393 : !torch.vtensor<[],f32> -> !torch.float
%396 = torch.aten.item %394 : !torch.vtensor<[],si8> -> !torch.int
%397 = torch.aten._make_per_tensor_quantized_tensor %392, %395, %396 : !torch.vtensor<[1,64,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,64,56,56],!torch.qint8>
%398 = torch.aten.dequantize.self %397 : !torch.vtensor<[1,64,56,56],!torch.qint8> -> !torch.vtensor<[1,64,56,56],f32>
%399 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%400 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_64 = torch.constant.int 12
%401 = torch.aten.item %399 : !torch.vtensor<[],f32> -> !torch.float
%402 = torch.aten.item %400 : !torch.vtensor<[],si8> -> !torch.int
%403 = torch.aten.quantize_per_tensor %10, %401, %402, %int12_64 : !torch.vtensor<[24,64,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[24,64,1,1],!torch.qint8>
%404 = torch.aten.int_repr %403 : !torch.vtensor<[24,64,1,1],!torch.qint8> -> !torch.vtensor<[24,64,1,1],si8>
%405 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%406 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%407 = torch.aten.item %405 : !torch.vtensor<[],f32> -> !torch.float
%408 = torch.aten.item %406 : !torch.vtensor<[],si8> -> !torch.int
%409 = torch.aten._make_per_tensor_quantized_tensor %404, %407, %408 : !torch.vtensor<[24,64,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[24,64,1,1],!torch.qint8>
%410 = torch.aten.dequantize.self %409 : !torch.vtensor<[24,64,1,1],!torch.qint8> -> !torch.vtensor<[24,64,1,1],f32>
%411 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%412 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_65 = torch.constant.int 12
%413 = torch.aten.item %411 : !torch.vtensor<[],f32> -> !torch.float
%414 = torch.aten.item %412 : !torch.vtensor<[],si8> -> !torch.int
%415 = torch.aten.quantize_per_tensor %11, %413, %414, %int12_65 : !torch.vtensor<[24],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[24],!torch.qint8>
%416 = torch.aten.int_repr %415 : !torch.vtensor<[24],!torch.qint8> -> !torch.vtensor<[24],si8>
%417 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%418 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%419 = torch.aten.item %417 : !torch.vtensor<[],f32> -> !torch.float
%420 = torch.aten.item %418 : !torch.vtensor<[],si8> -> !torch.int
%421 = torch.aten._make_per_tensor_quantized_tensor %416, %419, %420 : !torch.vtensor<[24],si8>, !torch.float, !torch.int -> !torch.vtensor<[24],!torch.qint8>
%422 = torch.aten.dequantize.self %421 : !torch.vtensor<[24],!torch.qint8> -> !torch.vtensor<[24],f32>
%int0_66 = torch.constant.int 0
%int0_67 = torch.constant.int 0
%int1_68 = torch.constant.int 1
%int1_69 = torch.constant.int 1
%int1_70 = torch.constant.int 1
%int1_71 = torch.constant.int 1
%int0_72 = torch.constant.int 0
%423 = torch.prim.ListConstruct %int0_66, %int0_67 : (!torch.int, !torch.int) -> !torch.list<int>
%424 = torch.prim.ListConstruct %int1_68, %int1_69 : (!torch.int, !torch.int) -> !torch.list<int>
%425 = torch.prim.ListConstruct %int1_70, %int1_71 : (!torch.int, !torch.int) -> !torch.list<int>
%426 = torch.prim.ListConstruct %int0_72, %int0_72 : (!torch.int, !torch.int) -> !torch.list<int>
%false_73 = torch.constant.bool false
%int1_74 = torch.constant.int 1
%427 = torch.aten.convolution %398, %410, %422, %425, %423, %424, %false_73, %426, %int1_74 : !torch.vtensor<[1,64,56,56],f32>, !torch.vtensor<[24,64,1,1],f32>, !torch.vtensor<[24],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,24,56,56],f32>
%428 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_75 = torch.constant.int 12
%430 = torch.aten.item %428 : !torch.vtensor<[],f32> -> !torch.float
%431 = torch.aten.item %429 : !torch.vtensor<[],si8> -> !torch.int
%432 = torch.aten.quantize_per_tensor %427, %430, %431, %int12_75 : !torch.vtensor<[1,24,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,24,56,56],!torch.qint8>
%433 = torch.aten.int_repr %432 : !torch.vtensor<[1,24,56,56],!torch.qint8> -> !torch.vtensor<[1,24,56,56],si8>
%434 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%435 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%436 = torch.aten.item %434 : !torch.vtensor<[],f32> -> !torch.float
%437 = torch.aten.item %435 : !torch.vtensor<[],si8> -> !torch.int
%438 = torch.aten._make_per_tensor_quantized_tensor %433, %436, %437 : !torch.vtensor<[1,24,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,24,56,56],!torch.qint8>
%439 = torch.aten.dequantize.self %438 : !torch.vtensor<[1,24,56,56],!torch.qint8> -> !torch.vtensor<[1,24,56,56],f32>
%440 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%441 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_76 = torch.constant.int 12
%442 = torch.aten.item %440 : !torch.vtensor<[],f32> -> !torch.float
%443 = torch.aten.item %441 : !torch.vtensor<[],si8> -> !torch.int
%444 = torch.aten.quantize_per_tensor %12, %442, %443, %int12_76 : !torch.vtensor<[72,24,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72,24,1,1],!torch.qint8>
%445 = torch.aten.int_repr %444 : !torch.vtensor<[72,24,1,1],!torch.qint8> -> !torch.vtensor<[72,24,1,1],si8>
%446 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%447 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%448 = torch.aten.item %446 : !torch.vtensor<[],f32> -> !torch.float
%449 = torch.aten.item %447 : !torch.vtensor<[],si8> -> !torch.int
%450 = torch.aten._make_per_tensor_quantized_tensor %445, %448, %449 : !torch.vtensor<[72,24,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[72,24,1,1],!torch.qint8>
%451 = torch.aten.dequantize.self %450 : !torch.vtensor<[72,24,1,1],!torch.qint8> -> !torch.vtensor<[72,24,1,1],f32>
%452 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%453 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_77 = torch.constant.int 12
%454 = torch.aten.item %452 : !torch.vtensor<[],f32> -> !torch.float
%455 = torch.aten.item %453 : !torch.vtensor<[],si8> -> !torch.int
%456 = torch.aten.quantize_per_tensor %13, %454, %455, %int12_77 : !torch.vtensor<[72],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%457 = torch.aten.int_repr %456 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],si8>
%458 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%459 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%460 = torch.aten.item %458 : !torch.vtensor<[],f32> -> !torch.float
%461 = torch.aten.item %459 : !torch.vtensor<[],si8> -> !torch.int
%462 = torch.aten._make_per_tensor_quantized_tensor %457, %460, %461 : !torch.vtensor<[72],si8>, !torch.float, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%463 = torch.aten.dequantize.self %462 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],f32>
%int0_78 = torch.constant.int 0
%int0_79 = torch.constant.int 0
%int1_80 = torch.constant.int 1
%int1_81 = torch.constant.int 1
%int1_82 = torch.constant.int 1
%int1_83 = torch.constant.int 1
%int0_84 = torch.constant.int 0
%464 = torch.prim.ListConstruct %int0_78, %int0_79 : (!torch.int, !torch.int) -> !torch.list<int>
%465 = torch.prim.ListConstruct %int1_80, %int1_81 : (!torch.int, !torch.int) -> !torch.list<int>
%466 = torch.prim.ListConstruct %int1_82, %int1_83 : (!torch.int, !torch.int) -> !torch.list<int>
%467 = torch.prim.ListConstruct %int0_84, %int0_84 : (!torch.int, !torch.int) -> !torch.list<int>
%false_85 = torch.constant.bool false
%int1_86 = torch.constant.int 1
%468 = torch.aten.convolution %439, %451, %463, %466, %464, %465, %false_85, %467, %int1_86 : !torch.vtensor<[1,24,56,56],f32>, !torch.vtensor<[72,24,1,1],f32>, !torch.vtensor<[72],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,72,56,56],f32>
%469 = torch.aten.relu %468 : !torch.vtensor<[1,72,56,56],f32> -> !torch.vtensor<[1,72,56,56],f32>
%470 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%471 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_87 = torch.constant.int 12
%472 = torch.aten.item %470 : !torch.vtensor<[],f32> -> !torch.float
%473 = torch.aten.item %471 : !torch.vtensor<[],si8> -> !torch.int
%474 = torch.aten.quantize_per_tensor %469, %472, %473, %int12_87 : !torch.vtensor<[1,72,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,72,56,56],!torch.qint8>
%475 = torch.aten.int_repr %474 : !torch.vtensor<[1,72,56,56],!torch.qint8> -> !torch.vtensor<[1,72,56,56],si8>
%476 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%477 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%478 = torch.aten.item %476 : !torch.vtensor<[],f32> -> !torch.float
%479 = torch.aten.item %477 : !torch.vtensor<[],si8> -> !torch.int
%480 = torch.aten._make_per_tensor_quantized_tensor %475, %478, %479 : !torch.vtensor<[1,72,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,72,56,56],!torch.qint8>
%481 = torch.aten.dequantize.self %480 : !torch.vtensor<[1,72,56,56],!torch.qint8> -> !torch.vtensor<[1,72,56,56],f32>
%482 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%483 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_88 = torch.constant.int 12
%484 = torch.aten.item %482 : !torch.vtensor<[],f32> -> !torch.float
%485 = torch.aten.item %483 : !torch.vtensor<[],si8> -> !torch.int
%486 = torch.aten.quantize_per_tensor %14, %484, %485, %int12_88 : !torch.vtensor<[72,1,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72,1,3,3],!torch.qint8>
%487 = torch.aten.int_repr %486 : !torch.vtensor<[72,1,3,3],!torch.qint8> -> !torch.vtensor<[72,1,3,3],si8>
%488 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%489 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%490 = torch.aten.item %488 : !torch.vtensor<[],f32> -> !torch.float
%491 = torch.aten.item %489 : !torch.vtensor<[],si8> -> !torch.int
%492 = torch.aten._make_per_tensor_quantized_tensor %487, %490, %491 : !torch.vtensor<[72,1,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[72,1,3,3],!torch.qint8>
%493 = torch.aten.dequantize.self %492 : !torch.vtensor<[72,1,3,3],!torch.qint8> -> !torch.vtensor<[72,1,3,3],f32>
%494 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%495 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_89 = torch.constant.int 12
%496 = torch.aten.item %494 : !torch.vtensor<[],f32> -> !torch.float
%497 = torch.aten.item %495 : !torch.vtensor<[],si8> -> !torch.int
%498 = torch.aten.quantize_per_tensor %15, %496, %497, %int12_89 : !torch.vtensor<[72],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%499 = torch.aten.int_repr %498 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],si8>
%500 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%501 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%502 = torch.aten.item %500 : !torch.vtensor<[],f32> -> !torch.float
%503 = torch.aten.item %501 : !torch.vtensor<[],si8> -> !torch.int
%504 = torch.aten._make_per_tensor_quantized_tensor %499, %502, %503 : !torch.vtensor<[72],si8>, !torch.float, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%505 = torch.aten.dequantize.self %504 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],f32>
%int1_90 = torch.constant.int 1
%int1_91 = torch.constant.int 1
%int1_92 = torch.constant.int 1
%int1_93 = torch.constant.int 1
%int1_94 = torch.constant.int 1
%int1_95 = torch.constant.int 1
%int0_96 = torch.constant.int 0
%506 = torch.prim.ListConstruct %int1_90, %int1_91 : (!torch.int, !torch.int) -> !torch.list<int>
%507 = torch.prim.ListConstruct %int1_92, %int1_93 : (!torch.int, !torch.int) -> !torch.list<int>
%508 = torch.prim.ListConstruct %int1_94, %int1_95 : (!torch.int, !torch.int) -> !torch.list<int>
%509 = torch.prim.ListConstruct %int0_96, %int0_96 : (!torch.int, !torch.int) -> !torch.list<int>
%false_97 = torch.constant.bool false
%int72 = torch.constant.int 72
%510 = torch.aten.convolution %481, %493, %505, %508, %506, %507, %false_97, %509, %int72 : !torch.vtensor<[1,72,56,56],f32>, !torch.vtensor<[72,1,3,3],f32>, !torch.vtensor<[72],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,72,56,56],f32>
%511 = torch.aten.relu %510 : !torch.vtensor<[1,72,56,56],f32> -> !torch.vtensor<[1,72,56,56],f32>
%512 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%513 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_98 = torch.constant.int 12
%514 = torch.aten.item %512 : !torch.vtensor<[],f32> -> !torch.float
%515 = torch.aten.item %513 : !torch.vtensor<[],si8> -> !torch.int
%516 = torch.aten.quantize_per_tensor %511, %514, %515, %int12_98 : !torch.vtensor<[1,72,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,72,56,56],!torch.qint8>
%517 = torch.aten.int_repr %516 : !torch.vtensor<[1,72,56,56],!torch.qint8> -> !torch.vtensor<[1,72,56,56],si8>
%518 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%519 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%520 = torch.aten.item %518 : !torch.vtensor<[],f32> -> !torch.float
%521 = torch.aten.item %519 : !torch.vtensor<[],si8> -> !torch.int
%522 = torch.aten._make_per_tensor_quantized_tensor %517, %520, %521 : !torch.vtensor<[1,72,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,72,56,56],!torch.qint8>
%523 = torch.aten.dequantize.self %522 : !torch.vtensor<[1,72,56,56],!torch.qint8> -> !torch.vtensor<[1,72,56,56],f32>
%524 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%525 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_99 = torch.constant.int 12
%526 = torch.aten.item %524 : !torch.vtensor<[],f32> -> !torch.float
%527 = torch.aten.item %525 : !torch.vtensor<[],si8> -> !torch.int
%528 = torch.aten.quantize_per_tensor %16, %526, %527, %int12_99 : !torch.vtensor<[24,72,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[24,72,1,1],!torch.qint8>
%529 = torch.aten.int_repr %528 : !torch.vtensor<[24,72,1,1],!torch.qint8> -> !torch.vtensor<[24,72,1,1],si8>
%530 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%531 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%532 = torch.aten.item %530 : !torch.vtensor<[],f32> -> !torch.float
%533 = torch.aten.item %531 : !torch.vtensor<[],si8> -> !torch.int
%534 = torch.aten._make_per_tensor_quantized_tensor %529, %532, %533 : !torch.vtensor<[24,72,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[24,72,1,1],!torch.qint8>
%535 = torch.aten.dequantize.self %534 : !torch.vtensor<[24,72,1,1],!torch.qint8> -> !torch.vtensor<[24,72,1,1],f32>
%536 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%537 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_100 = torch.constant.int 12
%538 = torch.aten.item %536 : !torch.vtensor<[],f32> -> !torch.float
%539 = torch.aten.item %537 : !torch.vtensor<[],si8> -> !torch.int
%540 = torch.aten.quantize_per_tensor %17, %538, %539, %int12_100 : !torch.vtensor<[24],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[24],!torch.qint8>
%541 = torch.aten.int_repr %540 : !torch.vtensor<[24],!torch.qint8> -> !torch.vtensor<[24],si8>
%542 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%543 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%544 = torch.aten.item %542 : !torch.vtensor<[],f32> -> !torch.float
%545 = torch.aten.item %543 : !torch.vtensor<[],si8> -> !torch.int
%546 = torch.aten._make_per_tensor_quantized_tensor %541, %544, %545 : !torch.vtensor<[24],si8>, !torch.float, !torch.int -> !torch.vtensor<[24],!torch.qint8>
%547 = torch.aten.dequantize.self %546 : !torch.vtensor<[24],!torch.qint8> -> !torch.vtensor<[24],f32>
%int0_101 = torch.constant.int 0
%int0_102 = torch.constant.int 0
%int1_103 = torch.constant.int 1
%int1_104 = torch.constant.int 1
%int1_105 = torch.constant.int 1
%int1_106 = torch.constant.int 1
%int0_107 = torch.constant.int 0
%548 = torch.prim.ListConstruct %int0_101, %int0_102 : (!torch.int, !torch.int) -> !torch.list<int>
%549 = torch.prim.ListConstruct %int1_103, %int1_104 : (!torch.int, !torch.int) -> !torch.list<int>
%550 = torch.prim.ListConstruct %int1_105, %int1_106 : (!torch.int, !torch.int) -> !torch.list<int>
%551 = torch.prim.ListConstruct %int0_107, %int0_107 : (!torch.int, !torch.int) -> !torch.list<int>
%false_108 = torch.constant.bool false
%int1_109 = torch.constant.int 1
%552 = torch.aten.convolution %523, %535, %547, %550, %548, %549, %false_108, %551, %int1_109 : !torch.vtensor<[1,72,56,56],f32>, !torch.vtensor<[24,72,1,1],f32>, !torch.vtensor<[24],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,24,56,56],f32>
%553 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%554 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_110 = torch.constant.int 12
%555 = torch.aten.item %553 : !torch.vtensor<[],f32> -> !torch.float
%556 = torch.aten.item %554 : !torch.vtensor<[],si8> -> !torch.int
%557 = torch.aten.quantize_per_tensor %552, %555, %556, %int12_110 : !torch.vtensor<[1,24,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,24,56,56],!torch.qint8>
%558 = torch.aten.int_repr %557 : !torch.vtensor<[1,24,56,56],!torch.qint8> -> !torch.vtensor<[1,24,56,56],si8>
%559 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%560 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%561 = torch.aten.item %559 : !torch.vtensor<[],f32> -> !torch.float
%562 = torch.aten.item %560 : !torch.vtensor<[],si8> -> !torch.int
%563 = torch.aten._make_per_tensor_quantized_tensor %558, %561, %562 : !torch.vtensor<[1,24,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,24,56,56],!torch.qint8>
%564 = torch.aten.dequantize.self %563 : !torch.vtensor<[1,24,56,56],!torch.qint8> -> !torch.vtensor<[1,24,56,56],f32>
%int1_111 = torch.constant.int 1
%565 = torch.aten.add.Tensor %564, %439, %int1_111 : !torch.vtensor<[1,24,56,56],f32>, !torch.vtensor<[1,24,56,56],f32>, !torch.int -> !torch.vtensor<[1,24,56,56],f32>
%566 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%567 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_112 = torch.constant.int 12
%568 = torch.aten.item %566 : !torch.vtensor<[],f32> -> !torch.float
%569 = torch.aten.item %567 : !torch.vtensor<[],si8> -> !torch.int
%570 = torch.aten.quantize_per_tensor %565, %568, %569, %int12_112 : !torch.vtensor<[1,24,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,24,56,56],!torch.qint8>
%571 = torch.aten.int_repr %570 : !torch.vtensor<[1,24,56,56],!torch.qint8> -> !torch.vtensor<[1,24,56,56],si8>
%572 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%573 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%574 = torch.aten.item %572 : !torch.vtensor<[],f32> -> !torch.float
%575 = torch.aten.item %573 : !torch.vtensor<[],si8> -> !torch.int
%576 = torch.aten._make_per_tensor_quantized_tensor %571, %574, %575 : !torch.vtensor<[1,24,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,24,56,56],!torch.qint8>
%577 = torch.aten.dequantize.self %576 : !torch.vtensor<[1,24,56,56],!torch.qint8> -> !torch.vtensor<[1,24,56,56],f32>
%578 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%579 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_113 = torch.constant.int 12
%580 = torch.aten.item %578 : !torch.vtensor<[],f32> -> !torch.float
%581 = torch.aten.item %579 : !torch.vtensor<[],si8> -> !torch.int
%582 = torch.aten.quantize_per_tensor %18, %580, %581, %int12_113 : !torch.vtensor<[72,24,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72,24,1,1],!torch.qint8>
%583 = torch.aten.int_repr %582 : !torch.vtensor<[72,24,1,1],!torch.qint8> -> !torch.vtensor<[72,24,1,1],si8>
%584 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%585 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%586 = torch.aten.item %584 : !torch.vtensor<[],f32> -> !torch.float
%587 = torch.aten.item %585 : !torch.vtensor<[],si8> -> !torch.int
%588 = torch.aten._make_per_tensor_quantized_tensor %583, %586, %587 : !torch.vtensor<[72,24,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[72,24,1,1],!torch.qint8>
%589 = torch.aten.dequantize.self %588 : !torch.vtensor<[72,24,1,1],!torch.qint8> -> !torch.vtensor<[72,24,1,1],f32>
%590 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%591 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_114 = torch.constant.int 12
%592 = torch.aten.item %590 : !torch.vtensor<[],f32> -> !torch.float
%593 = torch.aten.item %591 : !torch.vtensor<[],si8> -> !torch.int
%594 = torch.aten.quantize_per_tensor %19, %592, %593, %int12_114 : !torch.vtensor<[72],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%595 = torch.aten.int_repr %594 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],si8>
%596 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%597 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%598 = torch.aten.item %596 : !torch.vtensor<[],f32> -> !torch.float
%599 = torch.aten.item %597 : !torch.vtensor<[],si8> -> !torch.int
%600 = torch.aten._make_per_tensor_quantized_tensor %595, %598, %599 : !torch.vtensor<[72],si8>, !torch.float, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%601 = torch.aten.dequantize.self %600 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],f32>
%int0_115 = torch.constant.int 0
%int0_116 = torch.constant.int 0
%int1_117 = torch.constant.int 1
%int1_118 = torch.constant.int 1
%int1_119 = torch.constant.int 1
%int1_120 = torch.constant.int 1
%int0_121 = torch.constant.int 0
%602 = torch.prim.ListConstruct %int0_115, %int0_116 : (!torch.int, !torch.int) -> !torch.list<int>
%603 = torch.prim.ListConstruct %int1_117, %int1_118 : (!torch.int, !torch.int) -> !torch.list<int>
%604 = torch.prim.ListConstruct %int1_119, %int1_120 : (!torch.int, !torch.int) -> !torch.list<int>
%605 = torch.prim.ListConstruct %int0_121, %int0_121 : (!torch.int, !torch.int) -> !torch.list<int>
%false_122 = torch.constant.bool false
%int1_123 = torch.constant.int 1
%606 = torch.aten.convolution %577, %589, %601, %604, %602, %603, %false_122, %605, %int1_123 : !torch.vtensor<[1,24,56,56],f32>, !torch.vtensor<[72,24,1,1],f32>, !torch.vtensor<[72],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,72,56,56],f32>
%607 = torch.aten.relu %606 : !torch.vtensor<[1,72,56,56],f32> -> !torch.vtensor<[1,72,56,56],f32>
%608 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%609 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_124 = torch.constant.int 12
%610 = torch.aten.item %608 : !torch.vtensor<[],f32> -> !torch.float
%611 = torch.aten.item %609 : !torch.vtensor<[],si8> -> !torch.int
%612 = torch.aten.quantize_per_tensor %607, %610, %611, %int12_124 : !torch.vtensor<[1,72,56,56],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,72,56,56],!torch.qint8>
%613 = torch.aten.int_repr %612 : !torch.vtensor<[1,72,56,56],!torch.qint8> -> !torch.vtensor<[1,72,56,56],si8>
%614 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%615 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%616 = torch.aten.item %614 : !torch.vtensor<[],f32> -> !torch.float
%617 = torch.aten.item %615 : !torch.vtensor<[],si8> -> !torch.int
%618 = torch.aten._make_per_tensor_quantized_tensor %613, %616, %617 : !torch.vtensor<[1,72,56,56],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,72,56,56],!torch.qint8>
%619 = torch.aten.dequantize.self %618 : !torch.vtensor<[1,72,56,56],!torch.qint8> -> !torch.vtensor<[1,72,56,56],f32>
%620 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%621 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_125 = torch.constant.int 12
%622 = torch.aten.item %620 : !torch.vtensor<[],f32> -> !torch.float
%623 = torch.aten.item %621 : !torch.vtensor<[],si8> -> !torch.int
%624 = torch.aten.quantize_per_tensor %20, %622, %623, %int12_125 : !torch.vtensor<[72,1,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72,1,5,5],!torch.qint8>
%625 = torch.aten.int_repr %624 : !torch.vtensor<[72,1,5,5],!torch.qint8> -> !torch.vtensor<[72,1,5,5],si8>
%626 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%627 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%628 = torch.aten.item %626 : !torch.vtensor<[],f32> -> !torch.float
%629 = torch.aten.item %627 : !torch.vtensor<[],si8> -> !torch.int
%630 = torch.aten._make_per_tensor_quantized_tensor %625, %628, %629 : !torch.vtensor<[72,1,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[72,1,5,5],!torch.qint8>
%631 = torch.aten.dequantize.self %630 : !torch.vtensor<[72,1,5,5],!torch.qint8> -> !torch.vtensor<[72,1,5,5],f32>
%632 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%633 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_126 = torch.constant.int 12
%634 = torch.aten.item %632 : !torch.vtensor<[],f32> -> !torch.float
%635 = torch.aten.item %633 : !torch.vtensor<[],si8> -> !torch.int
%636 = torch.aten.quantize_per_tensor %21, %634, %635, %int12_126 : !torch.vtensor<[72],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%637 = torch.aten.int_repr %636 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],si8>
%638 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%639 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%640 = torch.aten.item %638 : !torch.vtensor<[],f32> -> !torch.float
%641 = torch.aten.item %639 : !torch.vtensor<[],si8> -> !torch.int
%642 = torch.aten._make_per_tensor_quantized_tensor %637, %640, %641 : !torch.vtensor<[72],si8>, !torch.float, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%643 = torch.aten.dequantize.self %642 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],f32>
%int2_127 = torch.constant.int 2
%int2_128 = torch.constant.int 2
%int1_129 = torch.constant.int 1
%int1_130 = torch.constant.int 1
%int2_131 = torch.constant.int 2
%int2_132 = torch.constant.int 2
%int0_133 = torch.constant.int 0
%644 = torch.prim.ListConstruct %int2_127, %int2_128 : (!torch.int, !torch.int) -> !torch.list<int>
%645 = torch.prim.ListConstruct %int1_129, %int1_130 : (!torch.int, !torch.int) -> !torch.list<int>
%646 = torch.prim.ListConstruct %int2_131, %int2_132 : (!torch.int, !torch.int) -> !torch.list<int>
%647 = torch.prim.ListConstruct %int0_133, %int0_133 : (!torch.int, !torch.int) -> !torch.list<int>
%false_134 = torch.constant.bool false
%int72_135 = torch.constant.int 72
%648 = torch.aten.convolution %619, %631, %643, %646, %644, %645, %false_134, %647, %int72_135 : !torch.vtensor<[1,72,56,56],f32>, !torch.vtensor<[72,1,5,5],f32>, !torch.vtensor<[72],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,72,28,28],f32>
%649 = torch.aten.relu %648 : !torch.vtensor<[1,72,28,28],f32> -> !torch.vtensor<[1,72,28,28],f32>
%650 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%651 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_136 = torch.constant.int 12
%652 = torch.aten.item %650 : !torch.vtensor<[],f32> -> !torch.float
%653 = torch.aten.item %651 : !torch.vtensor<[],si8> -> !torch.int
%654 = torch.aten.quantize_per_tensor %649, %652, %653, %int12_136 : !torch.vtensor<[1,72,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,72,28,28],!torch.qint8>
%655 = torch.aten.int_repr %654 : !torch.vtensor<[1,72,28,28],!torch.qint8> -> !torch.vtensor<[1,72,28,28],si8>
%656 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%657 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%658 = torch.aten.item %656 : !torch.vtensor<[],f32> -> !torch.float
%659 = torch.aten.item %657 : !torch.vtensor<[],si8> -> !torch.int
%660 = torch.aten._make_per_tensor_quantized_tensor %655, %658, %659 : !torch.vtensor<[1,72,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,72,28,28],!torch.qint8>
%661 = torch.aten.dequantize.self %660 : !torch.vtensor<[1,72,28,28],!torch.qint8> -> !torch.vtensor<[1,72,28,28],f32>
%int0_137 = torch.constant.int 0
%int1_138 = torch.constant.int 1
%int28 = torch.constant.int 28
%int28_139 = torch.constant.int 28
%662 = torch.prim.ListConstruct %int28, %int28_139 : (!torch.int, !torch.int) -> !torch.list<int>
%663 = torch.prim.ListConstruct %int0_137, %int0_137 : (!torch.int, !torch.int) -> !torch.list<int>
%664 = torch.prim.ListConstruct %int1_138, %int1_138 : (!torch.int, !torch.int) -> !torch.list<int>
%false_140 = torch.constant.bool false
%none_141 = torch.constant.none
%665 = torch.aten.avg_pool2d %661, %662, %664, %663, %false_140, %false_140, %none_141 : !torch.vtensor<[1,72,28,28],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,72,1,1],f32>
%666 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%667 = torch.aten.mul.Tensor %665, %666 : !torch.vtensor<[1,72,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,72,1,1],f32>
%668 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%669 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_142 = torch.constant.int 12
%670 = torch.aten.item %668 : !torch.vtensor<[],f32> -> !torch.float
%671 = torch.aten.item %669 : !torch.vtensor<[],si8> -> !torch.int
%672 = torch.aten.quantize_per_tensor %667, %670, %671, %int12_142 : !torch.vtensor<[1,72,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,72,1,1],!torch.qint8>
%673 = torch.aten.int_repr %672 : !torch.vtensor<[1,72,1,1],!torch.qint8> -> !torch.vtensor<[1,72,1,1],si8>
%674 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%675 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%676 = torch.aten.item %674 : !torch.vtensor<[],f32> -> !torch.float
%677 = torch.aten.item %675 : !torch.vtensor<[],si8> -> !torch.int
%678 = torch.aten._make_per_tensor_quantized_tensor %673, %676, %677 : !torch.vtensor<[1,72,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,72,1,1],!torch.qint8>
%679 = torch.aten.dequantize.self %678 : !torch.vtensor<[1,72,1,1],!torch.qint8> -> !torch.vtensor<[1,72,1,1],f32>
%680 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%681 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_143 = torch.constant.int 12
%682 = torch.aten.item %680 : !torch.vtensor<[],f32> -> !torch.float
%683 = torch.aten.item %681 : !torch.vtensor<[],si8> -> !torch.int
%684 = torch.aten.quantize_per_tensor %22, %682, %683, %int12_143 : !torch.vtensor<[24,72,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[24,72,1,1],!torch.qint8>
%685 = torch.aten.int_repr %684 : !torch.vtensor<[24,72,1,1],!torch.qint8> -> !torch.vtensor<[24,72,1,1],si8>
%686 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%687 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%688 = torch.aten.item %686 : !torch.vtensor<[],f32> -> !torch.float
%689 = torch.aten.item %687 : !torch.vtensor<[],si8> -> !torch.int
%690 = torch.aten._make_per_tensor_quantized_tensor %685, %688, %689 : !torch.vtensor<[24,72,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[24,72,1,1],!torch.qint8>
%691 = torch.aten.dequantize.self %690 : !torch.vtensor<[24,72,1,1],!torch.qint8> -> !torch.vtensor<[24,72,1,1],f32>
%692 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%693 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_144 = torch.constant.int 12
%694 = torch.aten.item %692 : !torch.vtensor<[],f32> -> !torch.float
%695 = torch.aten.item %693 : !torch.vtensor<[],si8> -> !torch.int
%696 = torch.aten.quantize_per_tensor %23, %694, %695, %int12_144 : !torch.vtensor<[24],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[24],!torch.qint8>
%697 = torch.aten.int_repr %696 : !torch.vtensor<[24],!torch.qint8> -> !torch.vtensor<[24],si8>
%698 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%699 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%700 = torch.aten.item %698 : !torch.vtensor<[],f32> -> !torch.float
%701 = torch.aten.item %699 : !torch.vtensor<[],si8> -> !torch.int
%702 = torch.aten._make_per_tensor_quantized_tensor %697, %700, %701 : !torch.vtensor<[24],si8>, !torch.float, !torch.int -> !torch.vtensor<[24],!torch.qint8>
%703 = torch.aten.dequantize.self %702 : !torch.vtensor<[24],!torch.qint8> -> !torch.vtensor<[24],f32>
%int0_145 = torch.constant.int 0
%int0_146 = torch.constant.int 0
%int1_147 = torch.constant.int 1
%int1_148 = torch.constant.int 1
%int1_149 = torch.constant.int 1
%int1_150 = torch.constant.int 1
%int0_151 = torch.constant.int 0
%704 = torch.prim.ListConstruct %int0_145, %int0_146 : (!torch.int, !torch.int) -> !torch.list<int>
%705 = torch.prim.ListConstruct %int1_147, %int1_148 : (!torch.int, !torch.int) -> !torch.list<int>
%706 = torch.prim.ListConstruct %int1_149, %int1_150 : (!torch.int, !torch.int) -> !torch.list<int>
%707 = torch.prim.ListConstruct %int0_151, %int0_151 : (!torch.int, !torch.int) -> !torch.list<int>
%false_152 = torch.constant.bool false
%int1_153 = torch.constant.int 1
%708 = torch.aten.convolution %679, %691, %703, %706, %704, %705, %false_152, %707, %int1_153 : !torch.vtensor<[1,72,1,1],f32>, !torch.vtensor<[24,72,1,1],f32>, !torch.vtensor<[24],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,24,1,1],f32>
%709 = torch.aten.relu %708 : !torch.vtensor<[1,24,1,1],f32> -> !torch.vtensor<[1,24,1,1],f32>
%710 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%711 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_154 = torch.constant.int 12
%712 = torch.aten.item %710 : !torch.vtensor<[],f32> -> !torch.float
%713 = torch.aten.item %711 : !torch.vtensor<[],si8> -> !torch.int
%714 = torch.aten.quantize_per_tensor %709, %712, %713, %int12_154 : !torch.vtensor<[1,24,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,24,1,1],!torch.qint8>
%715 = torch.aten.int_repr %714 : !torch.vtensor<[1,24,1,1],!torch.qint8> -> !torch.vtensor<[1,24,1,1],si8>
%716 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%717 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%718 = torch.aten.item %716 : !torch.vtensor<[],f32> -> !torch.float
%719 = torch.aten.item %717 : !torch.vtensor<[],si8> -> !torch.int
%720 = torch.aten._make_per_tensor_quantized_tensor %715, %718, %719 : !torch.vtensor<[1,24,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,24,1,1],!torch.qint8>
%721 = torch.aten.dequantize.self %720 : !torch.vtensor<[1,24,1,1],!torch.qint8> -> !torch.vtensor<[1,24,1,1],f32>
%722 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%723 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_155 = torch.constant.int 12
%724 = torch.aten.item %722 : !torch.vtensor<[],f32> -> !torch.float
%725 = torch.aten.item %723 : !torch.vtensor<[],si8> -> !torch.int
%726 = torch.aten.quantize_per_tensor %24, %724, %725, %int12_155 : !torch.vtensor<[72,24,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72,24,1,1],!torch.qint8>
%727 = torch.aten.int_repr %726 : !torch.vtensor<[72,24,1,1],!torch.qint8> -> !torch.vtensor<[72,24,1,1],si8>
%728 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%729 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%730 = torch.aten.item %728 : !torch.vtensor<[],f32> -> !torch.float
%731 = torch.aten.item %729 : !torch.vtensor<[],si8> -> !torch.int
%732 = torch.aten._make_per_tensor_quantized_tensor %727, %730, %731 : !torch.vtensor<[72,24,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[72,24,1,1],!torch.qint8>
%733 = torch.aten.dequantize.self %732 : !torch.vtensor<[72,24,1,1],!torch.qint8> -> !torch.vtensor<[72,24,1,1],f32>
%734 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%735 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_156 = torch.constant.int 12
%736 = torch.aten.item %734 : !torch.vtensor<[],f32> -> !torch.float
%737 = torch.aten.item %735 : !torch.vtensor<[],si8> -> !torch.int
%738 = torch.aten.quantize_per_tensor %25, %736, %737, %int12_156 : !torch.vtensor<[72],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%739 = torch.aten.int_repr %738 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],si8>
%740 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%741 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%742 = torch.aten.item %740 : !torch.vtensor<[],f32> -> !torch.float
%743 = torch.aten.item %741 : !torch.vtensor<[],si8> -> !torch.int
%744 = torch.aten._make_per_tensor_quantized_tensor %739, %742, %743 : !torch.vtensor<[72],si8>, !torch.float, !torch.int -> !torch.vtensor<[72],!torch.qint8>
%745 = torch.aten.dequantize.self %744 : !torch.vtensor<[72],!torch.qint8> -> !torch.vtensor<[72],f32>
%int0_157 = torch.constant.int 0
%int0_158 = torch.constant.int 0
%int1_159 = torch.constant.int 1
%int1_160 = torch.constant.int 1
%int1_161 = torch.constant.int 1
%int1_162 = torch.constant.int 1
%int0_163 = torch.constant.int 0
%746 = torch.prim.ListConstruct %int0_157, %int0_158 : (!torch.int, !torch.int) -> !torch.list<int>
%747 = torch.prim.ListConstruct %int1_159, %int1_160 : (!torch.int, !torch.int) -> !torch.list<int>
%748 = torch.prim.ListConstruct %int1_161, %int1_162 : (!torch.int, !torch.int) -> !torch.list<int>
%749 = torch.prim.ListConstruct %int0_163, %int0_163 : (!torch.int, !torch.int) -> !torch.list<int>
%false_164 = torch.constant.bool false
%int1_165 = torch.constant.int 1
%750 = torch.aten.convolution %721, %733, %745, %748, %746, %747, %false_164, %749, %int1_165 : !torch.vtensor<[1,24,1,1],f32>, !torch.vtensor<[72,24,1,1],f32>, !torch.vtensor<[72],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,72,1,1],f32>
%751 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%752 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_166 = torch.constant.int 12
%753 = torch.aten.item %751 : !torch.vtensor<[],f32> -> !torch.float
%754 = torch.aten.item %752 : !torch.vtensor<[],si8> -> !torch.int
%755 = torch.aten.quantize_per_tensor %750, %753, %754, %int12_166 : !torch.vtensor<[1,72,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,72,1,1],!torch.qint8>
%756 = torch.aten.int_repr %755 : !torch.vtensor<[1,72,1,1],!torch.qint8> -> !torch.vtensor<[1,72,1,1],si8>
%757 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%758 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%759 = torch.aten.item %757 : !torch.vtensor<[],f32> -> !torch.float
%760 = torch.aten.item %758 : !torch.vtensor<[],si8> -> !torch.int
%761 = torch.aten._make_per_tensor_quantized_tensor %756, %759, %760 : !torch.vtensor<[1,72,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,72,1,1],!torch.qint8>
%762 = torch.aten.dequantize.self %761 : !torch.vtensor<[1,72,1,1],!torch.qint8> -> !torch.vtensor<[1,72,1,1],f32>
%763 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_167 = torch.constant.int 1
%764 = torch.aten.add.Tensor %762, %763, %int1_167 : !torch.vtensor<[1,72,1,1],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,72,1,1],f32>
%765 = torch.aten.relu %764 : !torch.vtensor<[1,72,1,1],f32> -> !torch.vtensor<[1,72,1,1],f32>
%766 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%767 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_168 = torch.constant.int 6
%none_169 = torch.constant.none
%false_170 = torch.constant.bool false
%768 = torch.aten.to.dtype %766, %int6_168, %false_170, %false_170, %none_169 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_171 = torch.constant.int 6
%none_172 = torch.constant.none
%false_173 = torch.constant.bool false
%769 = torch.aten.to.dtype %767, %int6_171, %false_173, %false_173, %none_172 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%770 = torch.aten.clamp.Tensor %765, %768, %769 : !torch.vtensor<[1,72,1,1],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,72,1,1],f32>
%771 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%772 = torch.aten.mul.Tensor %770, %771 : !torch.vtensor<[1,72,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,72,1,1],f32>
%773 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%774 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_174 = torch.constant.int 12
%775 = torch.aten.item %773 : !torch.vtensor<[],f32> -> !torch.float
%776 = torch.aten.item %774 : !torch.vtensor<[],si8> -> !torch.int
%777 = torch.aten.quantize_per_tensor %772, %775, %776, %int12_174 : !torch.vtensor<[1,72,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,72,1,1],!torch.qint8>
%778 = torch.aten.int_repr %777 : !torch.vtensor<[1,72,1,1],!torch.qint8> -> !torch.vtensor<[1,72,1,1],si8>
%779 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%780 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%781 = torch.aten.item %779 : !torch.vtensor<[],f32> -> !torch.float
%782 = torch.aten.item %780 : !torch.vtensor<[],si8> -> !torch.int
%783 = torch.aten._make_per_tensor_quantized_tensor %778, %781, %782 : !torch.vtensor<[1,72,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,72,1,1],!torch.qint8>
%784 = torch.aten.dequantize.self %783 : !torch.vtensor<[1,72,1,1],!torch.qint8> -> !torch.vtensor<[1,72,1,1],f32>
%785 = torch.aten.mul.Tensor %784, %661 : !torch.vtensor<[1,72,1,1],f32>, !torch.vtensor<[1,72,28,28],f32> -> !torch.vtensor<[1,72,28,28],f32>
%786 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%787 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_175 = torch.constant.int 12
%788 = torch.aten.item %786 : !torch.vtensor<[],f32> -> !torch.float
%789 = torch.aten.item %787 : !torch.vtensor<[],si8> -> !torch.int
%790 = torch.aten.quantize_per_tensor %785, %788, %789, %int12_175 : !torch.vtensor<[1,72,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,72,28,28],!torch.qint8>
%791 = torch.aten.int_repr %790 : !torch.vtensor<[1,72,28,28],!torch.qint8> -> !torch.vtensor<[1,72,28,28],si8>
%792 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%793 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%794 = torch.aten.item %792 : !torch.vtensor<[],f32> -> !torch.float
%795 = torch.aten.item %793 : !torch.vtensor<[],si8> -> !torch.int
%796 = torch.aten._make_per_tensor_quantized_tensor %791, %794, %795 : !torch.vtensor<[1,72,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,72,28,28],!torch.qint8>
%797 = torch.aten.dequantize.self %796 : !torch.vtensor<[1,72,28,28],!torch.qint8> -> !torch.vtensor<[1,72,28,28],f32>
%798 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%799 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_176 = torch.constant.int 12
%800 = torch.aten.item %798 : !torch.vtensor<[],f32> -> !torch.float
%801 = torch.aten.item %799 : !torch.vtensor<[],si8> -> !torch.int
%802 = torch.aten.quantize_per_tensor %26, %800, %801, %int12_176 : !torch.vtensor<[40,72,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[40,72,1,1],!torch.qint8>
%803 = torch.aten.int_repr %802 : !torch.vtensor<[40,72,1,1],!torch.qint8> -> !torch.vtensor<[40,72,1,1],si8>
%804 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%805 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%806 = torch.aten.item %804 : !torch.vtensor<[],f32> -> !torch.float
%807 = torch.aten.item %805 : !torch.vtensor<[],si8> -> !torch.int
%808 = torch.aten._make_per_tensor_quantized_tensor %803, %806, %807 : !torch.vtensor<[40,72,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[40,72,1,1],!torch.qint8>
%809 = torch.aten.dequantize.self %808 : !torch.vtensor<[40,72,1,1],!torch.qint8> -> !torch.vtensor<[40,72,1,1],f32>
%810 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%811 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_177 = torch.constant.int 12
%812 = torch.aten.item %810 : !torch.vtensor<[],f32> -> !torch.float
%813 = torch.aten.item %811 : !torch.vtensor<[],si8> -> !torch.int
%814 = torch.aten.quantize_per_tensor %27, %812, %813, %int12_177 : !torch.vtensor<[40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[40],!torch.qint8>
%815 = torch.aten.int_repr %814 : !torch.vtensor<[40],!torch.qint8> -> !torch.vtensor<[40],si8>
%816 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%817 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%818 = torch.aten.item %816 : !torch.vtensor<[],f32> -> !torch.float
%819 = torch.aten.item %817 : !torch.vtensor<[],si8> -> !torch.int
%820 = torch.aten._make_per_tensor_quantized_tensor %815, %818, %819 : !torch.vtensor<[40],si8>, !torch.float, !torch.int -> !torch.vtensor<[40],!torch.qint8>
%821 = torch.aten.dequantize.self %820 : !torch.vtensor<[40],!torch.qint8> -> !torch.vtensor<[40],f32>
%int0_178 = torch.constant.int 0
%int0_179 = torch.constant.int 0
%int1_180 = torch.constant.int 1
%int1_181 = torch.constant.int 1
%int1_182 = torch.constant.int 1
%int1_183 = torch.constant.int 1
%int0_184 = torch.constant.int 0
%822 = torch.prim.ListConstruct %int0_178, %int0_179 : (!torch.int, !torch.int) -> !torch.list<int>
%823 = torch.prim.ListConstruct %int1_180, %int1_181 : (!torch.int, !torch.int) -> !torch.list<int>
%824 = torch.prim.ListConstruct %int1_182, %int1_183 : (!torch.int, !torch.int) -> !torch.list<int>
%825 = torch.prim.ListConstruct %int0_184, %int0_184 : (!torch.int, !torch.int) -> !torch.list<int>
%false_185 = torch.constant.bool false
%int1_186 = torch.constant.int 1
%826 = torch.aten.convolution %797, %809, %821, %824, %822, %823, %false_185, %825, %int1_186 : !torch.vtensor<[1,72,28,28],f32>, !torch.vtensor<[40,72,1,1],f32>, !torch.vtensor<[40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,40,28,28],f32>
%827 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%828 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_187 = torch.constant.int 12
%829 = torch.aten.item %827 : !torch.vtensor<[],f32> -> !torch.float
%830 = torch.aten.item %828 : !torch.vtensor<[],si8> -> !torch.int
%831 = torch.aten.quantize_per_tensor %826, %829, %830, %int12_187 : !torch.vtensor<[1,40,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%832 = torch.aten.int_repr %831 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],si8>
%833 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%834 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%835 = torch.aten.item %833 : !torch.vtensor<[],f32> -> !torch.float
%836 = torch.aten.item %834 : !torch.vtensor<[],si8> -> !torch.int
%837 = torch.aten._make_per_tensor_quantized_tensor %832, %835, %836 : !torch.vtensor<[1,40,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%838 = torch.aten.dequantize.self %837 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],f32>
%839 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%840 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_188 = torch.constant.int 12
%841 = torch.aten.item %839 : !torch.vtensor<[],f32> -> !torch.float
%842 = torch.aten.item %840 : !torch.vtensor<[],si8> -> !torch.int
%843 = torch.aten.quantize_per_tensor %28, %841, %842, %int12_188 : !torch.vtensor<[120,40,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120,40,1,1],!torch.qint8>
%844 = torch.aten.int_repr %843 : !torch.vtensor<[120,40,1,1],!torch.qint8> -> !torch.vtensor<[120,40,1,1],si8>
%845 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%846 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%847 = torch.aten.item %845 : !torch.vtensor<[],f32> -> !torch.float
%848 = torch.aten.item %846 : !torch.vtensor<[],si8> -> !torch.int
%849 = torch.aten._make_per_tensor_quantized_tensor %844, %847, %848 : !torch.vtensor<[120,40,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[120,40,1,1],!torch.qint8>
%850 = torch.aten.dequantize.self %849 : !torch.vtensor<[120,40,1,1],!torch.qint8> -> !torch.vtensor<[120,40,1,1],f32>
%851 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%852 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_189 = torch.constant.int 12
%853 = torch.aten.item %851 : !torch.vtensor<[],f32> -> !torch.float
%854 = torch.aten.item %852 : !torch.vtensor<[],si8> -> !torch.int
%855 = torch.aten.quantize_per_tensor %29, %853, %854, %int12_189 : !torch.vtensor<[120],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%856 = torch.aten.int_repr %855 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],si8>
%857 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%858 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%859 = torch.aten.item %857 : !torch.vtensor<[],f32> -> !torch.float
%860 = torch.aten.item %858 : !torch.vtensor<[],si8> -> !torch.int
%861 = torch.aten._make_per_tensor_quantized_tensor %856, %859, %860 : !torch.vtensor<[120],si8>, !torch.float, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%862 = torch.aten.dequantize.self %861 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],f32>
%int0_190 = torch.constant.int 0
%int0_191 = torch.constant.int 0
%int1_192 = torch.constant.int 1
%int1_193 = torch.constant.int 1
%int1_194 = torch.constant.int 1
%int1_195 = torch.constant.int 1
%int0_196 = torch.constant.int 0
%863 = torch.prim.ListConstruct %int0_190, %int0_191 : (!torch.int, !torch.int) -> !torch.list<int>
%864 = torch.prim.ListConstruct %int1_192, %int1_193 : (!torch.int, !torch.int) -> !torch.list<int>
%865 = torch.prim.ListConstruct %int1_194, %int1_195 : (!torch.int, !torch.int) -> !torch.list<int>
%866 = torch.prim.ListConstruct %int0_196, %int0_196 : (!torch.int, !torch.int) -> !torch.list<int>
%false_197 = torch.constant.bool false
%int1_198 = torch.constant.int 1
%867 = torch.aten.convolution %838, %850, %862, %865, %863, %864, %false_197, %866, %int1_198 : !torch.vtensor<[1,40,28,28],f32>, !torch.vtensor<[120,40,1,1],f32>, !torch.vtensor<[120],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,120,28,28],f32>
%868 = torch.aten.relu %867 : !torch.vtensor<[1,120,28,28],f32> -> !torch.vtensor<[1,120,28,28],f32>
%869 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%870 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_199 = torch.constant.int 12
%871 = torch.aten.item %869 : !torch.vtensor<[],f32> -> !torch.float
%872 = torch.aten.item %870 : !torch.vtensor<[],si8> -> !torch.int
%873 = torch.aten.quantize_per_tensor %868, %871, %872, %int12_199 : !torch.vtensor<[1,120,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%874 = torch.aten.int_repr %873 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],si8>
%875 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%876 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%877 = torch.aten.item %875 : !torch.vtensor<[],f32> -> !torch.float
%878 = torch.aten.item %876 : !torch.vtensor<[],si8> -> !torch.int
%879 = torch.aten._make_per_tensor_quantized_tensor %874, %877, %878 : !torch.vtensor<[1,120,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%880 = torch.aten.dequantize.self %879 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],f32>
%881 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%882 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_200 = torch.constant.int 12
%883 = torch.aten.item %881 : !torch.vtensor<[],f32> -> !torch.float
%884 = torch.aten.item %882 : !torch.vtensor<[],si8> -> !torch.int
%885 = torch.aten.quantize_per_tensor %30, %883, %884, %int12_200 : !torch.vtensor<[120,1,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120,1,5,5],!torch.qint8>
%886 = torch.aten.int_repr %885 : !torch.vtensor<[120,1,5,5],!torch.qint8> -> !torch.vtensor<[120,1,5,5],si8>
%887 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%888 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%889 = torch.aten.item %887 : !torch.vtensor<[],f32> -> !torch.float
%890 = torch.aten.item %888 : !torch.vtensor<[],si8> -> !torch.int
%891 = torch.aten._make_per_tensor_quantized_tensor %886, %889, %890 : !torch.vtensor<[120,1,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[120,1,5,5],!torch.qint8>
%892 = torch.aten.dequantize.self %891 : !torch.vtensor<[120,1,5,5],!torch.qint8> -> !torch.vtensor<[120,1,5,5],f32>
%893 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%894 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_201 = torch.constant.int 12
%895 = torch.aten.item %893 : !torch.vtensor<[],f32> -> !torch.float
%896 = torch.aten.item %894 : !torch.vtensor<[],si8> -> !torch.int
%897 = torch.aten.quantize_per_tensor %31, %895, %896, %int12_201 : !torch.vtensor<[120],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%898 = torch.aten.int_repr %897 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],si8>
%899 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%900 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%901 = torch.aten.item %899 : !torch.vtensor<[],f32> -> !torch.float
%902 = torch.aten.item %900 : !torch.vtensor<[],si8> -> !torch.int
%903 = torch.aten._make_per_tensor_quantized_tensor %898, %901, %902 : !torch.vtensor<[120],si8>, !torch.float, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%904 = torch.aten.dequantize.self %903 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],f32>
%int2_202 = torch.constant.int 2
%int2_203 = torch.constant.int 2
%int1_204 = torch.constant.int 1
%int1_205 = torch.constant.int 1
%int1_206 = torch.constant.int 1
%int1_207 = torch.constant.int 1
%int0_208 = torch.constant.int 0
%905 = torch.prim.ListConstruct %int2_202, %int2_203 : (!torch.int, !torch.int) -> !torch.list<int>
%906 = torch.prim.ListConstruct %int1_204, %int1_205 : (!torch.int, !torch.int) -> !torch.list<int>
%907 = torch.prim.ListConstruct %int1_206, %int1_207 : (!torch.int, !torch.int) -> !torch.list<int>
%908 = torch.prim.ListConstruct %int0_208, %int0_208 : (!torch.int, !torch.int) -> !torch.list<int>
%false_209 = torch.constant.bool false
%int120 = torch.constant.int 120
%909 = torch.aten.convolution %880, %892, %904, %907, %905, %906, %false_209, %908, %int120 : !torch.vtensor<[1,120,28,28],f32>, !torch.vtensor<[120,1,5,5],f32>, !torch.vtensor<[120],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,120,28,28],f32>
%910 = torch.aten.relu %909 : !torch.vtensor<[1,120,28,28],f32> -> !torch.vtensor<[1,120,28,28],f32>
%911 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%912 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_210 = torch.constant.int 12
%913 = torch.aten.item %911 : !torch.vtensor<[],f32> -> !torch.float
%914 = torch.aten.item %912 : !torch.vtensor<[],si8> -> !torch.int
%915 = torch.aten.quantize_per_tensor %910, %913, %914, %int12_210 : !torch.vtensor<[1,120,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%916 = torch.aten.int_repr %915 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],si8>
%917 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%918 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%919 = torch.aten.item %917 : !torch.vtensor<[],f32> -> !torch.float
%920 = torch.aten.item %918 : !torch.vtensor<[],si8> -> !torch.int
%921 = torch.aten._make_per_tensor_quantized_tensor %916, %919, %920 : !torch.vtensor<[1,120,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%922 = torch.aten.dequantize.self %921 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],f32>
%int0_211 = torch.constant.int 0
%int1_212 = torch.constant.int 1
%int28_213 = torch.constant.int 28
%int28_214 = torch.constant.int 28
%923 = torch.prim.ListConstruct %int28_213, %int28_214 : (!torch.int, !torch.int) -> !torch.list<int>
%924 = torch.prim.ListConstruct %int0_211, %int0_211 : (!torch.int, !torch.int) -> !torch.list<int>
%925 = torch.prim.ListConstruct %int1_212, %int1_212 : (!torch.int, !torch.int) -> !torch.list<int>
%false_215 = torch.constant.bool false
%none_216 = torch.constant.none
%926 = torch.aten.avg_pool2d %922, %923, %925, %924, %false_215, %false_215, %none_216 : !torch.vtensor<[1,120,28,28],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,120,1,1],f32>
%927 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%928 = torch.aten.mul.Tensor %926, %927 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,120,1,1],f32>
%929 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%930 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_217 = torch.constant.int 12
%931 = torch.aten.item %929 : !torch.vtensor<[],f32> -> !torch.float
%932 = torch.aten.item %930 : !torch.vtensor<[],si8> -> !torch.int
%933 = torch.aten.quantize_per_tensor %928, %931, %932, %int12_217 : !torch.vtensor<[1,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%934 = torch.aten.int_repr %933 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],si8>
%935 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%936 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%937 = torch.aten.item %935 : !torch.vtensor<[],f32> -> !torch.float
%938 = torch.aten.item %936 : !torch.vtensor<[],si8> -> !torch.int
%939 = torch.aten._make_per_tensor_quantized_tensor %934, %937, %938 : !torch.vtensor<[1,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%940 = torch.aten.dequantize.self %939 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],f32>
%941 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%942 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_218 = torch.constant.int 12
%943 = torch.aten.item %941 : !torch.vtensor<[],f32> -> !torch.float
%944 = torch.aten.item %942 : !torch.vtensor<[],si8> -> !torch.int
%945 = torch.aten.quantize_per_tensor %32, %943, %944, %int12_218 : !torch.vtensor<[32,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,120,1,1],!torch.qint8>
%946 = torch.aten.int_repr %945 : !torch.vtensor<[32,120,1,1],!torch.qint8> -> !torch.vtensor<[32,120,1,1],si8>
%947 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%948 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%949 = torch.aten.item %947 : !torch.vtensor<[],f32> -> !torch.float
%950 = torch.aten.item %948 : !torch.vtensor<[],si8> -> !torch.int
%951 = torch.aten._make_per_tensor_quantized_tensor %946, %949, %950 : !torch.vtensor<[32,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,120,1,1],!torch.qint8>
%952 = torch.aten.dequantize.self %951 : !torch.vtensor<[32,120,1,1],!torch.qint8> -> !torch.vtensor<[32,120,1,1],f32>
%953 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%954 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_219 = torch.constant.int 12
%955 = torch.aten.item %953 : !torch.vtensor<[],f32> -> !torch.float
%956 = torch.aten.item %954 : !torch.vtensor<[],si8> -> !torch.int
%957 = torch.aten.quantize_per_tensor %33, %955, %956, %int12_219 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%958 = torch.aten.int_repr %957 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%959 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%960 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%961 = torch.aten.item %959 : !torch.vtensor<[],f32> -> !torch.float
%962 = torch.aten.item %960 : !torch.vtensor<[],si8> -> !torch.int
%963 = torch.aten._make_per_tensor_quantized_tensor %958, %961, %962 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%964 = torch.aten.dequantize.self %963 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int0_220 = torch.constant.int 0
%int0_221 = torch.constant.int 0
%int1_222 = torch.constant.int 1
%int1_223 = torch.constant.int 1
%int1_224 = torch.constant.int 1
%int1_225 = torch.constant.int 1
%int0_226 = torch.constant.int 0
%965 = torch.prim.ListConstruct %int0_220, %int0_221 : (!torch.int, !torch.int) -> !torch.list<int>
%966 = torch.prim.ListConstruct %int1_222, %int1_223 : (!torch.int, !torch.int) -> !torch.list<int>
%967 = torch.prim.ListConstruct %int1_224, %int1_225 : (!torch.int, !torch.int) -> !torch.list<int>
%968 = torch.prim.ListConstruct %int0_226, %int0_226 : (!torch.int, !torch.int) -> !torch.list<int>
%false_227 = torch.constant.bool false
%int1_228 = torch.constant.int 1
%969 = torch.aten.convolution %940, %952, %964, %967, %965, %966, %false_227, %968, %int1_228 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[32,120,1,1],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,1,1],f32>
%970 = torch.aten.relu %969 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32>
%971 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%972 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_229 = torch.constant.int 12
%973 = torch.aten.item %971 : !torch.vtensor<[],f32> -> !torch.float
%974 = torch.aten.item %972 : !torch.vtensor<[],si8> -> !torch.int
%975 = torch.aten.quantize_per_tensor %970, %973, %974, %int12_229 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,1,1],!torch.qint8>
%976 = torch.aten.int_repr %975 : !torch.vtensor<[1,32,1,1],!torch.qint8> -> !torch.vtensor<[1,32,1,1],si8>
%977 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%978 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%979 = torch.aten.item %977 : !torch.vtensor<[],f32> -> !torch.float
%980 = torch.aten.item %978 : !torch.vtensor<[],si8> -> !torch.int
%981 = torch.aten._make_per_tensor_quantized_tensor %976, %979, %980 : !torch.vtensor<[1,32,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],!torch.qint8>
%982 = torch.aten.dequantize.self %981 : !torch.vtensor<[1,32,1,1],!torch.qint8> -> !torch.vtensor<[1,32,1,1],f32>
%983 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%984 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_230 = torch.constant.int 12
%985 = torch.aten.item %983 : !torch.vtensor<[],f32> -> !torch.float
%986 = torch.aten.item %984 : !torch.vtensor<[],si8> -> !torch.int
%987 = torch.aten.quantize_per_tensor %34, %985, %986, %int12_230 : !torch.vtensor<[120,32,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120,32,1,1],!torch.qint8>
%988 = torch.aten.int_repr %987 : !torch.vtensor<[120,32,1,1],!torch.qint8> -> !torch.vtensor<[120,32,1,1],si8>
%989 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%990 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%991 = torch.aten.item %989 : !torch.vtensor<[],f32> -> !torch.float
%992 = torch.aten.item %990 : !torch.vtensor<[],si8> -> !torch.int
%993 = torch.aten._make_per_tensor_quantized_tensor %988, %991, %992 : !torch.vtensor<[120,32,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[120,32,1,1],!torch.qint8>
%994 = torch.aten.dequantize.self %993 : !torch.vtensor<[120,32,1,1],!torch.qint8> -> !torch.vtensor<[120,32,1,1],f32>
%995 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%996 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_231 = torch.constant.int 12
%997 = torch.aten.item %995 : !torch.vtensor<[],f32> -> !torch.float
%998 = torch.aten.item %996 : !torch.vtensor<[],si8> -> !torch.int
%999 = torch.aten.quantize_per_tensor %35, %997, %998, %int12_231 : !torch.vtensor<[120],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%1000 = torch.aten.int_repr %999 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],si8>
%1001 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1002 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1003 = torch.aten.item %1001 : !torch.vtensor<[],f32> -> !torch.float
%1004 = torch.aten.item %1002 : !torch.vtensor<[],si8> -> !torch.int
%1005 = torch.aten._make_per_tensor_quantized_tensor %1000, %1003, %1004 : !torch.vtensor<[120],si8>, !torch.float, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%1006 = torch.aten.dequantize.self %1005 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],f32>
%int0_232 = torch.constant.int 0
%int0_233 = torch.constant.int 0
%int1_234 = torch.constant.int 1
%int1_235 = torch.constant.int 1
%int1_236 = torch.constant.int 1
%int1_237 = torch.constant.int 1
%int0_238 = torch.constant.int 0
%1007 = torch.prim.ListConstruct %int0_232, %int0_233 : (!torch.int, !torch.int) -> !torch.list<int>
%1008 = torch.prim.ListConstruct %int1_234, %int1_235 : (!torch.int, !torch.int) -> !torch.list<int>
%1009 = torch.prim.ListConstruct %int1_236, %int1_237 : (!torch.int, !torch.int) -> !torch.list<int>
%1010 = torch.prim.ListConstruct %int0_238, %int0_238 : (!torch.int, !torch.int) -> !torch.list<int>
%false_239 = torch.constant.bool false
%int1_240 = torch.constant.int 1
%1011 = torch.aten.convolution %982, %994, %1006, %1009, %1007, %1008, %false_239, %1010, %int1_240 : !torch.vtensor<[1,32,1,1],f32>, !torch.vtensor<[120,32,1,1],f32>, !torch.vtensor<[120],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,120,1,1],f32>
%1012 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1013 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_241 = torch.constant.int 12
%1014 = torch.aten.item %1012 : !torch.vtensor<[],f32> -> !torch.float
%1015 = torch.aten.item %1013 : !torch.vtensor<[],si8> -> !torch.int
%1016 = torch.aten.quantize_per_tensor %1011, %1014, %1015, %int12_241 : !torch.vtensor<[1,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1017 = torch.aten.int_repr %1016 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],si8>
%1018 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1019 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1020 = torch.aten.item %1018 : !torch.vtensor<[],f32> -> !torch.float
%1021 = torch.aten.item %1019 : !torch.vtensor<[],si8> -> !torch.int
%1022 = torch.aten._make_per_tensor_quantized_tensor %1017, %1020, %1021 : !torch.vtensor<[1,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1023 = torch.aten.dequantize.self %1022 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],f32>
%1024 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_242 = torch.constant.int 1
%1025 = torch.aten.add.Tensor %1023, %1024, %int1_242 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,120,1,1],f32>
%1026 = torch.aten.relu %1025 : !torch.vtensor<[1,120,1,1],f32> -> !torch.vtensor<[1,120,1,1],f32>
%1027 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%1028 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_243 = torch.constant.int 6
%none_244 = torch.constant.none
%false_245 = torch.constant.bool false
%1029 = torch.aten.to.dtype %1027, %int6_243, %false_245, %false_245, %none_244 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_246 = torch.constant.int 6
%none_247 = torch.constant.none
%false_248 = torch.constant.bool false
%1030 = torch.aten.to.dtype %1028, %int6_246, %false_248, %false_248, %none_247 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%1031 = torch.aten.clamp.Tensor %1026, %1029, %1030 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,120,1,1],f32>
%1032 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%1033 = torch.aten.mul.Tensor %1031, %1032 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,120,1,1],f32>
%1034 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1035 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_249 = torch.constant.int 12
%1036 = torch.aten.item %1034 : !torch.vtensor<[],f32> -> !torch.float
%1037 = torch.aten.item %1035 : !torch.vtensor<[],si8> -> !torch.int
%1038 = torch.aten.quantize_per_tensor %1033, %1036, %1037, %int12_249 : !torch.vtensor<[1,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1039 = torch.aten.int_repr %1038 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],si8>
%1040 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1041 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1042 = torch.aten.item %1040 : !torch.vtensor<[],f32> -> !torch.float
%1043 = torch.aten.item %1041 : !torch.vtensor<[],si8> -> !torch.int
%1044 = torch.aten._make_per_tensor_quantized_tensor %1039, %1042, %1043 : !torch.vtensor<[1,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1045 = torch.aten.dequantize.self %1044 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],f32>
%1046 = torch.aten.mul.Tensor %1045, %922 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[1,120,28,28],f32> -> !torch.vtensor<[1,120,28,28],f32>
%1047 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1048 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_250 = torch.constant.int 12
%1049 = torch.aten.item %1047 : !torch.vtensor<[],f32> -> !torch.float
%1050 = torch.aten.item %1048 : !torch.vtensor<[],si8> -> !torch.int
%1051 = torch.aten.quantize_per_tensor %1046, %1049, %1050, %int12_250 : !torch.vtensor<[1,120,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%1052 = torch.aten.int_repr %1051 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],si8>
%1053 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1054 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1055 = torch.aten.item %1053 : !torch.vtensor<[],f32> -> !torch.float
%1056 = torch.aten.item %1054 : !torch.vtensor<[],si8> -> !torch.int
%1057 = torch.aten._make_per_tensor_quantized_tensor %1052, %1055, %1056 : !torch.vtensor<[1,120,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%1058 = torch.aten.dequantize.self %1057 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],f32>
%1059 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1060 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_251 = torch.constant.int 12
%1061 = torch.aten.item %1059 : !torch.vtensor<[],f32> -> !torch.float
%1062 = torch.aten.item %1060 : !torch.vtensor<[],si8> -> !torch.int
%1063 = torch.aten.quantize_per_tensor %36, %1061, %1062, %int12_251 : !torch.vtensor<[40,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[40,120,1,1],!torch.qint8>
%1064 = torch.aten.int_repr %1063 : !torch.vtensor<[40,120,1,1],!torch.qint8> -> !torch.vtensor<[40,120,1,1],si8>
%1065 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1066 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1067 = torch.aten.item %1065 : !torch.vtensor<[],f32> -> !torch.float
%1068 = torch.aten.item %1066 : !torch.vtensor<[],si8> -> !torch.int
%1069 = torch.aten._make_per_tensor_quantized_tensor %1064, %1067, %1068 : !torch.vtensor<[40,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[40,120,1,1],!torch.qint8>
%1070 = torch.aten.dequantize.self %1069 : !torch.vtensor<[40,120,1,1],!torch.qint8> -> !torch.vtensor<[40,120,1,1],f32>
%1071 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1072 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_252 = torch.constant.int 12
%1073 = torch.aten.item %1071 : !torch.vtensor<[],f32> -> !torch.float
%1074 = torch.aten.item %1072 : !torch.vtensor<[],si8> -> !torch.int
%1075 = torch.aten.quantize_per_tensor %37, %1073, %1074, %int12_252 : !torch.vtensor<[40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[40],!torch.qint8>
%1076 = torch.aten.int_repr %1075 : !torch.vtensor<[40],!torch.qint8> -> !torch.vtensor<[40],si8>
%1077 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1078 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1079 = torch.aten.item %1077 : !torch.vtensor<[],f32> -> !torch.float
%1080 = torch.aten.item %1078 : !torch.vtensor<[],si8> -> !torch.int
%1081 = torch.aten._make_per_tensor_quantized_tensor %1076, %1079, %1080 : !torch.vtensor<[40],si8>, !torch.float, !torch.int -> !torch.vtensor<[40],!torch.qint8>
%1082 = torch.aten.dequantize.self %1081 : !torch.vtensor<[40],!torch.qint8> -> !torch.vtensor<[40],f32>
%int0_253 = torch.constant.int 0
%int0_254 = torch.constant.int 0
%int1_255 = torch.constant.int 1
%int1_256 = torch.constant.int 1
%int1_257 = torch.constant.int 1
%int1_258 = torch.constant.int 1
%int0_259 = torch.constant.int 0
%1083 = torch.prim.ListConstruct %int0_253, %int0_254 : (!torch.int, !torch.int) -> !torch.list<int>
%1084 = torch.prim.ListConstruct %int1_255, %int1_256 : (!torch.int, !torch.int) -> !torch.list<int>
%1085 = torch.prim.ListConstruct %int1_257, %int1_258 : (!torch.int, !torch.int) -> !torch.list<int>
%1086 = torch.prim.ListConstruct %int0_259, %int0_259 : (!torch.int, !torch.int) -> !torch.list<int>
%false_260 = torch.constant.bool false
%int1_261 = torch.constant.int 1
%1087 = torch.aten.convolution %1058, %1070, %1082, %1085, %1083, %1084, %false_260, %1086, %int1_261 : !torch.vtensor<[1,120,28,28],f32>, !torch.vtensor<[40,120,1,1],f32>, !torch.vtensor<[40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,40,28,28],f32>
%1088 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1089 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_262 = torch.constant.int 12
%1090 = torch.aten.item %1088 : !torch.vtensor<[],f32> -> !torch.float
%1091 = torch.aten.item %1089 : !torch.vtensor<[],si8> -> !torch.int
%1092 = torch.aten.quantize_per_tensor %1087, %1090, %1091, %int12_262 : !torch.vtensor<[1,40,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%1093 = torch.aten.int_repr %1092 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],si8>
%1094 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1095 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1096 = torch.aten.item %1094 : !torch.vtensor<[],f32> -> !torch.float
%1097 = torch.aten.item %1095 : !torch.vtensor<[],si8> -> !torch.int
%1098 = torch.aten._make_per_tensor_quantized_tensor %1093, %1096, %1097 : !torch.vtensor<[1,40,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%1099 = torch.aten.dequantize.self %1098 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],f32>
%int1_263 = torch.constant.int 1
%1100 = torch.aten.add.Tensor %1099, %838, %int1_263 : !torch.vtensor<[1,40,28,28],f32>, !torch.vtensor<[1,40,28,28],f32>, !torch.int -> !torch.vtensor<[1,40,28,28],f32>
%1101 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1102 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_264 = torch.constant.int 12
%1103 = torch.aten.item %1101 : !torch.vtensor<[],f32> -> !torch.float
%1104 = torch.aten.item %1102 : !torch.vtensor<[],si8> -> !torch.int
%1105 = torch.aten.quantize_per_tensor %1100, %1103, %1104, %int12_264 : !torch.vtensor<[1,40,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%1106 = torch.aten.int_repr %1105 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],si8>
%1107 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1108 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1109 = torch.aten.item %1107 : !torch.vtensor<[],f32> -> !torch.float
%1110 = torch.aten.item %1108 : !torch.vtensor<[],si8> -> !torch.int
%1111 = torch.aten._make_per_tensor_quantized_tensor %1106, %1109, %1110 : !torch.vtensor<[1,40,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%1112 = torch.aten.dequantize.self %1111 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],f32>
%1113 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1114 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_265 = torch.constant.int 12
%1115 = torch.aten.item %1113 : !torch.vtensor<[],f32> -> !torch.float
%1116 = torch.aten.item %1114 : !torch.vtensor<[],si8> -> !torch.int
%1117 = torch.aten.quantize_per_tensor %38, %1115, %1116, %int12_265 : !torch.vtensor<[120,40,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120,40,1,1],!torch.qint8>
%1118 = torch.aten.int_repr %1117 : !torch.vtensor<[120,40,1,1],!torch.qint8> -> !torch.vtensor<[120,40,1,1],si8>
%1119 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1120 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1121 = torch.aten.item %1119 : !torch.vtensor<[],f32> -> !torch.float
%1122 = torch.aten.item %1120 : !torch.vtensor<[],si8> -> !torch.int
%1123 = torch.aten._make_per_tensor_quantized_tensor %1118, %1121, %1122 : !torch.vtensor<[120,40,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[120,40,1,1],!torch.qint8>
%1124 = torch.aten.dequantize.self %1123 : !torch.vtensor<[120,40,1,1],!torch.qint8> -> !torch.vtensor<[120,40,1,1],f32>
%1125 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1126 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_266 = torch.constant.int 12
%1127 = torch.aten.item %1125 : !torch.vtensor<[],f32> -> !torch.float
%1128 = torch.aten.item %1126 : !torch.vtensor<[],si8> -> !torch.int
%1129 = torch.aten.quantize_per_tensor %39, %1127, %1128, %int12_266 : !torch.vtensor<[120],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%1130 = torch.aten.int_repr %1129 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],si8>
%1131 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1132 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1133 = torch.aten.item %1131 : !torch.vtensor<[],f32> -> !torch.float
%1134 = torch.aten.item %1132 : !torch.vtensor<[],si8> -> !torch.int
%1135 = torch.aten._make_per_tensor_quantized_tensor %1130, %1133, %1134 : !torch.vtensor<[120],si8>, !torch.float, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%1136 = torch.aten.dequantize.self %1135 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],f32>
%int0_267 = torch.constant.int 0
%int0_268 = torch.constant.int 0
%int1_269 = torch.constant.int 1
%int1_270 = torch.constant.int 1
%int1_271 = torch.constant.int 1
%int1_272 = torch.constant.int 1
%int0_273 = torch.constant.int 0
%1137 = torch.prim.ListConstruct %int0_267, %int0_268 : (!torch.int, !torch.int) -> !torch.list<int>
%1138 = torch.prim.ListConstruct %int1_269, %int1_270 : (!torch.int, !torch.int) -> !torch.list<int>
%1139 = torch.prim.ListConstruct %int1_271, %int1_272 : (!torch.int, !torch.int) -> !torch.list<int>
%1140 = torch.prim.ListConstruct %int0_273, %int0_273 : (!torch.int, !torch.int) -> !torch.list<int>
%false_274 = torch.constant.bool false
%int1_275 = torch.constant.int 1
%1141 = torch.aten.convolution %1112, %1124, %1136, %1139, %1137, %1138, %false_274, %1140, %int1_275 : !torch.vtensor<[1,40,28,28],f32>, !torch.vtensor<[120,40,1,1],f32>, !torch.vtensor<[120],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,120,28,28],f32>
%1142 = torch.aten.relu %1141 : !torch.vtensor<[1,120,28,28],f32> -> !torch.vtensor<[1,120,28,28],f32>
%1143 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1144 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_276 = torch.constant.int 12
%1145 = torch.aten.item %1143 : !torch.vtensor<[],f32> -> !torch.float
%1146 = torch.aten.item %1144 : !torch.vtensor<[],si8> -> !torch.int
%1147 = torch.aten.quantize_per_tensor %1142, %1145, %1146, %int12_276 : !torch.vtensor<[1,120,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%1148 = torch.aten.int_repr %1147 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],si8>
%1149 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1150 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1151 = torch.aten.item %1149 : !torch.vtensor<[],f32> -> !torch.float
%1152 = torch.aten.item %1150 : !torch.vtensor<[],si8> -> !torch.int
%1153 = torch.aten._make_per_tensor_quantized_tensor %1148, %1151, %1152 : !torch.vtensor<[1,120,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%1154 = torch.aten.dequantize.self %1153 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],f32>
%1155 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1156 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_277 = torch.constant.int 12
%1157 = torch.aten.item %1155 : !torch.vtensor<[],f32> -> !torch.float
%1158 = torch.aten.item %1156 : !torch.vtensor<[],si8> -> !torch.int
%1159 = torch.aten.quantize_per_tensor %40, %1157, %1158, %int12_277 : !torch.vtensor<[120,1,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120,1,5,5],!torch.qint8>
%1160 = torch.aten.int_repr %1159 : !torch.vtensor<[120,1,5,5],!torch.qint8> -> !torch.vtensor<[120,1,5,5],si8>
%1161 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1162 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1163 = torch.aten.item %1161 : !torch.vtensor<[],f32> -> !torch.float
%1164 = torch.aten.item %1162 : !torch.vtensor<[],si8> -> !torch.int
%1165 = torch.aten._make_per_tensor_quantized_tensor %1160, %1163, %1164 : !torch.vtensor<[120,1,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[120,1,5,5],!torch.qint8>
%1166 = torch.aten.dequantize.self %1165 : !torch.vtensor<[120,1,5,5],!torch.qint8> -> !torch.vtensor<[120,1,5,5],f32>
%1167 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1168 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_278 = torch.constant.int 12
%1169 = torch.aten.item %1167 : !torch.vtensor<[],f32> -> !torch.float
%1170 = torch.aten.item %1168 : !torch.vtensor<[],si8> -> !torch.int
%1171 = torch.aten.quantize_per_tensor %41, %1169, %1170, %int12_278 : !torch.vtensor<[120],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%1172 = torch.aten.int_repr %1171 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],si8>
%1173 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1174 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1175 = torch.aten.item %1173 : !torch.vtensor<[],f32> -> !torch.float
%1176 = torch.aten.item %1174 : !torch.vtensor<[],si8> -> !torch.int
%1177 = torch.aten._make_per_tensor_quantized_tensor %1172, %1175, %1176 : !torch.vtensor<[120],si8>, !torch.float, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%1178 = torch.aten.dequantize.self %1177 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],f32>
%int2_279 = torch.constant.int 2
%int2_280 = torch.constant.int 2
%int1_281 = torch.constant.int 1
%int1_282 = torch.constant.int 1
%int1_283 = torch.constant.int 1
%int1_284 = torch.constant.int 1
%int0_285 = torch.constant.int 0
%1179 = torch.prim.ListConstruct %int2_279, %int2_280 : (!torch.int, !torch.int) -> !torch.list<int>
%1180 = torch.prim.ListConstruct %int1_281, %int1_282 : (!torch.int, !torch.int) -> !torch.list<int>
%1181 = torch.prim.ListConstruct %int1_283, %int1_284 : (!torch.int, !torch.int) -> !torch.list<int>
%1182 = torch.prim.ListConstruct %int0_285, %int0_285 : (!torch.int, !torch.int) -> !torch.list<int>
%false_286 = torch.constant.bool false
%int120_287 = torch.constant.int 120
%1183 = torch.aten.convolution %1154, %1166, %1178, %1181, %1179, %1180, %false_286, %1182, %int120_287 : !torch.vtensor<[1,120,28,28],f32>, !torch.vtensor<[120,1,5,5],f32>, !torch.vtensor<[120],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,120,28,28],f32>
%1184 = torch.aten.relu %1183 : !torch.vtensor<[1,120,28,28],f32> -> !torch.vtensor<[1,120,28,28],f32>
%1185 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1186 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_288 = torch.constant.int 12
%1187 = torch.aten.item %1185 : !torch.vtensor<[],f32> -> !torch.float
%1188 = torch.aten.item %1186 : !torch.vtensor<[],si8> -> !torch.int
%1189 = torch.aten.quantize_per_tensor %1184, %1187, %1188, %int12_288 : !torch.vtensor<[1,120,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%1190 = torch.aten.int_repr %1189 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],si8>
%1191 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1192 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1193 = torch.aten.item %1191 : !torch.vtensor<[],f32> -> !torch.float
%1194 = torch.aten.item %1192 : !torch.vtensor<[],si8> -> !torch.int
%1195 = torch.aten._make_per_tensor_quantized_tensor %1190, %1193, %1194 : !torch.vtensor<[1,120,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%1196 = torch.aten.dequantize.self %1195 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],f32>
%int0_289 = torch.constant.int 0
%int1_290 = torch.constant.int 1
%int28_291 = torch.constant.int 28
%int28_292 = torch.constant.int 28
%1197 = torch.prim.ListConstruct %int28_291, %int28_292 : (!torch.int, !torch.int) -> !torch.list<int>
%1198 = torch.prim.ListConstruct %int0_289, %int0_289 : (!torch.int, !torch.int) -> !torch.list<int>
%1199 = torch.prim.ListConstruct %int1_290, %int1_290 : (!torch.int, !torch.int) -> !torch.list<int>
%false_293 = torch.constant.bool false
%none_294 = torch.constant.none
%1200 = torch.aten.avg_pool2d %1196, %1197, %1199, %1198, %false_293, %false_293, %none_294 : !torch.vtensor<[1,120,28,28],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,120,1,1],f32>
%1201 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%1202 = torch.aten.mul.Tensor %1200, %1201 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,120,1,1],f32>
%1203 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1204 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_295 = torch.constant.int 12
%1205 = torch.aten.item %1203 : !torch.vtensor<[],f32> -> !torch.float
%1206 = torch.aten.item %1204 : !torch.vtensor<[],si8> -> !torch.int
%1207 = torch.aten.quantize_per_tensor %1202, %1205, %1206, %int12_295 : !torch.vtensor<[1,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1208 = torch.aten.int_repr %1207 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],si8>
%1209 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1210 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1211 = torch.aten.item %1209 : !torch.vtensor<[],f32> -> !torch.float
%1212 = torch.aten.item %1210 : !torch.vtensor<[],si8> -> !torch.int
%1213 = torch.aten._make_per_tensor_quantized_tensor %1208, %1211, %1212 : !torch.vtensor<[1,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1214 = torch.aten.dequantize.self %1213 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],f32>
%1215 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1216 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_296 = torch.constant.int 12
%1217 = torch.aten.item %1215 : !torch.vtensor<[],f32> -> !torch.float
%1218 = torch.aten.item %1216 : !torch.vtensor<[],si8> -> !torch.int
%1219 = torch.aten.quantize_per_tensor %42, %1217, %1218, %int12_296 : !torch.vtensor<[32,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,120,1,1],!torch.qint8>
%1220 = torch.aten.int_repr %1219 : !torch.vtensor<[32,120,1,1],!torch.qint8> -> !torch.vtensor<[32,120,1,1],si8>
%1221 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1222 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1223 = torch.aten.item %1221 : !torch.vtensor<[],f32> -> !torch.float
%1224 = torch.aten.item %1222 : !torch.vtensor<[],si8> -> !torch.int
%1225 = torch.aten._make_per_tensor_quantized_tensor %1220, %1223, %1224 : !torch.vtensor<[32,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,120,1,1],!torch.qint8>
%1226 = torch.aten.dequantize.self %1225 : !torch.vtensor<[32,120,1,1],!torch.qint8> -> !torch.vtensor<[32,120,1,1],f32>
%1227 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1228 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_297 = torch.constant.int 12
%1229 = torch.aten.item %1227 : !torch.vtensor<[],f32> -> !torch.float
%1230 = torch.aten.item %1228 : !torch.vtensor<[],si8> -> !torch.int
%1231 = torch.aten.quantize_per_tensor %43, %1229, %1230, %int12_297 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1232 = torch.aten.int_repr %1231 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%1233 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%1234 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1235 = torch.aten.item %1233 : !torch.vtensor<[],f32> -> !torch.float
%1236 = torch.aten.item %1234 : !torch.vtensor<[],si8> -> !torch.int
%1237 = torch.aten._make_per_tensor_quantized_tensor %1232, %1235, %1236 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%1238 = torch.aten.dequantize.self %1237 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int0_298 = torch.constant.int 0
%int0_299 = torch.constant.int 0
%int1_300 = torch.constant.int 1
%int1_301 = torch.constant.int 1
%int1_302 = torch.constant.int 1
%int1_303 = torch.constant.int 1
%int0_304 = torch.constant.int 0
%1239 = torch.prim.ListConstruct %int0_298, %int0_299 : (!torch.int, !torch.int) -> !torch.list<int>
%1240 = torch.prim.ListConstruct %int1_300, %int1_301 : (!torch.int, !torch.int) -> !torch.list<int>
%1241 = torch.prim.ListConstruct %int1_302, %int1_303 : (!torch.int, !torch.int) -> !torch.list<int>
%1242 = torch.prim.ListConstruct %int0_304, %int0_304 : (!torch.int, !torch.int) -> !torch.list<int>
%false_305 = torch.constant.bool false
%int1_306 = torch.constant.int 1
%1243 = torch.aten.convolution %1214, %1226, %1238, %1241, %1239, %1240, %false_305, %1242, %int1_306 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[32,120,1,1],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,1,1],f32>
%1244 = torch.aten.relu %1243 : !torch.vtensor<[1,32,1,1],f32> -> !torch.vtensor<[1,32,1,1],f32>
%1245 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1246 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_307 = torch.constant.int 12
%1247 = torch.aten.item %1245 : !torch.vtensor<[],f32> -> !torch.float
%1248 = torch.aten.item %1246 : !torch.vtensor<[],si8> -> !torch.int
%1249 = torch.aten.quantize_per_tensor %1244, %1247, %1248, %int12_307 : !torch.vtensor<[1,32,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,32,1,1],!torch.qint8>
%1250 = torch.aten.int_repr %1249 : !torch.vtensor<[1,32,1,1],!torch.qint8> -> !torch.vtensor<[1,32,1,1],si8>
%1251 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1252 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1253 = torch.aten.item %1251 : !torch.vtensor<[],f32> -> !torch.float
%1254 = torch.aten.item %1252 : !torch.vtensor<[],si8> -> !torch.int
%1255 = torch.aten._make_per_tensor_quantized_tensor %1250, %1253, %1254 : !torch.vtensor<[1,32,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,32,1,1],!torch.qint8>
%1256 = torch.aten.dequantize.self %1255 : !torch.vtensor<[1,32,1,1],!torch.qint8> -> !torch.vtensor<[1,32,1,1],f32>
%1257 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1258 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_308 = torch.constant.int 12
%1259 = torch.aten.item %1257 : !torch.vtensor<[],f32> -> !torch.float
%1260 = torch.aten.item %1258 : !torch.vtensor<[],si8> -> !torch.int
%1261 = torch.aten.quantize_per_tensor %44, %1259, %1260, %int12_308 : !torch.vtensor<[120,32,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120,32,1,1],!torch.qint8>
%1262 = torch.aten.int_repr %1261 : !torch.vtensor<[120,32,1,1],!torch.qint8> -> !torch.vtensor<[120,32,1,1],si8>
%1263 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1264 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1265 = torch.aten.item %1263 : !torch.vtensor<[],f32> -> !torch.float
%1266 = torch.aten.item %1264 : !torch.vtensor<[],si8> -> !torch.int
%1267 = torch.aten._make_per_tensor_quantized_tensor %1262, %1265, %1266 : !torch.vtensor<[120,32,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[120,32,1,1],!torch.qint8>
%1268 = torch.aten.dequantize.self %1267 : !torch.vtensor<[120,32,1,1],!torch.qint8> -> !torch.vtensor<[120,32,1,1],f32>
%1269 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1270 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_309 = torch.constant.int 12
%1271 = torch.aten.item %1269 : !torch.vtensor<[],f32> -> !torch.float
%1272 = torch.aten.item %1270 : !torch.vtensor<[],si8> -> !torch.int
%1273 = torch.aten.quantize_per_tensor %45, %1271, %1272, %int12_309 : !torch.vtensor<[120],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%1274 = torch.aten.int_repr %1273 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],si8>
%1275 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1276 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1277 = torch.aten.item %1275 : !torch.vtensor<[],f32> -> !torch.float
%1278 = torch.aten.item %1276 : !torch.vtensor<[],si8> -> !torch.int
%1279 = torch.aten._make_per_tensor_quantized_tensor %1274, %1277, %1278 : !torch.vtensor<[120],si8>, !torch.float, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%1280 = torch.aten.dequantize.self %1279 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],f32>
%int0_310 = torch.constant.int 0
%int0_311 = torch.constant.int 0
%int1_312 = torch.constant.int 1
%int1_313 = torch.constant.int 1
%int1_314 = torch.constant.int 1
%int1_315 = torch.constant.int 1
%int0_316 = torch.constant.int 0
%1281 = torch.prim.ListConstruct %int0_310, %int0_311 : (!torch.int, !torch.int) -> !torch.list<int>
%1282 = torch.prim.ListConstruct %int1_312, %int1_313 : (!torch.int, !torch.int) -> !torch.list<int>
%1283 = torch.prim.ListConstruct %int1_314, %int1_315 : (!torch.int, !torch.int) -> !torch.list<int>
%1284 = torch.prim.ListConstruct %int0_316, %int0_316 : (!torch.int, !torch.int) -> !torch.list<int>
%false_317 = torch.constant.bool false
%int1_318 = torch.constant.int 1
%1285 = torch.aten.convolution %1256, %1268, %1280, %1283, %1281, %1282, %false_317, %1284, %int1_318 : !torch.vtensor<[1,32,1,1],f32>, !torch.vtensor<[120,32,1,1],f32>, !torch.vtensor<[120],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,120,1,1],f32>
%1286 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1287 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_319 = torch.constant.int 12
%1288 = torch.aten.item %1286 : !torch.vtensor<[],f32> -> !torch.float
%1289 = torch.aten.item %1287 : !torch.vtensor<[],si8> -> !torch.int
%1290 = torch.aten.quantize_per_tensor %1285, %1288, %1289, %int12_319 : !torch.vtensor<[1,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1291 = torch.aten.int_repr %1290 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],si8>
%1292 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1293 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1294 = torch.aten.item %1292 : !torch.vtensor<[],f32> -> !torch.float
%1295 = torch.aten.item %1293 : !torch.vtensor<[],si8> -> !torch.int
%1296 = torch.aten._make_per_tensor_quantized_tensor %1291, %1294, %1295 : !torch.vtensor<[1,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1297 = torch.aten.dequantize.self %1296 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],f32>
%1298 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_320 = torch.constant.int 1
%1299 = torch.aten.add.Tensor %1297, %1298, %int1_320 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,120,1,1],f32>
%1300 = torch.aten.relu %1299 : !torch.vtensor<[1,120,1,1],f32> -> !torch.vtensor<[1,120,1,1],f32>
%1301 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%1302 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_321 = torch.constant.int 6
%none_322 = torch.constant.none
%false_323 = torch.constant.bool false
%1303 = torch.aten.to.dtype %1301, %int6_321, %false_323, %false_323, %none_322 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_324 = torch.constant.int 6
%none_325 = torch.constant.none
%false_326 = torch.constant.bool false
%1304 = torch.aten.to.dtype %1302, %int6_324, %false_326, %false_326, %none_325 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%1305 = torch.aten.clamp.Tensor %1300, %1303, %1304 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,120,1,1],f32>
%1306 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%1307 = torch.aten.mul.Tensor %1305, %1306 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,120,1,1],f32>
%1308 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1309 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_327 = torch.constant.int 12
%1310 = torch.aten.item %1308 : !torch.vtensor<[],f32> -> !torch.float
%1311 = torch.aten.item %1309 : !torch.vtensor<[],si8> -> !torch.int
%1312 = torch.aten.quantize_per_tensor %1307, %1310, %1311, %int12_327 : !torch.vtensor<[1,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1313 = torch.aten.int_repr %1312 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],si8>
%1314 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1315 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1316 = torch.aten.item %1314 : !torch.vtensor<[],f32> -> !torch.float
%1317 = torch.aten.item %1315 : !torch.vtensor<[],si8> -> !torch.int
%1318 = torch.aten._make_per_tensor_quantized_tensor %1313, %1316, %1317 : !torch.vtensor<[1,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%1319 = torch.aten.dequantize.self %1318 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],f32>
%1320 = torch.aten.mul.Tensor %1319, %1196 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[1,120,28,28],f32> -> !torch.vtensor<[1,120,28,28],f32>
%1321 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1322 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_328 = torch.constant.int 12
%1323 = torch.aten.item %1321 : !torch.vtensor<[],f32> -> !torch.float
%1324 = torch.aten.item %1322 : !torch.vtensor<[],si8> -> !torch.int
%1325 = torch.aten.quantize_per_tensor %1320, %1323, %1324, %int12_328 : !torch.vtensor<[1,120,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%1326 = torch.aten.int_repr %1325 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],si8>
%1327 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1328 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1329 = torch.aten.item %1327 : !torch.vtensor<[],f32> -> !torch.float
%1330 = torch.aten.item %1328 : !torch.vtensor<[],si8> -> !torch.int
%1331 = torch.aten._make_per_tensor_quantized_tensor %1326, %1329, %1330 : !torch.vtensor<[1,120,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,28,28],!torch.qint8>
%1332 = torch.aten.dequantize.self %1331 : !torch.vtensor<[1,120,28,28],!torch.qint8> -> !torch.vtensor<[1,120,28,28],f32>
%1333 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1334 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_329 = torch.constant.int 12
%1335 = torch.aten.item %1333 : !torch.vtensor<[],f32> -> !torch.float
%1336 = torch.aten.item %1334 : !torch.vtensor<[],si8> -> !torch.int
%1337 = torch.aten.quantize_per_tensor %46, %1335, %1336, %int12_329 : !torch.vtensor<[40,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[40,120,1,1],!torch.qint8>
%1338 = torch.aten.int_repr %1337 : !torch.vtensor<[40,120,1,1],!torch.qint8> -> !torch.vtensor<[40,120,1,1],si8>
%1339 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1340 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1341 = torch.aten.item %1339 : !torch.vtensor<[],f32> -> !torch.float
%1342 = torch.aten.item %1340 : !torch.vtensor<[],si8> -> !torch.int
%1343 = torch.aten._make_per_tensor_quantized_tensor %1338, %1341, %1342 : !torch.vtensor<[40,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[40,120,1,1],!torch.qint8>
%1344 = torch.aten.dequantize.self %1343 : !torch.vtensor<[40,120,1,1],!torch.qint8> -> !torch.vtensor<[40,120,1,1],f32>
%1345 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1346 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_330 = torch.constant.int 12
%1347 = torch.aten.item %1345 : !torch.vtensor<[],f32> -> !torch.float
%1348 = torch.aten.item %1346 : !torch.vtensor<[],si8> -> !torch.int
%1349 = torch.aten.quantize_per_tensor %47, %1347, %1348, %int12_330 : !torch.vtensor<[40],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[40],!torch.qint8>
%1350 = torch.aten.int_repr %1349 : !torch.vtensor<[40],!torch.qint8> -> !torch.vtensor<[40],si8>
%1351 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1352 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1353 = torch.aten.item %1351 : !torch.vtensor<[],f32> -> !torch.float
%1354 = torch.aten.item %1352 : !torch.vtensor<[],si8> -> !torch.int
%1355 = torch.aten._make_per_tensor_quantized_tensor %1350, %1353, %1354 : !torch.vtensor<[40],si8>, !torch.float, !torch.int -> !torch.vtensor<[40],!torch.qint8>
%1356 = torch.aten.dequantize.self %1355 : !torch.vtensor<[40],!torch.qint8> -> !torch.vtensor<[40],f32>
%int0_331 = torch.constant.int 0
%int0_332 = torch.constant.int 0
%int1_333 = torch.constant.int 1
%int1_334 = torch.constant.int 1
%int1_335 = torch.constant.int 1
%int1_336 = torch.constant.int 1
%int0_337 = torch.constant.int 0
%1357 = torch.prim.ListConstruct %int0_331, %int0_332 : (!torch.int, !torch.int) -> !torch.list<int>
%1358 = torch.prim.ListConstruct %int1_333, %int1_334 : (!torch.int, !torch.int) -> !torch.list<int>
%1359 = torch.prim.ListConstruct %int1_335, %int1_336 : (!torch.int, !torch.int) -> !torch.list<int>
%1360 = torch.prim.ListConstruct %int0_337, %int0_337 : (!torch.int, !torch.int) -> !torch.list<int>
%false_338 = torch.constant.bool false
%int1_339 = torch.constant.int 1
%1361 = torch.aten.convolution %1332, %1344, %1356, %1359, %1357, %1358, %false_338, %1360, %int1_339 : !torch.vtensor<[1,120,28,28],f32>, !torch.vtensor<[40,120,1,1],f32>, !torch.vtensor<[40],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,40,28,28],f32>
%1362 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1363 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_340 = torch.constant.int 12
%1364 = torch.aten.item %1362 : !torch.vtensor<[],f32> -> !torch.float
%1365 = torch.aten.item %1363 : !torch.vtensor<[],si8> -> !torch.int
%1366 = torch.aten.quantize_per_tensor %1361, %1364, %1365, %int12_340 : !torch.vtensor<[1,40,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%1367 = torch.aten.int_repr %1366 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],si8>
%1368 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1369 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1370 = torch.aten.item %1368 : !torch.vtensor<[],f32> -> !torch.float
%1371 = torch.aten.item %1369 : !torch.vtensor<[],si8> -> !torch.int
%1372 = torch.aten._make_per_tensor_quantized_tensor %1367, %1370, %1371 : !torch.vtensor<[1,40,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%1373 = torch.aten.dequantize.self %1372 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],f32>
%int1_341 = torch.constant.int 1
%1374 = torch.aten.add.Tensor %1373, %1112, %int1_341 : !torch.vtensor<[1,40,28,28],f32>, !torch.vtensor<[1,40,28,28],f32>, !torch.int -> !torch.vtensor<[1,40,28,28],f32>
%1375 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1376 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_342 = torch.constant.int 12
%1377 = torch.aten.item %1375 : !torch.vtensor<[],f32> -> !torch.float
%1378 = torch.aten.item %1376 : !torch.vtensor<[],si8> -> !torch.int
%1379 = torch.aten.quantize_per_tensor %1374, %1377, %1378, %int12_342 : !torch.vtensor<[1,40,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%1380 = torch.aten.int_repr %1379 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],si8>
%1381 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1382 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1383 = torch.aten.item %1381 : !torch.vtensor<[],f32> -> !torch.float
%1384 = torch.aten.item %1382 : !torch.vtensor<[],si8> -> !torch.int
%1385 = torch.aten._make_per_tensor_quantized_tensor %1380, %1383, %1384 : !torch.vtensor<[1,40,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,40,28,28],!torch.qint8>
%1386 = torch.aten.dequantize.self %1385 : !torch.vtensor<[1,40,28,28],!torch.qint8> -> !torch.vtensor<[1,40,28,28],f32>
%1387 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1388 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_343 = torch.constant.int 12
%1389 = torch.aten.item %1387 : !torch.vtensor<[],f32> -> !torch.float
%1390 = torch.aten.item %1388 : !torch.vtensor<[],si8> -> !torch.int
%1391 = torch.aten.quantize_per_tensor %48, %1389, %1390, %int12_343 : !torch.vtensor<[240,40,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[240,40,1,1],!torch.qint8>
%1392 = torch.aten.int_repr %1391 : !torch.vtensor<[240,40,1,1],!torch.qint8> -> !torch.vtensor<[240,40,1,1],si8>
%1393 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1394 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1395 = torch.aten.item %1393 : !torch.vtensor<[],f32> -> !torch.float
%1396 = torch.aten.item %1394 : !torch.vtensor<[],si8> -> !torch.int
%1397 = torch.aten._make_per_tensor_quantized_tensor %1392, %1395, %1396 : !torch.vtensor<[240,40,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[240,40,1,1],!torch.qint8>
%1398 = torch.aten.dequantize.self %1397 : !torch.vtensor<[240,40,1,1],!torch.qint8> -> !torch.vtensor<[240,40,1,1],f32>
%1399 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1400 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_344 = torch.constant.int 12
%1401 = torch.aten.item %1399 : !torch.vtensor<[],f32> -> !torch.float
%1402 = torch.aten.item %1400 : !torch.vtensor<[],si8> -> !torch.int
%1403 = torch.aten.quantize_per_tensor %49, %1401, %1402, %int12_344 : !torch.vtensor<[240],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[240],!torch.qint8>
%1404 = torch.aten.int_repr %1403 : !torch.vtensor<[240],!torch.qint8> -> !torch.vtensor<[240],si8>
%1405 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1406 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1407 = torch.aten.item %1405 : !torch.vtensor<[],f32> -> !torch.float
%1408 = torch.aten.item %1406 : !torch.vtensor<[],si8> -> !torch.int
%1409 = torch.aten._make_per_tensor_quantized_tensor %1404, %1407, %1408 : !torch.vtensor<[240],si8>, !torch.float, !torch.int -> !torch.vtensor<[240],!torch.qint8>
%1410 = torch.aten.dequantize.self %1409 : !torch.vtensor<[240],!torch.qint8> -> !torch.vtensor<[240],f32>
%int0_345 = torch.constant.int 0
%int0_346 = torch.constant.int 0
%int1_347 = torch.constant.int 1
%int1_348 = torch.constant.int 1
%int1_349 = torch.constant.int 1
%int1_350 = torch.constant.int 1
%int0_351 = torch.constant.int 0
%1411 = torch.prim.ListConstruct %int0_345, %int0_346 : (!torch.int, !torch.int) -> !torch.list<int>
%1412 = torch.prim.ListConstruct %int1_347, %int1_348 : (!torch.int, !torch.int) -> !torch.list<int>
%1413 = torch.prim.ListConstruct %int1_349, %int1_350 : (!torch.int, !torch.int) -> !torch.list<int>
%1414 = torch.prim.ListConstruct %int0_351, %int0_351 : (!torch.int, !torch.int) -> !torch.list<int>
%false_352 = torch.constant.bool false
%int1_353 = torch.constant.int 1
%1415 = torch.aten.convolution %1386, %1398, %1410, %1413, %1411, %1412, %false_352, %1414, %int1_353 : !torch.vtensor<[1,40,28,28],f32>, !torch.vtensor<[240,40,1,1],f32>, !torch.vtensor<[240],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,240,28,28],f32>
%1416 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1417 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_354 = torch.constant.int 12
%1418 = torch.aten.item %1416 : !torch.vtensor<[],f32> -> !torch.float
%1419 = torch.aten.item %1417 : !torch.vtensor<[],si8> -> !torch.int
%1420 = torch.aten.quantize_per_tensor %1415, %1418, %1419, %int12_354 : !torch.vtensor<[1,240,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,240,28,28],!torch.qint8>
%1421 = torch.aten.int_repr %1420 : !torch.vtensor<[1,240,28,28],!torch.qint8> -> !torch.vtensor<[1,240,28,28],si8>
%1422 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1424 = torch.aten.item %1422 : !torch.vtensor<[],f32> -> !torch.float
%1425 = torch.aten.item %1423 : !torch.vtensor<[],si8> -> !torch.int
%1426 = torch.aten._make_per_tensor_quantized_tensor %1421, %1424, %1425 : !torch.vtensor<[1,240,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,240,28,28],!torch.qint8>
%1427 = torch.aten.dequantize.self %1426 : !torch.vtensor<[1,240,28,28],!torch.qint8> -> !torch.vtensor<[1,240,28,28],f32>
%1428 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_355 = torch.constant.int 1
%1429 = torch.aten.add.Tensor %1427, %1428, %int1_355 : !torch.vtensor<[1,240,28,28],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,240,28,28],f32>
%1430 = torch.aten.relu %1429 : !torch.vtensor<[1,240,28,28],f32> -> !torch.vtensor<[1,240,28,28],f32>
%1431 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%1432 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_356 = torch.constant.int 6
%none_357 = torch.constant.none
%false_358 = torch.constant.bool false
%1433 = torch.aten.to.dtype %1431, %int6_356, %false_358, %false_358, %none_357 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_359 = torch.constant.int 6
%none_360 = torch.constant.none
%false_361 = torch.constant.bool false
%1434 = torch.aten.to.dtype %1432, %int6_359, %false_361, %false_361, %none_360 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%1435 = torch.aten.clamp.Tensor %1430, %1433, %1434 : !torch.vtensor<[1,240,28,28],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,240,28,28],f32>
%1436 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%1437 = torch.aten.mul.Tensor %1435, %1436 : !torch.vtensor<[1,240,28,28],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,240,28,28],f32>
%1438 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1439 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_362 = torch.constant.int 12
%1440 = torch.aten.item %1438 : !torch.vtensor<[],f32> -> !torch.float
%1441 = torch.aten.item %1439 : !torch.vtensor<[],si8> -> !torch.int
%1442 = torch.aten.quantize_per_tensor %1437, %1440, %1441, %int12_362 : !torch.vtensor<[1,240,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,240,28,28],!torch.qint8>
%1443 = torch.aten.int_repr %1442 : !torch.vtensor<[1,240,28,28],!torch.qint8> -> !torch.vtensor<[1,240,28,28],si8>
%1444 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1445 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1446 = torch.aten.item %1444 : !torch.vtensor<[],f32> -> !torch.float
%1447 = torch.aten.item %1445 : !torch.vtensor<[],si8> -> !torch.int
%1448 = torch.aten._make_per_tensor_quantized_tensor %1443, %1446, %1447 : !torch.vtensor<[1,240,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,240,28,28],!torch.qint8>
%1449 = torch.aten.dequantize.self %1448 : !torch.vtensor<[1,240,28,28],!torch.qint8> -> !torch.vtensor<[1,240,28,28],f32>
%1450 = torch.aten.mul.Tensor %1427, %1449 : !torch.vtensor<[1,240,28,28],f32>, !torch.vtensor<[1,240,28,28],f32> -> !torch.vtensor<[1,240,28,28],f32>
%1451 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1452 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_363 = torch.constant.int 12
%1453 = torch.aten.item %1451 : !torch.vtensor<[],f32> -> !torch.float
%1454 = torch.aten.item %1452 : !torch.vtensor<[],si8> -> !torch.int
%1455 = torch.aten.quantize_per_tensor %1450, %1453, %1454, %int12_363 : !torch.vtensor<[1,240,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,240,28,28],!torch.qint8>
%1456 = torch.aten.int_repr %1455 : !torch.vtensor<[1,240,28,28],!torch.qint8> -> !torch.vtensor<[1,240,28,28],si8>
%1457 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1458 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1459 = torch.aten.item %1457 : !torch.vtensor<[],f32> -> !torch.float
%1460 = torch.aten.item %1458 : !torch.vtensor<[],si8> -> !torch.int
%1461 = torch.aten._make_per_tensor_quantized_tensor %1456, %1459, %1460 : !torch.vtensor<[1,240,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,240,28,28],!torch.qint8>
%1462 = torch.aten.dequantize.self %1461 : !torch.vtensor<[1,240,28,28],!torch.qint8> -> !torch.vtensor<[1,240,28,28],f32>
%1463 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1464 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_364 = torch.constant.int 12
%1465 = torch.aten.item %1463 : !torch.vtensor<[],f32> -> !torch.float
%1466 = torch.aten.item %1464 : !torch.vtensor<[],si8> -> !torch.int
%1467 = torch.aten.quantize_per_tensor %50, %1465, %1466, %int12_364 : !torch.vtensor<[240,1,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[240,1,3,3],!torch.qint8>
%1468 = torch.aten.int_repr %1467 : !torch.vtensor<[240,1,3,3],!torch.qint8> -> !torch.vtensor<[240,1,3,3],si8>
%1469 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1470 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1471 = torch.aten.item %1469 : !torch.vtensor<[],f32> -> !torch.float
%1472 = torch.aten.item %1470 : !torch.vtensor<[],si8> -> !torch.int
%1473 = torch.aten._make_per_tensor_quantized_tensor %1468, %1471, %1472 : !torch.vtensor<[240,1,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[240,1,3,3],!torch.qint8>
%1474 = torch.aten.dequantize.self %1473 : !torch.vtensor<[240,1,3,3],!torch.qint8> -> !torch.vtensor<[240,1,3,3],f32>
%1475 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1476 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_365 = torch.constant.int 12
%1477 = torch.aten.item %1475 : !torch.vtensor<[],f32> -> !torch.float
%1478 = torch.aten.item %1476 : !torch.vtensor<[],si8> -> !torch.int
%1479 = torch.aten.quantize_per_tensor %51, %1477, %1478, %int12_365 : !torch.vtensor<[240],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[240],!torch.qint8>
%1480 = torch.aten.int_repr %1479 : !torch.vtensor<[240],!torch.qint8> -> !torch.vtensor<[240],si8>
%1481 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1482 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1483 = torch.aten.item %1481 : !torch.vtensor<[],f32> -> !torch.float
%1484 = torch.aten.item %1482 : !torch.vtensor<[],si8> -> !torch.int
%1485 = torch.aten._make_per_tensor_quantized_tensor %1480, %1483, %1484 : !torch.vtensor<[240],si8>, !torch.float, !torch.int -> !torch.vtensor<[240],!torch.qint8>
%1486 = torch.aten.dequantize.self %1485 : !torch.vtensor<[240],!torch.qint8> -> !torch.vtensor<[240],f32>
%int1_366 = torch.constant.int 1
%int1_367 = torch.constant.int 1
%int1_368 = torch.constant.int 1
%int1_369 = torch.constant.int 1
%int2_370 = torch.constant.int 2
%int2_371 = torch.constant.int 2
%int0_372 = torch.constant.int 0
%1487 = torch.prim.ListConstruct %int1_366, %int1_367 : (!torch.int, !torch.int) -> !torch.list<int>
%1488 = torch.prim.ListConstruct %int1_368, %int1_369 : (!torch.int, !torch.int) -> !torch.list<int>
%1489 = torch.prim.ListConstruct %int2_370, %int2_371 : (!torch.int, !torch.int) -> !torch.list<int>
%1490 = torch.prim.ListConstruct %int0_372, %int0_372 : (!torch.int, !torch.int) -> !torch.list<int>
%false_373 = torch.constant.bool false
%int240 = torch.constant.int 240
%1491 = torch.aten.convolution %1462, %1474, %1486, %1489, %1487, %1488, %false_373, %1490, %int240 : !torch.vtensor<[1,240,28,28],f32>, !torch.vtensor<[240,1,3,3],f32>, !torch.vtensor<[240],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,240,14,14],f32>
%1492 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1493 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_374 = torch.constant.int 12
%1494 = torch.aten.item %1492 : !torch.vtensor<[],f32> -> !torch.float
%1495 = torch.aten.item %1493 : !torch.vtensor<[],si8> -> !torch.int
%1496 = torch.aten.quantize_per_tensor %1491, %1494, %1495, %int12_374 : !torch.vtensor<[1,240,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,240,14,14],!torch.qint8>
%1497 = torch.aten.int_repr %1496 : !torch.vtensor<[1,240,14,14],!torch.qint8> -> !torch.vtensor<[1,240,14,14],si8>
%1498 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1499 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1500 = torch.aten.item %1498 : !torch.vtensor<[],f32> -> !torch.float
%1501 = torch.aten.item %1499 : !torch.vtensor<[],si8> -> !torch.int
%1502 = torch.aten._make_per_tensor_quantized_tensor %1497, %1500, %1501 : !torch.vtensor<[1,240,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,240,14,14],!torch.qint8>
%1503 = torch.aten.dequantize.self %1502 : !torch.vtensor<[1,240,14,14],!torch.qint8> -> !torch.vtensor<[1,240,14,14],f32>
%1504 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_375 = torch.constant.int 1
%1505 = torch.aten.add.Tensor %1503, %1504, %int1_375 : !torch.vtensor<[1,240,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,240,14,14],f32>
%1506 = torch.aten.relu %1505 : !torch.vtensor<[1,240,14,14],f32> -> !torch.vtensor<[1,240,14,14],f32>
%1507 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%1508 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_376 = torch.constant.int 6
%none_377 = torch.constant.none
%false_378 = torch.constant.bool false
%1509 = torch.aten.to.dtype %1507, %int6_376, %false_378, %false_378, %none_377 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_379 = torch.constant.int 6
%none_380 = torch.constant.none
%false_381 = torch.constant.bool false
%1510 = torch.aten.to.dtype %1508, %int6_379, %false_381, %false_381, %none_380 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%1511 = torch.aten.clamp.Tensor %1506, %1509, %1510 : !torch.vtensor<[1,240,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,240,14,14],f32>
%1512 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%1513 = torch.aten.mul.Tensor %1511, %1512 : !torch.vtensor<[1,240,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,240,14,14],f32>
%1514 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1515 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_382 = torch.constant.int 12
%1516 = torch.aten.item %1514 : !torch.vtensor<[],f32> -> !torch.float
%1517 = torch.aten.item %1515 : !torch.vtensor<[],si8> -> !torch.int
%1518 = torch.aten.quantize_per_tensor %1513, %1516, %1517, %int12_382 : !torch.vtensor<[1,240,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,240,14,14],!torch.qint8>
%1519 = torch.aten.int_repr %1518 : !torch.vtensor<[1,240,14,14],!torch.qint8> -> !torch.vtensor<[1,240,14,14],si8>
%1520 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1521 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1522 = torch.aten.item %1520 : !torch.vtensor<[],f32> -> !torch.float
%1523 = torch.aten.item %1521 : !torch.vtensor<[],si8> -> !torch.int
%1524 = torch.aten._make_per_tensor_quantized_tensor %1519, %1522, %1523 : !torch.vtensor<[1,240,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,240,14,14],!torch.qint8>
%1525 = torch.aten.dequantize.self %1524 : !torch.vtensor<[1,240,14,14],!torch.qint8> -> !torch.vtensor<[1,240,14,14],f32>
%1526 = torch.aten.mul.Tensor %1503, %1525 : !torch.vtensor<[1,240,14,14],f32>, !torch.vtensor<[1,240,14,14],f32> -> !torch.vtensor<[1,240,14,14],f32>
%1527 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1528 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_383 = torch.constant.int 12
%1529 = torch.aten.item %1527 : !torch.vtensor<[],f32> -> !torch.float
%1530 = torch.aten.item %1528 : !torch.vtensor<[],si8> -> !torch.int
%1531 = torch.aten.quantize_per_tensor %1526, %1529, %1530, %int12_383 : !torch.vtensor<[1,240,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,240,14,14],!torch.qint8>
%1532 = torch.aten.int_repr %1531 : !torch.vtensor<[1,240,14,14],!torch.qint8> -> !torch.vtensor<[1,240,14,14],si8>
%1533 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1534 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1535 = torch.aten.item %1533 : !torch.vtensor<[],f32> -> !torch.float
%1536 = torch.aten.item %1534 : !torch.vtensor<[],si8> -> !torch.int
%1537 = torch.aten._make_per_tensor_quantized_tensor %1532, %1535, %1536 : !torch.vtensor<[1,240,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,240,14,14],!torch.qint8>
%1538 = torch.aten.dequantize.self %1537 : !torch.vtensor<[1,240,14,14],!torch.qint8> -> !torch.vtensor<[1,240,14,14],f32>
%1539 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1540 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_384 = torch.constant.int 12
%1541 = torch.aten.item %1539 : !torch.vtensor<[],f32> -> !torch.float
%1542 = torch.aten.item %1540 : !torch.vtensor<[],si8> -> !torch.int
%1543 = torch.aten.quantize_per_tensor %52, %1541, %1542, %int12_384 : !torch.vtensor<[80,240,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[80,240,1,1],!torch.qint8>
%1544 = torch.aten.int_repr %1543 : !torch.vtensor<[80,240,1,1],!torch.qint8> -> !torch.vtensor<[80,240,1,1],si8>
%1545 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1546 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1547 = torch.aten.item %1545 : !torch.vtensor<[],f32> -> !torch.float
%1548 = torch.aten.item %1546 : !torch.vtensor<[],si8> -> !torch.int
%1549 = torch.aten._make_per_tensor_quantized_tensor %1544, %1547, %1548 : !torch.vtensor<[80,240,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[80,240,1,1],!torch.qint8>
%1550 = torch.aten.dequantize.self %1549 : !torch.vtensor<[80,240,1,1],!torch.qint8> -> !torch.vtensor<[80,240,1,1],f32>
%1551 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1552 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_385 = torch.constant.int 12
%1553 = torch.aten.item %1551 : !torch.vtensor<[],f32> -> !torch.float
%1554 = torch.aten.item %1552 : !torch.vtensor<[],si8> -> !torch.int
%1555 = torch.aten.quantize_per_tensor %53, %1553, %1554, %int12_385 : !torch.vtensor<[80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[80],!torch.qint8>
%1556 = torch.aten.int_repr %1555 : !torch.vtensor<[80],!torch.qint8> -> !torch.vtensor<[80],si8>
%1557 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1558 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1559 = torch.aten.item %1557 : !torch.vtensor<[],f32> -> !torch.float
%1560 = torch.aten.item %1558 : !torch.vtensor<[],si8> -> !torch.int
%1561 = torch.aten._make_per_tensor_quantized_tensor %1556, %1559, %1560 : !torch.vtensor<[80],si8>, !torch.float, !torch.int -> !torch.vtensor<[80],!torch.qint8>
%1562 = torch.aten.dequantize.self %1561 : !torch.vtensor<[80],!torch.qint8> -> !torch.vtensor<[80],f32>
%int0_386 = torch.constant.int 0
%int0_387 = torch.constant.int 0
%int1_388 = torch.constant.int 1
%int1_389 = torch.constant.int 1
%int1_390 = torch.constant.int 1
%int1_391 = torch.constant.int 1
%int0_392 = torch.constant.int 0
%1563 = torch.prim.ListConstruct %int0_386, %int0_387 : (!torch.int, !torch.int) -> !torch.list<int>
%1564 = torch.prim.ListConstruct %int1_388, %int1_389 : (!torch.int, !torch.int) -> !torch.list<int>
%1565 = torch.prim.ListConstruct %int1_390, %int1_391 : (!torch.int, !torch.int) -> !torch.list<int>
%1566 = torch.prim.ListConstruct %int0_392, %int0_392 : (!torch.int, !torch.int) -> !torch.list<int>
%false_393 = torch.constant.bool false
%int1_394 = torch.constant.int 1
%1567 = torch.aten.convolution %1538, %1550, %1562, %1565, %1563, %1564, %false_393, %1566, %int1_394 : !torch.vtensor<[1,240,14,14],f32>, !torch.vtensor<[80,240,1,1],f32>, !torch.vtensor<[80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,80,14,14],f32>
%1568 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1569 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_395 = torch.constant.int 12
%1570 = torch.aten.item %1568 : !torch.vtensor<[],f32> -> !torch.float
%1571 = torch.aten.item %1569 : !torch.vtensor<[],si8> -> !torch.int
%1572 = torch.aten.quantize_per_tensor %1567, %1570, %1571, %int12_395 : !torch.vtensor<[1,80,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1573 = torch.aten.int_repr %1572 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],si8>
%1574 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1575 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1576 = torch.aten.item %1574 : !torch.vtensor<[],f32> -> !torch.float
%1577 = torch.aten.item %1575 : !torch.vtensor<[],si8> -> !torch.int
%1578 = torch.aten._make_per_tensor_quantized_tensor %1573, %1576, %1577 : !torch.vtensor<[1,80,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1579 = torch.aten.dequantize.self %1578 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],f32>
%1580 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1581 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_396 = torch.constant.int 12
%1582 = torch.aten.item %1580 : !torch.vtensor<[],f32> -> !torch.float
%1583 = torch.aten.item %1581 : !torch.vtensor<[],si8> -> !torch.int
%1584 = torch.aten.quantize_per_tensor %54, %1582, %1583, %int12_396 : !torch.vtensor<[200,80,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[200,80,1,1],!torch.qint8>
%1585 = torch.aten.int_repr %1584 : !torch.vtensor<[200,80,1,1],!torch.qint8> -> !torch.vtensor<[200,80,1,1],si8>
%1586 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1587 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1588 = torch.aten.item %1586 : !torch.vtensor<[],f32> -> !torch.float
%1589 = torch.aten.item %1587 : !torch.vtensor<[],si8> -> !torch.int
%1590 = torch.aten._make_per_tensor_quantized_tensor %1585, %1588, %1589 : !torch.vtensor<[200,80,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[200,80,1,1],!torch.qint8>
%1591 = torch.aten.dequantize.self %1590 : !torch.vtensor<[200,80,1,1],!torch.qint8> -> !torch.vtensor<[200,80,1,1],f32>
%1592 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1593 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_397 = torch.constant.int 12
%1594 = torch.aten.item %1592 : !torch.vtensor<[],f32> -> !torch.float
%1595 = torch.aten.item %1593 : !torch.vtensor<[],si8> -> !torch.int
%1596 = torch.aten.quantize_per_tensor %55, %1594, %1595, %int12_397 : !torch.vtensor<[200],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[200],!torch.qint8>
%1597 = torch.aten.int_repr %1596 : !torch.vtensor<[200],!torch.qint8> -> !torch.vtensor<[200],si8>
%1598 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1599 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1600 = torch.aten.item %1598 : !torch.vtensor<[],f32> -> !torch.float
%1601 = torch.aten.item %1599 : !torch.vtensor<[],si8> -> !torch.int
%1602 = torch.aten._make_per_tensor_quantized_tensor %1597, %1600, %1601 : !torch.vtensor<[200],si8>, !torch.float, !torch.int -> !torch.vtensor<[200],!torch.qint8>
%1603 = torch.aten.dequantize.self %1602 : !torch.vtensor<[200],!torch.qint8> -> !torch.vtensor<[200],f32>
%int0_398 = torch.constant.int 0
%int0_399 = torch.constant.int 0
%int1_400 = torch.constant.int 1
%int1_401 = torch.constant.int 1
%int1_402 = torch.constant.int 1
%int1_403 = torch.constant.int 1
%int0_404 = torch.constant.int 0
%1604 = torch.prim.ListConstruct %int0_398, %int0_399 : (!torch.int, !torch.int) -> !torch.list<int>
%1605 = torch.prim.ListConstruct %int1_400, %int1_401 : (!torch.int, !torch.int) -> !torch.list<int>
%1606 = torch.prim.ListConstruct %int1_402, %int1_403 : (!torch.int, !torch.int) -> !torch.list<int>
%1607 = torch.prim.ListConstruct %int0_404, %int0_404 : (!torch.int, !torch.int) -> !torch.list<int>
%false_405 = torch.constant.bool false
%int1_406 = torch.constant.int 1
%1608 = torch.aten.convolution %1579, %1591, %1603, %1606, %1604, %1605, %false_405, %1607, %int1_406 : !torch.vtensor<[1,80,14,14],f32>, !torch.vtensor<[200,80,1,1],f32>, !torch.vtensor<[200],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,200,14,14],f32>
%1609 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1610 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_407 = torch.constant.int 12
%1611 = torch.aten.item %1609 : !torch.vtensor<[],f32> -> !torch.float
%1612 = torch.aten.item %1610 : !torch.vtensor<[],si8> -> !torch.int
%1613 = torch.aten.quantize_per_tensor %1608, %1611, %1612, %int12_407 : !torch.vtensor<[1,200,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1614 = torch.aten.int_repr %1613 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],si8>
%1615 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1616 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1617 = torch.aten.item %1615 : !torch.vtensor<[],f32> -> !torch.float
%1618 = torch.aten.item %1616 : !torch.vtensor<[],si8> -> !torch.int
%1619 = torch.aten._make_per_tensor_quantized_tensor %1614, %1617, %1618 : !torch.vtensor<[1,200,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1620 = torch.aten.dequantize.self %1619 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],f32>
%1621 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_408 = torch.constant.int 1
%1622 = torch.aten.add.Tensor %1620, %1621, %int1_408 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,200,14,14],f32>
%1623 = torch.aten.relu %1622 : !torch.vtensor<[1,200,14,14],f32> -> !torch.vtensor<[1,200,14,14],f32>
%1624 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%1625 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_409 = torch.constant.int 6
%none_410 = torch.constant.none
%false_411 = torch.constant.bool false
%1626 = torch.aten.to.dtype %1624, %int6_409, %false_411, %false_411, %none_410 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_412 = torch.constant.int 6
%none_413 = torch.constant.none
%false_414 = torch.constant.bool false
%1627 = torch.aten.to.dtype %1625, %int6_412, %false_414, %false_414, %none_413 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%1628 = torch.aten.clamp.Tensor %1623, %1626, %1627 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,200,14,14],f32>
%1629 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%1630 = torch.aten.mul.Tensor %1628, %1629 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,200,14,14],f32>
%1631 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1632 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_415 = torch.constant.int 12
%1633 = torch.aten.item %1631 : !torch.vtensor<[],f32> -> !torch.float
%1634 = torch.aten.item %1632 : !torch.vtensor<[],si8> -> !torch.int
%1635 = torch.aten.quantize_per_tensor %1630, %1633, %1634, %int12_415 : !torch.vtensor<[1,200,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1636 = torch.aten.int_repr %1635 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],si8>
%1637 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1638 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1639 = torch.aten.item %1637 : !torch.vtensor<[],f32> -> !torch.float
%1640 = torch.aten.item %1638 : !torch.vtensor<[],si8> -> !torch.int
%1641 = torch.aten._make_per_tensor_quantized_tensor %1636, %1639, %1640 : !torch.vtensor<[1,200,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1642 = torch.aten.dequantize.self %1641 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],f32>
%1643 = torch.aten.mul.Tensor %1620, %1642 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[1,200,14,14],f32> -> !torch.vtensor<[1,200,14,14],f32>
%1644 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1645 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_416 = torch.constant.int 12
%1646 = torch.aten.item %1644 : !torch.vtensor<[],f32> -> !torch.float
%1647 = torch.aten.item %1645 : !torch.vtensor<[],si8> -> !torch.int
%1648 = torch.aten.quantize_per_tensor %1643, %1646, %1647, %int12_416 : !torch.vtensor<[1,200,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1649 = torch.aten.int_repr %1648 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],si8>
%1650 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1651 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1652 = torch.aten.item %1650 : !torch.vtensor<[],f32> -> !torch.float
%1653 = torch.aten.item %1651 : !torch.vtensor<[],si8> -> !torch.int
%1654 = torch.aten._make_per_tensor_quantized_tensor %1649, %1652, %1653 : !torch.vtensor<[1,200,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1655 = torch.aten.dequantize.self %1654 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],f32>
%1656 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1657 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_417 = torch.constant.int 12
%1658 = torch.aten.item %1656 : !torch.vtensor<[],f32> -> !torch.float
%1659 = torch.aten.item %1657 : !torch.vtensor<[],si8> -> !torch.int
%1660 = torch.aten.quantize_per_tensor %56, %1658, %1659, %int12_417 : !torch.vtensor<[200,1,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[200,1,3,3],!torch.qint8>
%1661 = torch.aten.int_repr %1660 : !torch.vtensor<[200,1,3,3],!torch.qint8> -> !torch.vtensor<[200,1,3,3],si8>
%1662 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1663 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1664 = torch.aten.item %1662 : !torch.vtensor<[],f32> -> !torch.float
%1665 = torch.aten.item %1663 : !torch.vtensor<[],si8> -> !torch.int
%1666 = torch.aten._make_per_tensor_quantized_tensor %1661, %1664, %1665 : !torch.vtensor<[200,1,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[200,1,3,3],!torch.qint8>
%1667 = torch.aten.dequantize.self %1666 : !torch.vtensor<[200,1,3,3],!torch.qint8> -> !torch.vtensor<[200,1,3,3],f32>
%1668 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1669 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_418 = torch.constant.int 12
%1670 = torch.aten.item %1668 : !torch.vtensor<[],f32> -> !torch.float
%1671 = torch.aten.item %1669 : !torch.vtensor<[],si8> -> !torch.int
%1672 = torch.aten.quantize_per_tensor %57, %1670, %1671, %int12_418 : !torch.vtensor<[200],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[200],!torch.qint8>
%1673 = torch.aten.int_repr %1672 : !torch.vtensor<[200],!torch.qint8> -> !torch.vtensor<[200],si8>
%1674 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1675 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1676 = torch.aten.item %1674 : !torch.vtensor<[],f32> -> !torch.float
%1677 = torch.aten.item %1675 : !torch.vtensor<[],si8> -> !torch.int
%1678 = torch.aten._make_per_tensor_quantized_tensor %1673, %1676, %1677 : !torch.vtensor<[200],si8>, !torch.float, !torch.int -> !torch.vtensor<[200],!torch.qint8>
%1679 = torch.aten.dequantize.self %1678 : !torch.vtensor<[200],!torch.qint8> -> !torch.vtensor<[200],f32>
%int1_419 = torch.constant.int 1
%int1_420 = torch.constant.int 1
%int1_421 = torch.constant.int 1
%int1_422 = torch.constant.int 1
%int1_423 = torch.constant.int 1
%int1_424 = torch.constant.int 1
%int0_425 = torch.constant.int 0
%1680 = torch.prim.ListConstruct %int1_419, %int1_420 : (!torch.int, !torch.int) -> !torch.list<int>
%1681 = torch.prim.ListConstruct %int1_421, %int1_422 : (!torch.int, !torch.int) -> !torch.list<int>
%1682 = torch.prim.ListConstruct %int1_423, %int1_424 : (!torch.int, !torch.int) -> !torch.list<int>
%1683 = torch.prim.ListConstruct %int0_425, %int0_425 : (!torch.int, !torch.int) -> !torch.list<int>
%false_426 = torch.constant.bool false
%int200 = torch.constant.int 200
%1684 = torch.aten.convolution %1655, %1667, %1679, %1682, %1680, %1681, %false_426, %1683, %int200 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[200,1,3,3],f32>, !torch.vtensor<[200],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,200,14,14],f32>
%1685 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1686 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_427 = torch.constant.int 12
%1687 = torch.aten.item %1685 : !torch.vtensor<[],f32> -> !torch.float
%1688 = torch.aten.item %1686 : !torch.vtensor<[],si8> -> !torch.int
%1689 = torch.aten.quantize_per_tensor %1684, %1687, %1688, %int12_427 : !torch.vtensor<[1,200,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1690 = torch.aten.int_repr %1689 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],si8>
%1691 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1692 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1693 = torch.aten.item %1691 : !torch.vtensor<[],f32> -> !torch.float
%1694 = torch.aten.item %1692 : !torch.vtensor<[],si8> -> !torch.int
%1695 = torch.aten._make_per_tensor_quantized_tensor %1690, %1693, %1694 : !torch.vtensor<[1,200,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1696 = torch.aten.dequantize.self %1695 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],f32>
%1697 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_428 = torch.constant.int 1
%1698 = torch.aten.add.Tensor %1696, %1697, %int1_428 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,200,14,14],f32>
%1699 = torch.aten.relu %1698 : !torch.vtensor<[1,200,14,14],f32> -> !torch.vtensor<[1,200,14,14],f32>
%1700 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%1701 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_429 = torch.constant.int 6
%none_430 = torch.constant.none
%false_431 = torch.constant.bool false
%1702 = torch.aten.to.dtype %1700, %int6_429, %false_431, %false_431, %none_430 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_432 = torch.constant.int 6
%none_433 = torch.constant.none
%false_434 = torch.constant.bool false
%1703 = torch.aten.to.dtype %1701, %int6_432, %false_434, %false_434, %none_433 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%1704 = torch.aten.clamp.Tensor %1699, %1702, %1703 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,200,14,14],f32>
%1705 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%1706 = torch.aten.mul.Tensor %1704, %1705 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,200,14,14],f32>
%1707 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1708 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_435 = torch.constant.int 12
%1709 = torch.aten.item %1707 : !torch.vtensor<[],f32> -> !torch.float
%1710 = torch.aten.item %1708 : !torch.vtensor<[],si8> -> !torch.int
%1711 = torch.aten.quantize_per_tensor %1706, %1709, %1710, %int12_435 : !torch.vtensor<[1,200,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1712 = torch.aten.int_repr %1711 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],si8>
%1713 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1714 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1715 = torch.aten.item %1713 : !torch.vtensor<[],f32> -> !torch.float
%1716 = torch.aten.item %1714 : !torch.vtensor<[],si8> -> !torch.int
%1717 = torch.aten._make_per_tensor_quantized_tensor %1712, %1715, %1716 : !torch.vtensor<[1,200,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1718 = torch.aten.dequantize.self %1717 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],f32>
%1719 = torch.aten.mul.Tensor %1696, %1718 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[1,200,14,14],f32> -> !torch.vtensor<[1,200,14,14],f32>
%1720 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1721 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_436 = torch.constant.int 12
%1722 = torch.aten.item %1720 : !torch.vtensor<[],f32> -> !torch.float
%1723 = torch.aten.item %1721 : !torch.vtensor<[],si8> -> !torch.int
%1724 = torch.aten.quantize_per_tensor %1719, %1722, %1723, %int12_436 : !torch.vtensor<[1,200,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1725 = torch.aten.int_repr %1724 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],si8>
%1726 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1727 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1728 = torch.aten.item %1726 : !torch.vtensor<[],f32> -> !torch.float
%1729 = torch.aten.item %1727 : !torch.vtensor<[],si8> -> !torch.int
%1730 = torch.aten._make_per_tensor_quantized_tensor %1725, %1728, %1729 : !torch.vtensor<[1,200,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,200,14,14],!torch.qint8>
%1731 = torch.aten.dequantize.self %1730 : !torch.vtensor<[1,200,14,14],!torch.qint8> -> !torch.vtensor<[1,200,14,14],f32>
%1732 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1733 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_437 = torch.constant.int 12
%1734 = torch.aten.item %1732 : !torch.vtensor<[],f32> -> !torch.float
%1735 = torch.aten.item %1733 : !torch.vtensor<[],si8> -> !torch.int
%1736 = torch.aten.quantize_per_tensor %58, %1734, %1735, %int12_437 : !torch.vtensor<[80,200,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[80,200,1,1],!torch.qint8>
%1737 = torch.aten.int_repr %1736 : !torch.vtensor<[80,200,1,1],!torch.qint8> -> !torch.vtensor<[80,200,1,1],si8>
%1738 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1739 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1740 = torch.aten.item %1738 : !torch.vtensor<[],f32> -> !torch.float
%1741 = torch.aten.item %1739 : !torch.vtensor<[],si8> -> !torch.int
%1742 = torch.aten._make_per_tensor_quantized_tensor %1737, %1740, %1741 : !torch.vtensor<[80,200,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[80,200,1,1],!torch.qint8>
%1743 = torch.aten.dequantize.self %1742 : !torch.vtensor<[80,200,1,1],!torch.qint8> -> !torch.vtensor<[80,200,1,1],f32>
%1744 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1745 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_438 = torch.constant.int 12
%1746 = torch.aten.item %1744 : !torch.vtensor<[],f32> -> !torch.float
%1747 = torch.aten.item %1745 : !torch.vtensor<[],si8> -> !torch.int
%1748 = torch.aten.quantize_per_tensor %59, %1746, %1747, %int12_438 : !torch.vtensor<[80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[80],!torch.qint8>
%1749 = torch.aten.int_repr %1748 : !torch.vtensor<[80],!torch.qint8> -> !torch.vtensor<[80],si8>
%1750 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1751 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1752 = torch.aten.item %1750 : !torch.vtensor<[],f32> -> !torch.float
%1753 = torch.aten.item %1751 : !torch.vtensor<[],si8> -> !torch.int
%1754 = torch.aten._make_per_tensor_quantized_tensor %1749, %1752, %1753 : !torch.vtensor<[80],si8>, !torch.float, !torch.int -> !torch.vtensor<[80],!torch.qint8>
%1755 = torch.aten.dequantize.self %1754 : !torch.vtensor<[80],!torch.qint8> -> !torch.vtensor<[80],f32>
%int0_439 = torch.constant.int 0
%int0_440 = torch.constant.int 0
%int1_441 = torch.constant.int 1
%int1_442 = torch.constant.int 1
%int1_443 = torch.constant.int 1
%int1_444 = torch.constant.int 1
%int0_445 = torch.constant.int 0
%1756 = torch.prim.ListConstruct %int0_439, %int0_440 : (!torch.int, !torch.int) -> !torch.list<int>
%1757 = torch.prim.ListConstruct %int1_441, %int1_442 : (!torch.int, !torch.int) -> !torch.list<int>
%1758 = torch.prim.ListConstruct %int1_443, %int1_444 : (!torch.int, !torch.int) -> !torch.list<int>
%1759 = torch.prim.ListConstruct %int0_445, %int0_445 : (!torch.int, !torch.int) -> !torch.list<int>
%false_446 = torch.constant.bool false
%int1_447 = torch.constant.int 1
%1760 = torch.aten.convolution %1731, %1743, %1755, %1758, %1756, %1757, %false_446, %1759, %int1_447 : !torch.vtensor<[1,200,14,14],f32>, !torch.vtensor<[80,200,1,1],f32>, !torch.vtensor<[80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,80,14,14],f32>
%1761 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1762 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_448 = torch.constant.int 12
%1763 = torch.aten.item %1761 : !torch.vtensor<[],f32> -> !torch.float
%1764 = torch.aten.item %1762 : !torch.vtensor<[],si8> -> !torch.int
%1765 = torch.aten.quantize_per_tensor %1760, %1763, %1764, %int12_448 : !torch.vtensor<[1,80,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1766 = torch.aten.int_repr %1765 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],si8>
%1767 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1768 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1769 = torch.aten.item %1767 : !torch.vtensor<[],f32> -> !torch.float
%1770 = torch.aten.item %1768 : !torch.vtensor<[],si8> -> !torch.int
%1771 = torch.aten._make_per_tensor_quantized_tensor %1766, %1769, %1770 : !torch.vtensor<[1,80,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1772 = torch.aten.dequantize.self %1771 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],f32>
%int1_449 = torch.constant.int 1
%1773 = torch.aten.add.Tensor %1772, %1579, %int1_449 : !torch.vtensor<[1,80,14,14],f32>, !torch.vtensor<[1,80,14,14],f32>, !torch.int -> !torch.vtensor<[1,80,14,14],f32>
%1774 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1775 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_450 = torch.constant.int 12
%1776 = torch.aten.item %1774 : !torch.vtensor<[],f32> -> !torch.float
%1777 = torch.aten.item %1775 : !torch.vtensor<[],si8> -> !torch.int
%1778 = torch.aten.quantize_per_tensor %1773, %1776, %1777, %int12_450 : !torch.vtensor<[1,80,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1779 = torch.aten.int_repr %1778 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],si8>
%1780 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1781 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1782 = torch.aten.item %1780 : !torch.vtensor<[],f32> -> !torch.float
%1783 = torch.aten.item %1781 : !torch.vtensor<[],si8> -> !torch.int
%1784 = torch.aten._make_per_tensor_quantized_tensor %1779, %1782, %1783 : !torch.vtensor<[1,80,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1785 = torch.aten.dequantize.self %1784 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],f32>
%1786 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1787 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_451 = torch.constant.int 12
%1788 = torch.aten.item %1786 : !torch.vtensor<[],f32> -> !torch.float
%1789 = torch.aten.item %1787 : !torch.vtensor<[],si8> -> !torch.int
%1790 = torch.aten.quantize_per_tensor %60, %1788, %1789, %int12_451 : !torch.vtensor<[184,80,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[184,80,1,1],!torch.qint8>
%1791 = torch.aten.int_repr %1790 : !torch.vtensor<[184,80,1,1],!torch.qint8> -> !torch.vtensor<[184,80,1,1],si8>
%1792 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1793 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1794 = torch.aten.item %1792 : !torch.vtensor<[],f32> -> !torch.float
%1795 = torch.aten.item %1793 : !torch.vtensor<[],si8> -> !torch.int
%1796 = torch.aten._make_per_tensor_quantized_tensor %1791, %1794, %1795 : !torch.vtensor<[184,80,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[184,80,1,1],!torch.qint8>
%1797 = torch.aten.dequantize.self %1796 : !torch.vtensor<[184,80,1,1],!torch.qint8> -> !torch.vtensor<[184,80,1,1],f32>
%1798 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1799 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_452 = torch.constant.int 12
%1800 = torch.aten.item %1798 : !torch.vtensor<[],f32> -> !torch.float
%1801 = torch.aten.item %1799 : !torch.vtensor<[],si8> -> !torch.int
%1802 = torch.aten.quantize_per_tensor %61, %1800, %1801, %int12_452 : !torch.vtensor<[184],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[184],!torch.qint8>
%1803 = torch.aten.int_repr %1802 : !torch.vtensor<[184],!torch.qint8> -> !torch.vtensor<[184],si8>
%1804 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1805 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1806 = torch.aten.item %1804 : !torch.vtensor<[],f32> -> !torch.float
%1807 = torch.aten.item %1805 : !torch.vtensor<[],si8> -> !torch.int
%1808 = torch.aten._make_per_tensor_quantized_tensor %1803, %1806, %1807 : !torch.vtensor<[184],si8>, !torch.float, !torch.int -> !torch.vtensor<[184],!torch.qint8>
%1809 = torch.aten.dequantize.self %1808 : !torch.vtensor<[184],!torch.qint8> -> !torch.vtensor<[184],f32>
%int0_453 = torch.constant.int 0
%int0_454 = torch.constant.int 0
%int1_455 = torch.constant.int 1
%int1_456 = torch.constant.int 1
%int1_457 = torch.constant.int 1
%int1_458 = torch.constant.int 1
%int0_459 = torch.constant.int 0
%1810 = torch.prim.ListConstruct %int0_453, %int0_454 : (!torch.int, !torch.int) -> !torch.list<int>
%1811 = torch.prim.ListConstruct %int1_455, %int1_456 : (!torch.int, !torch.int) -> !torch.list<int>
%1812 = torch.prim.ListConstruct %int1_457, %int1_458 : (!torch.int, !torch.int) -> !torch.list<int>
%1813 = torch.prim.ListConstruct %int0_459, %int0_459 : (!torch.int, !torch.int) -> !torch.list<int>
%false_460 = torch.constant.bool false
%int1_461 = torch.constant.int 1
%1814 = torch.aten.convolution %1785, %1797, %1809, %1812, %1810, %1811, %false_460, %1813, %int1_461 : !torch.vtensor<[1,80,14,14],f32>, !torch.vtensor<[184,80,1,1],f32>, !torch.vtensor<[184],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,184,14,14],f32>
%1815 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1816 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_462 = torch.constant.int 12
%1817 = torch.aten.item %1815 : !torch.vtensor<[],f32> -> !torch.float
%1818 = torch.aten.item %1816 : !torch.vtensor<[],si8> -> !torch.int
%1819 = torch.aten.quantize_per_tensor %1814, %1817, %1818, %int12_462 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1820 = torch.aten.int_repr %1819 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%1821 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1822 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1823 = torch.aten.item %1821 : !torch.vtensor<[],f32> -> !torch.float
%1824 = torch.aten.item %1822 : !torch.vtensor<[],si8> -> !torch.int
%1825 = torch.aten._make_per_tensor_quantized_tensor %1820, %1823, %1824 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1826 = torch.aten.dequantize.self %1825 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%1827 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_463 = torch.constant.int 1
%1828 = torch.aten.add.Tensor %1826, %1827, %int1_463 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,184,14,14],f32>
%1829 = torch.aten.relu %1828 : !torch.vtensor<[1,184,14,14],f32> -> !torch.vtensor<[1,184,14,14],f32>
%1830 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%1831 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_464 = torch.constant.int 6
%none_465 = torch.constant.none
%false_466 = torch.constant.bool false
%1832 = torch.aten.to.dtype %1830, %int6_464, %false_466, %false_466, %none_465 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_467 = torch.constant.int 6
%none_468 = torch.constant.none
%false_469 = torch.constant.bool false
%1833 = torch.aten.to.dtype %1831, %int6_467, %false_469, %false_469, %none_468 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%1834 = torch.aten.clamp.Tensor %1829, %1832, %1833 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,184,14,14],f32>
%1835 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%1836 = torch.aten.mul.Tensor %1834, %1835 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,184,14,14],f32>
%1837 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1838 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_470 = torch.constant.int 12
%1839 = torch.aten.item %1837 : !torch.vtensor<[],f32> -> !torch.float
%1840 = torch.aten.item %1838 : !torch.vtensor<[],si8> -> !torch.int
%1841 = torch.aten.quantize_per_tensor %1836, %1839, %1840, %int12_470 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1842 = torch.aten.int_repr %1841 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%1843 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1844 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1845 = torch.aten.item %1843 : !torch.vtensor<[],f32> -> !torch.float
%1846 = torch.aten.item %1844 : !torch.vtensor<[],si8> -> !torch.int
%1847 = torch.aten._make_per_tensor_quantized_tensor %1842, %1845, %1846 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1848 = torch.aten.dequantize.self %1847 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%1849 = torch.aten.mul.Tensor %1826, %1848 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[1,184,14,14],f32> -> !torch.vtensor<[1,184,14,14],f32>
%1850 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1851 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_471 = torch.constant.int 12
%1852 = torch.aten.item %1850 : !torch.vtensor<[],f32> -> !torch.float
%1853 = torch.aten.item %1851 : !torch.vtensor<[],si8> -> !torch.int
%1854 = torch.aten.quantize_per_tensor %1849, %1852, %1853, %int12_471 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1855 = torch.aten.int_repr %1854 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%1856 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1857 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1858 = torch.aten.item %1856 : !torch.vtensor<[],f32> -> !torch.float
%1859 = torch.aten.item %1857 : !torch.vtensor<[],si8> -> !torch.int
%1860 = torch.aten._make_per_tensor_quantized_tensor %1855, %1858, %1859 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1861 = torch.aten.dequantize.self %1860 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%1862 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1863 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_472 = torch.constant.int 12
%1864 = torch.aten.item %1862 : !torch.vtensor<[],f32> -> !torch.float
%1865 = torch.aten.item %1863 : !torch.vtensor<[],si8> -> !torch.int
%1866 = torch.aten.quantize_per_tensor %62, %1864, %1865, %int12_472 : !torch.vtensor<[184,1,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[184,1,3,3],!torch.qint8>
%1867 = torch.aten.int_repr %1866 : !torch.vtensor<[184,1,3,3],!torch.qint8> -> !torch.vtensor<[184,1,3,3],si8>
%1868 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1869 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1870 = torch.aten.item %1868 : !torch.vtensor<[],f32> -> !torch.float
%1871 = torch.aten.item %1869 : !torch.vtensor<[],si8> -> !torch.int
%1872 = torch.aten._make_per_tensor_quantized_tensor %1867, %1870, %1871 : !torch.vtensor<[184,1,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[184,1,3,3],!torch.qint8>
%1873 = torch.aten.dequantize.self %1872 : !torch.vtensor<[184,1,3,3],!torch.qint8> -> !torch.vtensor<[184,1,3,3],f32>
%1874 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1875 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_473 = torch.constant.int 12
%1876 = torch.aten.item %1874 : !torch.vtensor<[],f32> -> !torch.float
%1877 = torch.aten.item %1875 : !torch.vtensor<[],si8> -> !torch.int
%1878 = torch.aten.quantize_per_tensor %63, %1876, %1877, %int12_473 : !torch.vtensor<[184],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[184],!torch.qint8>
%1879 = torch.aten.int_repr %1878 : !torch.vtensor<[184],!torch.qint8> -> !torch.vtensor<[184],si8>
%1880 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1881 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1882 = torch.aten.item %1880 : !torch.vtensor<[],f32> -> !torch.float
%1883 = torch.aten.item %1881 : !torch.vtensor<[],si8> -> !torch.int
%1884 = torch.aten._make_per_tensor_quantized_tensor %1879, %1882, %1883 : !torch.vtensor<[184],si8>, !torch.float, !torch.int -> !torch.vtensor<[184],!torch.qint8>
%1885 = torch.aten.dequantize.self %1884 : !torch.vtensor<[184],!torch.qint8> -> !torch.vtensor<[184],f32>
%int1_474 = torch.constant.int 1
%int1_475 = torch.constant.int 1
%int1_476 = torch.constant.int 1
%int1_477 = torch.constant.int 1
%int1_478 = torch.constant.int 1
%int1_479 = torch.constant.int 1
%int0_480 = torch.constant.int 0
%1886 = torch.prim.ListConstruct %int1_474, %int1_475 : (!torch.int, !torch.int) -> !torch.list<int>
%1887 = torch.prim.ListConstruct %int1_476, %int1_477 : (!torch.int, !torch.int) -> !torch.list<int>
%1888 = torch.prim.ListConstruct %int1_478, %int1_479 : (!torch.int, !torch.int) -> !torch.list<int>
%1889 = torch.prim.ListConstruct %int0_480, %int0_480 : (!torch.int, !torch.int) -> !torch.list<int>
%false_481 = torch.constant.bool false
%int184 = torch.constant.int 184
%1890 = torch.aten.convolution %1861, %1873, %1885, %1888, %1886, %1887, %false_481, %1889, %int184 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[184,1,3,3],f32>, !torch.vtensor<[184],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,184,14,14],f32>
%1891 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1892 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_482 = torch.constant.int 12
%1893 = torch.aten.item %1891 : !torch.vtensor<[],f32> -> !torch.float
%1894 = torch.aten.item %1892 : !torch.vtensor<[],si8> -> !torch.int
%1895 = torch.aten.quantize_per_tensor %1890, %1893, %1894, %int12_482 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1896 = torch.aten.int_repr %1895 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%1897 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1898 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1899 = torch.aten.item %1897 : !torch.vtensor<[],f32> -> !torch.float
%1900 = torch.aten.item %1898 : !torch.vtensor<[],si8> -> !torch.int
%1901 = torch.aten._make_per_tensor_quantized_tensor %1896, %1899, %1900 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1902 = torch.aten.dequantize.self %1901 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%1903 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_483 = torch.constant.int 1
%1904 = torch.aten.add.Tensor %1902, %1903, %int1_483 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,184,14,14],f32>
%1905 = torch.aten.relu %1904 : !torch.vtensor<[1,184,14,14],f32> -> !torch.vtensor<[1,184,14,14],f32>
%1906 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%1907 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_484 = torch.constant.int 6
%none_485 = torch.constant.none
%false_486 = torch.constant.bool false
%1908 = torch.aten.to.dtype %1906, %int6_484, %false_486, %false_486, %none_485 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_487 = torch.constant.int 6
%none_488 = torch.constant.none
%false_489 = torch.constant.bool false
%1909 = torch.aten.to.dtype %1907, %int6_487, %false_489, %false_489, %none_488 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%1910 = torch.aten.clamp.Tensor %1905, %1908, %1909 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,184,14,14],f32>
%1911 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%1912 = torch.aten.mul.Tensor %1910, %1911 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,184,14,14],f32>
%1913 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1914 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_490 = torch.constant.int 12
%1915 = torch.aten.item %1913 : !torch.vtensor<[],f32> -> !torch.float
%1916 = torch.aten.item %1914 : !torch.vtensor<[],si8> -> !torch.int
%1917 = torch.aten.quantize_per_tensor %1912, %1915, %1916, %int12_490 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1918 = torch.aten.int_repr %1917 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%1919 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1920 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1921 = torch.aten.item %1919 : !torch.vtensor<[],f32> -> !torch.float
%1922 = torch.aten.item %1920 : !torch.vtensor<[],si8> -> !torch.int
%1923 = torch.aten._make_per_tensor_quantized_tensor %1918, %1921, %1922 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1924 = torch.aten.dequantize.self %1923 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%1925 = torch.aten.mul.Tensor %1902, %1924 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[1,184,14,14],f32> -> !torch.vtensor<[1,184,14,14],f32>
%1926 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1927 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_491 = torch.constant.int 12
%1928 = torch.aten.item %1926 : !torch.vtensor<[],f32> -> !torch.float
%1929 = torch.aten.item %1927 : !torch.vtensor<[],si8> -> !torch.int
%1930 = torch.aten.quantize_per_tensor %1925, %1928, %1929, %int12_491 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1931 = torch.aten.int_repr %1930 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%1932 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1933 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1934 = torch.aten.item %1932 : !torch.vtensor<[],f32> -> !torch.float
%1935 = torch.aten.item %1933 : !torch.vtensor<[],si8> -> !torch.int
%1936 = torch.aten._make_per_tensor_quantized_tensor %1931, %1934, %1935 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%1937 = torch.aten.dequantize.self %1936 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%1938 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1939 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_492 = torch.constant.int 12
%1940 = torch.aten.item %1938 : !torch.vtensor<[],f32> -> !torch.float
%1941 = torch.aten.item %1939 : !torch.vtensor<[],si8> -> !torch.int
%1942 = torch.aten.quantize_per_tensor %64, %1940, %1941, %int12_492 : !torch.vtensor<[80,184,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[80,184,1,1],!torch.qint8>
%1943 = torch.aten.int_repr %1942 : !torch.vtensor<[80,184,1,1],!torch.qint8> -> !torch.vtensor<[80,184,1,1],si8>
%1944 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1945 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1946 = torch.aten.item %1944 : !torch.vtensor<[],f32> -> !torch.float
%1947 = torch.aten.item %1945 : !torch.vtensor<[],si8> -> !torch.int
%1948 = torch.aten._make_per_tensor_quantized_tensor %1943, %1946, %1947 : !torch.vtensor<[80,184,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[80,184,1,1],!torch.qint8>
%1949 = torch.aten.dequantize.self %1948 : !torch.vtensor<[80,184,1,1],!torch.qint8> -> !torch.vtensor<[80,184,1,1],f32>
%1950 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1951 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_493 = torch.constant.int 12
%1952 = torch.aten.item %1950 : !torch.vtensor<[],f32> -> !torch.float
%1953 = torch.aten.item %1951 : !torch.vtensor<[],si8> -> !torch.int
%1954 = torch.aten.quantize_per_tensor %65, %1952, %1953, %int12_493 : !torch.vtensor<[80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[80],!torch.qint8>
%1955 = torch.aten.int_repr %1954 : !torch.vtensor<[80],!torch.qint8> -> !torch.vtensor<[80],si8>
%1956 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%1957 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1958 = torch.aten.item %1956 : !torch.vtensor<[],f32> -> !torch.float
%1959 = torch.aten.item %1957 : !torch.vtensor<[],si8> -> !torch.int
%1960 = torch.aten._make_per_tensor_quantized_tensor %1955, %1958, %1959 : !torch.vtensor<[80],si8>, !torch.float, !torch.int -> !torch.vtensor<[80],!torch.qint8>
%1961 = torch.aten.dequantize.self %1960 : !torch.vtensor<[80],!torch.qint8> -> !torch.vtensor<[80],f32>
%int0_494 = torch.constant.int 0
%int0_495 = torch.constant.int 0
%int1_496 = torch.constant.int 1
%int1_497 = torch.constant.int 1
%int1_498 = torch.constant.int 1
%int1_499 = torch.constant.int 1
%int0_500 = torch.constant.int 0
%1962 = torch.prim.ListConstruct %int0_494, %int0_495 : (!torch.int, !torch.int) -> !torch.list<int>
%1963 = torch.prim.ListConstruct %int1_496, %int1_497 : (!torch.int, !torch.int) -> !torch.list<int>
%1964 = torch.prim.ListConstruct %int1_498, %int1_499 : (!torch.int, !torch.int) -> !torch.list<int>
%1965 = torch.prim.ListConstruct %int0_500, %int0_500 : (!torch.int, !torch.int) -> !torch.list<int>
%false_501 = torch.constant.bool false
%int1_502 = torch.constant.int 1
%1966 = torch.aten.convolution %1937, %1949, %1961, %1964, %1962, %1963, %false_501, %1965, %int1_502 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[80,184,1,1],f32>, !torch.vtensor<[80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,80,14,14],f32>
%1967 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1968 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_503 = torch.constant.int 12
%1969 = torch.aten.item %1967 : !torch.vtensor<[],f32> -> !torch.float
%1970 = torch.aten.item %1968 : !torch.vtensor<[],si8> -> !torch.int
%1971 = torch.aten.quantize_per_tensor %1966, %1969, %1970, %int12_503 : !torch.vtensor<[1,80,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1972 = torch.aten.int_repr %1971 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],si8>
%1973 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1974 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1975 = torch.aten.item %1973 : !torch.vtensor<[],f32> -> !torch.float
%1976 = torch.aten.item %1974 : !torch.vtensor<[],si8> -> !torch.int
%1977 = torch.aten._make_per_tensor_quantized_tensor %1972, %1975, %1976 : !torch.vtensor<[1,80,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1978 = torch.aten.dequantize.self %1977 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],f32>
%int1_504 = torch.constant.int 1
%1979 = torch.aten.add.Tensor %1978, %1785, %int1_504 : !torch.vtensor<[1,80,14,14],f32>, !torch.vtensor<[1,80,14,14],f32>, !torch.int -> !torch.vtensor<[1,80,14,14],f32>
%1980 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1981 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_505 = torch.constant.int 12
%1982 = torch.aten.item %1980 : !torch.vtensor<[],f32> -> !torch.float
%1983 = torch.aten.item %1981 : !torch.vtensor<[],si8> -> !torch.int
%1984 = torch.aten.quantize_per_tensor %1979, %1982, %1983, %int12_505 : !torch.vtensor<[1,80,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1985 = torch.aten.int_repr %1984 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],si8>
%1986 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%1987 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%1988 = torch.aten.item %1986 : !torch.vtensor<[],f32> -> !torch.float
%1989 = torch.aten.item %1987 : !torch.vtensor<[],si8> -> !torch.int
%1990 = torch.aten._make_per_tensor_quantized_tensor %1985, %1988, %1989 : !torch.vtensor<[1,80,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%1991 = torch.aten.dequantize.self %1990 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],f32>
%1992 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1993 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_506 = torch.constant.int 12
%1994 = torch.aten.item %1992 : !torch.vtensor<[],f32> -> !torch.float
%1995 = torch.aten.item %1993 : !torch.vtensor<[],si8> -> !torch.int
%1996 = torch.aten.quantize_per_tensor %66, %1994, %1995, %int12_506 : !torch.vtensor<[184,80,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[184,80,1,1],!torch.qint8>
%1997 = torch.aten.int_repr %1996 : !torch.vtensor<[184,80,1,1],!torch.qint8> -> !torch.vtensor<[184,80,1,1],si8>
%1998 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%1999 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2000 = torch.aten.item %1998 : !torch.vtensor<[],f32> -> !torch.float
%2001 = torch.aten.item %1999 : !torch.vtensor<[],si8> -> !torch.int
%2002 = torch.aten._make_per_tensor_quantized_tensor %1997, %2000, %2001 : !torch.vtensor<[184,80,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[184,80,1,1],!torch.qint8>
%2003 = torch.aten.dequantize.self %2002 : !torch.vtensor<[184,80,1,1],!torch.qint8> -> !torch.vtensor<[184,80,1,1],f32>
%2004 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2005 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_507 = torch.constant.int 12
%2006 = torch.aten.item %2004 : !torch.vtensor<[],f32> -> !torch.float
%2007 = torch.aten.item %2005 : !torch.vtensor<[],si8> -> !torch.int
%2008 = torch.aten.quantize_per_tensor %67, %2006, %2007, %int12_507 : !torch.vtensor<[184],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[184],!torch.qint8>
%2009 = torch.aten.int_repr %2008 : !torch.vtensor<[184],!torch.qint8> -> !torch.vtensor<[184],si8>
%2010 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2011 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2012 = torch.aten.item %2010 : !torch.vtensor<[],f32> -> !torch.float
%2013 = torch.aten.item %2011 : !torch.vtensor<[],si8> -> !torch.int
%2014 = torch.aten._make_per_tensor_quantized_tensor %2009, %2012, %2013 : !torch.vtensor<[184],si8>, !torch.float, !torch.int -> !torch.vtensor<[184],!torch.qint8>
%2015 = torch.aten.dequantize.self %2014 : !torch.vtensor<[184],!torch.qint8> -> !torch.vtensor<[184],f32>
%int0_508 = torch.constant.int 0
%int0_509 = torch.constant.int 0
%int1_510 = torch.constant.int 1
%int1_511 = torch.constant.int 1
%int1_512 = torch.constant.int 1
%int1_513 = torch.constant.int 1
%int0_514 = torch.constant.int 0
%2016 = torch.prim.ListConstruct %int0_508, %int0_509 : (!torch.int, !torch.int) -> !torch.list<int>
%2017 = torch.prim.ListConstruct %int1_510, %int1_511 : (!torch.int, !torch.int) -> !torch.list<int>
%2018 = torch.prim.ListConstruct %int1_512, %int1_513 : (!torch.int, !torch.int) -> !torch.list<int>
%2019 = torch.prim.ListConstruct %int0_514, %int0_514 : (!torch.int, !torch.int) -> !torch.list<int>
%false_515 = torch.constant.bool false
%int1_516 = torch.constant.int 1
%2020 = torch.aten.convolution %1991, %2003, %2015, %2018, %2016, %2017, %false_515, %2019, %int1_516 : !torch.vtensor<[1,80,14,14],f32>, !torch.vtensor<[184,80,1,1],f32>, !torch.vtensor<[184],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,184,14,14],f32>
%2021 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2022 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_517 = torch.constant.int 12
%2023 = torch.aten.item %2021 : !torch.vtensor<[],f32> -> !torch.float
%2024 = torch.aten.item %2022 : !torch.vtensor<[],si8> -> !torch.int
%2025 = torch.aten.quantize_per_tensor %2020, %2023, %2024, %int12_517 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2026 = torch.aten.int_repr %2025 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%2027 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2028 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2029 = torch.aten.item %2027 : !torch.vtensor<[],f32> -> !torch.float
%2030 = torch.aten.item %2028 : !torch.vtensor<[],si8> -> !torch.int
%2031 = torch.aten._make_per_tensor_quantized_tensor %2026, %2029, %2030 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2032 = torch.aten.dequantize.self %2031 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%2033 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_518 = torch.constant.int 1
%2034 = torch.aten.add.Tensor %2032, %2033, %int1_518 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,184,14,14],f32>
%2035 = torch.aten.relu %2034 : !torch.vtensor<[1,184,14,14],f32> -> !torch.vtensor<[1,184,14,14],f32>
%2036 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2037 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_519 = torch.constant.int 6
%none_520 = torch.constant.none
%false_521 = torch.constant.bool false
%2038 = torch.aten.to.dtype %2036, %int6_519, %false_521, %false_521, %none_520 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_522 = torch.constant.int 6
%none_523 = torch.constant.none
%false_524 = torch.constant.bool false
%2039 = torch.aten.to.dtype %2037, %int6_522, %false_524, %false_524, %none_523 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2040 = torch.aten.clamp.Tensor %2035, %2038, %2039 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,184,14,14],f32>
%2041 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2042 = torch.aten.mul.Tensor %2040, %2041 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,184,14,14],f32>
%2043 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2044 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_525 = torch.constant.int 12
%2045 = torch.aten.item %2043 : !torch.vtensor<[],f32> -> !torch.float
%2046 = torch.aten.item %2044 : !torch.vtensor<[],si8> -> !torch.int
%2047 = torch.aten.quantize_per_tensor %2042, %2045, %2046, %int12_525 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2048 = torch.aten.int_repr %2047 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%2049 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2050 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2051 = torch.aten.item %2049 : !torch.vtensor<[],f32> -> !torch.float
%2052 = torch.aten.item %2050 : !torch.vtensor<[],si8> -> !torch.int
%2053 = torch.aten._make_per_tensor_quantized_tensor %2048, %2051, %2052 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2054 = torch.aten.dequantize.self %2053 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%2055 = torch.aten.mul.Tensor %2032, %2054 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[1,184,14,14],f32> -> !torch.vtensor<[1,184,14,14],f32>
%2056 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2057 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_526 = torch.constant.int 12
%2058 = torch.aten.item %2056 : !torch.vtensor<[],f32> -> !torch.float
%2059 = torch.aten.item %2057 : !torch.vtensor<[],si8> -> !torch.int
%2060 = torch.aten.quantize_per_tensor %2055, %2058, %2059, %int12_526 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2061 = torch.aten.int_repr %2060 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%2062 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2063 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2064 = torch.aten.item %2062 : !torch.vtensor<[],f32> -> !torch.float
%2065 = torch.aten.item %2063 : !torch.vtensor<[],si8> -> !torch.int
%2066 = torch.aten._make_per_tensor_quantized_tensor %2061, %2064, %2065 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2067 = torch.aten.dequantize.self %2066 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%2068 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2069 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_527 = torch.constant.int 12
%2070 = torch.aten.item %2068 : !torch.vtensor<[],f32> -> !torch.float
%2071 = torch.aten.item %2069 : !torch.vtensor<[],si8> -> !torch.int
%2072 = torch.aten.quantize_per_tensor %68, %2070, %2071, %int12_527 : !torch.vtensor<[184,1,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[184,1,3,3],!torch.qint8>
%2073 = torch.aten.int_repr %2072 : !torch.vtensor<[184,1,3,3],!torch.qint8> -> !torch.vtensor<[184,1,3,3],si8>
%2074 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2075 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2076 = torch.aten.item %2074 : !torch.vtensor<[],f32> -> !torch.float
%2077 = torch.aten.item %2075 : !torch.vtensor<[],si8> -> !torch.int
%2078 = torch.aten._make_per_tensor_quantized_tensor %2073, %2076, %2077 : !torch.vtensor<[184,1,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[184,1,3,3],!torch.qint8>
%2079 = torch.aten.dequantize.self %2078 : !torch.vtensor<[184,1,3,3],!torch.qint8> -> !torch.vtensor<[184,1,3,3],f32>
%2080 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2081 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_528 = torch.constant.int 12
%2082 = torch.aten.item %2080 : !torch.vtensor<[],f32> -> !torch.float
%2083 = torch.aten.item %2081 : !torch.vtensor<[],si8> -> !torch.int
%2084 = torch.aten.quantize_per_tensor %69, %2082, %2083, %int12_528 : !torch.vtensor<[184],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[184],!torch.qint8>
%2085 = torch.aten.int_repr %2084 : !torch.vtensor<[184],!torch.qint8> -> !torch.vtensor<[184],si8>
%2086 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2087 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2088 = torch.aten.item %2086 : !torch.vtensor<[],f32> -> !torch.float
%2089 = torch.aten.item %2087 : !torch.vtensor<[],si8> -> !torch.int
%2090 = torch.aten._make_per_tensor_quantized_tensor %2085, %2088, %2089 : !torch.vtensor<[184],si8>, !torch.float, !torch.int -> !torch.vtensor<[184],!torch.qint8>
%2091 = torch.aten.dequantize.self %2090 : !torch.vtensor<[184],!torch.qint8> -> !torch.vtensor<[184],f32>
%int1_529 = torch.constant.int 1
%int1_530 = torch.constant.int 1
%int1_531 = torch.constant.int 1
%int1_532 = torch.constant.int 1
%int1_533 = torch.constant.int 1
%int1_534 = torch.constant.int 1
%int0_535 = torch.constant.int 0
%2092 = torch.prim.ListConstruct %int1_529, %int1_530 : (!torch.int, !torch.int) -> !torch.list<int>
%2093 = torch.prim.ListConstruct %int1_531, %int1_532 : (!torch.int, !torch.int) -> !torch.list<int>
%2094 = torch.prim.ListConstruct %int1_533, %int1_534 : (!torch.int, !torch.int) -> !torch.list<int>
%2095 = torch.prim.ListConstruct %int0_535, %int0_535 : (!torch.int, !torch.int) -> !torch.list<int>
%false_536 = torch.constant.bool false
%int184_537 = torch.constant.int 184
%2096 = torch.aten.convolution %2067, %2079, %2091, %2094, %2092, %2093, %false_536, %2095, %int184_537 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[184,1,3,3],f32>, !torch.vtensor<[184],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,184,14,14],f32>
%2097 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2098 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_538 = torch.constant.int 12
%2099 = torch.aten.item %2097 : !torch.vtensor<[],f32> -> !torch.float
%2100 = torch.aten.item %2098 : !torch.vtensor<[],si8> -> !torch.int
%2101 = torch.aten.quantize_per_tensor %2096, %2099, %2100, %int12_538 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2102 = torch.aten.int_repr %2101 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%2103 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2104 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2105 = torch.aten.item %2103 : !torch.vtensor<[],f32> -> !torch.float
%2106 = torch.aten.item %2104 : !torch.vtensor<[],si8> -> !torch.int
%2107 = torch.aten._make_per_tensor_quantized_tensor %2102, %2105, %2106 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2108 = torch.aten.dequantize.self %2107 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%2109 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_539 = torch.constant.int 1
%2110 = torch.aten.add.Tensor %2108, %2109, %int1_539 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,184,14,14],f32>
%2111 = torch.aten.relu %2110 : !torch.vtensor<[1,184,14,14],f32> -> !torch.vtensor<[1,184,14,14],f32>
%2112 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2113 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_540 = torch.constant.int 6
%none_541 = torch.constant.none
%false_542 = torch.constant.bool false
%2114 = torch.aten.to.dtype %2112, %int6_540, %false_542, %false_542, %none_541 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_543 = torch.constant.int 6
%none_544 = torch.constant.none
%false_545 = torch.constant.bool false
%2115 = torch.aten.to.dtype %2113, %int6_543, %false_545, %false_545, %none_544 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2116 = torch.aten.clamp.Tensor %2111, %2114, %2115 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,184,14,14],f32>
%2117 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2118 = torch.aten.mul.Tensor %2116, %2117 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,184,14,14],f32>
%2119 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2120 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_546 = torch.constant.int 12
%2121 = torch.aten.item %2119 : !torch.vtensor<[],f32> -> !torch.float
%2122 = torch.aten.item %2120 : !torch.vtensor<[],si8> -> !torch.int
%2123 = torch.aten.quantize_per_tensor %2118, %2121, %2122, %int12_546 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2124 = torch.aten.int_repr %2123 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%2125 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2126 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2127 = torch.aten.item %2125 : !torch.vtensor<[],f32> -> !torch.float
%2128 = torch.aten.item %2126 : !torch.vtensor<[],si8> -> !torch.int
%2129 = torch.aten._make_per_tensor_quantized_tensor %2124, %2127, %2128 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2130 = torch.aten.dequantize.self %2129 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%2131 = torch.aten.mul.Tensor %2108, %2130 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[1,184,14,14],f32> -> !torch.vtensor<[1,184,14,14],f32>
%2132 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2133 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_547 = torch.constant.int 12
%2134 = torch.aten.item %2132 : !torch.vtensor<[],f32> -> !torch.float
%2135 = torch.aten.item %2133 : !torch.vtensor<[],si8> -> !torch.int
%2136 = torch.aten.quantize_per_tensor %2131, %2134, %2135, %int12_547 : !torch.vtensor<[1,184,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2137 = torch.aten.int_repr %2136 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],si8>
%2138 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2139 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2140 = torch.aten.item %2138 : !torch.vtensor<[],f32> -> !torch.float
%2141 = torch.aten.item %2139 : !torch.vtensor<[],si8> -> !torch.int
%2142 = torch.aten._make_per_tensor_quantized_tensor %2137, %2140, %2141 : !torch.vtensor<[1,184,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,184,14,14],!torch.qint8>
%2143 = torch.aten.dequantize.self %2142 : !torch.vtensor<[1,184,14,14],!torch.qint8> -> !torch.vtensor<[1,184,14,14],f32>
%2144 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2145 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_548 = torch.constant.int 12
%2146 = torch.aten.item %2144 : !torch.vtensor<[],f32> -> !torch.float
%2147 = torch.aten.item %2145 : !torch.vtensor<[],si8> -> !torch.int
%2148 = torch.aten.quantize_per_tensor %70, %2146, %2147, %int12_548 : !torch.vtensor<[80,184,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[80,184,1,1],!torch.qint8>
%2149 = torch.aten.int_repr %2148 : !torch.vtensor<[80,184,1,1],!torch.qint8> -> !torch.vtensor<[80,184,1,1],si8>
%2150 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2151 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2152 = torch.aten.item %2150 : !torch.vtensor<[],f32> -> !torch.float
%2153 = torch.aten.item %2151 : !torch.vtensor<[],si8> -> !torch.int
%2154 = torch.aten._make_per_tensor_quantized_tensor %2149, %2152, %2153 : !torch.vtensor<[80,184,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[80,184,1,1],!torch.qint8>
%2155 = torch.aten.dequantize.self %2154 : !torch.vtensor<[80,184,1,1],!torch.qint8> -> !torch.vtensor<[80,184,1,1],f32>
%2156 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2157 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_549 = torch.constant.int 12
%2158 = torch.aten.item %2156 : !torch.vtensor<[],f32> -> !torch.float
%2159 = torch.aten.item %2157 : !torch.vtensor<[],si8> -> !torch.int
%2160 = torch.aten.quantize_per_tensor %71, %2158, %2159, %int12_549 : !torch.vtensor<[80],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[80],!torch.qint8>
%2161 = torch.aten.int_repr %2160 : !torch.vtensor<[80],!torch.qint8> -> !torch.vtensor<[80],si8>
%2162 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2163 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2164 = torch.aten.item %2162 : !torch.vtensor<[],f32> -> !torch.float
%2165 = torch.aten.item %2163 : !torch.vtensor<[],si8> -> !torch.int
%2166 = torch.aten._make_per_tensor_quantized_tensor %2161, %2164, %2165 : !torch.vtensor<[80],si8>, !torch.float, !torch.int -> !torch.vtensor<[80],!torch.qint8>
%2167 = torch.aten.dequantize.self %2166 : !torch.vtensor<[80],!torch.qint8> -> !torch.vtensor<[80],f32>
%int0_550 = torch.constant.int 0
%int0_551 = torch.constant.int 0
%int1_552 = torch.constant.int 1
%int1_553 = torch.constant.int 1
%int1_554 = torch.constant.int 1
%int1_555 = torch.constant.int 1
%int0_556 = torch.constant.int 0
%2168 = torch.prim.ListConstruct %int0_550, %int0_551 : (!torch.int, !torch.int) -> !torch.list<int>
%2169 = torch.prim.ListConstruct %int1_552, %int1_553 : (!torch.int, !torch.int) -> !torch.list<int>
%2170 = torch.prim.ListConstruct %int1_554, %int1_555 : (!torch.int, !torch.int) -> !torch.list<int>
%2171 = torch.prim.ListConstruct %int0_556, %int0_556 : (!torch.int, !torch.int) -> !torch.list<int>
%false_557 = torch.constant.bool false
%int1_558 = torch.constant.int 1
%2172 = torch.aten.convolution %2143, %2155, %2167, %2170, %2168, %2169, %false_557, %2171, %int1_558 : !torch.vtensor<[1,184,14,14],f32>, !torch.vtensor<[80,184,1,1],f32>, !torch.vtensor<[80],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,80,14,14],f32>
%2173 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2174 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_559 = torch.constant.int 12
%2175 = torch.aten.item %2173 : !torch.vtensor<[],f32> -> !torch.float
%2176 = torch.aten.item %2174 : !torch.vtensor<[],si8> -> !torch.int
%2177 = torch.aten.quantize_per_tensor %2172, %2175, %2176, %int12_559 : !torch.vtensor<[1,80,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%2178 = torch.aten.int_repr %2177 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],si8>
%2179 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2180 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2181 = torch.aten.item %2179 : !torch.vtensor<[],f32> -> !torch.float
%2182 = torch.aten.item %2180 : !torch.vtensor<[],si8> -> !torch.int
%2183 = torch.aten._make_per_tensor_quantized_tensor %2178, %2181, %2182 : !torch.vtensor<[1,80,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%2184 = torch.aten.dequantize.self %2183 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],f32>
%int1_560 = torch.constant.int 1
%2185 = torch.aten.add.Tensor %2184, %1991, %int1_560 : !torch.vtensor<[1,80,14,14],f32>, !torch.vtensor<[1,80,14,14],f32>, !torch.int -> !torch.vtensor<[1,80,14,14],f32>
%2186 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2187 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_561 = torch.constant.int 12
%2188 = torch.aten.item %2186 : !torch.vtensor<[],f32> -> !torch.float
%2189 = torch.aten.item %2187 : !torch.vtensor<[],si8> -> !torch.int
%2190 = torch.aten.quantize_per_tensor %2185, %2188, %2189, %int12_561 : !torch.vtensor<[1,80,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%2191 = torch.aten.int_repr %2190 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],si8>
%2192 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2193 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2194 = torch.aten.item %2192 : !torch.vtensor<[],f32> -> !torch.float
%2195 = torch.aten.item %2193 : !torch.vtensor<[],si8> -> !torch.int
%2196 = torch.aten._make_per_tensor_quantized_tensor %2191, %2194, %2195 : !torch.vtensor<[1,80,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,80,14,14],!torch.qint8>
%2197 = torch.aten.dequantize.self %2196 : !torch.vtensor<[1,80,14,14],!torch.qint8> -> !torch.vtensor<[1,80,14,14],f32>
%2198 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2199 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_562 = torch.constant.int 12
%2200 = torch.aten.item %2198 : !torch.vtensor<[],f32> -> !torch.float
%2201 = torch.aten.item %2199 : !torch.vtensor<[],si8> -> !torch.int
%2202 = torch.aten.quantize_per_tensor %72, %2200, %2201, %int12_562 : !torch.vtensor<[480,80,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[480,80,1,1],!torch.qint8>
%2203 = torch.aten.int_repr %2202 : !torch.vtensor<[480,80,1,1],!torch.qint8> -> !torch.vtensor<[480,80,1,1],si8>
%2204 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2205 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2206 = torch.aten.item %2204 : !torch.vtensor<[],f32> -> !torch.float
%2207 = torch.aten.item %2205 : !torch.vtensor<[],si8> -> !torch.int
%2208 = torch.aten._make_per_tensor_quantized_tensor %2203, %2206, %2207 : !torch.vtensor<[480,80,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[480,80,1,1],!torch.qint8>
%2209 = torch.aten.dequantize.self %2208 : !torch.vtensor<[480,80,1,1],!torch.qint8> -> !torch.vtensor<[480,80,1,1],f32>
%2210 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2211 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_563 = torch.constant.int 12
%2212 = torch.aten.item %2210 : !torch.vtensor<[],f32> -> !torch.float
%2213 = torch.aten.item %2211 : !torch.vtensor<[],si8> -> !torch.int
%2214 = torch.aten.quantize_per_tensor %73, %2212, %2213, %int12_563 : !torch.vtensor<[480],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[480],!torch.qint8>
%2215 = torch.aten.int_repr %2214 : !torch.vtensor<[480],!torch.qint8> -> !torch.vtensor<[480],si8>
%2216 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2217 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2218 = torch.aten.item %2216 : !torch.vtensor<[],f32> -> !torch.float
%2219 = torch.aten.item %2217 : !torch.vtensor<[],si8> -> !torch.int
%2220 = torch.aten._make_per_tensor_quantized_tensor %2215, %2218, %2219 : !torch.vtensor<[480],si8>, !torch.float, !torch.int -> !torch.vtensor<[480],!torch.qint8>
%2221 = torch.aten.dequantize.self %2220 : !torch.vtensor<[480],!torch.qint8> -> !torch.vtensor<[480],f32>
%int0_564 = torch.constant.int 0
%int0_565 = torch.constant.int 0
%int1_566 = torch.constant.int 1
%int1_567 = torch.constant.int 1
%int1_568 = torch.constant.int 1
%int1_569 = torch.constant.int 1
%int0_570 = torch.constant.int 0
%2222 = torch.prim.ListConstruct %int0_564, %int0_565 : (!torch.int, !torch.int) -> !torch.list<int>
%2223 = torch.prim.ListConstruct %int1_566, %int1_567 : (!torch.int, !torch.int) -> !torch.list<int>
%2224 = torch.prim.ListConstruct %int1_568, %int1_569 : (!torch.int, !torch.int) -> !torch.list<int>
%2225 = torch.prim.ListConstruct %int0_570, %int0_570 : (!torch.int, !torch.int) -> !torch.list<int>
%false_571 = torch.constant.bool false
%int1_572 = torch.constant.int 1
%2226 = torch.aten.convolution %2197, %2209, %2221, %2224, %2222, %2223, %false_571, %2225, %int1_572 : !torch.vtensor<[1,80,14,14],f32>, !torch.vtensor<[480,80,1,1],f32>, !torch.vtensor<[480],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,480,14,14],f32>
%2227 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2228 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_573 = torch.constant.int 12
%2229 = torch.aten.item %2227 : !torch.vtensor<[],f32> -> !torch.float
%2230 = torch.aten.item %2228 : !torch.vtensor<[],si8> -> !torch.int
%2231 = torch.aten.quantize_per_tensor %2226, %2229, %2230, %int12_573 : !torch.vtensor<[1,480,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2232 = torch.aten.int_repr %2231 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],si8>
%2233 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2234 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2235 = torch.aten.item %2233 : !torch.vtensor<[],f32> -> !torch.float
%2236 = torch.aten.item %2234 : !torch.vtensor<[],si8> -> !torch.int
%2237 = torch.aten._make_per_tensor_quantized_tensor %2232, %2235, %2236 : !torch.vtensor<[1,480,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2238 = torch.aten.dequantize.self %2237 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],f32>
%2239 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_574 = torch.constant.int 1
%2240 = torch.aten.add.Tensor %2238, %2239, %int1_574 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,480,14,14],f32>
%2241 = torch.aten.relu %2240 : !torch.vtensor<[1,480,14,14],f32> -> !torch.vtensor<[1,480,14,14],f32>
%2242 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2243 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_575 = torch.constant.int 6
%none_576 = torch.constant.none
%false_577 = torch.constant.bool false
%2244 = torch.aten.to.dtype %2242, %int6_575, %false_577, %false_577, %none_576 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_578 = torch.constant.int 6
%none_579 = torch.constant.none
%false_580 = torch.constant.bool false
%2245 = torch.aten.to.dtype %2243, %int6_578, %false_580, %false_580, %none_579 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2246 = torch.aten.clamp.Tensor %2241, %2244, %2245 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,480,14,14],f32>
%2247 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2248 = torch.aten.mul.Tensor %2246, %2247 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,480,14,14],f32>
%2249 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2250 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_581 = torch.constant.int 12
%2251 = torch.aten.item %2249 : !torch.vtensor<[],f32> -> !torch.float
%2252 = torch.aten.item %2250 : !torch.vtensor<[],si8> -> !torch.int
%2253 = torch.aten.quantize_per_tensor %2248, %2251, %2252, %int12_581 : !torch.vtensor<[1,480,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2254 = torch.aten.int_repr %2253 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],si8>
%2255 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2256 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2257 = torch.aten.item %2255 : !torch.vtensor<[],f32> -> !torch.float
%2258 = torch.aten.item %2256 : !torch.vtensor<[],si8> -> !torch.int
%2259 = torch.aten._make_per_tensor_quantized_tensor %2254, %2257, %2258 : !torch.vtensor<[1,480,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2260 = torch.aten.dequantize.self %2259 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],f32>
%2261 = torch.aten.mul.Tensor %2238, %2260 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[1,480,14,14],f32> -> !torch.vtensor<[1,480,14,14],f32>
%2262 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2263 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_582 = torch.constant.int 12
%2264 = torch.aten.item %2262 : !torch.vtensor<[],f32> -> !torch.float
%2265 = torch.aten.item %2263 : !torch.vtensor<[],si8> -> !torch.int
%2266 = torch.aten.quantize_per_tensor %2261, %2264, %2265, %int12_582 : !torch.vtensor<[1,480,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2267 = torch.aten.int_repr %2266 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],si8>
%2268 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2269 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2270 = torch.aten.item %2268 : !torch.vtensor<[],f32> -> !torch.float
%2271 = torch.aten.item %2269 : !torch.vtensor<[],si8> -> !torch.int
%2272 = torch.aten._make_per_tensor_quantized_tensor %2267, %2270, %2271 : !torch.vtensor<[1,480,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2273 = torch.aten.dequantize.self %2272 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],f32>
%2274 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2275 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_583 = torch.constant.int 12
%2276 = torch.aten.item %2274 : !torch.vtensor<[],f32> -> !torch.float
%2277 = torch.aten.item %2275 : !torch.vtensor<[],si8> -> !torch.int
%2278 = torch.aten.quantize_per_tensor %74, %2276, %2277, %int12_583 : !torch.vtensor<[480,1,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[480,1,3,3],!torch.qint8>
%2279 = torch.aten.int_repr %2278 : !torch.vtensor<[480,1,3,3],!torch.qint8> -> !torch.vtensor<[480,1,3,3],si8>
%2280 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2281 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2282 = torch.aten.item %2280 : !torch.vtensor<[],f32> -> !torch.float
%2283 = torch.aten.item %2281 : !torch.vtensor<[],si8> -> !torch.int
%2284 = torch.aten._make_per_tensor_quantized_tensor %2279, %2282, %2283 : !torch.vtensor<[480,1,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[480,1,3,3],!torch.qint8>
%2285 = torch.aten.dequantize.self %2284 : !torch.vtensor<[480,1,3,3],!torch.qint8> -> !torch.vtensor<[480,1,3,3],f32>
%2286 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2287 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_584 = torch.constant.int 12
%2288 = torch.aten.item %2286 : !torch.vtensor<[],f32> -> !torch.float
%2289 = torch.aten.item %2287 : !torch.vtensor<[],si8> -> !torch.int
%2290 = torch.aten.quantize_per_tensor %75, %2288, %2289, %int12_584 : !torch.vtensor<[480],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[480],!torch.qint8>
%2291 = torch.aten.int_repr %2290 : !torch.vtensor<[480],!torch.qint8> -> !torch.vtensor<[480],si8>
%2292 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2293 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2294 = torch.aten.item %2292 : !torch.vtensor<[],f32> -> !torch.float
%2295 = torch.aten.item %2293 : !torch.vtensor<[],si8> -> !torch.int
%2296 = torch.aten._make_per_tensor_quantized_tensor %2291, %2294, %2295 : !torch.vtensor<[480],si8>, !torch.float, !torch.int -> !torch.vtensor<[480],!torch.qint8>
%2297 = torch.aten.dequantize.self %2296 : !torch.vtensor<[480],!torch.qint8> -> !torch.vtensor<[480],f32>
%int1_585 = torch.constant.int 1
%int1_586 = torch.constant.int 1
%int1_587 = torch.constant.int 1
%int1_588 = torch.constant.int 1
%int1_589 = torch.constant.int 1
%int1_590 = torch.constant.int 1
%int0_591 = torch.constant.int 0
%2298 = torch.prim.ListConstruct %int1_585, %int1_586 : (!torch.int, !torch.int) -> !torch.list<int>
%2299 = torch.prim.ListConstruct %int1_587, %int1_588 : (!torch.int, !torch.int) -> !torch.list<int>
%2300 = torch.prim.ListConstruct %int1_589, %int1_590 : (!torch.int, !torch.int) -> !torch.list<int>
%2301 = torch.prim.ListConstruct %int0_591, %int0_591 : (!torch.int, !torch.int) -> !torch.list<int>
%false_592 = torch.constant.bool false
%int480 = torch.constant.int 480
%2302 = torch.aten.convolution %2273, %2285, %2297, %2300, %2298, %2299, %false_592, %2301, %int480 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[480,1,3,3],f32>, !torch.vtensor<[480],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,480,14,14],f32>
%2303 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2304 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_593 = torch.constant.int 12
%2305 = torch.aten.item %2303 : !torch.vtensor<[],f32> -> !torch.float
%2306 = torch.aten.item %2304 : !torch.vtensor<[],si8> -> !torch.int
%2307 = torch.aten.quantize_per_tensor %2302, %2305, %2306, %int12_593 : !torch.vtensor<[1,480,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2308 = torch.aten.int_repr %2307 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],si8>
%2309 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2310 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2311 = torch.aten.item %2309 : !torch.vtensor<[],f32> -> !torch.float
%2312 = torch.aten.item %2310 : !torch.vtensor<[],si8> -> !torch.int
%2313 = torch.aten._make_per_tensor_quantized_tensor %2308, %2311, %2312 : !torch.vtensor<[1,480,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2314 = torch.aten.dequantize.self %2313 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],f32>
%2315 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_594 = torch.constant.int 1
%2316 = torch.aten.add.Tensor %2314, %2315, %int1_594 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,480,14,14],f32>
%2317 = torch.aten.relu %2316 : !torch.vtensor<[1,480,14,14],f32> -> !torch.vtensor<[1,480,14,14],f32>
%2318 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2319 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_595 = torch.constant.int 6
%none_596 = torch.constant.none
%false_597 = torch.constant.bool false
%2320 = torch.aten.to.dtype %2318, %int6_595, %false_597, %false_597, %none_596 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_598 = torch.constant.int 6
%none_599 = torch.constant.none
%false_600 = torch.constant.bool false
%2321 = torch.aten.to.dtype %2319, %int6_598, %false_600, %false_600, %none_599 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2322 = torch.aten.clamp.Tensor %2317, %2320, %2321 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,480,14,14],f32>
%2323 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2324 = torch.aten.mul.Tensor %2322, %2323 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,480,14,14],f32>
%2325 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2326 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_601 = torch.constant.int 12
%2327 = torch.aten.item %2325 : !torch.vtensor<[],f32> -> !torch.float
%2328 = torch.aten.item %2326 : !torch.vtensor<[],si8> -> !torch.int
%2329 = torch.aten.quantize_per_tensor %2324, %2327, %2328, %int12_601 : !torch.vtensor<[1,480,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2330 = torch.aten.int_repr %2329 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],si8>
%2331 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2332 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2333 = torch.aten.item %2331 : !torch.vtensor<[],f32> -> !torch.float
%2334 = torch.aten.item %2332 : !torch.vtensor<[],si8> -> !torch.int
%2335 = torch.aten._make_per_tensor_quantized_tensor %2330, %2333, %2334 : !torch.vtensor<[1,480,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2336 = torch.aten.dequantize.self %2335 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],f32>
%2337 = torch.aten.mul.Tensor %2314, %2336 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[1,480,14,14],f32> -> !torch.vtensor<[1,480,14,14],f32>
%2338 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2339 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_602 = torch.constant.int 12
%2340 = torch.aten.item %2338 : !torch.vtensor<[],f32> -> !torch.float
%2341 = torch.aten.item %2339 : !torch.vtensor<[],si8> -> !torch.int
%2342 = torch.aten.quantize_per_tensor %2337, %2340, %2341, %int12_602 : !torch.vtensor<[1,480,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2343 = torch.aten.int_repr %2342 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],si8>
%2344 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2345 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2346 = torch.aten.item %2344 : !torch.vtensor<[],f32> -> !torch.float
%2347 = torch.aten.item %2345 : !torch.vtensor<[],si8> -> !torch.int
%2348 = torch.aten._make_per_tensor_quantized_tensor %2343, %2346, %2347 : !torch.vtensor<[1,480,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2349 = torch.aten.dequantize.self %2348 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],f32>
%int0_603 = torch.constant.int 0
%int1_604 = torch.constant.int 1
%int14 = torch.constant.int 14
%int14_605 = torch.constant.int 14
%2350 = torch.prim.ListConstruct %int14, %int14_605 : (!torch.int, !torch.int) -> !torch.list<int>
%2351 = torch.prim.ListConstruct %int0_603, %int0_603 : (!torch.int, !torch.int) -> !torch.list<int>
%2352 = torch.prim.ListConstruct %int1_604, %int1_604 : (!torch.int, !torch.int) -> !torch.list<int>
%false_606 = torch.constant.bool false
%none_607 = torch.constant.none
%2353 = torch.aten.avg_pool2d %2349, %2350, %2352, %2351, %false_606, %false_606, %none_607 : !torch.vtensor<[1,480,14,14],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,480,1,1],f32>
%2354 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%2355 = torch.aten.mul.Tensor %2353, %2354 : !torch.vtensor<[1,480,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,480,1,1],f32>
%2356 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2357 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_608 = torch.constant.int 12
%2358 = torch.aten.item %2356 : !torch.vtensor<[],f32> -> !torch.float
%2359 = torch.aten.item %2357 : !torch.vtensor<[],si8> -> !torch.int
%2360 = torch.aten.quantize_per_tensor %2355, %2358, %2359, %int12_608 : !torch.vtensor<[1,480,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,1,1],!torch.qint8>
%2361 = torch.aten.int_repr %2360 : !torch.vtensor<[1,480,1,1],!torch.qint8> -> !torch.vtensor<[1,480,1,1],si8>
%2362 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2363 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2364 = torch.aten.item %2362 : !torch.vtensor<[],f32> -> !torch.float
%2365 = torch.aten.item %2363 : !torch.vtensor<[],si8> -> !torch.int
%2366 = torch.aten._make_per_tensor_quantized_tensor %2361, %2364, %2365 : !torch.vtensor<[1,480,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,1,1],!torch.qint8>
%2367 = torch.aten.dequantize.self %2366 : !torch.vtensor<[1,480,1,1],!torch.qint8> -> !torch.vtensor<[1,480,1,1],f32>
%2368 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2369 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_609 = torch.constant.int 12
%2370 = torch.aten.item %2368 : !torch.vtensor<[],f32> -> !torch.float
%2371 = torch.aten.item %2369 : !torch.vtensor<[],si8> -> !torch.int
%2372 = torch.aten.quantize_per_tensor %76, %2370, %2371, %int12_609 : !torch.vtensor<[120,480,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120,480,1,1],!torch.qint8>
%2373 = torch.aten.int_repr %2372 : !torch.vtensor<[120,480,1,1],!torch.qint8> -> !torch.vtensor<[120,480,1,1],si8>
%2374 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2375 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2376 = torch.aten.item %2374 : !torch.vtensor<[],f32> -> !torch.float
%2377 = torch.aten.item %2375 : !torch.vtensor<[],si8> -> !torch.int
%2378 = torch.aten._make_per_tensor_quantized_tensor %2373, %2376, %2377 : !torch.vtensor<[120,480,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[120,480,1,1],!torch.qint8>
%2379 = torch.aten.dequantize.self %2378 : !torch.vtensor<[120,480,1,1],!torch.qint8> -> !torch.vtensor<[120,480,1,1],f32>
%2380 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2381 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_610 = torch.constant.int 12
%2382 = torch.aten.item %2380 : !torch.vtensor<[],f32> -> !torch.float
%2383 = torch.aten.item %2381 : !torch.vtensor<[],si8> -> !torch.int
%2384 = torch.aten.quantize_per_tensor %77, %2382, %2383, %int12_610 : !torch.vtensor<[120],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%2385 = torch.aten.int_repr %2384 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],si8>
%2386 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%2387 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2388 = torch.aten.item %2386 : !torch.vtensor<[],f32> -> !torch.float
%2389 = torch.aten.item %2387 : !torch.vtensor<[],si8> -> !torch.int
%2390 = torch.aten._make_per_tensor_quantized_tensor %2385, %2388, %2389 : !torch.vtensor<[120],si8>, !torch.float, !torch.int -> !torch.vtensor<[120],!torch.qint8>
%2391 = torch.aten.dequantize.self %2390 : !torch.vtensor<[120],!torch.qint8> -> !torch.vtensor<[120],f32>
%int0_611 = torch.constant.int 0
%int0_612 = torch.constant.int 0
%int1_613 = torch.constant.int 1
%int1_614 = torch.constant.int 1
%int1_615 = torch.constant.int 1
%int1_616 = torch.constant.int 1
%int0_617 = torch.constant.int 0
%2392 = torch.prim.ListConstruct %int0_611, %int0_612 : (!torch.int, !torch.int) -> !torch.list<int>
%2393 = torch.prim.ListConstruct %int1_613, %int1_614 : (!torch.int, !torch.int) -> !torch.list<int>
%2394 = torch.prim.ListConstruct %int1_615, %int1_616 : (!torch.int, !torch.int) -> !torch.list<int>
%2395 = torch.prim.ListConstruct %int0_617, %int0_617 : (!torch.int, !torch.int) -> !torch.list<int>
%false_618 = torch.constant.bool false
%int1_619 = torch.constant.int 1
%2396 = torch.aten.convolution %2367, %2379, %2391, %2394, %2392, %2393, %false_618, %2395, %int1_619 : !torch.vtensor<[1,480,1,1],f32>, !torch.vtensor<[120,480,1,1],f32>, !torch.vtensor<[120],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,120,1,1],f32>
%2397 = torch.aten.relu %2396 : !torch.vtensor<[1,120,1,1],f32> -> !torch.vtensor<[1,120,1,1],f32>
%2398 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2399 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_620 = torch.constant.int 12
%2400 = torch.aten.item %2398 : !torch.vtensor<[],f32> -> !torch.float
%2401 = torch.aten.item %2399 : !torch.vtensor<[],si8> -> !torch.int
%2402 = torch.aten.quantize_per_tensor %2397, %2400, %2401, %int12_620 : !torch.vtensor<[1,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%2403 = torch.aten.int_repr %2402 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],si8>
%2404 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2405 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2406 = torch.aten.item %2404 : !torch.vtensor<[],f32> -> !torch.float
%2407 = torch.aten.item %2405 : !torch.vtensor<[],si8> -> !torch.int
%2408 = torch.aten._make_per_tensor_quantized_tensor %2403, %2406, %2407 : !torch.vtensor<[1,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,120,1,1],!torch.qint8>
%2409 = torch.aten.dequantize.self %2408 : !torch.vtensor<[1,120,1,1],!torch.qint8> -> !torch.vtensor<[1,120,1,1],f32>
%2410 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2411 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_621 = torch.constant.int 12
%2412 = torch.aten.item %2410 : !torch.vtensor<[],f32> -> !torch.float
%2413 = torch.aten.item %2411 : !torch.vtensor<[],si8> -> !torch.int
%2414 = torch.aten.quantize_per_tensor %78, %2412, %2413, %int12_621 : !torch.vtensor<[480,120,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[480,120,1,1],!torch.qint8>
%2415 = torch.aten.int_repr %2414 : !torch.vtensor<[480,120,1,1],!torch.qint8> -> !torch.vtensor<[480,120,1,1],si8>
%2416 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2417 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2418 = torch.aten.item %2416 : !torch.vtensor<[],f32> -> !torch.float
%2419 = torch.aten.item %2417 : !torch.vtensor<[],si8> -> !torch.int
%2420 = torch.aten._make_per_tensor_quantized_tensor %2415, %2418, %2419 : !torch.vtensor<[480,120,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[480,120,1,1],!torch.qint8>
%2421 = torch.aten.dequantize.self %2420 : !torch.vtensor<[480,120,1,1],!torch.qint8> -> !torch.vtensor<[480,120,1,1],f32>
%2422 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_622 = torch.constant.int 12
%2424 = torch.aten.item %2422 : !torch.vtensor<[],f32> -> !torch.float
%2425 = torch.aten.item %2423 : !torch.vtensor<[],si8> -> !torch.int
%2426 = torch.aten.quantize_per_tensor %79, %2424, %2425, %int12_622 : !torch.vtensor<[480],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[480],!torch.qint8>
%2427 = torch.aten.int_repr %2426 : !torch.vtensor<[480],!torch.qint8> -> !torch.vtensor<[480],si8>
%2428 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2430 = torch.aten.item %2428 : !torch.vtensor<[],f32> -> !torch.float
%2431 = torch.aten.item %2429 : !torch.vtensor<[],si8> -> !torch.int
%2432 = torch.aten._make_per_tensor_quantized_tensor %2427, %2430, %2431 : !torch.vtensor<[480],si8>, !torch.float, !torch.int -> !torch.vtensor<[480],!torch.qint8>
%2433 = torch.aten.dequantize.self %2432 : !torch.vtensor<[480],!torch.qint8> -> !torch.vtensor<[480],f32>
%int0_623 = torch.constant.int 0
%int0_624 = torch.constant.int 0
%int1_625 = torch.constant.int 1
%int1_626 = torch.constant.int 1
%int1_627 = torch.constant.int 1
%int1_628 = torch.constant.int 1
%int0_629 = torch.constant.int 0
%2434 = torch.prim.ListConstruct %int0_623, %int0_624 : (!torch.int, !torch.int) -> !torch.list<int>
%2435 = torch.prim.ListConstruct %int1_625, %int1_626 : (!torch.int, !torch.int) -> !torch.list<int>
%2436 = torch.prim.ListConstruct %int1_627, %int1_628 : (!torch.int, !torch.int) -> !torch.list<int>
%2437 = torch.prim.ListConstruct %int0_629, %int0_629 : (!torch.int, !torch.int) -> !torch.list<int>
%false_630 = torch.constant.bool false
%int1_631 = torch.constant.int 1
%2438 = torch.aten.convolution %2409, %2421, %2433, %2436, %2434, %2435, %false_630, %2437, %int1_631 : !torch.vtensor<[1,120,1,1],f32>, !torch.vtensor<[480,120,1,1],f32>, !torch.vtensor<[480],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,480,1,1],f32>
%2439 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2440 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_632 = torch.constant.int 12
%2441 = torch.aten.item %2439 : !torch.vtensor<[],f32> -> !torch.float
%2442 = torch.aten.item %2440 : !torch.vtensor<[],si8> -> !torch.int
%2443 = torch.aten.quantize_per_tensor %2438, %2441, %2442, %int12_632 : !torch.vtensor<[1,480,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,1,1],!torch.qint8>
%2444 = torch.aten.int_repr %2443 : !torch.vtensor<[1,480,1,1],!torch.qint8> -> !torch.vtensor<[1,480,1,1],si8>
%2445 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2446 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2447 = torch.aten.item %2445 : !torch.vtensor<[],f32> -> !torch.float
%2448 = torch.aten.item %2446 : !torch.vtensor<[],si8> -> !torch.int
%2449 = torch.aten._make_per_tensor_quantized_tensor %2444, %2447, %2448 : !torch.vtensor<[1,480,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,1,1],!torch.qint8>
%2450 = torch.aten.dequantize.self %2449 : !torch.vtensor<[1,480,1,1],!torch.qint8> -> !torch.vtensor<[1,480,1,1],f32>
%2451 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_633 = torch.constant.int 1
%2452 = torch.aten.add.Tensor %2450, %2451, %int1_633 : !torch.vtensor<[1,480,1,1],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,480,1,1],f32>
%2453 = torch.aten.relu %2452 : !torch.vtensor<[1,480,1,1],f32> -> !torch.vtensor<[1,480,1,1],f32>
%2454 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2455 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_634 = torch.constant.int 6
%none_635 = torch.constant.none
%false_636 = torch.constant.bool false
%2456 = torch.aten.to.dtype %2454, %int6_634, %false_636, %false_636, %none_635 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_637 = torch.constant.int 6
%none_638 = torch.constant.none
%false_639 = torch.constant.bool false
%2457 = torch.aten.to.dtype %2455, %int6_637, %false_639, %false_639, %none_638 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2458 = torch.aten.clamp.Tensor %2453, %2456, %2457 : !torch.vtensor<[1,480,1,1],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,480,1,1],f32>
%2459 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2460 = torch.aten.mul.Tensor %2458, %2459 : !torch.vtensor<[1,480,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,480,1,1],f32>
%2461 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2462 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_640 = torch.constant.int 12
%2463 = torch.aten.item %2461 : !torch.vtensor<[],f32> -> !torch.float
%2464 = torch.aten.item %2462 : !torch.vtensor<[],si8> -> !torch.int
%2465 = torch.aten.quantize_per_tensor %2460, %2463, %2464, %int12_640 : !torch.vtensor<[1,480,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,1,1],!torch.qint8>
%2466 = torch.aten.int_repr %2465 : !torch.vtensor<[1,480,1,1],!torch.qint8> -> !torch.vtensor<[1,480,1,1],si8>
%2467 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2468 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2469 = torch.aten.item %2467 : !torch.vtensor<[],f32> -> !torch.float
%2470 = torch.aten.item %2468 : !torch.vtensor<[],si8> -> !torch.int
%2471 = torch.aten._make_per_tensor_quantized_tensor %2466, %2469, %2470 : !torch.vtensor<[1,480,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,1,1],!torch.qint8>
%2472 = torch.aten.dequantize.self %2471 : !torch.vtensor<[1,480,1,1],!torch.qint8> -> !torch.vtensor<[1,480,1,1],f32>
%2473 = torch.aten.mul.Tensor %2472, %2349 : !torch.vtensor<[1,480,1,1],f32>, !torch.vtensor<[1,480,14,14],f32> -> !torch.vtensor<[1,480,14,14],f32>
%2474 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2475 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_641 = torch.constant.int 12
%2476 = torch.aten.item %2474 : !torch.vtensor<[],f32> -> !torch.float
%2477 = torch.aten.item %2475 : !torch.vtensor<[],si8> -> !torch.int
%2478 = torch.aten.quantize_per_tensor %2473, %2476, %2477, %int12_641 : !torch.vtensor<[1,480,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2479 = torch.aten.int_repr %2478 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],si8>
%2480 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2481 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2482 = torch.aten.item %2480 : !torch.vtensor<[],f32> -> !torch.float
%2483 = torch.aten.item %2481 : !torch.vtensor<[],si8> -> !torch.int
%2484 = torch.aten._make_per_tensor_quantized_tensor %2479, %2482, %2483 : !torch.vtensor<[1,480,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,480,14,14],!torch.qint8>
%2485 = torch.aten.dequantize.self %2484 : !torch.vtensor<[1,480,14,14],!torch.qint8> -> !torch.vtensor<[1,480,14,14],f32>
%2486 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2487 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_642 = torch.constant.int 12
%2488 = torch.aten.item %2486 : !torch.vtensor<[],f32> -> !torch.float
%2489 = torch.aten.item %2487 : !torch.vtensor<[],si8> -> !torch.int
%2490 = torch.aten.quantize_per_tensor %80, %2488, %2489, %int12_642 : !torch.vtensor<[112,480,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[112,480,1,1],!torch.qint8>
%2491 = torch.aten.int_repr %2490 : !torch.vtensor<[112,480,1,1],!torch.qint8> -> !torch.vtensor<[112,480,1,1],si8>
%2492 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2493 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2494 = torch.aten.item %2492 : !torch.vtensor<[],f32> -> !torch.float
%2495 = torch.aten.item %2493 : !torch.vtensor<[],si8> -> !torch.int
%2496 = torch.aten._make_per_tensor_quantized_tensor %2491, %2494, %2495 : !torch.vtensor<[112,480,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[112,480,1,1],!torch.qint8>
%2497 = torch.aten.dequantize.self %2496 : !torch.vtensor<[112,480,1,1],!torch.qint8> -> !torch.vtensor<[112,480,1,1],f32>
%2498 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2499 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_643 = torch.constant.int 12
%2500 = torch.aten.item %2498 : !torch.vtensor<[],f32> -> !torch.float
%2501 = torch.aten.item %2499 : !torch.vtensor<[],si8> -> !torch.int
%2502 = torch.aten.quantize_per_tensor %81, %2500, %2501, %int12_643 : !torch.vtensor<[112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[112],!torch.qint8>
%2503 = torch.aten.int_repr %2502 : !torch.vtensor<[112],!torch.qint8> -> !torch.vtensor<[112],si8>
%2504 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2505 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2506 = torch.aten.item %2504 : !torch.vtensor<[],f32> -> !torch.float
%2507 = torch.aten.item %2505 : !torch.vtensor<[],si8> -> !torch.int
%2508 = torch.aten._make_per_tensor_quantized_tensor %2503, %2506, %2507 : !torch.vtensor<[112],si8>, !torch.float, !torch.int -> !torch.vtensor<[112],!torch.qint8>
%2509 = torch.aten.dequantize.self %2508 : !torch.vtensor<[112],!torch.qint8> -> !torch.vtensor<[112],f32>
%int0_644 = torch.constant.int 0
%int0_645 = torch.constant.int 0
%int1_646 = torch.constant.int 1
%int1_647 = torch.constant.int 1
%int1_648 = torch.constant.int 1
%int1_649 = torch.constant.int 1
%int0_650 = torch.constant.int 0
%2510 = torch.prim.ListConstruct %int0_644, %int0_645 : (!torch.int, !torch.int) -> !torch.list<int>
%2511 = torch.prim.ListConstruct %int1_646, %int1_647 : (!torch.int, !torch.int) -> !torch.list<int>
%2512 = torch.prim.ListConstruct %int1_648, %int1_649 : (!torch.int, !torch.int) -> !torch.list<int>
%2513 = torch.prim.ListConstruct %int0_650, %int0_650 : (!torch.int, !torch.int) -> !torch.list<int>
%false_651 = torch.constant.bool false
%int1_652 = torch.constant.int 1
%2514 = torch.aten.convolution %2485, %2497, %2509, %2512, %2510, %2511, %false_651, %2513, %int1_652 : !torch.vtensor<[1,480,14,14],f32>, !torch.vtensor<[112,480,1,1],f32>, !torch.vtensor<[112],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,112,14,14],f32>
%2515 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2516 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_653 = torch.constant.int 12
%2517 = torch.aten.item %2515 : !torch.vtensor<[],f32> -> !torch.float
%2518 = torch.aten.item %2516 : !torch.vtensor<[],si8> -> !torch.int
%2519 = torch.aten.quantize_per_tensor %2514, %2517, %2518, %int12_653 : !torch.vtensor<[1,112,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,112,14,14],!torch.qint8>
%2520 = torch.aten.int_repr %2519 : !torch.vtensor<[1,112,14,14],!torch.qint8> -> !torch.vtensor<[1,112,14,14],si8>
%2521 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2522 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2523 = torch.aten.item %2521 : !torch.vtensor<[],f32> -> !torch.float
%2524 = torch.aten.item %2522 : !torch.vtensor<[],si8> -> !torch.int
%2525 = torch.aten._make_per_tensor_quantized_tensor %2520, %2523, %2524 : !torch.vtensor<[1,112,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,112,14,14],!torch.qint8>
%2526 = torch.aten.dequantize.self %2525 : !torch.vtensor<[1,112,14,14],!torch.qint8> -> !torch.vtensor<[1,112,14,14],f32>
%2527 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2528 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_654 = torch.constant.int 12
%2529 = torch.aten.item %2527 : !torch.vtensor<[],f32> -> !torch.float
%2530 = torch.aten.item %2528 : !torch.vtensor<[],si8> -> !torch.int
%2531 = torch.aten.quantize_per_tensor %82, %2529, %2530, %int12_654 : !torch.vtensor<[672,112,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672,112,1,1],!torch.qint8>
%2532 = torch.aten.int_repr %2531 : !torch.vtensor<[672,112,1,1],!torch.qint8> -> !torch.vtensor<[672,112,1,1],si8>
%2533 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2534 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2535 = torch.aten.item %2533 : !torch.vtensor<[],f32> -> !torch.float
%2536 = torch.aten.item %2534 : !torch.vtensor<[],si8> -> !torch.int
%2537 = torch.aten._make_per_tensor_quantized_tensor %2532, %2535, %2536 : !torch.vtensor<[672,112,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[672,112,1,1],!torch.qint8>
%2538 = torch.aten.dequantize.self %2537 : !torch.vtensor<[672,112,1,1],!torch.qint8> -> !torch.vtensor<[672,112,1,1],f32>
%2539 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2540 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_655 = torch.constant.int 12
%2541 = torch.aten.item %2539 : !torch.vtensor<[],f32> -> !torch.float
%2542 = torch.aten.item %2540 : !torch.vtensor<[],si8> -> !torch.int
%2543 = torch.aten.quantize_per_tensor %83, %2541, %2542, %int12_655 : !torch.vtensor<[672],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2544 = torch.aten.int_repr %2543 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],si8>
%2545 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2546 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2547 = torch.aten.item %2545 : !torch.vtensor<[],f32> -> !torch.float
%2548 = torch.aten.item %2546 : !torch.vtensor<[],si8> -> !torch.int
%2549 = torch.aten._make_per_tensor_quantized_tensor %2544, %2547, %2548 : !torch.vtensor<[672],si8>, !torch.float, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2550 = torch.aten.dequantize.self %2549 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],f32>
%int0_656 = torch.constant.int 0
%int0_657 = torch.constant.int 0
%int1_658 = torch.constant.int 1
%int1_659 = torch.constant.int 1
%int1_660 = torch.constant.int 1
%int1_661 = torch.constant.int 1
%int0_662 = torch.constant.int 0
%2551 = torch.prim.ListConstruct %int0_656, %int0_657 : (!torch.int, !torch.int) -> !torch.list<int>
%2552 = torch.prim.ListConstruct %int1_658, %int1_659 : (!torch.int, !torch.int) -> !torch.list<int>
%2553 = torch.prim.ListConstruct %int1_660, %int1_661 : (!torch.int, !torch.int) -> !torch.list<int>
%2554 = torch.prim.ListConstruct %int0_662, %int0_662 : (!torch.int, !torch.int) -> !torch.list<int>
%false_663 = torch.constant.bool false
%int1_664 = torch.constant.int 1
%2555 = torch.aten.convolution %2526, %2538, %2550, %2553, %2551, %2552, %false_663, %2554, %int1_664 : !torch.vtensor<[1,112,14,14],f32>, !torch.vtensor<[672,112,1,1],f32>, !torch.vtensor<[672],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,672,14,14],f32>
%2556 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2557 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_665 = torch.constant.int 12
%2558 = torch.aten.item %2556 : !torch.vtensor<[],f32> -> !torch.float
%2559 = torch.aten.item %2557 : !torch.vtensor<[],si8> -> !torch.int
%2560 = torch.aten.quantize_per_tensor %2555, %2558, %2559, %int12_665 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2561 = torch.aten.int_repr %2560 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2562 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2563 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2564 = torch.aten.item %2562 : !torch.vtensor<[],f32> -> !torch.float
%2565 = torch.aten.item %2563 : !torch.vtensor<[],si8> -> !torch.int
%2566 = torch.aten._make_per_tensor_quantized_tensor %2561, %2564, %2565 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2567 = torch.aten.dequantize.self %2566 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2568 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_666 = torch.constant.int 1
%2569 = torch.aten.add.Tensor %2567, %2568, %int1_666 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,672,14,14],f32>
%2570 = torch.aten.relu %2569 : !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2571 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2572 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_667 = torch.constant.int 6
%none_668 = torch.constant.none
%false_669 = torch.constant.bool false
%2573 = torch.aten.to.dtype %2571, %int6_667, %false_669, %false_669, %none_668 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_670 = torch.constant.int 6
%none_671 = torch.constant.none
%false_672 = torch.constant.bool false
%2574 = torch.aten.to.dtype %2572, %int6_670, %false_672, %false_672, %none_671 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2575 = torch.aten.clamp.Tensor %2570, %2573, %2574 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2576 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2577 = torch.aten.mul.Tensor %2575, %2576 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2578 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2579 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_673 = torch.constant.int 12
%2580 = torch.aten.item %2578 : !torch.vtensor<[],f32> -> !torch.float
%2581 = torch.aten.item %2579 : !torch.vtensor<[],si8> -> !torch.int
%2582 = torch.aten.quantize_per_tensor %2577, %2580, %2581, %int12_673 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2583 = torch.aten.int_repr %2582 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2584 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2585 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2586 = torch.aten.item %2584 : !torch.vtensor<[],f32> -> !torch.float
%2587 = torch.aten.item %2585 : !torch.vtensor<[],si8> -> !torch.int
%2588 = torch.aten._make_per_tensor_quantized_tensor %2583, %2586, %2587 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2589 = torch.aten.dequantize.self %2588 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2590 = torch.aten.mul.Tensor %2567, %2589 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2591 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2592 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_674 = torch.constant.int 12
%2593 = torch.aten.item %2591 : !torch.vtensor<[],f32> -> !torch.float
%2594 = torch.aten.item %2592 : !torch.vtensor<[],si8> -> !torch.int
%2595 = torch.aten.quantize_per_tensor %2590, %2593, %2594, %int12_674 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2596 = torch.aten.int_repr %2595 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2597 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2598 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2599 = torch.aten.item %2597 : !torch.vtensor<[],f32> -> !torch.float
%2600 = torch.aten.item %2598 : !torch.vtensor<[],si8> -> !torch.int
%2601 = torch.aten._make_per_tensor_quantized_tensor %2596, %2599, %2600 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2602 = torch.aten.dequantize.self %2601 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2603 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2604 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_675 = torch.constant.int 12
%2605 = torch.aten.item %2603 : !torch.vtensor<[],f32> -> !torch.float
%2606 = torch.aten.item %2604 : !torch.vtensor<[],si8> -> !torch.int
%2607 = torch.aten.quantize_per_tensor %84, %2605, %2606, %int12_675 : !torch.vtensor<[672,1,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672,1,3,3],!torch.qint8>
%2608 = torch.aten.int_repr %2607 : !torch.vtensor<[672,1,3,3],!torch.qint8> -> !torch.vtensor<[672,1,3,3],si8>
%2609 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%2610 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2611 = torch.aten.item %2609 : !torch.vtensor<[],f32> -> !torch.float
%2612 = torch.aten.item %2610 : !torch.vtensor<[],si8> -> !torch.int
%2613 = torch.aten._make_per_tensor_quantized_tensor %2608, %2611, %2612 : !torch.vtensor<[672,1,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[672,1,3,3],!torch.qint8>
%2614 = torch.aten.dequantize.self %2613 : !torch.vtensor<[672,1,3,3],!torch.qint8> -> !torch.vtensor<[672,1,3,3],f32>
%2615 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2616 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_676 = torch.constant.int 12
%2617 = torch.aten.item %2615 : !torch.vtensor<[],f32> -> !torch.float
%2618 = torch.aten.item %2616 : !torch.vtensor<[],si8> -> !torch.int
%2619 = torch.aten.quantize_per_tensor %85, %2617, %2618, %int12_676 : !torch.vtensor<[672],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2620 = torch.aten.int_repr %2619 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],si8>
%2621 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2622 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2623 = torch.aten.item %2621 : !torch.vtensor<[],f32> -> !torch.float
%2624 = torch.aten.item %2622 : !torch.vtensor<[],si8> -> !torch.int
%2625 = torch.aten._make_per_tensor_quantized_tensor %2620, %2623, %2624 : !torch.vtensor<[672],si8>, !torch.float, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2626 = torch.aten.dequantize.self %2625 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],f32>
%int1_677 = torch.constant.int 1
%int1_678 = torch.constant.int 1
%int1_679 = torch.constant.int 1
%int1_680 = torch.constant.int 1
%int1_681 = torch.constant.int 1
%int1_682 = torch.constant.int 1
%int0_683 = torch.constant.int 0
%2627 = torch.prim.ListConstruct %int1_677, %int1_678 : (!torch.int, !torch.int) -> !torch.list<int>
%2628 = torch.prim.ListConstruct %int1_679, %int1_680 : (!torch.int, !torch.int) -> !torch.list<int>
%2629 = torch.prim.ListConstruct %int1_681, %int1_682 : (!torch.int, !torch.int) -> !torch.list<int>
%2630 = torch.prim.ListConstruct %int0_683, %int0_683 : (!torch.int, !torch.int) -> !torch.list<int>
%false_684 = torch.constant.bool false
%int672 = torch.constant.int 672
%2631 = torch.aten.convolution %2602, %2614, %2626, %2629, %2627, %2628, %false_684, %2630, %int672 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[672,1,3,3],f32>, !torch.vtensor<[672],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,672,14,14],f32>
%2632 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2633 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_685 = torch.constant.int 12
%2634 = torch.aten.item %2632 : !torch.vtensor<[],f32> -> !torch.float
%2635 = torch.aten.item %2633 : !torch.vtensor<[],si8> -> !torch.int
%2636 = torch.aten.quantize_per_tensor %2631, %2634, %2635, %int12_685 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2637 = torch.aten.int_repr %2636 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2638 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2639 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2640 = torch.aten.item %2638 : !torch.vtensor<[],f32> -> !torch.float
%2641 = torch.aten.item %2639 : !torch.vtensor<[],si8> -> !torch.int
%2642 = torch.aten._make_per_tensor_quantized_tensor %2637, %2640, %2641 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2643 = torch.aten.dequantize.self %2642 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2644 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_686 = torch.constant.int 1
%2645 = torch.aten.add.Tensor %2643, %2644, %int1_686 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,672,14,14],f32>
%2646 = torch.aten.relu %2645 : !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2647 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2648 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_687 = torch.constant.int 6
%none_688 = torch.constant.none
%false_689 = torch.constant.bool false
%2649 = torch.aten.to.dtype %2647, %int6_687, %false_689, %false_689, %none_688 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_690 = torch.constant.int 6
%none_691 = torch.constant.none
%false_692 = torch.constant.bool false
%2650 = torch.aten.to.dtype %2648, %int6_690, %false_692, %false_692, %none_691 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2651 = torch.aten.clamp.Tensor %2646, %2649, %2650 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2652 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2653 = torch.aten.mul.Tensor %2651, %2652 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2654 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2655 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_693 = torch.constant.int 12
%2656 = torch.aten.item %2654 : !torch.vtensor<[],f32> -> !torch.float
%2657 = torch.aten.item %2655 : !torch.vtensor<[],si8> -> !torch.int
%2658 = torch.aten.quantize_per_tensor %2653, %2656, %2657, %int12_693 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2659 = torch.aten.int_repr %2658 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2660 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2661 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2662 = torch.aten.item %2660 : !torch.vtensor<[],f32> -> !torch.float
%2663 = torch.aten.item %2661 : !torch.vtensor<[],si8> -> !torch.int
%2664 = torch.aten._make_per_tensor_quantized_tensor %2659, %2662, %2663 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2665 = torch.aten.dequantize.self %2664 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2666 = torch.aten.mul.Tensor %2643, %2665 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2667 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2668 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_694 = torch.constant.int 12
%2669 = torch.aten.item %2667 : !torch.vtensor<[],f32> -> !torch.float
%2670 = torch.aten.item %2668 : !torch.vtensor<[],si8> -> !torch.int
%2671 = torch.aten.quantize_per_tensor %2666, %2669, %2670, %int12_694 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2672 = torch.aten.int_repr %2671 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2673 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2674 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2675 = torch.aten.item %2673 : !torch.vtensor<[],f32> -> !torch.float
%2676 = torch.aten.item %2674 : !torch.vtensor<[],si8> -> !torch.int
%2677 = torch.aten._make_per_tensor_quantized_tensor %2672, %2675, %2676 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2678 = torch.aten.dequantize.self %2677 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%int0_695 = torch.constant.int 0
%int1_696 = torch.constant.int 1
%int14_697 = torch.constant.int 14
%int14_698 = torch.constant.int 14
%2679 = torch.prim.ListConstruct %int14_697, %int14_698 : (!torch.int, !torch.int) -> !torch.list<int>
%2680 = torch.prim.ListConstruct %int0_695, %int0_695 : (!torch.int, !torch.int) -> !torch.list<int>
%2681 = torch.prim.ListConstruct %int1_696, %int1_696 : (!torch.int, !torch.int) -> !torch.list<int>
%false_699 = torch.constant.bool false
%none_700 = torch.constant.none
%2682 = torch.aten.avg_pool2d %2678, %2679, %2681, %2680, %false_699, %false_699, %none_700 : !torch.vtensor<[1,672,14,14],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,672,1,1],f32>
%2683 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%2684 = torch.aten.mul.Tensor %2682, %2683 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,1,1],f32>
%2685 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2686 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_701 = torch.constant.int 12
%2687 = torch.aten.item %2685 : !torch.vtensor<[],f32> -> !torch.float
%2688 = torch.aten.item %2686 : !torch.vtensor<[],si8> -> !torch.int
%2689 = torch.aten.quantize_per_tensor %2684, %2687, %2688, %int12_701 : !torch.vtensor<[1,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%2690 = torch.aten.int_repr %2689 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],si8>
%2691 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2692 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2693 = torch.aten.item %2691 : !torch.vtensor<[],f32> -> !torch.float
%2694 = torch.aten.item %2692 : !torch.vtensor<[],si8> -> !torch.int
%2695 = torch.aten._make_per_tensor_quantized_tensor %2690, %2693, %2694 : !torch.vtensor<[1,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%2696 = torch.aten.dequantize.self %2695 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],f32>
%2697 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2698 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_702 = torch.constant.int 12
%2699 = torch.aten.item %2697 : !torch.vtensor<[],f32> -> !torch.float
%2700 = torch.aten.item %2698 : !torch.vtensor<[],si8> -> !torch.int
%2701 = torch.aten.quantize_per_tensor %86, %2699, %2700, %int12_702 : !torch.vtensor<[168,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[168,672,1,1],!torch.qint8>
%2702 = torch.aten.int_repr %2701 : !torch.vtensor<[168,672,1,1],!torch.qint8> -> !torch.vtensor<[168,672,1,1],si8>
%2703 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2704 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2705 = torch.aten.item %2703 : !torch.vtensor<[],f32> -> !torch.float
%2706 = torch.aten.item %2704 : !torch.vtensor<[],si8> -> !torch.int
%2707 = torch.aten._make_per_tensor_quantized_tensor %2702, %2705, %2706 : !torch.vtensor<[168,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[168,672,1,1],!torch.qint8>
%2708 = torch.aten.dequantize.self %2707 : !torch.vtensor<[168,672,1,1],!torch.qint8> -> !torch.vtensor<[168,672,1,1],f32>
%2709 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2710 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_703 = torch.constant.int 12
%2711 = torch.aten.item %2709 : !torch.vtensor<[],f32> -> !torch.float
%2712 = torch.aten.item %2710 : !torch.vtensor<[],si8> -> !torch.int
%2713 = torch.aten.quantize_per_tensor %87, %2711, %2712, %int12_703 : !torch.vtensor<[168],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[168],!torch.qint8>
%2714 = torch.aten.int_repr %2713 : !torch.vtensor<[168],!torch.qint8> -> !torch.vtensor<[168],si8>
%2715 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2716 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2717 = torch.aten.item %2715 : !torch.vtensor<[],f32> -> !torch.float
%2718 = torch.aten.item %2716 : !torch.vtensor<[],si8> -> !torch.int
%2719 = torch.aten._make_per_tensor_quantized_tensor %2714, %2717, %2718 : !torch.vtensor<[168],si8>, !torch.float, !torch.int -> !torch.vtensor<[168],!torch.qint8>
%2720 = torch.aten.dequantize.self %2719 : !torch.vtensor<[168],!torch.qint8> -> !torch.vtensor<[168],f32>
%int0_704 = torch.constant.int 0
%int0_705 = torch.constant.int 0
%int1_706 = torch.constant.int 1
%int1_707 = torch.constant.int 1
%int1_708 = torch.constant.int 1
%int1_709 = torch.constant.int 1
%int0_710 = torch.constant.int 0
%2721 = torch.prim.ListConstruct %int0_704, %int0_705 : (!torch.int, !torch.int) -> !torch.list<int>
%2722 = torch.prim.ListConstruct %int1_706, %int1_707 : (!torch.int, !torch.int) -> !torch.list<int>
%2723 = torch.prim.ListConstruct %int1_708, %int1_709 : (!torch.int, !torch.int) -> !torch.list<int>
%2724 = torch.prim.ListConstruct %int0_710, %int0_710 : (!torch.int, !torch.int) -> !torch.list<int>
%false_711 = torch.constant.bool false
%int1_712 = torch.constant.int 1
%2725 = torch.aten.convolution %2696, %2708, %2720, %2723, %2721, %2722, %false_711, %2724, %int1_712 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[168,672,1,1],f32>, !torch.vtensor<[168],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,168,1,1],f32>
%2726 = torch.aten.relu %2725 : !torch.vtensor<[1,168,1,1],f32> -> !torch.vtensor<[1,168,1,1],f32>
%2727 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2728 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_713 = torch.constant.int 12
%2729 = torch.aten.item %2727 : !torch.vtensor<[],f32> -> !torch.float
%2730 = torch.aten.item %2728 : !torch.vtensor<[],si8> -> !torch.int
%2731 = torch.aten.quantize_per_tensor %2726, %2729, %2730, %int12_713 : !torch.vtensor<[1,168,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,168,1,1],!torch.qint8>
%2732 = torch.aten.int_repr %2731 : !torch.vtensor<[1,168,1,1],!torch.qint8> -> !torch.vtensor<[1,168,1,1],si8>
%2733 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2734 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2735 = torch.aten.item %2733 : !torch.vtensor<[],f32> -> !torch.float
%2736 = torch.aten.item %2734 : !torch.vtensor<[],si8> -> !torch.int
%2737 = torch.aten._make_per_tensor_quantized_tensor %2732, %2735, %2736 : !torch.vtensor<[1,168,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,168,1,1],!torch.qint8>
%2738 = torch.aten.dequantize.self %2737 : !torch.vtensor<[1,168,1,1],!torch.qint8> -> !torch.vtensor<[1,168,1,1],f32>
%2739 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2740 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_714 = torch.constant.int 12
%2741 = torch.aten.item %2739 : !torch.vtensor<[],f32> -> !torch.float
%2742 = torch.aten.item %2740 : !torch.vtensor<[],si8> -> !torch.int
%2743 = torch.aten.quantize_per_tensor %88, %2741, %2742, %int12_714 : !torch.vtensor<[672,168,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672,168,1,1],!torch.qint8>
%2744 = torch.aten.int_repr %2743 : !torch.vtensor<[672,168,1,1],!torch.qint8> -> !torch.vtensor<[672,168,1,1],si8>
%2745 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2746 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2747 = torch.aten.item %2745 : !torch.vtensor<[],f32> -> !torch.float
%2748 = torch.aten.item %2746 : !torch.vtensor<[],si8> -> !torch.int
%2749 = torch.aten._make_per_tensor_quantized_tensor %2744, %2747, %2748 : !torch.vtensor<[672,168,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[672,168,1,1],!torch.qint8>
%2750 = torch.aten.dequantize.self %2749 : !torch.vtensor<[672,168,1,1],!torch.qint8> -> !torch.vtensor<[672,168,1,1],f32>
%2751 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2752 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_715 = torch.constant.int 12
%2753 = torch.aten.item %2751 : !torch.vtensor<[],f32> -> !torch.float
%2754 = torch.aten.item %2752 : !torch.vtensor<[],si8> -> !torch.int
%2755 = torch.aten.quantize_per_tensor %89, %2753, %2754, %int12_715 : !torch.vtensor<[672],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2756 = torch.aten.int_repr %2755 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],si8>
%2757 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2758 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2759 = torch.aten.item %2757 : !torch.vtensor<[],f32> -> !torch.float
%2760 = torch.aten.item %2758 : !torch.vtensor<[],si8> -> !torch.int
%2761 = torch.aten._make_per_tensor_quantized_tensor %2756, %2759, %2760 : !torch.vtensor<[672],si8>, !torch.float, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2762 = torch.aten.dequantize.self %2761 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],f32>
%int0_716 = torch.constant.int 0
%int0_717 = torch.constant.int 0
%int1_718 = torch.constant.int 1
%int1_719 = torch.constant.int 1
%int1_720 = torch.constant.int 1
%int1_721 = torch.constant.int 1
%int0_722 = torch.constant.int 0
%2763 = torch.prim.ListConstruct %int0_716, %int0_717 : (!torch.int, !torch.int) -> !torch.list<int>
%2764 = torch.prim.ListConstruct %int1_718, %int1_719 : (!torch.int, !torch.int) -> !torch.list<int>
%2765 = torch.prim.ListConstruct %int1_720, %int1_721 : (!torch.int, !torch.int) -> !torch.list<int>
%2766 = torch.prim.ListConstruct %int0_722, %int0_722 : (!torch.int, !torch.int) -> !torch.list<int>
%false_723 = torch.constant.bool false
%int1_724 = torch.constant.int 1
%2767 = torch.aten.convolution %2738, %2750, %2762, %2765, %2763, %2764, %false_723, %2766, %int1_724 : !torch.vtensor<[1,168,1,1],f32>, !torch.vtensor<[672,168,1,1],f32>, !torch.vtensor<[672],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,672,1,1],f32>
%2768 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2769 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_725 = torch.constant.int 12
%2770 = torch.aten.item %2768 : !torch.vtensor<[],f32> -> !torch.float
%2771 = torch.aten.item %2769 : !torch.vtensor<[],si8> -> !torch.int
%2772 = torch.aten.quantize_per_tensor %2767, %2770, %2771, %int12_725 : !torch.vtensor<[1,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%2773 = torch.aten.int_repr %2772 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],si8>
%2774 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2775 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2776 = torch.aten.item %2774 : !torch.vtensor<[],f32> -> !torch.float
%2777 = torch.aten.item %2775 : !torch.vtensor<[],si8> -> !torch.int
%2778 = torch.aten._make_per_tensor_quantized_tensor %2773, %2776, %2777 : !torch.vtensor<[1,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%2779 = torch.aten.dequantize.self %2778 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],f32>
%2780 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_726 = torch.constant.int 1
%2781 = torch.aten.add.Tensor %2779, %2780, %int1_726 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,672,1,1],f32>
%2782 = torch.aten.relu %2781 : !torch.vtensor<[1,672,1,1],f32> -> !torch.vtensor<[1,672,1,1],f32>
%2783 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2784 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_727 = torch.constant.int 6
%none_728 = torch.constant.none
%false_729 = torch.constant.bool false
%2785 = torch.aten.to.dtype %2783, %int6_727, %false_729, %false_729, %none_728 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_730 = torch.constant.int 6
%none_731 = torch.constant.none
%false_732 = torch.constant.bool false
%2786 = torch.aten.to.dtype %2784, %int6_730, %false_732, %false_732, %none_731 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2787 = torch.aten.clamp.Tensor %2782, %2785, %2786 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,1,1],f32>
%2788 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2789 = torch.aten.mul.Tensor %2787, %2788 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,1,1],f32>
%2790 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2791 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_733 = torch.constant.int 12
%2792 = torch.aten.item %2790 : !torch.vtensor<[],f32> -> !torch.float
%2793 = torch.aten.item %2791 : !torch.vtensor<[],si8> -> !torch.int
%2794 = torch.aten.quantize_per_tensor %2789, %2792, %2793, %int12_733 : !torch.vtensor<[1,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%2795 = torch.aten.int_repr %2794 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],si8>
%2796 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2797 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2798 = torch.aten.item %2796 : !torch.vtensor<[],f32> -> !torch.float
%2799 = torch.aten.item %2797 : !torch.vtensor<[],si8> -> !torch.int
%2800 = torch.aten._make_per_tensor_quantized_tensor %2795, %2798, %2799 : !torch.vtensor<[1,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%2801 = torch.aten.dequantize.self %2800 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],f32>
%2802 = torch.aten.mul.Tensor %2801, %2678 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2803 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2804 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_734 = torch.constant.int 12
%2805 = torch.aten.item %2803 : !torch.vtensor<[],f32> -> !torch.float
%2806 = torch.aten.item %2804 : !torch.vtensor<[],si8> -> !torch.int
%2807 = torch.aten.quantize_per_tensor %2802, %2805, %2806, %int12_734 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2808 = torch.aten.int_repr %2807 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2809 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2810 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2811 = torch.aten.item %2809 : !torch.vtensor<[],f32> -> !torch.float
%2812 = torch.aten.item %2810 : !torch.vtensor<[],si8> -> !torch.int
%2813 = torch.aten._make_per_tensor_quantized_tensor %2808, %2811, %2812 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2814 = torch.aten.dequantize.self %2813 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2815 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2816 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_735 = torch.constant.int 12
%2817 = torch.aten.item %2815 : !torch.vtensor<[],f32> -> !torch.float
%2818 = torch.aten.item %2816 : !torch.vtensor<[],si8> -> !torch.int
%2819 = torch.aten.quantize_per_tensor %90, %2817, %2818, %int12_735 : !torch.vtensor<[112,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[112,672,1,1],!torch.qint8>
%2820 = torch.aten.int_repr %2819 : !torch.vtensor<[112,672,1,1],!torch.qint8> -> !torch.vtensor<[112,672,1,1],si8>
%2821 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2822 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2823 = torch.aten.item %2821 : !torch.vtensor<[],f32> -> !torch.float
%2824 = torch.aten.item %2822 : !torch.vtensor<[],si8> -> !torch.int
%2825 = torch.aten._make_per_tensor_quantized_tensor %2820, %2823, %2824 : !torch.vtensor<[112,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[112,672,1,1],!torch.qint8>
%2826 = torch.aten.dequantize.self %2825 : !torch.vtensor<[112,672,1,1],!torch.qint8> -> !torch.vtensor<[112,672,1,1],f32>
%2827 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2828 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_736 = torch.constant.int 12
%2829 = torch.aten.item %2827 : !torch.vtensor<[],f32> -> !torch.float
%2830 = torch.aten.item %2828 : !torch.vtensor<[],si8> -> !torch.int
%2831 = torch.aten.quantize_per_tensor %91, %2829, %2830, %int12_736 : !torch.vtensor<[112],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[112],!torch.qint8>
%2832 = torch.aten.int_repr %2831 : !torch.vtensor<[112],!torch.qint8> -> !torch.vtensor<[112],si8>
%2833 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2834 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2835 = torch.aten.item %2833 : !torch.vtensor<[],f32> -> !torch.float
%2836 = torch.aten.item %2834 : !torch.vtensor<[],si8> -> !torch.int
%2837 = torch.aten._make_per_tensor_quantized_tensor %2832, %2835, %2836 : !torch.vtensor<[112],si8>, !torch.float, !torch.int -> !torch.vtensor<[112],!torch.qint8>
%2838 = torch.aten.dequantize.self %2837 : !torch.vtensor<[112],!torch.qint8> -> !torch.vtensor<[112],f32>
%int0_737 = torch.constant.int 0
%int0_738 = torch.constant.int 0
%int1_739 = torch.constant.int 1
%int1_740 = torch.constant.int 1
%int1_741 = torch.constant.int 1
%int1_742 = torch.constant.int 1
%int0_743 = torch.constant.int 0
%2839 = torch.prim.ListConstruct %int0_737, %int0_738 : (!torch.int, !torch.int) -> !torch.list<int>
%2840 = torch.prim.ListConstruct %int1_739, %int1_740 : (!torch.int, !torch.int) -> !torch.list<int>
%2841 = torch.prim.ListConstruct %int1_741, %int1_742 : (!torch.int, !torch.int) -> !torch.list<int>
%2842 = torch.prim.ListConstruct %int0_743, %int0_743 : (!torch.int, !torch.int) -> !torch.list<int>
%false_744 = torch.constant.bool false
%int1_745 = torch.constant.int 1
%2843 = torch.aten.convolution %2814, %2826, %2838, %2841, %2839, %2840, %false_744, %2842, %int1_745 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[112,672,1,1],f32>, !torch.vtensor<[112],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,112,14,14],f32>
%2844 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2845 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_746 = torch.constant.int 12
%2846 = torch.aten.item %2844 : !torch.vtensor<[],f32> -> !torch.float
%2847 = torch.aten.item %2845 : !torch.vtensor<[],si8> -> !torch.int
%2848 = torch.aten.quantize_per_tensor %2843, %2846, %2847, %int12_746 : !torch.vtensor<[1,112,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,112,14,14],!torch.qint8>
%2849 = torch.aten.int_repr %2848 : !torch.vtensor<[1,112,14,14],!torch.qint8> -> !torch.vtensor<[1,112,14,14],si8>
%2850 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2851 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2852 = torch.aten.item %2850 : !torch.vtensor<[],f32> -> !torch.float
%2853 = torch.aten.item %2851 : !torch.vtensor<[],si8> -> !torch.int
%2854 = torch.aten._make_per_tensor_quantized_tensor %2849, %2852, %2853 : !torch.vtensor<[1,112,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,112,14,14],!torch.qint8>
%2855 = torch.aten.dequantize.self %2854 : !torch.vtensor<[1,112,14,14],!torch.qint8> -> !torch.vtensor<[1,112,14,14],f32>
%int1_747 = torch.constant.int 1
%2856 = torch.aten.add.Tensor %2855, %2526, %int1_747 : !torch.vtensor<[1,112,14,14],f32>, !torch.vtensor<[1,112,14,14],f32>, !torch.int -> !torch.vtensor<[1,112,14,14],f32>
%2857 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2858 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_748 = torch.constant.int 12
%2859 = torch.aten.item %2857 : !torch.vtensor<[],f32> -> !torch.float
%2860 = torch.aten.item %2858 : !torch.vtensor<[],si8> -> !torch.int
%2861 = torch.aten.quantize_per_tensor %2856, %2859, %2860, %int12_748 : !torch.vtensor<[1,112,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,112,14,14],!torch.qint8>
%2862 = torch.aten.int_repr %2861 : !torch.vtensor<[1,112,14,14],!torch.qint8> -> !torch.vtensor<[1,112,14,14],si8>
%2863 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2864 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2865 = torch.aten.item %2863 : !torch.vtensor<[],f32> -> !torch.float
%2866 = torch.aten.item %2864 : !torch.vtensor<[],si8> -> !torch.int
%2867 = torch.aten._make_per_tensor_quantized_tensor %2862, %2865, %2866 : !torch.vtensor<[1,112,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,112,14,14],!torch.qint8>
%2868 = torch.aten.dequantize.self %2867 : !torch.vtensor<[1,112,14,14],!torch.qint8> -> !torch.vtensor<[1,112,14,14],f32>
%2869 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2870 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_749 = torch.constant.int 12
%2871 = torch.aten.item %2869 : !torch.vtensor<[],f32> -> !torch.float
%2872 = torch.aten.item %2870 : !torch.vtensor<[],si8> -> !torch.int
%2873 = torch.aten.quantize_per_tensor %92, %2871, %2872, %int12_749 : !torch.vtensor<[672,112,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672,112,1,1],!torch.qint8>
%2874 = torch.aten.int_repr %2873 : !torch.vtensor<[672,112,1,1],!torch.qint8> -> !torch.vtensor<[672,112,1,1],si8>
%2875 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2876 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2877 = torch.aten.item %2875 : !torch.vtensor<[],f32> -> !torch.float
%2878 = torch.aten.item %2876 : !torch.vtensor<[],si8> -> !torch.int
%2879 = torch.aten._make_per_tensor_quantized_tensor %2874, %2877, %2878 : !torch.vtensor<[672,112,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[672,112,1,1],!torch.qint8>
%2880 = torch.aten.dequantize.self %2879 : !torch.vtensor<[672,112,1,1],!torch.qint8> -> !torch.vtensor<[672,112,1,1],f32>
%2881 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2882 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_750 = torch.constant.int 12
%2883 = torch.aten.item %2881 : !torch.vtensor<[],f32> -> !torch.float
%2884 = torch.aten.item %2882 : !torch.vtensor<[],si8> -> !torch.int
%2885 = torch.aten.quantize_per_tensor %93, %2883, %2884, %int12_750 : !torch.vtensor<[672],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2886 = torch.aten.int_repr %2885 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],si8>
%2887 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2888 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2889 = torch.aten.item %2887 : !torch.vtensor<[],f32> -> !torch.float
%2890 = torch.aten.item %2888 : !torch.vtensor<[],si8> -> !torch.int
%2891 = torch.aten._make_per_tensor_quantized_tensor %2886, %2889, %2890 : !torch.vtensor<[672],si8>, !torch.float, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2892 = torch.aten.dequantize.self %2891 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],f32>
%int0_751 = torch.constant.int 0
%int0_752 = torch.constant.int 0
%int1_753 = torch.constant.int 1
%int1_754 = torch.constant.int 1
%int1_755 = torch.constant.int 1
%int1_756 = torch.constant.int 1
%int0_757 = torch.constant.int 0
%2893 = torch.prim.ListConstruct %int0_751, %int0_752 : (!torch.int, !torch.int) -> !torch.list<int>
%2894 = torch.prim.ListConstruct %int1_753, %int1_754 : (!torch.int, !torch.int) -> !torch.list<int>
%2895 = torch.prim.ListConstruct %int1_755, %int1_756 : (!torch.int, !torch.int) -> !torch.list<int>
%2896 = torch.prim.ListConstruct %int0_757, %int0_757 : (!torch.int, !torch.int) -> !torch.list<int>
%false_758 = torch.constant.bool false
%int1_759 = torch.constant.int 1
%2897 = torch.aten.convolution %2868, %2880, %2892, %2895, %2893, %2894, %false_758, %2896, %int1_759 : !torch.vtensor<[1,112,14,14],f32>, !torch.vtensor<[672,112,1,1],f32>, !torch.vtensor<[672],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,672,14,14],f32>
%2898 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2899 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_760 = torch.constant.int 12
%2900 = torch.aten.item %2898 : !torch.vtensor<[],f32> -> !torch.float
%2901 = torch.aten.item %2899 : !torch.vtensor<[],si8> -> !torch.int
%2902 = torch.aten.quantize_per_tensor %2897, %2900, %2901, %int12_760 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2903 = torch.aten.int_repr %2902 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2904 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2905 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2906 = torch.aten.item %2904 : !torch.vtensor<[],f32> -> !torch.float
%2907 = torch.aten.item %2905 : !torch.vtensor<[],si8> -> !torch.int
%2908 = torch.aten._make_per_tensor_quantized_tensor %2903, %2906, %2907 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2909 = torch.aten.dequantize.self %2908 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2910 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_761 = torch.constant.int 1
%2911 = torch.aten.add.Tensor %2909, %2910, %int1_761 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,672,14,14],f32>
%2912 = torch.aten.relu %2911 : !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2913 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2914 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_762 = torch.constant.int 6
%none_763 = torch.constant.none
%false_764 = torch.constant.bool false
%2915 = torch.aten.to.dtype %2913, %int6_762, %false_764, %false_764, %none_763 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_765 = torch.constant.int 6
%none_766 = torch.constant.none
%false_767 = torch.constant.bool false
%2916 = torch.aten.to.dtype %2914, %int6_765, %false_767, %false_767, %none_766 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2917 = torch.aten.clamp.Tensor %2912, %2915, %2916 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2918 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2919 = torch.aten.mul.Tensor %2917, %2918 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2920 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2921 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_768 = torch.constant.int 12
%2922 = torch.aten.item %2920 : !torch.vtensor<[],f32> -> !torch.float
%2923 = torch.aten.item %2921 : !torch.vtensor<[],si8> -> !torch.int
%2924 = torch.aten.quantize_per_tensor %2919, %2922, %2923, %int12_768 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2925 = torch.aten.int_repr %2924 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2926 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2927 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2928 = torch.aten.item %2926 : !torch.vtensor<[],f32> -> !torch.float
%2929 = torch.aten.item %2927 : !torch.vtensor<[],si8> -> !torch.int
%2930 = torch.aten._make_per_tensor_quantized_tensor %2925, %2928, %2929 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2931 = torch.aten.dequantize.self %2930 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2932 = torch.aten.mul.Tensor %2909, %2931 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2933 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2934 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_769 = torch.constant.int 12
%2935 = torch.aten.item %2933 : !torch.vtensor<[],f32> -> !torch.float
%2936 = torch.aten.item %2934 : !torch.vtensor<[],si8> -> !torch.int
%2937 = torch.aten.quantize_per_tensor %2932, %2935, %2936, %int12_769 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2938 = torch.aten.int_repr %2937 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2939 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2940 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2941 = torch.aten.item %2939 : !torch.vtensor<[],f32> -> !torch.float
%2942 = torch.aten.item %2940 : !torch.vtensor<[],si8> -> !torch.int
%2943 = torch.aten._make_per_tensor_quantized_tensor %2938, %2941, %2942 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2944 = torch.aten.dequantize.self %2943 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2945 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2946 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_770 = torch.constant.int 12
%2947 = torch.aten.item %2945 : !torch.vtensor<[],f32> -> !torch.float
%2948 = torch.aten.item %2946 : !torch.vtensor<[],si8> -> !torch.int
%2949 = torch.aten.quantize_per_tensor %94, %2947, %2948, %int12_770 : !torch.vtensor<[672,1,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672,1,5,5],!torch.qint8>
%2950 = torch.aten.int_repr %2949 : !torch.vtensor<[672,1,5,5],!torch.qint8> -> !torch.vtensor<[672,1,5,5],si8>
%2951 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2952 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2953 = torch.aten.item %2951 : !torch.vtensor<[],f32> -> !torch.float
%2954 = torch.aten.item %2952 : !torch.vtensor<[],si8> -> !torch.int
%2955 = torch.aten._make_per_tensor_quantized_tensor %2950, %2953, %2954 : !torch.vtensor<[672,1,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[672,1,5,5],!torch.qint8>
%2956 = torch.aten.dequantize.self %2955 : !torch.vtensor<[672,1,5,5],!torch.qint8> -> !torch.vtensor<[672,1,5,5],f32>
%2957 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2958 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_771 = torch.constant.int 12
%2959 = torch.aten.item %2957 : !torch.vtensor<[],f32> -> !torch.float
%2960 = torch.aten.item %2958 : !torch.vtensor<[],si8> -> !torch.int
%2961 = torch.aten.quantize_per_tensor %95, %2959, %2960, %int12_771 : !torch.vtensor<[672],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2962 = torch.aten.int_repr %2961 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],si8>
%2963 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2964 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2965 = torch.aten.item %2963 : !torch.vtensor<[],f32> -> !torch.float
%2966 = torch.aten.item %2964 : !torch.vtensor<[],si8> -> !torch.int
%2967 = torch.aten._make_per_tensor_quantized_tensor %2962, %2965, %2966 : !torch.vtensor<[672],si8>, !torch.float, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%2968 = torch.aten.dequantize.self %2967 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],f32>
%int4 = torch.constant.int 4
%int4_772 = torch.constant.int 4
%int2_773 = torch.constant.int 2
%int2_774 = torch.constant.int 2
%int1_775 = torch.constant.int 1
%int1_776 = torch.constant.int 1
%int0_777 = torch.constant.int 0
%2969 = torch.prim.ListConstruct %int4, %int4_772 : (!torch.int, !torch.int) -> !torch.list<int>
%2970 = torch.prim.ListConstruct %int2_773, %int2_774 : (!torch.int, !torch.int) -> !torch.list<int>
%2971 = torch.prim.ListConstruct %int1_775, %int1_776 : (!torch.int, !torch.int) -> !torch.list<int>
%2972 = torch.prim.ListConstruct %int0_777, %int0_777 : (!torch.int, !torch.int) -> !torch.list<int>
%false_778 = torch.constant.bool false
%int672_779 = torch.constant.int 672
%2973 = torch.aten.convolution %2944, %2956, %2968, %2971, %2969, %2970, %false_778, %2972, %int672_779 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[672,1,5,5],f32>, !torch.vtensor<[672],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,672,14,14],f32>
%2974 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2975 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_780 = torch.constant.int 12
%2976 = torch.aten.item %2974 : !torch.vtensor<[],f32> -> !torch.float
%2977 = torch.aten.item %2975 : !torch.vtensor<[],si8> -> !torch.int
%2978 = torch.aten.quantize_per_tensor %2973, %2976, %2977, %int12_780 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2979 = torch.aten.int_repr %2978 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%2980 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%2981 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%2982 = torch.aten.item %2980 : !torch.vtensor<[],f32> -> !torch.float
%2983 = torch.aten.item %2981 : !torch.vtensor<[],si8> -> !torch.int
%2984 = torch.aten._make_per_tensor_quantized_tensor %2979, %2982, %2983 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%2985 = torch.aten.dequantize.self %2984 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%2986 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_781 = torch.constant.int 1
%2987 = torch.aten.add.Tensor %2985, %2986, %int1_781 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,672,14,14],f32>
%2988 = torch.aten.relu %2987 : !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2989 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%2990 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_782 = torch.constant.int 6
%none_783 = torch.constant.none
%false_784 = torch.constant.bool false
%2991 = torch.aten.to.dtype %2989, %int6_782, %false_784, %false_784, %none_783 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_785 = torch.constant.int 6
%none_786 = torch.constant.none
%false_787 = torch.constant.bool false
%2992 = torch.aten.to.dtype %2990, %int6_785, %false_787, %false_787, %none_786 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%2993 = torch.aten.clamp.Tensor %2988, %2991, %2992 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2994 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%2995 = torch.aten.mul.Tensor %2993, %2994 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,14,14],f32>
%2996 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%2997 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_788 = torch.constant.int 12
%2998 = torch.aten.item %2996 : !torch.vtensor<[],f32> -> !torch.float
%2999 = torch.aten.item %2997 : !torch.vtensor<[],si8> -> !torch.int
%3000 = torch.aten.quantize_per_tensor %2995, %2998, %2999, %int12_788 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%3001 = torch.aten.int_repr %3000 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%3002 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3003 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3004 = torch.aten.item %3002 : !torch.vtensor<[],f32> -> !torch.float
%3005 = torch.aten.item %3003 : !torch.vtensor<[],si8> -> !torch.int
%3006 = torch.aten._make_per_tensor_quantized_tensor %3001, %3004, %3005 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%3007 = torch.aten.dequantize.self %3006 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%3008 = torch.aten.mul.Tensor %2985, %3007 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%3009 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3010 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_789 = torch.constant.int 12
%3011 = torch.aten.item %3009 : !torch.vtensor<[],f32> -> !torch.float
%3012 = torch.aten.item %3010 : !torch.vtensor<[],si8> -> !torch.int
%3013 = torch.aten.quantize_per_tensor %3008, %3011, %3012, %int12_789 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%3014 = torch.aten.int_repr %3013 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%3015 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3016 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3017 = torch.aten.item %3015 : !torch.vtensor<[],f32> -> !torch.float
%3018 = torch.aten.item %3016 : !torch.vtensor<[],si8> -> !torch.int
%3019 = torch.aten._make_per_tensor_quantized_tensor %3014, %3017, %3018 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%3020 = torch.aten.dequantize.self %3019 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%int0_790 = torch.constant.int 0
%int1_791 = torch.constant.int 1
%int14_792 = torch.constant.int 14
%int14_793 = torch.constant.int 14
%3021 = torch.prim.ListConstruct %int14_792, %int14_793 : (!torch.int, !torch.int) -> !torch.list<int>
%3022 = torch.prim.ListConstruct %int0_790, %int0_790 : (!torch.int, !torch.int) -> !torch.list<int>
%3023 = torch.prim.ListConstruct %int1_791, %int1_791 : (!torch.int, !torch.int) -> !torch.list<int>
%false_794 = torch.constant.bool false
%none_795 = torch.constant.none
%3024 = torch.aten.avg_pool2d %3020, %3021, %3023, %3022, %false_794, %false_794, %none_795 : !torch.vtensor<[1,672,14,14],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,672,1,1],f32>
%3025 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%3026 = torch.aten.mul.Tensor %3024, %3025 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,1,1],f32>
%3027 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3028 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_796 = torch.constant.int 12
%3029 = torch.aten.item %3027 : !torch.vtensor<[],f32> -> !torch.float
%3030 = torch.aten.item %3028 : !torch.vtensor<[],si8> -> !torch.int
%3031 = torch.aten.quantize_per_tensor %3026, %3029, %3030, %int12_796 : !torch.vtensor<[1,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%3032 = torch.aten.int_repr %3031 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],si8>
%3033 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3034 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3035 = torch.aten.item %3033 : !torch.vtensor<[],f32> -> !torch.float
%3036 = torch.aten.item %3034 : !torch.vtensor<[],si8> -> !torch.int
%3037 = torch.aten._make_per_tensor_quantized_tensor %3032, %3035, %3036 : !torch.vtensor<[1,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%3038 = torch.aten.dequantize.self %3037 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],f32>
%3039 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3040 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_797 = torch.constant.int 12
%3041 = torch.aten.item %3039 : !torch.vtensor<[],f32> -> !torch.float
%3042 = torch.aten.item %3040 : !torch.vtensor<[],si8> -> !torch.int
%3043 = torch.aten.quantize_per_tensor %96, %3041, %3042, %int12_797 : !torch.vtensor<[168,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[168,672,1,1],!torch.qint8>
%3044 = torch.aten.int_repr %3043 : !torch.vtensor<[168,672,1,1],!torch.qint8> -> !torch.vtensor<[168,672,1,1],si8>
%3045 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3046 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3047 = torch.aten.item %3045 : !torch.vtensor<[],f32> -> !torch.float
%3048 = torch.aten.item %3046 : !torch.vtensor<[],si8> -> !torch.int
%3049 = torch.aten._make_per_tensor_quantized_tensor %3044, %3047, %3048 : !torch.vtensor<[168,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[168,672,1,1],!torch.qint8>
%3050 = torch.aten.dequantize.self %3049 : !torch.vtensor<[168,672,1,1],!torch.qint8> -> !torch.vtensor<[168,672,1,1],f32>
%3051 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3052 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_798 = torch.constant.int 12
%3053 = torch.aten.item %3051 : !torch.vtensor<[],f32> -> !torch.float
%3054 = torch.aten.item %3052 : !torch.vtensor<[],si8> -> !torch.int
%3055 = torch.aten.quantize_per_tensor %97, %3053, %3054, %int12_798 : !torch.vtensor<[168],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[168],!torch.qint8>
%3056 = torch.aten.int_repr %3055 : !torch.vtensor<[168],!torch.qint8> -> !torch.vtensor<[168],si8>
%3057 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3058 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3059 = torch.aten.item %3057 : !torch.vtensor<[],f32> -> !torch.float
%3060 = torch.aten.item %3058 : !torch.vtensor<[],si8> -> !torch.int
%3061 = torch.aten._make_per_tensor_quantized_tensor %3056, %3059, %3060 : !torch.vtensor<[168],si8>, !torch.float, !torch.int -> !torch.vtensor<[168],!torch.qint8>
%3062 = torch.aten.dequantize.self %3061 : !torch.vtensor<[168],!torch.qint8> -> !torch.vtensor<[168],f32>
%int0_799 = torch.constant.int 0
%int0_800 = torch.constant.int 0
%int1_801 = torch.constant.int 1
%int1_802 = torch.constant.int 1
%int1_803 = torch.constant.int 1
%int1_804 = torch.constant.int 1
%int0_805 = torch.constant.int 0
%3063 = torch.prim.ListConstruct %int0_799, %int0_800 : (!torch.int, !torch.int) -> !torch.list<int>
%3064 = torch.prim.ListConstruct %int1_801, %int1_802 : (!torch.int, !torch.int) -> !torch.list<int>
%3065 = torch.prim.ListConstruct %int1_803, %int1_804 : (!torch.int, !torch.int) -> !torch.list<int>
%3066 = torch.prim.ListConstruct %int0_805, %int0_805 : (!torch.int, !torch.int) -> !torch.list<int>
%false_806 = torch.constant.bool false
%int1_807 = torch.constant.int 1
%3067 = torch.aten.convolution %3038, %3050, %3062, %3065, %3063, %3064, %false_806, %3066, %int1_807 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[168,672,1,1],f32>, !torch.vtensor<[168],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,168,1,1],f32>
%3068 = torch.aten.relu %3067 : !torch.vtensor<[1,168,1,1],f32> -> !torch.vtensor<[1,168,1,1],f32>
%3069 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3070 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_808 = torch.constant.int 12
%3071 = torch.aten.item %3069 : !torch.vtensor<[],f32> -> !torch.float
%3072 = torch.aten.item %3070 : !torch.vtensor<[],si8> -> !torch.int
%3073 = torch.aten.quantize_per_tensor %3068, %3071, %3072, %int12_808 : !torch.vtensor<[1,168,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,168,1,1],!torch.qint8>
%3074 = torch.aten.int_repr %3073 : !torch.vtensor<[1,168,1,1],!torch.qint8> -> !torch.vtensor<[1,168,1,1],si8>
%3075 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3076 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3077 = torch.aten.item %3075 : !torch.vtensor<[],f32> -> !torch.float
%3078 = torch.aten.item %3076 : !torch.vtensor<[],si8> -> !torch.int
%3079 = torch.aten._make_per_tensor_quantized_tensor %3074, %3077, %3078 : !torch.vtensor<[1,168,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,168,1,1],!torch.qint8>
%3080 = torch.aten.dequantize.self %3079 : !torch.vtensor<[1,168,1,1],!torch.qint8> -> !torch.vtensor<[1,168,1,1],f32>
%3081 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3082 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_809 = torch.constant.int 12
%3083 = torch.aten.item %3081 : !torch.vtensor<[],f32> -> !torch.float
%3084 = torch.aten.item %3082 : !torch.vtensor<[],si8> -> !torch.int
%3085 = torch.aten.quantize_per_tensor %98, %3083, %3084, %int12_809 : !torch.vtensor<[672,168,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672,168,1,1],!torch.qint8>
%3086 = torch.aten.int_repr %3085 : !torch.vtensor<[672,168,1,1],!torch.qint8> -> !torch.vtensor<[672,168,1,1],si8>
%3087 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3088 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3089 = torch.aten.item %3087 : !torch.vtensor<[],f32> -> !torch.float
%3090 = torch.aten.item %3088 : !torch.vtensor<[],si8> -> !torch.int
%3091 = torch.aten._make_per_tensor_quantized_tensor %3086, %3089, %3090 : !torch.vtensor<[672,168,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[672,168,1,1],!torch.qint8>
%3092 = torch.aten.dequantize.self %3091 : !torch.vtensor<[672,168,1,1],!torch.qint8> -> !torch.vtensor<[672,168,1,1],f32>
%3093 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3094 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_810 = torch.constant.int 12
%3095 = torch.aten.item %3093 : !torch.vtensor<[],f32> -> !torch.float
%3096 = torch.aten.item %3094 : !torch.vtensor<[],si8> -> !torch.int
%3097 = torch.aten.quantize_per_tensor %99, %3095, %3096, %int12_810 : !torch.vtensor<[672],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%3098 = torch.aten.int_repr %3097 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],si8>
%3099 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3100 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3101 = torch.aten.item %3099 : !torch.vtensor<[],f32> -> !torch.float
%3102 = torch.aten.item %3100 : !torch.vtensor<[],si8> -> !torch.int
%3103 = torch.aten._make_per_tensor_quantized_tensor %3098, %3101, %3102 : !torch.vtensor<[672],si8>, !torch.float, !torch.int -> !torch.vtensor<[672],!torch.qint8>
%3104 = torch.aten.dequantize.self %3103 : !torch.vtensor<[672],!torch.qint8> -> !torch.vtensor<[672],f32>
%int0_811 = torch.constant.int 0
%int0_812 = torch.constant.int 0
%int1_813 = torch.constant.int 1
%int1_814 = torch.constant.int 1
%int1_815 = torch.constant.int 1
%int1_816 = torch.constant.int 1
%int0_817 = torch.constant.int 0
%3105 = torch.prim.ListConstruct %int0_811, %int0_812 : (!torch.int, !torch.int) -> !torch.list<int>
%3106 = torch.prim.ListConstruct %int1_813, %int1_814 : (!torch.int, !torch.int) -> !torch.list<int>
%3107 = torch.prim.ListConstruct %int1_815, %int1_816 : (!torch.int, !torch.int) -> !torch.list<int>
%3108 = torch.prim.ListConstruct %int0_817, %int0_817 : (!torch.int, !torch.int) -> !torch.list<int>
%false_818 = torch.constant.bool false
%int1_819 = torch.constant.int 1
%3109 = torch.aten.convolution %3080, %3092, %3104, %3107, %3105, %3106, %false_818, %3108, %int1_819 : !torch.vtensor<[1,168,1,1],f32>, !torch.vtensor<[672,168,1,1],f32>, !torch.vtensor<[672],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,672,1,1],f32>
%3110 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3111 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_820 = torch.constant.int 12
%3112 = torch.aten.item %3110 : !torch.vtensor<[],f32> -> !torch.float
%3113 = torch.aten.item %3111 : !torch.vtensor<[],si8> -> !torch.int
%3114 = torch.aten.quantize_per_tensor %3109, %3112, %3113, %int12_820 : !torch.vtensor<[1,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%3115 = torch.aten.int_repr %3114 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],si8>
%3116 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3117 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3118 = torch.aten.item %3116 : !torch.vtensor<[],f32> -> !torch.float
%3119 = torch.aten.item %3117 : !torch.vtensor<[],si8> -> !torch.int
%3120 = torch.aten._make_per_tensor_quantized_tensor %3115, %3118, %3119 : !torch.vtensor<[1,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%3121 = torch.aten.dequantize.self %3120 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],f32>
%3122 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_821 = torch.constant.int 1
%3123 = torch.aten.add.Tensor %3121, %3122, %int1_821 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,672,1,1],f32>
%3124 = torch.aten.relu %3123 : !torch.vtensor<[1,672,1,1],f32> -> !torch.vtensor<[1,672,1,1],f32>
%3125 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%3126 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_822 = torch.constant.int 6
%none_823 = torch.constant.none
%false_824 = torch.constant.bool false
%3127 = torch.aten.to.dtype %3125, %int6_822, %false_824, %false_824, %none_823 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_825 = torch.constant.int 6
%none_826 = torch.constant.none
%false_827 = torch.constant.bool false
%3128 = torch.aten.to.dtype %3126, %int6_825, %false_827, %false_827, %none_826 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%3129 = torch.aten.clamp.Tensor %3124, %3127, %3128 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,1,1],f32>
%3130 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%3131 = torch.aten.mul.Tensor %3129, %3130 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,672,1,1],f32>
%3132 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3133 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_828 = torch.constant.int 12
%3134 = torch.aten.item %3132 : !torch.vtensor<[],f32> -> !torch.float
%3135 = torch.aten.item %3133 : !torch.vtensor<[],si8> -> !torch.int
%3136 = torch.aten.quantize_per_tensor %3131, %3134, %3135, %int12_828 : !torch.vtensor<[1,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%3137 = torch.aten.int_repr %3136 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],si8>
%3138 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3139 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3140 = torch.aten.item %3138 : !torch.vtensor<[],f32> -> !torch.float
%3141 = torch.aten.item %3139 : !torch.vtensor<[],si8> -> !torch.int
%3142 = torch.aten._make_per_tensor_quantized_tensor %3137, %3140, %3141 : !torch.vtensor<[1,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,1,1],!torch.qint8>
%3143 = torch.aten.dequantize.self %3142 : !torch.vtensor<[1,672,1,1],!torch.qint8> -> !torch.vtensor<[1,672,1,1],f32>
%3144 = torch.aten.mul.Tensor %3143, %3020 : !torch.vtensor<[1,672,1,1],f32>, !torch.vtensor<[1,672,14,14],f32> -> !torch.vtensor<[1,672,14,14],f32>
%3145 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3146 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_829 = torch.constant.int 12
%3147 = torch.aten.item %3145 : !torch.vtensor<[],f32> -> !torch.float
%3148 = torch.aten.item %3146 : !torch.vtensor<[],si8> -> !torch.int
%3149 = torch.aten.quantize_per_tensor %3144, %3147, %3148, %int12_829 : !torch.vtensor<[1,672,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%3150 = torch.aten.int_repr %3149 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],si8>
%3151 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3153 = torch.aten.item %3151 : !torch.vtensor<[],f32> -> !torch.float
%3154 = torch.aten.item %3152 : !torch.vtensor<[],si8> -> !torch.int
%3155 = torch.aten._make_per_tensor_quantized_tensor %3150, %3153, %3154 : !torch.vtensor<[1,672,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,672,14,14],!torch.qint8>
%3156 = torch.aten.dequantize.self %3155 : !torch.vtensor<[1,672,14,14],!torch.qint8> -> !torch.vtensor<[1,672,14,14],f32>
%3157 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3158 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_830 = torch.constant.int 12
%3159 = torch.aten.item %3157 : !torch.vtensor<[],f32> -> !torch.float
%3160 = torch.aten.item %3158 : !torch.vtensor<[],si8> -> !torch.int
%3161 = torch.aten.quantize_per_tensor %100, %3159, %3160, %int12_830 : !torch.vtensor<[160,672,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[160,672,1,1],!torch.qint8>
%3162 = torch.aten.int_repr %3161 : !torch.vtensor<[160,672,1,1],!torch.qint8> -> !torch.vtensor<[160,672,1,1],si8>
%3163 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3164 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3165 = torch.aten.item %3163 : !torch.vtensor<[],f32> -> !torch.float
%3166 = torch.aten.item %3164 : !torch.vtensor<[],si8> -> !torch.int
%3167 = torch.aten._make_per_tensor_quantized_tensor %3162, %3165, %3166 : !torch.vtensor<[160,672,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[160,672,1,1],!torch.qint8>
%3168 = torch.aten.dequantize.self %3167 : !torch.vtensor<[160,672,1,1],!torch.qint8> -> !torch.vtensor<[160,672,1,1],f32>
%3169 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3170 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_831 = torch.constant.int 12
%3171 = torch.aten.item %3169 : !torch.vtensor<[],f32> -> !torch.float
%3172 = torch.aten.item %3170 : !torch.vtensor<[],si8> -> !torch.int
%3173 = torch.aten.quantize_per_tensor %101, %3171, %3172, %int12_831 : !torch.vtensor<[160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[160],!torch.qint8>
%3174 = torch.aten.int_repr %3173 : !torch.vtensor<[160],!torch.qint8> -> !torch.vtensor<[160],si8>
%3175 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3176 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3177 = torch.aten.item %3175 : !torch.vtensor<[],f32> -> !torch.float
%3178 = torch.aten.item %3176 : !torch.vtensor<[],si8> -> !torch.int
%3179 = torch.aten._make_per_tensor_quantized_tensor %3174, %3177, %3178 : !torch.vtensor<[160],si8>, !torch.float, !torch.int -> !torch.vtensor<[160],!torch.qint8>
%3180 = torch.aten.dequantize.self %3179 : !torch.vtensor<[160],!torch.qint8> -> !torch.vtensor<[160],f32>
%int0_832 = torch.constant.int 0
%int0_833 = torch.constant.int 0
%int1_834 = torch.constant.int 1
%int1_835 = torch.constant.int 1
%int1_836 = torch.constant.int 1
%int1_837 = torch.constant.int 1
%int0_838 = torch.constant.int 0
%3181 = torch.prim.ListConstruct %int0_832, %int0_833 : (!torch.int, !torch.int) -> !torch.list<int>
%3182 = torch.prim.ListConstruct %int1_834, %int1_835 : (!torch.int, !torch.int) -> !torch.list<int>
%3183 = torch.prim.ListConstruct %int1_836, %int1_837 : (!torch.int, !torch.int) -> !torch.list<int>
%3184 = torch.prim.ListConstruct %int0_838, %int0_838 : (!torch.int, !torch.int) -> !torch.list<int>
%false_839 = torch.constant.bool false
%int1_840 = torch.constant.int 1
%3185 = torch.aten.convolution %3156, %3168, %3180, %3183, %3181, %3182, %false_839, %3184, %int1_840 : !torch.vtensor<[1,672,14,14],f32>, !torch.vtensor<[160,672,1,1],f32>, !torch.vtensor<[160],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,160,14,14],f32>
%3186 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3187 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_841 = torch.constant.int 12
%3188 = torch.aten.item %3186 : !torch.vtensor<[],f32> -> !torch.float
%3189 = torch.aten.item %3187 : !torch.vtensor<[],si8> -> !torch.int
%3190 = torch.aten.quantize_per_tensor %3185, %3188, %3189, %int12_841 : !torch.vtensor<[1,160,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3191 = torch.aten.int_repr %3190 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],si8>
%3192 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3193 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3194 = torch.aten.item %3192 : !torch.vtensor<[],f32> -> !torch.float
%3195 = torch.aten.item %3193 : !torch.vtensor<[],si8> -> !torch.int
%3196 = torch.aten._make_per_tensor_quantized_tensor %3191, %3194, %3195 : !torch.vtensor<[1,160,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3197 = torch.aten.dequantize.self %3196 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],f32>
%3198 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3199 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_842 = torch.constant.int 12
%3200 = torch.aten.item %3198 : !torch.vtensor<[],f32> -> !torch.float
%3201 = torch.aten.item %3199 : !torch.vtensor<[],si8> -> !torch.int
%3202 = torch.aten.quantize_per_tensor %102, %3200, %3201, %int12_842 : !torch.vtensor<[960,160,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960,160,1,1],!torch.qint8>
%3203 = torch.aten.int_repr %3202 : !torch.vtensor<[960,160,1,1],!torch.qint8> -> !torch.vtensor<[960,160,1,1],si8>
%3204 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3205 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3206 = torch.aten.item %3204 : !torch.vtensor<[],f32> -> !torch.float
%3207 = torch.aten.item %3205 : !torch.vtensor<[],si8> -> !torch.int
%3208 = torch.aten._make_per_tensor_quantized_tensor %3203, %3206, %3207 : !torch.vtensor<[960,160,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[960,160,1,1],!torch.qint8>
%3209 = torch.aten.dequantize.self %3208 : !torch.vtensor<[960,160,1,1],!torch.qint8> -> !torch.vtensor<[960,160,1,1],f32>
%3210 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3211 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_843 = torch.constant.int 12
%3212 = torch.aten.item %3210 : !torch.vtensor<[],f32> -> !torch.float
%3213 = torch.aten.item %3211 : !torch.vtensor<[],si8> -> !torch.int
%3214 = torch.aten.quantize_per_tensor %103, %3212, %3213, %int12_843 : !torch.vtensor<[960],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3215 = torch.aten.int_repr %3214 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],si8>
%3216 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3217 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3218 = torch.aten.item %3216 : !torch.vtensor<[],f32> -> !torch.float
%3219 = torch.aten.item %3217 : !torch.vtensor<[],si8> -> !torch.int
%3220 = torch.aten._make_per_tensor_quantized_tensor %3215, %3218, %3219 : !torch.vtensor<[960],si8>, !torch.float, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3221 = torch.aten.dequantize.self %3220 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],f32>
%int0_844 = torch.constant.int 0
%int0_845 = torch.constant.int 0
%int1_846 = torch.constant.int 1
%int1_847 = torch.constant.int 1
%int1_848 = torch.constant.int 1
%int1_849 = torch.constant.int 1
%int0_850 = torch.constant.int 0
%3222 = torch.prim.ListConstruct %int0_844, %int0_845 : (!torch.int, !torch.int) -> !torch.list<int>
%3223 = torch.prim.ListConstruct %int1_846, %int1_847 : (!torch.int, !torch.int) -> !torch.list<int>
%3224 = torch.prim.ListConstruct %int1_848, %int1_849 : (!torch.int, !torch.int) -> !torch.list<int>
%3225 = torch.prim.ListConstruct %int0_850, %int0_850 : (!torch.int, !torch.int) -> !torch.list<int>
%false_851 = torch.constant.bool false
%int1_852 = torch.constant.int 1
%3226 = torch.aten.convolution %3197, %3209, %3221, %3224, %3222, %3223, %false_851, %3225, %int1_852 : !torch.vtensor<[1,160,14,14],f32>, !torch.vtensor<[960,160,1,1],f32>, !torch.vtensor<[960],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3227 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3228 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_853 = torch.constant.int 12
%3229 = torch.aten.item %3227 : !torch.vtensor<[],f32> -> !torch.float
%3230 = torch.aten.item %3228 : !torch.vtensor<[],si8> -> !torch.int
%3231 = torch.aten.quantize_per_tensor %3226, %3229, %3230, %int12_853 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3232 = torch.aten.int_repr %3231 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3233 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3234 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3235 = torch.aten.item %3233 : !torch.vtensor<[],f32> -> !torch.float
%3236 = torch.aten.item %3234 : !torch.vtensor<[],si8> -> !torch.int
%3237 = torch.aten._make_per_tensor_quantized_tensor %3232, %3235, %3236 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3238 = torch.aten.dequantize.self %3237 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3239 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_854 = torch.constant.int 1
%3240 = torch.aten.add.Tensor %3238, %3239, %int1_854 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3241 = torch.aten.relu %3240 : !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3242 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%3243 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_855 = torch.constant.int 6
%none_856 = torch.constant.none
%false_857 = torch.constant.bool false
%3244 = torch.aten.to.dtype %3242, %int6_855, %false_857, %false_857, %none_856 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_858 = torch.constant.int 6
%none_859 = torch.constant.none
%false_860 = torch.constant.bool false
%3245 = torch.aten.to.dtype %3243, %int6_858, %false_860, %false_860, %none_859 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%3246 = torch.aten.clamp.Tensor %3241, %3244, %3245 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3247 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%3248 = torch.aten.mul.Tensor %3246, %3247 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3249 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3250 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_861 = torch.constant.int 12
%3251 = torch.aten.item %3249 : !torch.vtensor<[],f32> -> !torch.float
%3252 = torch.aten.item %3250 : !torch.vtensor<[],si8> -> !torch.int
%3253 = torch.aten.quantize_per_tensor %3248, %3251, %3252, %int12_861 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3254 = torch.aten.int_repr %3253 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3255 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3256 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3257 = torch.aten.item %3255 : !torch.vtensor<[],f32> -> !torch.float
%3258 = torch.aten.item %3256 : !torch.vtensor<[],si8> -> !torch.int
%3259 = torch.aten._make_per_tensor_quantized_tensor %3254, %3257, %3258 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3260 = torch.aten.dequantize.self %3259 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3261 = torch.aten.mul.Tensor %3238, %3260 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3262 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3263 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_862 = torch.constant.int 12
%3264 = torch.aten.item %3262 : !torch.vtensor<[],f32> -> !torch.float
%3265 = torch.aten.item %3263 : !torch.vtensor<[],si8> -> !torch.int
%3266 = torch.aten.quantize_per_tensor %3261, %3264, %3265, %int12_862 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3267 = torch.aten.int_repr %3266 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3268 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3269 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3270 = torch.aten.item %3268 : !torch.vtensor<[],f32> -> !torch.float
%3271 = torch.aten.item %3269 : !torch.vtensor<[],si8> -> !torch.int
%3272 = torch.aten._make_per_tensor_quantized_tensor %3267, %3270, %3271 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3273 = torch.aten.dequantize.self %3272 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3274 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3275 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_863 = torch.constant.int 12
%3276 = torch.aten.item %3274 : !torch.vtensor<[],f32> -> !torch.float
%3277 = torch.aten.item %3275 : !torch.vtensor<[],si8> -> !torch.int
%3278 = torch.aten.quantize_per_tensor %104, %3276, %3277, %int12_863 : !torch.vtensor<[960,1,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960,1,5,5],!torch.qint8>
%3279 = torch.aten.int_repr %3278 : !torch.vtensor<[960,1,5,5],!torch.qint8> -> !torch.vtensor<[960,1,5,5],si8>
%3280 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3281 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3282 = torch.aten.item %3280 : !torch.vtensor<[],f32> -> !torch.float
%3283 = torch.aten.item %3281 : !torch.vtensor<[],si8> -> !torch.int
%3284 = torch.aten._make_per_tensor_quantized_tensor %3279, %3282, %3283 : !torch.vtensor<[960,1,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[960,1,5,5],!torch.qint8>
%3285 = torch.aten.dequantize.self %3284 : !torch.vtensor<[960,1,5,5],!torch.qint8> -> !torch.vtensor<[960,1,5,5],f32>
%3286 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3287 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_864 = torch.constant.int 12
%3288 = torch.aten.item %3286 : !torch.vtensor<[],f32> -> !torch.float
%3289 = torch.aten.item %3287 : !torch.vtensor<[],si8> -> !torch.int
%3290 = torch.aten.quantize_per_tensor %105, %3288, %3289, %int12_864 : !torch.vtensor<[960],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3291 = torch.aten.int_repr %3290 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],si8>
%3292 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3293 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3294 = torch.aten.item %3292 : !torch.vtensor<[],f32> -> !torch.float
%3295 = torch.aten.item %3293 : !torch.vtensor<[],si8> -> !torch.int
%3296 = torch.aten._make_per_tensor_quantized_tensor %3291, %3294, %3295 : !torch.vtensor<[960],si8>, !torch.float, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3297 = torch.aten.dequantize.self %3296 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],f32>
%int4_865 = torch.constant.int 4
%int4_866 = torch.constant.int 4
%int2_867 = torch.constant.int 2
%int2_868 = torch.constant.int 2
%int1_869 = torch.constant.int 1
%int1_870 = torch.constant.int 1
%int0_871 = torch.constant.int 0
%3298 = torch.prim.ListConstruct %int4_865, %int4_866 : (!torch.int, !torch.int) -> !torch.list<int>
%3299 = torch.prim.ListConstruct %int2_867, %int2_868 : (!torch.int, !torch.int) -> !torch.list<int>
%3300 = torch.prim.ListConstruct %int1_869, %int1_870 : (!torch.int, !torch.int) -> !torch.list<int>
%3301 = torch.prim.ListConstruct %int0_871, %int0_871 : (!torch.int, !torch.int) -> !torch.list<int>
%false_872 = torch.constant.bool false
%int960 = torch.constant.int 960
%3302 = torch.aten.convolution %3273, %3285, %3297, %3300, %3298, %3299, %false_872, %3301, %int960 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[960,1,5,5],f32>, !torch.vtensor<[960],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3303 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3304 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_873 = torch.constant.int 12
%3305 = torch.aten.item %3303 : !torch.vtensor<[],f32> -> !torch.float
%3306 = torch.aten.item %3304 : !torch.vtensor<[],si8> -> !torch.int
%3307 = torch.aten.quantize_per_tensor %3302, %3305, %3306, %int12_873 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3308 = torch.aten.int_repr %3307 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3309 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3310 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3311 = torch.aten.item %3309 : !torch.vtensor<[],f32> -> !torch.float
%3312 = torch.aten.item %3310 : !torch.vtensor<[],si8> -> !torch.int
%3313 = torch.aten._make_per_tensor_quantized_tensor %3308, %3311, %3312 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3314 = torch.aten.dequantize.self %3313 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3315 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_874 = torch.constant.int 1
%3316 = torch.aten.add.Tensor %3314, %3315, %int1_874 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3317 = torch.aten.relu %3316 : !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3318 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%3319 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_875 = torch.constant.int 6
%none_876 = torch.constant.none
%false_877 = torch.constant.bool false
%3320 = torch.aten.to.dtype %3318, %int6_875, %false_877, %false_877, %none_876 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_878 = torch.constant.int 6
%none_879 = torch.constant.none
%false_880 = torch.constant.bool false
%3321 = torch.aten.to.dtype %3319, %int6_878, %false_880, %false_880, %none_879 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%3322 = torch.aten.clamp.Tensor %3317, %3320, %3321 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3323 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%3324 = torch.aten.mul.Tensor %3322, %3323 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3325 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3326 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_881 = torch.constant.int 12
%3327 = torch.aten.item %3325 : !torch.vtensor<[],f32> -> !torch.float
%3328 = torch.aten.item %3326 : !torch.vtensor<[],si8> -> !torch.int
%3329 = torch.aten.quantize_per_tensor %3324, %3327, %3328, %int12_881 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3330 = torch.aten.int_repr %3329 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3331 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3332 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3333 = torch.aten.item %3331 : !torch.vtensor<[],f32> -> !torch.float
%3334 = torch.aten.item %3332 : !torch.vtensor<[],si8> -> !torch.int
%3335 = torch.aten._make_per_tensor_quantized_tensor %3330, %3333, %3334 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3336 = torch.aten.dequantize.self %3335 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3337 = torch.aten.mul.Tensor %3314, %3336 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3338 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3339 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_882 = torch.constant.int 12
%3340 = torch.aten.item %3338 : !torch.vtensor<[],f32> -> !torch.float
%3341 = torch.aten.item %3339 : !torch.vtensor<[],si8> -> !torch.int
%3342 = torch.aten.quantize_per_tensor %3337, %3340, %3341, %int12_882 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3343 = torch.aten.int_repr %3342 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3344 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3345 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3346 = torch.aten.item %3344 : !torch.vtensor<[],f32> -> !torch.float
%3347 = torch.aten.item %3345 : !torch.vtensor<[],si8> -> !torch.int
%3348 = torch.aten._make_per_tensor_quantized_tensor %3343, %3346, %3347 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3349 = torch.aten.dequantize.self %3348 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%int0_883 = torch.constant.int 0
%int1_884 = torch.constant.int 1
%int14_885 = torch.constant.int 14
%int14_886 = torch.constant.int 14
%3350 = torch.prim.ListConstruct %int14_885, %int14_886 : (!torch.int, !torch.int) -> !torch.list<int>
%3351 = torch.prim.ListConstruct %int0_883, %int0_883 : (!torch.int, !torch.int) -> !torch.list<int>
%3352 = torch.prim.ListConstruct %int1_884, %int1_884 : (!torch.int, !torch.int) -> !torch.list<int>
%false_887 = torch.constant.bool false
%none_888 = torch.constant.none
%3353 = torch.aten.avg_pool2d %3349, %3350, %3352, %3351, %false_887, %false_887, %none_888 : !torch.vtensor<[1,960,14,14],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,960,1,1],f32>
%3354 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%3355 = torch.aten.mul.Tensor %3353, %3354 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,1,1],f32>
%3356 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3357 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_889 = torch.constant.int 12
%3358 = torch.aten.item %3356 : !torch.vtensor<[],f32> -> !torch.float
%3359 = torch.aten.item %3357 : !torch.vtensor<[],si8> -> !torch.int
%3360 = torch.aten.quantize_per_tensor %3355, %3358, %3359, %int12_889 : !torch.vtensor<[1,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3361 = torch.aten.int_repr %3360 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],si8>
%3362 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3363 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3364 = torch.aten.item %3362 : !torch.vtensor<[],f32> -> !torch.float
%3365 = torch.aten.item %3363 : !torch.vtensor<[],si8> -> !torch.int
%3366 = torch.aten._make_per_tensor_quantized_tensor %3361, %3364, %3365 : !torch.vtensor<[1,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3367 = torch.aten.dequantize.self %3366 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],f32>
%3368 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3369 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_890 = torch.constant.int 12
%3370 = torch.aten.item %3368 : !torch.vtensor<[],f32> -> !torch.float
%3371 = torch.aten.item %3369 : !torch.vtensor<[],si8> -> !torch.int
%3372 = torch.aten.quantize_per_tensor %106, %3370, %3371, %int12_890 : !torch.vtensor<[240,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[240,960,1,1],!torch.qint8>
%3373 = torch.aten.int_repr %3372 : !torch.vtensor<[240,960,1,1],!torch.qint8> -> !torch.vtensor<[240,960,1,1],si8>
%3374 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3375 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3376 = torch.aten.item %3374 : !torch.vtensor<[],f32> -> !torch.float
%3377 = torch.aten.item %3375 : !torch.vtensor<[],si8> -> !torch.int
%3378 = torch.aten._make_per_tensor_quantized_tensor %3373, %3376, %3377 : !torch.vtensor<[240,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[240,960,1,1],!torch.qint8>
%3379 = torch.aten.dequantize.self %3378 : !torch.vtensor<[240,960,1,1],!torch.qint8> -> !torch.vtensor<[240,960,1,1],f32>
%3380 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3381 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_891 = torch.constant.int 12
%3382 = torch.aten.item %3380 : !torch.vtensor<[],f32> -> !torch.float
%3383 = torch.aten.item %3381 : !torch.vtensor<[],si8> -> !torch.int
%3384 = torch.aten.quantize_per_tensor %107, %3382, %3383, %int12_891 : !torch.vtensor<[240],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[240],!torch.qint8>
%3385 = torch.aten.int_repr %3384 : !torch.vtensor<[240],!torch.qint8> -> !torch.vtensor<[240],si8>
%3386 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3387 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3388 = torch.aten.item %3386 : !torch.vtensor<[],f32> -> !torch.float
%3389 = torch.aten.item %3387 : !torch.vtensor<[],si8> -> !torch.int
%3390 = torch.aten._make_per_tensor_quantized_tensor %3385, %3388, %3389 : !torch.vtensor<[240],si8>, !torch.float, !torch.int -> !torch.vtensor<[240],!torch.qint8>
%3391 = torch.aten.dequantize.self %3390 : !torch.vtensor<[240],!torch.qint8> -> !torch.vtensor<[240],f32>
%int0_892 = torch.constant.int 0
%int0_893 = torch.constant.int 0
%int1_894 = torch.constant.int 1
%int1_895 = torch.constant.int 1
%int1_896 = torch.constant.int 1
%int1_897 = torch.constant.int 1
%int0_898 = torch.constant.int 0
%3392 = torch.prim.ListConstruct %int0_892, %int0_893 : (!torch.int, !torch.int) -> !torch.list<int>
%3393 = torch.prim.ListConstruct %int1_894, %int1_895 : (!torch.int, !torch.int) -> !torch.list<int>
%3394 = torch.prim.ListConstruct %int1_896, %int1_897 : (!torch.int, !torch.int) -> !torch.list<int>
%3395 = torch.prim.ListConstruct %int0_898, %int0_898 : (!torch.int, !torch.int) -> !torch.list<int>
%false_899 = torch.constant.bool false
%int1_900 = torch.constant.int 1
%3396 = torch.aten.convolution %3367, %3379, %3391, %3394, %3392, %3393, %false_899, %3395, %int1_900 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[240,960,1,1],f32>, !torch.vtensor<[240],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,240,1,1],f32>
%3397 = torch.aten.relu %3396 : !torch.vtensor<[1,240,1,1],f32> -> !torch.vtensor<[1,240,1,1],f32>
%3398 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3399 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_901 = torch.constant.int 12
%3400 = torch.aten.item %3398 : !torch.vtensor<[],f32> -> !torch.float
%3401 = torch.aten.item %3399 : !torch.vtensor<[],si8> -> !torch.int
%3402 = torch.aten.quantize_per_tensor %3397, %3400, %3401, %int12_901 : !torch.vtensor<[1,240,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,240,1,1],!torch.qint8>
%3403 = torch.aten.int_repr %3402 : !torch.vtensor<[1,240,1,1],!torch.qint8> -> !torch.vtensor<[1,240,1,1],si8>
%3404 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3405 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3406 = torch.aten.item %3404 : !torch.vtensor<[],f32> -> !torch.float
%3407 = torch.aten.item %3405 : !torch.vtensor<[],si8> -> !torch.int
%3408 = torch.aten._make_per_tensor_quantized_tensor %3403, %3406, %3407 : !torch.vtensor<[1,240,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,240,1,1],!torch.qint8>
%3409 = torch.aten.dequantize.self %3408 : !torch.vtensor<[1,240,1,1],!torch.qint8> -> !torch.vtensor<[1,240,1,1],f32>
%3410 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3411 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_902 = torch.constant.int 12
%3412 = torch.aten.item %3410 : !torch.vtensor<[],f32> -> !torch.float
%3413 = torch.aten.item %3411 : !torch.vtensor<[],si8> -> !torch.int
%3414 = torch.aten.quantize_per_tensor %108, %3412, %3413, %int12_902 : !torch.vtensor<[960,240,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960,240,1,1],!torch.qint8>
%3415 = torch.aten.int_repr %3414 : !torch.vtensor<[960,240,1,1],!torch.qint8> -> !torch.vtensor<[960,240,1,1],si8>
%3416 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3417 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3418 = torch.aten.item %3416 : !torch.vtensor<[],f32> -> !torch.float
%3419 = torch.aten.item %3417 : !torch.vtensor<[],si8> -> !torch.int
%3420 = torch.aten._make_per_tensor_quantized_tensor %3415, %3418, %3419 : !torch.vtensor<[960,240,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[960,240,1,1],!torch.qint8>
%3421 = torch.aten.dequantize.self %3420 : !torch.vtensor<[960,240,1,1],!torch.qint8> -> !torch.vtensor<[960,240,1,1],f32>
%3422 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3423 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_903 = torch.constant.int 12
%3424 = torch.aten.item %3422 : !torch.vtensor<[],f32> -> !torch.float
%3425 = torch.aten.item %3423 : !torch.vtensor<[],si8> -> !torch.int
%3426 = torch.aten.quantize_per_tensor %109, %3424, %3425, %int12_903 : !torch.vtensor<[960],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3427 = torch.aten.int_repr %3426 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],si8>
%3428 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3429 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3430 = torch.aten.item %3428 : !torch.vtensor<[],f32> -> !torch.float
%3431 = torch.aten.item %3429 : !torch.vtensor<[],si8> -> !torch.int
%3432 = torch.aten._make_per_tensor_quantized_tensor %3427, %3430, %3431 : !torch.vtensor<[960],si8>, !torch.float, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3433 = torch.aten.dequantize.self %3432 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],f32>
%int0_904 = torch.constant.int 0
%int0_905 = torch.constant.int 0
%int1_906 = torch.constant.int 1
%int1_907 = torch.constant.int 1
%int1_908 = torch.constant.int 1
%int1_909 = torch.constant.int 1
%int0_910 = torch.constant.int 0
%3434 = torch.prim.ListConstruct %int0_904, %int0_905 : (!torch.int, !torch.int) -> !torch.list<int>
%3435 = torch.prim.ListConstruct %int1_906, %int1_907 : (!torch.int, !torch.int) -> !torch.list<int>
%3436 = torch.prim.ListConstruct %int1_908, %int1_909 : (!torch.int, !torch.int) -> !torch.list<int>
%3437 = torch.prim.ListConstruct %int0_910, %int0_910 : (!torch.int, !torch.int) -> !torch.list<int>
%false_911 = torch.constant.bool false
%int1_912 = torch.constant.int 1
%3438 = torch.aten.convolution %3409, %3421, %3433, %3436, %3434, %3435, %false_911, %3437, %int1_912 : !torch.vtensor<[1,240,1,1],f32>, !torch.vtensor<[960,240,1,1],f32>, !torch.vtensor<[960],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,960,1,1],f32>
%3439 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3440 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_913 = torch.constant.int 12
%3441 = torch.aten.item %3439 : !torch.vtensor<[],f32> -> !torch.float
%3442 = torch.aten.item %3440 : !torch.vtensor<[],si8> -> !torch.int
%3443 = torch.aten.quantize_per_tensor %3438, %3441, %3442, %int12_913 : !torch.vtensor<[1,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3444 = torch.aten.int_repr %3443 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],si8>
%3445 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3446 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3447 = torch.aten.item %3445 : !torch.vtensor<[],f32> -> !torch.float
%3448 = torch.aten.item %3446 : !torch.vtensor<[],si8> -> !torch.int
%3449 = torch.aten._make_per_tensor_quantized_tensor %3444, %3447, %3448 : !torch.vtensor<[1,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3450 = torch.aten.dequantize.self %3449 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],f32>
%3451 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_914 = torch.constant.int 1
%3452 = torch.aten.add.Tensor %3450, %3451, %int1_914 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,960,1,1],f32>
%3453 = torch.aten.relu %3452 : !torch.vtensor<[1,960,1,1],f32> -> !torch.vtensor<[1,960,1,1],f32>
%3454 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%3455 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_915 = torch.constant.int 6
%none_916 = torch.constant.none
%false_917 = torch.constant.bool false
%3456 = torch.aten.to.dtype %3454, %int6_915, %false_917, %false_917, %none_916 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_918 = torch.constant.int 6
%none_919 = torch.constant.none
%false_920 = torch.constant.bool false
%3457 = torch.aten.to.dtype %3455, %int6_918, %false_920, %false_920, %none_919 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%3458 = torch.aten.clamp.Tensor %3453, %3456, %3457 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,1,1],f32>
%3459 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%3460 = torch.aten.mul.Tensor %3458, %3459 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,1,1],f32>
%3461 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3462 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_921 = torch.constant.int 12
%3463 = torch.aten.item %3461 : !torch.vtensor<[],f32> -> !torch.float
%3464 = torch.aten.item %3462 : !torch.vtensor<[],si8> -> !torch.int
%3465 = torch.aten.quantize_per_tensor %3460, %3463, %3464, %int12_921 : !torch.vtensor<[1,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3466 = torch.aten.int_repr %3465 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],si8>
%3467 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3468 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3469 = torch.aten.item %3467 : !torch.vtensor<[],f32> -> !torch.float
%3470 = torch.aten.item %3468 : !torch.vtensor<[],si8> -> !torch.int
%3471 = torch.aten._make_per_tensor_quantized_tensor %3466, %3469, %3470 : !torch.vtensor<[1,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3472 = torch.aten.dequantize.self %3471 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],f32>
%3473 = torch.aten.mul.Tensor %3472, %3349 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3474 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3475 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_922 = torch.constant.int 12
%3476 = torch.aten.item %3474 : !torch.vtensor<[],f32> -> !torch.float
%3477 = torch.aten.item %3475 : !torch.vtensor<[],si8> -> !torch.int
%3478 = torch.aten.quantize_per_tensor %3473, %3476, %3477, %int12_922 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3479 = torch.aten.int_repr %3478 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3480 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3481 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3482 = torch.aten.item %3480 : !torch.vtensor<[],f32> -> !torch.float
%3483 = torch.aten.item %3481 : !torch.vtensor<[],si8> -> !torch.int
%3484 = torch.aten._make_per_tensor_quantized_tensor %3479, %3482, %3483 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3485 = torch.aten.dequantize.self %3484 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3486 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3487 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_923 = torch.constant.int 12
%3488 = torch.aten.item %3486 : !torch.vtensor<[],f32> -> !torch.float
%3489 = torch.aten.item %3487 : !torch.vtensor<[],si8> -> !torch.int
%3490 = torch.aten.quantize_per_tensor %110, %3488, %3489, %int12_923 : !torch.vtensor<[160,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[160,960,1,1],!torch.qint8>
%3491 = torch.aten.int_repr %3490 : !torch.vtensor<[160,960,1,1],!torch.qint8> -> !torch.vtensor<[160,960,1,1],si8>
%3492 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3493 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3494 = torch.aten.item %3492 : !torch.vtensor<[],f32> -> !torch.float
%3495 = torch.aten.item %3493 : !torch.vtensor<[],si8> -> !torch.int
%3496 = torch.aten._make_per_tensor_quantized_tensor %3491, %3494, %3495 : !torch.vtensor<[160,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[160,960,1,1],!torch.qint8>
%3497 = torch.aten.dequantize.self %3496 : !torch.vtensor<[160,960,1,1],!torch.qint8> -> !torch.vtensor<[160,960,1,1],f32>
%3498 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3499 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_924 = torch.constant.int 12
%3500 = torch.aten.item %3498 : !torch.vtensor<[],f32> -> !torch.float
%3501 = torch.aten.item %3499 : !torch.vtensor<[],si8> -> !torch.int
%3502 = torch.aten.quantize_per_tensor %111, %3500, %3501, %int12_924 : !torch.vtensor<[160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[160],!torch.qint8>
%3503 = torch.aten.int_repr %3502 : !torch.vtensor<[160],!torch.qint8> -> !torch.vtensor<[160],si8>
%3504 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3505 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3506 = torch.aten.item %3504 : !torch.vtensor<[],f32> -> !torch.float
%3507 = torch.aten.item %3505 : !torch.vtensor<[],si8> -> !torch.int
%3508 = torch.aten._make_per_tensor_quantized_tensor %3503, %3506, %3507 : !torch.vtensor<[160],si8>, !torch.float, !torch.int -> !torch.vtensor<[160],!torch.qint8>
%3509 = torch.aten.dequantize.self %3508 : !torch.vtensor<[160],!torch.qint8> -> !torch.vtensor<[160],f32>
%int0_925 = torch.constant.int 0
%int0_926 = torch.constant.int 0
%int1_927 = torch.constant.int 1
%int1_928 = torch.constant.int 1
%int1_929 = torch.constant.int 1
%int1_930 = torch.constant.int 1
%int0_931 = torch.constant.int 0
%3510 = torch.prim.ListConstruct %int0_925, %int0_926 : (!torch.int, !torch.int) -> !torch.list<int>
%3511 = torch.prim.ListConstruct %int1_927, %int1_928 : (!torch.int, !torch.int) -> !torch.list<int>
%3512 = torch.prim.ListConstruct %int1_929, %int1_930 : (!torch.int, !torch.int) -> !torch.list<int>
%3513 = torch.prim.ListConstruct %int0_931, %int0_931 : (!torch.int, !torch.int) -> !torch.list<int>
%false_932 = torch.constant.bool false
%int1_933 = torch.constant.int 1
%3514 = torch.aten.convolution %3485, %3497, %3509, %3512, %3510, %3511, %false_932, %3513, %int1_933 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[160,960,1,1],f32>, !torch.vtensor<[160],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,160,14,14],f32>
%3515 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3516 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_934 = torch.constant.int 12
%3517 = torch.aten.item %3515 : !torch.vtensor<[],f32> -> !torch.float
%3518 = torch.aten.item %3516 : !torch.vtensor<[],si8> -> !torch.int
%3519 = torch.aten.quantize_per_tensor %3514, %3517, %3518, %int12_934 : !torch.vtensor<[1,160,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3520 = torch.aten.int_repr %3519 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],si8>
%3521 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3522 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3523 = torch.aten.item %3521 : !torch.vtensor<[],f32> -> !torch.float
%3524 = torch.aten.item %3522 : !torch.vtensor<[],si8> -> !torch.int
%3525 = torch.aten._make_per_tensor_quantized_tensor %3520, %3523, %3524 : !torch.vtensor<[1,160,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3526 = torch.aten.dequantize.self %3525 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],f32>
%int1_935 = torch.constant.int 1
%3527 = torch.aten.add.Tensor %3526, %3197, %int1_935 : !torch.vtensor<[1,160,14,14],f32>, !torch.vtensor<[1,160,14,14],f32>, !torch.int -> !torch.vtensor<[1,160,14,14],f32>
%3528 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3529 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_936 = torch.constant.int 12
%3530 = torch.aten.item %3528 : !torch.vtensor<[],f32> -> !torch.float
%3531 = torch.aten.item %3529 : !torch.vtensor<[],si8> -> !torch.int
%3532 = torch.aten.quantize_per_tensor %3527, %3530, %3531, %int12_936 : !torch.vtensor<[1,160,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3533 = torch.aten.int_repr %3532 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],si8>
%3534 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3535 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3536 = torch.aten.item %3534 : !torch.vtensor<[],f32> -> !torch.float
%3537 = torch.aten.item %3535 : !torch.vtensor<[],si8> -> !torch.int
%3538 = torch.aten._make_per_tensor_quantized_tensor %3533, %3536, %3537 : !torch.vtensor<[1,160,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3539 = torch.aten.dequantize.self %3538 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],f32>
%3540 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3541 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_937 = torch.constant.int 12
%3542 = torch.aten.item %3540 : !torch.vtensor<[],f32> -> !torch.float
%3543 = torch.aten.item %3541 : !torch.vtensor<[],si8> -> !torch.int
%3544 = torch.aten.quantize_per_tensor %112, %3542, %3543, %int12_937 : !torch.vtensor<[960,160,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960,160,1,1],!torch.qint8>
%3545 = torch.aten.int_repr %3544 : !torch.vtensor<[960,160,1,1],!torch.qint8> -> !torch.vtensor<[960,160,1,1],si8>
%3546 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3547 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3548 = torch.aten.item %3546 : !torch.vtensor<[],f32> -> !torch.float
%3549 = torch.aten.item %3547 : !torch.vtensor<[],si8> -> !torch.int
%3550 = torch.aten._make_per_tensor_quantized_tensor %3545, %3548, %3549 : !torch.vtensor<[960,160,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[960,160,1,1],!torch.qint8>
%3551 = torch.aten.dequantize.self %3550 : !torch.vtensor<[960,160,1,1],!torch.qint8> -> !torch.vtensor<[960,160,1,1],f32>
%3552 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3553 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_938 = torch.constant.int 12
%3554 = torch.aten.item %3552 : !torch.vtensor<[],f32> -> !torch.float
%3555 = torch.aten.item %3553 : !torch.vtensor<[],si8> -> !torch.int
%3556 = torch.aten.quantize_per_tensor %113, %3554, %3555, %int12_938 : !torch.vtensor<[960],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3557 = torch.aten.int_repr %3556 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],si8>
%3558 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3559 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3560 = torch.aten.item %3558 : !torch.vtensor<[],f32> -> !torch.float
%3561 = torch.aten.item %3559 : !torch.vtensor<[],si8> -> !torch.int
%3562 = torch.aten._make_per_tensor_quantized_tensor %3557, %3560, %3561 : !torch.vtensor<[960],si8>, !torch.float, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3563 = torch.aten.dequantize.self %3562 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],f32>
%int0_939 = torch.constant.int 0
%int0_940 = torch.constant.int 0
%int1_941 = torch.constant.int 1
%int1_942 = torch.constant.int 1
%int1_943 = torch.constant.int 1
%int1_944 = torch.constant.int 1
%int0_945 = torch.constant.int 0
%3564 = torch.prim.ListConstruct %int0_939, %int0_940 : (!torch.int, !torch.int) -> !torch.list<int>
%3565 = torch.prim.ListConstruct %int1_941, %int1_942 : (!torch.int, !torch.int) -> !torch.list<int>
%3566 = torch.prim.ListConstruct %int1_943, %int1_944 : (!torch.int, !torch.int) -> !torch.list<int>
%3567 = torch.prim.ListConstruct %int0_945, %int0_945 : (!torch.int, !torch.int) -> !torch.list<int>
%false_946 = torch.constant.bool false
%int1_947 = torch.constant.int 1
%3568 = torch.aten.convolution %3539, %3551, %3563, %3566, %3564, %3565, %false_946, %3567, %int1_947 : !torch.vtensor<[1,160,14,14],f32>, !torch.vtensor<[960,160,1,1],f32>, !torch.vtensor<[960],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3569 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3570 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_948 = torch.constant.int 12
%3571 = torch.aten.item %3569 : !torch.vtensor<[],f32> -> !torch.float
%3572 = torch.aten.item %3570 : !torch.vtensor<[],si8> -> !torch.int
%3573 = torch.aten.quantize_per_tensor %3568, %3571, %3572, %int12_948 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3574 = torch.aten.int_repr %3573 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3575 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3576 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3577 = torch.aten.item %3575 : !torch.vtensor<[],f32> -> !torch.float
%3578 = torch.aten.item %3576 : !torch.vtensor<[],si8> -> !torch.int
%3579 = torch.aten._make_per_tensor_quantized_tensor %3574, %3577, %3578 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3580 = torch.aten.dequantize.self %3579 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3581 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_949 = torch.constant.int 1
%3582 = torch.aten.add.Tensor %3580, %3581, %int1_949 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3583 = torch.aten.relu %3582 : !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3584 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%3585 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_950 = torch.constant.int 6
%none_951 = torch.constant.none
%false_952 = torch.constant.bool false
%3586 = torch.aten.to.dtype %3584, %int6_950, %false_952, %false_952, %none_951 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_953 = torch.constant.int 6
%none_954 = torch.constant.none
%false_955 = torch.constant.bool false
%3587 = torch.aten.to.dtype %3585, %int6_953, %false_955, %false_955, %none_954 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%3588 = torch.aten.clamp.Tensor %3583, %3586, %3587 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3589 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%3590 = torch.aten.mul.Tensor %3588, %3589 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3591 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3592 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_956 = torch.constant.int 12
%3593 = torch.aten.item %3591 : !torch.vtensor<[],f32> -> !torch.float
%3594 = torch.aten.item %3592 : !torch.vtensor<[],si8> -> !torch.int
%3595 = torch.aten.quantize_per_tensor %3590, %3593, %3594, %int12_956 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3596 = torch.aten.int_repr %3595 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3597 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3598 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3599 = torch.aten.item %3597 : !torch.vtensor<[],f32> -> !torch.float
%3600 = torch.aten.item %3598 : !torch.vtensor<[],si8> -> !torch.int
%3601 = torch.aten._make_per_tensor_quantized_tensor %3596, %3599, %3600 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3602 = torch.aten.dequantize.self %3601 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3603 = torch.aten.mul.Tensor %3580, %3602 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3604 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3605 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_957 = torch.constant.int 12
%3606 = torch.aten.item %3604 : !torch.vtensor<[],f32> -> !torch.float
%3607 = torch.aten.item %3605 : !torch.vtensor<[],si8> -> !torch.int
%3608 = torch.aten.quantize_per_tensor %3603, %3606, %3607, %int12_957 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3609 = torch.aten.int_repr %3608 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3610 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3611 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3612 = torch.aten.item %3610 : !torch.vtensor<[],f32> -> !torch.float
%3613 = torch.aten.item %3611 : !torch.vtensor<[],si8> -> !torch.int
%3614 = torch.aten._make_per_tensor_quantized_tensor %3609, %3612, %3613 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3615 = torch.aten.dequantize.self %3614 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3616 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3617 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_958 = torch.constant.int 12
%3618 = torch.aten.item %3616 : !torch.vtensor<[],f32> -> !torch.float
%3619 = torch.aten.item %3617 : !torch.vtensor<[],si8> -> !torch.int
%3620 = torch.aten.quantize_per_tensor %114, %3618, %3619, %int12_958 : !torch.vtensor<[960,1,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960,1,5,5],!torch.qint8>
%3621 = torch.aten.int_repr %3620 : !torch.vtensor<[960,1,5,5],!torch.qint8> -> !torch.vtensor<[960,1,5,5],si8>
%3622 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%3623 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3624 = torch.aten.item %3622 : !torch.vtensor<[],f32> -> !torch.float
%3625 = torch.aten.item %3623 : !torch.vtensor<[],si8> -> !torch.int
%3626 = torch.aten._make_per_tensor_quantized_tensor %3621, %3624, %3625 : !torch.vtensor<[960,1,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[960,1,5,5],!torch.qint8>
%3627 = torch.aten.dequantize.self %3626 : !torch.vtensor<[960,1,5,5],!torch.qint8> -> !torch.vtensor<[960,1,5,5],f32>
%3628 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3629 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_959 = torch.constant.int 12
%3630 = torch.aten.item %3628 : !torch.vtensor<[],f32> -> !torch.float
%3631 = torch.aten.item %3629 : !torch.vtensor<[],si8> -> !torch.int
%3632 = torch.aten.quantize_per_tensor %115, %3630, %3631, %int12_959 : !torch.vtensor<[960],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3633 = torch.aten.int_repr %3632 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],si8>
%3634 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3635 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3636 = torch.aten.item %3634 : !torch.vtensor<[],f32> -> !torch.float
%3637 = torch.aten.item %3635 : !torch.vtensor<[],si8> -> !torch.int
%3638 = torch.aten._make_per_tensor_quantized_tensor %3633, %3636, %3637 : !torch.vtensor<[960],si8>, !torch.float, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3639 = torch.aten.dequantize.self %3638 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],f32>
%int4_960 = torch.constant.int 4
%int4_961 = torch.constant.int 4
%int2_962 = torch.constant.int 2
%int2_963 = torch.constant.int 2
%int1_964 = torch.constant.int 1
%int1_965 = torch.constant.int 1
%int0_966 = torch.constant.int 0
%3640 = torch.prim.ListConstruct %int4_960, %int4_961 : (!torch.int, !torch.int) -> !torch.list<int>
%3641 = torch.prim.ListConstruct %int2_962, %int2_963 : (!torch.int, !torch.int) -> !torch.list<int>
%3642 = torch.prim.ListConstruct %int1_964, %int1_965 : (!torch.int, !torch.int) -> !torch.list<int>
%3643 = torch.prim.ListConstruct %int0_966, %int0_966 : (!torch.int, !torch.int) -> !torch.list<int>
%false_967 = torch.constant.bool false
%int960_968 = torch.constant.int 960
%3644 = torch.aten.convolution %3615, %3627, %3639, %3642, %3640, %3641, %false_967, %3643, %int960_968 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[960,1,5,5],f32>, !torch.vtensor<[960],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3645 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3646 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_969 = torch.constant.int 12
%3647 = torch.aten.item %3645 : !torch.vtensor<[],f32> -> !torch.float
%3648 = torch.aten.item %3646 : !torch.vtensor<[],si8> -> !torch.int
%3649 = torch.aten.quantize_per_tensor %3644, %3647, %3648, %int12_969 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3650 = torch.aten.int_repr %3649 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3651 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3652 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3653 = torch.aten.item %3651 : !torch.vtensor<[],f32> -> !torch.float
%3654 = torch.aten.item %3652 : !torch.vtensor<[],si8> -> !torch.int
%3655 = torch.aten._make_per_tensor_quantized_tensor %3650, %3653, %3654 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3656 = torch.aten.dequantize.self %3655 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3657 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_970 = torch.constant.int 1
%3658 = torch.aten.add.Tensor %3656, %3657, %int1_970 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3659 = torch.aten.relu %3658 : !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3660 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%3661 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_971 = torch.constant.int 6
%none_972 = torch.constant.none
%false_973 = torch.constant.bool false
%3662 = torch.aten.to.dtype %3660, %int6_971, %false_973, %false_973, %none_972 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_974 = torch.constant.int 6
%none_975 = torch.constant.none
%false_976 = torch.constant.bool false
%3663 = torch.aten.to.dtype %3661, %int6_974, %false_976, %false_976, %none_975 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%3664 = torch.aten.clamp.Tensor %3659, %3662, %3663 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3665 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%3666 = torch.aten.mul.Tensor %3664, %3665 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3667 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3668 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_977 = torch.constant.int 12
%3669 = torch.aten.item %3667 : !torch.vtensor<[],f32> -> !torch.float
%3670 = torch.aten.item %3668 : !torch.vtensor<[],si8> -> !torch.int
%3671 = torch.aten.quantize_per_tensor %3666, %3669, %3670, %int12_977 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3672 = torch.aten.int_repr %3671 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3673 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3674 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3675 = torch.aten.item %3673 : !torch.vtensor<[],f32> -> !torch.float
%3676 = torch.aten.item %3674 : !torch.vtensor<[],si8> -> !torch.int
%3677 = torch.aten._make_per_tensor_quantized_tensor %3672, %3675, %3676 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3678 = torch.aten.dequantize.self %3677 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3679 = torch.aten.mul.Tensor %3656, %3678 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3680 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3681 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_978 = torch.constant.int 12
%3682 = torch.aten.item %3680 : !torch.vtensor<[],f32> -> !torch.float
%3683 = torch.aten.item %3681 : !torch.vtensor<[],si8> -> !torch.int
%3684 = torch.aten.quantize_per_tensor %3679, %3682, %3683, %int12_978 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3685 = torch.aten.int_repr %3684 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3686 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3687 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3688 = torch.aten.item %3686 : !torch.vtensor<[],f32> -> !torch.float
%3689 = torch.aten.item %3687 : !torch.vtensor<[],si8> -> !torch.int
%3690 = torch.aten._make_per_tensor_quantized_tensor %3685, %3688, %3689 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3691 = torch.aten.dequantize.self %3690 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%int0_979 = torch.constant.int 0
%int1_980 = torch.constant.int 1
%int14_981 = torch.constant.int 14
%int14_982 = torch.constant.int 14
%3692 = torch.prim.ListConstruct %int14_981, %int14_982 : (!torch.int, !torch.int) -> !torch.list<int>
%3693 = torch.prim.ListConstruct %int0_979, %int0_979 : (!torch.int, !torch.int) -> !torch.list<int>
%3694 = torch.prim.ListConstruct %int1_980, %int1_980 : (!torch.int, !torch.int) -> !torch.list<int>
%false_983 = torch.constant.bool false
%none_984 = torch.constant.none
%3695 = torch.aten.avg_pool2d %3691, %3692, %3694, %3693, %false_983, %false_983, %none_984 : !torch.vtensor<[1,960,14,14],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,960,1,1],f32>
%3696 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%3697 = torch.aten.mul.Tensor %3695, %3696 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,1,1],f32>
%3698 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3699 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_985 = torch.constant.int 12
%3700 = torch.aten.item %3698 : !torch.vtensor<[],f32> -> !torch.float
%3701 = torch.aten.item %3699 : !torch.vtensor<[],si8> -> !torch.int
%3702 = torch.aten.quantize_per_tensor %3697, %3700, %3701, %int12_985 : !torch.vtensor<[1,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3703 = torch.aten.int_repr %3702 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],si8>
%3704 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3705 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3706 = torch.aten.item %3704 : !torch.vtensor<[],f32> -> !torch.float
%3707 = torch.aten.item %3705 : !torch.vtensor<[],si8> -> !torch.int
%3708 = torch.aten._make_per_tensor_quantized_tensor %3703, %3706, %3707 : !torch.vtensor<[1,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3709 = torch.aten.dequantize.self %3708 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],f32>
%3710 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3711 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_986 = torch.constant.int 12
%3712 = torch.aten.item %3710 : !torch.vtensor<[],f32> -> !torch.float
%3713 = torch.aten.item %3711 : !torch.vtensor<[],si8> -> !torch.int
%3714 = torch.aten.quantize_per_tensor %116, %3712, %3713, %int12_986 : !torch.vtensor<[240,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[240,960,1,1],!torch.qint8>
%3715 = torch.aten.int_repr %3714 : !torch.vtensor<[240,960,1,1],!torch.qint8> -> !torch.vtensor<[240,960,1,1],si8>
%3716 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3717 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3718 = torch.aten.item %3716 : !torch.vtensor<[],f32> -> !torch.float
%3719 = torch.aten.item %3717 : !torch.vtensor<[],si8> -> !torch.int
%3720 = torch.aten._make_per_tensor_quantized_tensor %3715, %3718, %3719 : !torch.vtensor<[240,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[240,960,1,1],!torch.qint8>
%3721 = torch.aten.dequantize.self %3720 : !torch.vtensor<[240,960,1,1],!torch.qint8> -> !torch.vtensor<[240,960,1,1],f32>
%3722 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3723 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_987 = torch.constant.int 12
%3724 = torch.aten.item %3722 : !torch.vtensor<[],f32> -> !torch.float
%3725 = torch.aten.item %3723 : !torch.vtensor<[],si8> -> !torch.int
%3726 = torch.aten.quantize_per_tensor %117, %3724, %3725, %int12_987 : !torch.vtensor<[240],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[240],!torch.qint8>
%3727 = torch.aten.int_repr %3726 : !torch.vtensor<[240],!torch.qint8> -> !torch.vtensor<[240],si8>
%3728 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3729 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3730 = torch.aten.item %3728 : !torch.vtensor<[],f32> -> !torch.float
%3731 = torch.aten.item %3729 : !torch.vtensor<[],si8> -> !torch.int
%3732 = torch.aten._make_per_tensor_quantized_tensor %3727, %3730, %3731 : !torch.vtensor<[240],si8>, !torch.float, !torch.int -> !torch.vtensor<[240],!torch.qint8>
%3733 = torch.aten.dequantize.self %3732 : !torch.vtensor<[240],!torch.qint8> -> !torch.vtensor<[240],f32>
%int0_988 = torch.constant.int 0
%int0_989 = torch.constant.int 0
%int1_990 = torch.constant.int 1
%int1_991 = torch.constant.int 1
%int1_992 = torch.constant.int 1
%int1_993 = torch.constant.int 1
%int0_994 = torch.constant.int 0
%3734 = torch.prim.ListConstruct %int0_988, %int0_989 : (!torch.int, !torch.int) -> !torch.list<int>
%3735 = torch.prim.ListConstruct %int1_990, %int1_991 : (!torch.int, !torch.int) -> !torch.list<int>
%3736 = torch.prim.ListConstruct %int1_992, %int1_993 : (!torch.int, !torch.int) -> !torch.list<int>
%3737 = torch.prim.ListConstruct %int0_994, %int0_994 : (!torch.int, !torch.int) -> !torch.list<int>
%false_995 = torch.constant.bool false
%int1_996 = torch.constant.int 1
%3738 = torch.aten.convolution %3709, %3721, %3733, %3736, %3734, %3735, %false_995, %3737, %int1_996 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[240,960,1,1],f32>, !torch.vtensor<[240],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,240,1,1],f32>
%3739 = torch.aten.relu %3738 : !torch.vtensor<[1,240,1,1],f32> -> !torch.vtensor<[1,240,1,1],f32>
%3740 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3741 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_997 = torch.constant.int 12
%3742 = torch.aten.item %3740 : !torch.vtensor<[],f32> -> !torch.float
%3743 = torch.aten.item %3741 : !torch.vtensor<[],si8> -> !torch.int
%3744 = torch.aten.quantize_per_tensor %3739, %3742, %3743, %int12_997 : !torch.vtensor<[1,240,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,240,1,1],!torch.qint8>
%3745 = torch.aten.int_repr %3744 : !torch.vtensor<[1,240,1,1],!torch.qint8> -> !torch.vtensor<[1,240,1,1],si8>
%3746 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3747 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3748 = torch.aten.item %3746 : !torch.vtensor<[],f32> -> !torch.float
%3749 = torch.aten.item %3747 : !torch.vtensor<[],si8> -> !torch.int
%3750 = torch.aten._make_per_tensor_quantized_tensor %3745, %3748, %3749 : !torch.vtensor<[1,240,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,240,1,1],!torch.qint8>
%3751 = torch.aten.dequantize.self %3750 : !torch.vtensor<[1,240,1,1],!torch.qint8> -> !torch.vtensor<[1,240,1,1],f32>
%3752 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3753 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_998 = torch.constant.int 12
%3754 = torch.aten.item %3752 : !torch.vtensor<[],f32> -> !torch.float
%3755 = torch.aten.item %3753 : !torch.vtensor<[],si8> -> !torch.int
%3756 = torch.aten.quantize_per_tensor %118, %3754, %3755, %int12_998 : !torch.vtensor<[960,240,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960,240,1,1],!torch.qint8>
%3757 = torch.aten.int_repr %3756 : !torch.vtensor<[960,240,1,1],!torch.qint8> -> !torch.vtensor<[960,240,1,1],si8>
%3758 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3759 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3760 = torch.aten.item %3758 : !torch.vtensor<[],f32> -> !torch.float
%3761 = torch.aten.item %3759 : !torch.vtensor<[],si8> -> !torch.int
%3762 = torch.aten._make_per_tensor_quantized_tensor %3757, %3760, %3761 : !torch.vtensor<[960,240,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[960,240,1,1],!torch.qint8>
%3763 = torch.aten.dequantize.self %3762 : !torch.vtensor<[960,240,1,1],!torch.qint8> -> !torch.vtensor<[960,240,1,1],f32>
%3764 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3765 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_999 = torch.constant.int 12
%3766 = torch.aten.item %3764 : !torch.vtensor<[],f32> -> !torch.float
%3767 = torch.aten.item %3765 : !torch.vtensor<[],si8> -> !torch.int
%3768 = torch.aten.quantize_per_tensor %119, %3766, %3767, %int12_999 : !torch.vtensor<[960],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3769 = torch.aten.int_repr %3768 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],si8>
%3770 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3771 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3772 = torch.aten.item %3770 : !torch.vtensor<[],f32> -> !torch.float
%3773 = torch.aten.item %3771 : !torch.vtensor<[],si8> -> !torch.int
%3774 = torch.aten._make_per_tensor_quantized_tensor %3769, %3772, %3773 : !torch.vtensor<[960],si8>, !torch.float, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3775 = torch.aten.dequantize.self %3774 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],f32>
%int0_1000 = torch.constant.int 0
%int0_1001 = torch.constant.int 0
%int1_1002 = torch.constant.int 1
%int1_1003 = torch.constant.int 1
%int1_1004 = torch.constant.int 1
%int1_1005 = torch.constant.int 1
%int0_1006 = torch.constant.int 0
%3776 = torch.prim.ListConstruct %int0_1000, %int0_1001 : (!torch.int, !torch.int) -> !torch.list<int>
%3777 = torch.prim.ListConstruct %int1_1002, %int1_1003 : (!torch.int, !torch.int) -> !torch.list<int>
%3778 = torch.prim.ListConstruct %int1_1004, %int1_1005 : (!torch.int, !torch.int) -> !torch.list<int>
%3779 = torch.prim.ListConstruct %int0_1006, %int0_1006 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1007 = torch.constant.bool false
%int1_1008 = torch.constant.int 1
%3780 = torch.aten.convolution %3751, %3763, %3775, %3778, %3776, %3777, %false_1007, %3779, %int1_1008 : !torch.vtensor<[1,240,1,1],f32>, !torch.vtensor<[960,240,1,1],f32>, !torch.vtensor<[960],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,960,1,1],f32>
%3781 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3782 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1009 = torch.constant.int 12
%3783 = torch.aten.item %3781 : !torch.vtensor<[],f32> -> !torch.float
%3784 = torch.aten.item %3782 : !torch.vtensor<[],si8> -> !torch.int
%3785 = torch.aten.quantize_per_tensor %3780, %3783, %3784, %int12_1009 : !torch.vtensor<[1,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3786 = torch.aten.int_repr %3785 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],si8>
%3787 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3788 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3789 = torch.aten.item %3787 : !torch.vtensor<[],f32> -> !torch.float
%3790 = torch.aten.item %3788 : !torch.vtensor<[],si8> -> !torch.int
%3791 = torch.aten._make_per_tensor_quantized_tensor %3786, %3789, %3790 : !torch.vtensor<[1,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3792 = torch.aten.dequantize.self %3791 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],f32>
%3793 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_1010 = torch.constant.int 1
%3794 = torch.aten.add.Tensor %3792, %3793, %int1_1010 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,960,1,1],f32>
%3795 = torch.aten.relu %3794 : !torch.vtensor<[1,960,1,1],f32> -> !torch.vtensor<[1,960,1,1],f32>
%3796 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%3797 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_1011 = torch.constant.int 6
%none_1012 = torch.constant.none
%false_1013 = torch.constant.bool false
%3798 = torch.aten.to.dtype %3796, %int6_1011, %false_1013, %false_1013, %none_1012 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_1014 = torch.constant.int 6
%none_1015 = torch.constant.none
%false_1016 = torch.constant.bool false
%3799 = torch.aten.to.dtype %3797, %int6_1014, %false_1016, %false_1016, %none_1015 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%3800 = torch.aten.clamp.Tensor %3795, %3798, %3799 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,1,1],f32>
%3801 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%3802 = torch.aten.mul.Tensor %3800, %3801 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,1,1],f32>
%3803 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3804 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1017 = torch.constant.int 12
%3805 = torch.aten.item %3803 : !torch.vtensor<[],f32> -> !torch.float
%3806 = torch.aten.item %3804 : !torch.vtensor<[],si8> -> !torch.int
%3807 = torch.aten.quantize_per_tensor %3802, %3805, %3806, %int12_1017 : !torch.vtensor<[1,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3808 = torch.aten.int_repr %3807 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],si8>
%3809 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3810 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3811 = torch.aten.item %3809 : !torch.vtensor<[],f32> -> !torch.float
%3812 = torch.aten.item %3810 : !torch.vtensor<[],si8> -> !torch.int
%3813 = torch.aten._make_per_tensor_quantized_tensor %3808, %3811, %3812 : !torch.vtensor<[1,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%3814 = torch.aten.dequantize.self %3813 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],f32>
%3815 = torch.aten.mul.Tensor %3814, %3691 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3816 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3817 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1018 = torch.constant.int 12
%3818 = torch.aten.item %3816 : !torch.vtensor<[],f32> -> !torch.float
%3819 = torch.aten.item %3817 : !torch.vtensor<[],si8> -> !torch.int
%3820 = torch.aten.quantize_per_tensor %3815, %3818, %3819, %int12_1018 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3821 = torch.aten.int_repr %3820 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3822 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3823 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3824 = torch.aten.item %3822 : !torch.vtensor<[],f32> -> !torch.float
%3825 = torch.aten.item %3823 : !torch.vtensor<[],si8> -> !torch.int
%3826 = torch.aten._make_per_tensor_quantized_tensor %3821, %3824, %3825 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3827 = torch.aten.dequantize.self %3826 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3828 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3829 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1019 = torch.constant.int 12
%3830 = torch.aten.item %3828 : !torch.vtensor<[],f32> -> !torch.float
%3831 = torch.aten.item %3829 : !torch.vtensor<[],si8> -> !torch.int
%3832 = torch.aten.quantize_per_tensor %120, %3830, %3831, %int12_1019 : !torch.vtensor<[160,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[160,960,1,1],!torch.qint8>
%3833 = torch.aten.int_repr %3832 : !torch.vtensor<[160,960,1,1],!torch.qint8> -> !torch.vtensor<[160,960,1,1],si8>
%3834 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3835 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3836 = torch.aten.item %3834 : !torch.vtensor<[],f32> -> !torch.float
%3837 = torch.aten.item %3835 : !torch.vtensor<[],si8> -> !torch.int
%3838 = torch.aten._make_per_tensor_quantized_tensor %3833, %3836, %3837 : !torch.vtensor<[160,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[160,960,1,1],!torch.qint8>
%3839 = torch.aten.dequantize.self %3838 : !torch.vtensor<[160,960,1,1],!torch.qint8> -> !torch.vtensor<[160,960,1,1],f32>
%3840 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3841 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1020 = torch.constant.int 12
%3842 = torch.aten.item %3840 : !torch.vtensor<[],f32> -> !torch.float
%3843 = torch.aten.item %3841 : !torch.vtensor<[],si8> -> !torch.int
%3844 = torch.aten.quantize_per_tensor %121, %3842, %3843, %int12_1020 : !torch.vtensor<[160],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[160],!torch.qint8>
%3845 = torch.aten.int_repr %3844 : !torch.vtensor<[160],!torch.qint8> -> !torch.vtensor<[160],si8>
%3846 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3847 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3848 = torch.aten.item %3846 : !torch.vtensor<[],f32> -> !torch.float
%3849 = torch.aten.item %3847 : !torch.vtensor<[],si8> -> !torch.int
%3850 = torch.aten._make_per_tensor_quantized_tensor %3845, %3848, %3849 : !torch.vtensor<[160],si8>, !torch.float, !torch.int -> !torch.vtensor<[160],!torch.qint8>
%3851 = torch.aten.dequantize.self %3850 : !torch.vtensor<[160],!torch.qint8> -> !torch.vtensor<[160],f32>
%int0_1021 = torch.constant.int 0
%int0_1022 = torch.constant.int 0
%int1_1023 = torch.constant.int 1
%int1_1024 = torch.constant.int 1
%int1_1025 = torch.constant.int 1
%int1_1026 = torch.constant.int 1
%int0_1027 = torch.constant.int 0
%3852 = torch.prim.ListConstruct %int0_1021, %int0_1022 : (!torch.int, !torch.int) -> !torch.list<int>
%3853 = torch.prim.ListConstruct %int1_1023, %int1_1024 : (!torch.int, !torch.int) -> !torch.list<int>
%3854 = torch.prim.ListConstruct %int1_1025, %int1_1026 : (!torch.int, !torch.int) -> !torch.list<int>
%3855 = torch.prim.ListConstruct %int0_1027, %int0_1027 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1028 = torch.constant.bool false
%int1_1029 = torch.constant.int 1
%3856 = torch.aten.convolution %3827, %3839, %3851, %3854, %3852, %3853, %false_1028, %3855, %int1_1029 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[160,960,1,1],f32>, !torch.vtensor<[160],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,160,14,14],f32>
%3857 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3858 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1030 = torch.constant.int 12
%3859 = torch.aten.item %3857 : !torch.vtensor<[],f32> -> !torch.float
%3860 = torch.aten.item %3858 : !torch.vtensor<[],si8> -> !torch.int
%3861 = torch.aten.quantize_per_tensor %3856, %3859, %3860, %int12_1030 : !torch.vtensor<[1,160,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3862 = torch.aten.int_repr %3861 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],si8>
%3863 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3864 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3865 = torch.aten.item %3863 : !torch.vtensor<[],f32> -> !torch.float
%3866 = torch.aten.item %3864 : !torch.vtensor<[],si8> -> !torch.int
%3867 = torch.aten._make_per_tensor_quantized_tensor %3862, %3865, %3866 : !torch.vtensor<[1,160,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3868 = torch.aten.dequantize.self %3867 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],f32>
%int1_1031 = torch.constant.int 1
%3869 = torch.aten.add.Tensor %3868, %3539, %int1_1031 : !torch.vtensor<[1,160,14,14],f32>, !torch.vtensor<[1,160,14,14],f32>, !torch.int -> !torch.vtensor<[1,160,14,14],f32>
%3870 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3871 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1032 = torch.constant.int 12
%3872 = torch.aten.item %3870 : !torch.vtensor<[],f32> -> !torch.float
%3873 = torch.aten.item %3871 : !torch.vtensor<[],si8> -> !torch.int
%3874 = torch.aten.quantize_per_tensor %3869, %3872, %3873, %int12_1032 : !torch.vtensor<[1,160,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3875 = torch.aten.int_repr %3874 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],si8>
%3876 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3877 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3878 = torch.aten.item %3876 : !torch.vtensor<[],f32> -> !torch.float
%3879 = torch.aten.item %3877 : !torch.vtensor<[],si8> -> !torch.int
%3880 = torch.aten._make_per_tensor_quantized_tensor %3875, %3878, %3879 : !torch.vtensor<[1,160,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,160,14,14],!torch.qint8>
%3881 = torch.aten.dequantize.self %3880 : !torch.vtensor<[1,160,14,14],!torch.qint8> -> !torch.vtensor<[1,160,14,14],f32>
%3882 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3883 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1033 = torch.constant.int 12
%3884 = torch.aten.item %3882 : !torch.vtensor<[],f32> -> !torch.float
%3885 = torch.aten.item %3883 : !torch.vtensor<[],si8> -> !torch.int
%3886 = torch.aten.quantize_per_tensor %122, %3884, %3885, %int12_1033 : !torch.vtensor<[960,160,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960,160,1,1],!torch.qint8>
%3887 = torch.aten.int_repr %3886 : !torch.vtensor<[960,160,1,1],!torch.qint8> -> !torch.vtensor<[960,160,1,1],si8>
%3888 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3889 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3890 = torch.aten.item %3888 : !torch.vtensor<[],f32> -> !torch.float
%3891 = torch.aten.item %3889 : !torch.vtensor<[],si8> -> !torch.int
%3892 = torch.aten._make_per_tensor_quantized_tensor %3887, %3890, %3891 : !torch.vtensor<[960,160,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[960,160,1,1],!torch.qint8>
%3893 = torch.aten.dequantize.self %3892 : !torch.vtensor<[960,160,1,1],!torch.qint8> -> !torch.vtensor<[960,160,1,1],f32>
%3894 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3895 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1034 = torch.constant.int 12
%3896 = torch.aten.item %3894 : !torch.vtensor<[],f32> -> !torch.float
%3897 = torch.aten.item %3895 : !torch.vtensor<[],si8> -> !torch.int
%3898 = torch.aten.quantize_per_tensor %123, %3896, %3897, %int12_1034 : !torch.vtensor<[960],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3899 = torch.aten.int_repr %3898 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],si8>
%3900 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3901 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3902 = torch.aten.item %3900 : !torch.vtensor<[],f32> -> !torch.float
%3903 = torch.aten.item %3901 : !torch.vtensor<[],si8> -> !torch.int
%3904 = torch.aten._make_per_tensor_quantized_tensor %3899, %3902, %3903 : !torch.vtensor<[960],si8>, !torch.float, !torch.int -> !torch.vtensor<[960],!torch.qint8>
%3905 = torch.aten.dequantize.self %3904 : !torch.vtensor<[960],!torch.qint8> -> !torch.vtensor<[960],f32>
%int0_1035 = torch.constant.int 0
%int0_1036 = torch.constant.int 0
%int1_1037 = torch.constant.int 1
%int1_1038 = torch.constant.int 1
%int1_1039 = torch.constant.int 1
%int1_1040 = torch.constant.int 1
%int0_1041 = torch.constant.int 0
%3906 = torch.prim.ListConstruct %int0_1035, %int0_1036 : (!torch.int, !torch.int) -> !torch.list<int>
%3907 = torch.prim.ListConstruct %int1_1037, %int1_1038 : (!torch.int, !torch.int) -> !torch.list<int>
%3908 = torch.prim.ListConstruct %int1_1039, %int1_1040 : (!torch.int, !torch.int) -> !torch.list<int>
%3909 = torch.prim.ListConstruct %int0_1041, %int0_1041 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1042 = torch.constant.bool false
%int1_1043 = torch.constant.int 1
%3910 = torch.aten.convolution %3881, %3893, %3905, %3908, %3906, %3907, %false_1042, %3909, %int1_1043 : !torch.vtensor<[1,160,14,14],f32>, !torch.vtensor<[960,160,1,1],f32>, !torch.vtensor<[960],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3911 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3912 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1044 = torch.constant.int 12
%3913 = torch.aten.item %3911 : !torch.vtensor<[],f32> -> !torch.float
%3914 = torch.aten.item %3912 : !torch.vtensor<[],si8> -> !torch.int
%3915 = torch.aten.quantize_per_tensor %3910, %3913, %3914, %int12_1044 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3916 = torch.aten.int_repr %3915 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3917 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3918 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3919 = torch.aten.item %3917 : !torch.vtensor<[],f32> -> !torch.float
%3920 = torch.aten.item %3918 : !torch.vtensor<[],si8> -> !torch.int
%3921 = torch.aten._make_per_tensor_quantized_tensor %3916, %3919, %3920 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3922 = torch.aten.dequantize.self %3921 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3923 = torch.vtensor.literal(dense<3.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int1_1045 = torch.constant.int 1
%3924 = torch.aten.add.Tensor %3922, %3923, %int1_1045 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.int -> !torch.vtensor<[1,960,14,14],f32>
%3925 = torch.aten.relu %3924 : !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3926 = torch.vtensor.literal(dense<0.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%3927 = torch.vtensor.literal(dense<6.000000e+00> : tensor<f32>) : !torch.vtensor<[],f32>
%int6_1046 = torch.constant.int 6
%none_1047 = torch.constant.none
%false_1048 = torch.constant.bool false
%3928 = torch.aten.to.dtype %3926, %int6_1046, %false_1048, %false_1048, %none_1047 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%int6_1049 = torch.constant.int 6
%none_1050 = torch.constant.none
%false_1051 = torch.constant.bool false
%3929 = torch.aten.to.dtype %3927, %int6_1049, %false_1051, %false_1051, %none_1050 : !torch.vtensor<[],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[],f32>
%3930 = torch.aten.clamp.Tensor %3925, %3928, %3929 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3931 = torch.vtensor.literal(dense<0.166687012> : tensor<f32>) : !torch.vtensor<[],f32>
%3932 = torch.aten.mul.Tensor %3930, %3931 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3933 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3934 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1052 = torch.constant.int 12
%3935 = torch.aten.item %3933 : !torch.vtensor<[],f32> -> !torch.float
%3936 = torch.aten.item %3934 : !torch.vtensor<[],si8> -> !torch.int
%3937 = torch.aten.quantize_per_tensor %3932, %3935, %3936, %int12_1052 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3938 = torch.aten.int_repr %3937 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3939 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%3940 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3941 = torch.aten.item %3939 : !torch.vtensor<[],f32> -> !torch.float
%3942 = torch.aten.item %3940 : !torch.vtensor<[],si8> -> !torch.int
%3943 = torch.aten._make_per_tensor_quantized_tensor %3938, %3941, %3942 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3944 = torch.aten.dequantize.self %3943 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3945 = torch.aten.mul.Tensor %3922, %3944 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[1,960,14,14],f32> -> !torch.vtensor<[1,960,14,14],f32>
%3946 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3947 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1053 = torch.constant.int 12
%3948 = torch.aten.item %3946 : !torch.vtensor<[],f32> -> !torch.float
%3949 = torch.aten.item %3947 : !torch.vtensor<[],si8> -> !torch.int
%3950 = torch.aten.quantize_per_tensor %3945, %3948, %3949, %int12_1053 : !torch.vtensor<[1,960,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3951 = torch.aten.int_repr %3950 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],si8>
%3952 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3953 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3954 = torch.aten.item %3952 : !torch.vtensor<[],f32> -> !torch.float
%3955 = torch.aten.item %3953 : !torch.vtensor<[],si8> -> !torch.int
%3956 = torch.aten._make_per_tensor_quantized_tensor %3951, %3954, %3955 : !torch.vtensor<[1,960,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,14,14],!torch.qint8>
%3957 = torch.aten.dequantize.self %3956 : !torch.vtensor<[1,960,14,14],!torch.qint8> -> !torch.vtensor<[1,960,14,14],f32>
%3958 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3959 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1054 = torch.constant.int 12
%3960 = torch.aten.item %3958 : !torch.vtensor<[],f32> -> !torch.float
%3961 = torch.aten.item %3959 : !torch.vtensor<[],si8> -> !torch.int
%3962 = torch.aten.quantize_per_tensor %124, %3960, %3961, %int12_1054 : !torch.vtensor<[128,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,960,1,1],!torch.qint8>
%3963 = torch.aten.int_repr %3962 : !torch.vtensor<[128,960,1,1],!torch.qint8> -> !torch.vtensor<[128,960,1,1],si8>
%3964 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
%3965 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3966 = torch.aten.item %3964 : !torch.vtensor<[],f32> -> !torch.float
%3967 = torch.aten.item %3965 : !torch.vtensor<[],si8> -> !torch.int
%3968 = torch.aten._make_per_tensor_quantized_tensor %3963, %3966, %3967 : !torch.vtensor<[128,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,960,1,1],!torch.qint8>
%3969 = torch.aten.dequantize.self %3968 : !torch.vtensor<[128,960,1,1],!torch.qint8> -> !torch.vtensor<[128,960,1,1],f32>
%3970 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3971 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1055 = torch.constant.int 12
%3972 = torch.aten.item %3970 : !torch.vtensor<[],f32> -> !torch.float
%3973 = torch.aten.item %3971 : !torch.vtensor<[],si8> -> !torch.int
%3974 = torch.aten.quantize_per_tensor %125, %3972, %3973, %int12_1055 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%3975 = torch.aten.int_repr %3974 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8>
%3976 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3977 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3978 = torch.aten.item %3976 : !torch.vtensor<[],f32> -> !torch.float
%3979 = torch.aten.item %3977 : !torch.vtensor<[],si8> -> !torch.int
%3980 = torch.aten._make_per_tensor_quantized_tensor %3975, %3978, %3979 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8>
%3981 = torch.aten.dequantize.self %3980 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32>
%int0_1056 = torch.constant.int 0
%int0_1057 = torch.constant.int 0
%int1_1058 = torch.constant.int 1
%int1_1059 = torch.constant.int 1
%int1_1060 = torch.constant.int 1
%int1_1061 = torch.constant.int 1
%int0_1062 = torch.constant.int 0
%3982 = torch.prim.ListConstruct %int0_1056, %int0_1057 : (!torch.int, !torch.int) -> !torch.list<int>
%3983 = torch.prim.ListConstruct %int1_1058, %int1_1059 : (!torch.int, !torch.int) -> !torch.list<int>
%3984 = torch.prim.ListConstruct %int1_1060, %int1_1061 : (!torch.int, !torch.int) -> !torch.list<int>
%3985 = torch.prim.ListConstruct %int0_1062, %int0_1062 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1063 = torch.constant.bool false
%int1_1064 = torch.constant.int 1
%3986 = torch.aten.convolution %3957, %3969, %3981, %3984, %3982, %3983, %false_1063, %3985, %int1_1064 : !torch.vtensor<[1,960,14,14],f32>, !torch.vtensor<[128,960,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,14,14],f32>
%3987 = torch.aten.relu %3986 : !torch.vtensor<[1,128,14,14],f32> -> !torch.vtensor<[1,128,14,14],f32>
%3988 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3989 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1065 = torch.constant.int 12
%3990 = torch.aten.item %3988 : !torch.vtensor<[],f32> -> !torch.float
%3991 = torch.aten.item %3989 : !torch.vtensor<[],si8> -> !torch.int
%3992 = torch.aten.quantize_per_tensor %3987, %3990, %3991, %int12_1065 : !torch.vtensor<[1,128,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,14,14],!torch.qint8>
%3993 = torch.aten.int_repr %3992 : !torch.vtensor<[1,128,14,14],!torch.qint8> -> !torch.vtensor<[1,128,14,14],si8>
%3994 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%3995 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%3996 = torch.aten.item %3994 : !torch.vtensor<[],f32> -> !torch.float
%3997 = torch.aten.item %3995 : !torch.vtensor<[],si8> -> !torch.int
%3998 = torch.aten._make_per_tensor_quantized_tensor %3993, %3996, %3997 : !torch.vtensor<[1,128,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,14,14],!torch.qint8>
%3999 = torch.aten.dequantize.self %3998 : !torch.vtensor<[1,128,14,14],!torch.qint8> -> !torch.vtensor<[1,128,14,14],f32>
%int0_1066 = torch.constant.int 0
%int1_1067 = torch.constant.int 1
%int14_1068 = torch.constant.int 14
%int14_1069 = torch.constant.int 14
%4000 = torch.prim.ListConstruct %int14_1068, %int14_1069 : (!torch.int, !torch.int) -> !torch.list<int>
%4001 = torch.prim.ListConstruct %int0_1066, %int0_1066 : (!torch.int, !torch.int) -> !torch.list<int>
%4002 = torch.prim.ListConstruct %int1_1067, %int1_1067 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1070 = torch.constant.bool false
%none_1071 = torch.constant.none
%4003 = torch.aten.avg_pool2d %3957, %4000, %4002, %4001, %false_1070, %false_1070, %none_1071 : !torch.vtensor<[1,960,14,14],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,960,1,1],f32>
%4004 = torch.vtensor.literal(dense<1.00488281> : tensor<f32>) : !torch.vtensor<[],f32>
%4005 = torch.aten.mul.Tensor %4003, %4004 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,960,1,1],f32>
%4006 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4007 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1072 = torch.constant.int 12
%4008 = torch.aten.item %4006 : !torch.vtensor<[],f32> -> !torch.float
%4009 = torch.aten.item %4007 : !torch.vtensor<[],si8> -> !torch.int
%4010 = torch.aten.quantize_per_tensor %4005, %4008, %4009, %int12_1072 : !torch.vtensor<[1,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%4011 = torch.aten.int_repr %4010 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],si8>
%4012 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4013 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4014 = torch.aten.item %4012 : !torch.vtensor<[],f32> -> !torch.float
%4015 = torch.aten.item %4013 : !torch.vtensor<[],si8> -> !torch.int
%4016 = torch.aten._make_per_tensor_quantized_tensor %4011, %4014, %4015 : !torch.vtensor<[1,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,960,1,1],!torch.qint8>
%4017 = torch.aten.dequantize.self %4016 : !torch.vtensor<[1,960,1,1],!torch.qint8> -> !torch.vtensor<[1,960,1,1],f32>
%4018 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4019 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1073 = torch.constant.int 12
%4020 = torch.aten.item %4018 : !torch.vtensor<[],f32> -> !torch.float
%4021 = torch.aten.item %4019 : !torch.vtensor<[],si8> -> !torch.int
%4022 = torch.aten.quantize_per_tensor %126, %4020, %4021, %int12_1073 : !torch.vtensor<[128,960,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,960,1,1],!torch.qint8>
%4023 = torch.aten.int_repr %4022 : !torch.vtensor<[128,960,1,1],!torch.qint8> -> !torch.vtensor<[128,960,1,1],si8>
%4024 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4025 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4026 = torch.aten.item %4024 : !torch.vtensor<[],f32> -> !torch.float
%4027 = torch.aten.item %4025 : !torch.vtensor<[],si8> -> !torch.int
%4028 = torch.aten._make_per_tensor_quantized_tensor %4023, %4026, %4027 : !torch.vtensor<[128,960,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,960,1,1],!torch.qint8>
%4029 = torch.aten.dequantize.self %4028 : !torch.vtensor<[128,960,1,1],!torch.qint8> -> !torch.vtensor<[128,960,1,1],f32>
%int0_1074 = torch.constant.int 0
%int0_1075 = torch.constant.int 0
%int1_1076 = torch.constant.int 1
%int1_1077 = torch.constant.int 1
%int1_1078 = torch.constant.int 1
%int1_1079 = torch.constant.int 1
%int0_1080 = torch.constant.int 0
%4030 = torch.prim.ListConstruct %int0_1074, %int0_1075 : (!torch.int, !torch.int) -> !torch.list<int>
%4031 = torch.prim.ListConstruct %int1_1076, %int1_1077 : (!torch.int, !torch.int) -> !torch.list<int>
%4032 = torch.prim.ListConstruct %int1_1078, %int1_1079 : (!torch.int, !torch.int) -> !torch.list<int>
%4033 = torch.prim.ListConstruct %int0_1080, %int0_1080 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1081 = torch.constant.bool false
%none_1082 = torch.constant.none
%int1_1083 = torch.constant.int 1
%4034 = torch.aten.convolution %4017, %4029, %none_1082, %4032, %4030, %4031, %false_1081, %4033, %int1_1083 : !torch.vtensor<[1,960,1,1],f32>, !torch.vtensor<[128,960,1,1],f32>, !torch.none, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,128,1,1],f32>
%4035 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4036 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1084 = torch.constant.int 12
%4037 = torch.aten.item %4035 : !torch.vtensor<[],f32> -> !torch.float
%4038 = torch.aten.item %4036 : !torch.vtensor<[],si8> -> !torch.int
%4039 = torch.aten.quantize_per_tensor %4034, %4037, %4038, %int12_1084 : !torch.vtensor<[1,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,1,1],!torch.qint8>
%4040 = torch.aten.int_repr %4039 : !torch.vtensor<[1,128,1,1],!torch.qint8> -> !torch.vtensor<[1,128,1,1],si8>
%4041 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4042 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4043 = torch.aten.item %4041 : !torch.vtensor<[],f32> -> !torch.float
%4044 = torch.aten.item %4042 : !torch.vtensor<[],si8> -> !torch.int
%4045 = torch.aten._make_per_tensor_quantized_tensor %4040, %4043, %4044 : !torch.vtensor<[1,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,1,1],!torch.qint8>
%4046 = torch.aten.dequantize.self %4045 : !torch.vtensor<[1,128,1,1],!torch.qint8> -> !torch.vtensor<[1,128,1,1],f32>
%4047 = torch.aten.sigmoid %4046 : !torch.vtensor<[1,128,1,1],f32> -> !torch.vtensor<[1,128,1,1],f32>
%4048 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4049 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1085 = torch.constant.int 12
%4050 = torch.aten.item %4048 : !torch.vtensor<[],f32> -> !torch.float
%4051 = torch.aten.item %4049 : !torch.vtensor<[],si8> -> !torch.int
%4052 = torch.aten.quantize_per_tensor %4047, %4050, %4051, %int12_1085 : !torch.vtensor<[1,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,1,1],!torch.qint8>
%4053 = torch.aten.int_repr %4052 : !torch.vtensor<[1,128,1,1],!torch.qint8> -> !torch.vtensor<[1,128,1,1],si8>
%4054 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4055 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4056 = torch.aten.item %4054 : !torch.vtensor<[],f32> -> !torch.float
%4057 = torch.aten.item %4055 : !torch.vtensor<[],si8> -> !torch.int
%4058 = torch.aten._make_per_tensor_quantized_tensor %4053, %4056, %4057 : !torch.vtensor<[1,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,1,1],!torch.qint8>
%4059 = torch.aten.dequantize.self %4058 : !torch.vtensor<[1,128,1,1],!torch.qint8> -> !torch.vtensor<[1,128,1,1],f32>
%4060 = torch.aten.mul.Tensor %3999, %4059 : !torch.vtensor<[1,128,14,14],f32>, !torch.vtensor<[1,128,1,1],f32> -> !torch.vtensor<[1,128,14,14],f32>
%4061 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4062 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1086 = torch.constant.int 12
%4063 = torch.aten.item %4061 : !torch.vtensor<[],f32> -> !torch.float
%4064 = torch.aten.item %4062 : !torch.vtensor<[],si8> -> !torch.int
%4065 = torch.aten.quantize_per_tensor %4060, %4063, %4064, %int12_1086 : !torch.vtensor<[1,128,14,14],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,128,14,14],!torch.qint8>
%4066 = torch.aten.int_repr %4065 : !torch.vtensor<[1,128,14,14],!torch.qint8> -> !torch.vtensor<[1,128,14,14],si8>
%4067 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4068 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4069 = torch.aten.item %4067 : !torch.vtensor<[],f32> -> !torch.float
%4070 = torch.aten.item %4068 : !torch.vtensor<[],si8> -> !torch.int
%4071 = torch.aten._make_per_tensor_quantized_tensor %4066, %4069, %4070 : !torch.vtensor<[1,128,14,14],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,128,14,14],!torch.qint8>
%4072 = torch.aten.dequantize.self %4071 : !torch.vtensor<[1,128,14,14],!torch.qint8> -> !torch.vtensor<[1,128,14,14],f32>
%4073 = torch.vtensor.literal(dense<28> : tensor<si64>) : !torch.vtensor<[],si64>
%4074 = torch.vtensor.literal(dense<28> : tensor<si64>) : !torch.vtensor<[],si64>
%4075 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1087 = torch.constant.int 0
%int0_1088 = torch.constant.int 0
%int0_1089 = torch.constant.int 0
%4076 = torch.aten.select.int %4075, %int0_1087, %int0_1089 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4077 = torch.aten.item %4076 : !torch.vtensor<[1],si64> -> !torch.int
%4078 = torch.aten.lt.int %4077, %int0_1087 : !torch.int, !torch.int -> !torch.bool
%4079 = torch.aten.Int.bool %4078 : !torch.bool -> !torch.int
%4080 = torch.aten.mul.int %4079, %int0_1088 : !torch.int, !torch.int -> !torch.int
%4081 = torch.aten.add.int %4077, %4080 : !torch.int, !torch.int -> !torch.int
%4082 = torch.prim.ListConstruct %4081 : (!torch.int) -> !torch.list<int>
%false_1090 = torch.constant.bool false
%none_1091 = torch.constant.none
%4083 = torch.aten.tensor %4082, %none_1091, %none_1091, %false_1090 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values, %indices = torch.aten.sort %4083, %int0_1087, %false_1090 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1092 = torch.constant.int 0
%4084 = torch.aten.select.int %values, %int0_1087, %int0_1092 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4085 = torch.aten.item %4084 : !torch.vtensor<[1],si64> -> !torch.int
%4086 = torch.aten.unsqueeze %4073, %4085 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4087 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1093 = torch.constant.int 0
%int0_1094 = torch.constant.int 0
%int0_1095 = torch.constant.int 0
%4088 = torch.aten.select.int %4087, %int0_1093, %int0_1095 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4089 = torch.aten.item %4088 : !torch.vtensor<[1],si64> -> !torch.int
%4090 = torch.aten.lt.int %4089, %int0_1093 : !torch.int, !torch.int -> !torch.bool
%4091 = torch.aten.Int.bool %4090 : !torch.bool -> !torch.int
%4092 = torch.aten.mul.int %4091, %int0_1094 : !torch.int, !torch.int -> !torch.int
%4093 = torch.aten.add.int %4089, %4092 : !torch.int, !torch.int -> !torch.int
%4094 = torch.prim.ListConstruct %4093 : (!torch.int) -> !torch.list<int>
%false_1096 = torch.constant.bool false
%none_1097 = torch.constant.none
%4095 = torch.aten.tensor %4094, %none_1097, %none_1097, %false_1096 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1098, %indices_1099 = torch.aten.sort %4095, %int0_1093, %false_1096 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1100 = torch.constant.int 0
%4096 = torch.aten.select.int %values_1098, %int0_1093, %int0_1100 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4097 = torch.aten.item %4096 : !torch.vtensor<[1],si64> -> !torch.int
%4098 = torch.aten.unsqueeze %4074, %4097 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4099 = torch.prim.ListConstruct %4086, %4098 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1101 = torch.constant.int 0
%4100 = torch.aten.cat %4099, %int0_1101 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%4101 = torch.aten._shape_as_tensor %4072 : !torch.vtensor<[1,128,14,14],f32> -> !torch.vtensor<[4],si64>
%4102 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4103 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4104 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1102 = torch.constant.none
%int1_1103 = torch.constant.int 1
%4105 = torch.prim.ListConstruct %int1_1103 : (!torch.int) -> !torch.list<int>
%4106 = torch.aten.ones %4105, %none_1102, %none_1102, %none_1102, %none_1102 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1104 = torch.constant.int 0
%int0_1105 = torch.constant.int 0
%4107 = torch.prim.NumToTensor.Scalar %int0_1105 : !torch.int -> !torch.vtensor<[1],si64>
%4108 = torch.aten.index_select %4103, %int0_1104, %4107 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4109 = torch.aten.item %4108 : !torch.vtensor<[1],si64> -> !torch.int
%4110 = torch.aten.index_select %4104, %int0_1104, %4107 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4111 = torch.aten.item %4110 : !torch.vtensor<[1],si64> -> !torch.int
%4112 = torch.aten.index_select %4102, %int0_1104, %4107 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4113 = torch.aten.item %4112 : !torch.vtensor<[1],si64> -> !torch.int
%4114 = torch.aten.index_select %4106, %int0_1104, %4107 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4115 = torch.aten.item %4114 : !torch.vtensor<[1],si64> -> !torch.int
%4116 = torch.aten.slice.Tensor %4101, %4113, %4109, %4111, %4115 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1106 = torch.constant.int 4
%none_1107 = torch.constant.none
%false_1108 = torch.constant.bool false
%4117 = torch.aten.to.dtype %4100, %int4_1106, %false_1108, %false_1108, %none_1107 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%4118 = torch.prim.ListConstruct %4116, %4117 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1109 = torch.constant.int 0
%4119 = torch.aten.cat %4118, %int0_1109 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%4120 = torch.operator "onnx.Resize"(%4072, %none, %none, %4119) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[1,128,14,14],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%4121 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4122 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1110 = torch.constant.int 12
%4123 = torch.aten.item %4121 : !torch.vtensor<[],f32> -> !torch.float
%4124 = torch.aten.item %4122 : !torch.vtensor<[],si8> -> !torch.int
%4125 = torch.aten.quantize_per_tensor %4120, %4123, %4124, %int12_1110 : !torch.vtensor<[?,?,?,?],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[?,?,?,?],!torch.qint8>
%4126 = torch.aten.int_repr %4125 : !torch.vtensor<[?,?,?,?],!torch.qint8> -> !torch.vtensor<[?,?,?,?],si8>
%4127 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4128 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4129 = torch.aten.item %4127 : !torch.vtensor<[],f32> -> !torch.float
%4130 = torch.aten.item %4128 : !torch.vtensor<[],si8> -> !torch.int
%4131 = torch.aten._make_per_tensor_quantized_tensor %4126, %4129, %4130 : !torch.vtensor<[?,?,?,?],si8>, !torch.float, !torch.int -> !torch.vtensor<[?,?,?,?],!torch.qint8>
%4132 = torch.aten.dequantize.self %4131 : !torch.vtensor<[?,?,?,?],!torch.qint8> -> !torch.vtensor<[?,?,?,?],f32>
%4133 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4134 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1111 = torch.constant.int 12
%4135 = torch.aten.item %4133 : !torch.vtensor<[],f32> -> !torch.float
%4136 = torch.aten.item %4134 : !torch.vtensor<[],si8> -> !torch.int
%4137 = torch.aten.quantize_per_tensor %127, %4135, %4136, %int12_1111 : !torch.vtensor<[21,40,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[21,40,1,1],!torch.qint8>
%4138 = torch.aten.int_repr %4137 : !torch.vtensor<[21,40,1,1],!torch.qint8> -> !torch.vtensor<[21,40,1,1],si8>
%4139 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%4140 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4141 = torch.aten.item %4139 : !torch.vtensor<[],f32> -> !torch.float
%4142 = torch.aten.item %4140 : !torch.vtensor<[],si8> -> !torch.int
%4143 = torch.aten._make_per_tensor_quantized_tensor %4138, %4141, %4142 : !torch.vtensor<[21,40,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[21,40,1,1],!torch.qint8>
%4144 = torch.aten.dequantize.self %4143 : !torch.vtensor<[21,40,1,1],!torch.qint8> -> !torch.vtensor<[21,40,1,1],f32>
%4145 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4146 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1112 = torch.constant.int 12
%4147 = torch.aten.item %4145 : !torch.vtensor<[],f32> -> !torch.float
%4148 = torch.aten.item %4146 : !torch.vtensor<[],si8> -> !torch.int
%4149 = torch.aten.quantize_per_tensor %128, %4147, %4148, %int12_1112 : !torch.vtensor<[21],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[21],!torch.qint8>
%4150 = torch.aten.int_repr %4149 : !torch.vtensor<[21],!torch.qint8> -> !torch.vtensor<[21],si8>
%4151 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4152 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4153 = torch.aten.item %4151 : !torch.vtensor<[],f32> -> !torch.float
%4154 = torch.aten.item %4152 : !torch.vtensor<[],si8> -> !torch.int
%4155 = torch.aten._make_per_tensor_quantized_tensor %4150, %4153, %4154 : !torch.vtensor<[21],si8>, !torch.float, !torch.int -> !torch.vtensor<[21],!torch.qint8>
%4156 = torch.aten.dequantize.self %4155 : !torch.vtensor<[21],!torch.qint8> -> !torch.vtensor<[21],f32>
%int0_1113 = torch.constant.int 0
%int0_1114 = torch.constant.int 0
%int1_1115 = torch.constant.int 1
%int1_1116 = torch.constant.int 1
%int1_1117 = torch.constant.int 1
%int1_1118 = torch.constant.int 1
%int0_1119 = torch.constant.int 0
%4157 = torch.prim.ListConstruct %int0_1113, %int0_1114 : (!torch.int, !torch.int) -> !torch.list<int>
%4158 = torch.prim.ListConstruct %int1_1115, %int1_1116 : (!torch.int, !torch.int) -> !torch.list<int>
%4159 = torch.prim.ListConstruct %int1_1117, %int1_1118 : (!torch.int, !torch.int) -> !torch.list<int>
%4160 = torch.prim.ListConstruct %int0_1119, %int0_1119 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1120 = torch.constant.bool false
%int1_1121 = torch.constant.int 1
%4161 = torch.aten.convolution %838, %4144, %4156, %4159, %4157, %4158, %false_1120, %4160, %int1_1121 : !torch.vtensor<[1,40,28,28],f32>, !torch.vtensor<[21,40,1,1],f32>, !torch.vtensor<[21],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,21,28,28],f32>
%4162 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4163 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1122 = torch.constant.int 12
%4164 = torch.aten.item %4162 : !torch.vtensor<[],f32> -> !torch.float
%4165 = torch.aten.item %4163 : !torch.vtensor<[],si8> -> !torch.int
%4166 = torch.aten.quantize_per_tensor %4161, %4164, %4165, %int12_1122 : !torch.vtensor<[1,21,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1,21,28,28],!torch.qint8>
%4167 = torch.aten.int_repr %4166 : !torch.vtensor<[1,21,28,28],!torch.qint8> -> !torch.vtensor<[1,21,28,28],si8>
%4168 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%4169 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4170 = torch.aten.item %4168 : !torch.vtensor<[],f32> -> !torch.float
%4171 = torch.aten.item %4169 : !torch.vtensor<[],si8> -> !torch.int
%4172 = torch.aten._make_per_tensor_quantized_tensor %4167, %4170, %4171 : !torch.vtensor<[1,21,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,21,28,28],!torch.qint8>
%4173 = torch.aten.dequantize.self %4172 : !torch.vtensor<[1,21,28,28],!torch.qint8> -> !torch.vtensor<[1,21,28,28],f32>
%4174 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4175 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1123 = torch.constant.int 12
%4176 = torch.aten.item %4174 : !torch.vtensor<[],f32> -> !torch.float
%4177 = torch.aten.item %4175 : !torch.vtensor<[],si8> -> !torch.int
%4178 = torch.aten.quantize_per_tensor %129, %4176, %4177, %int12_1123 : !torch.vtensor<[21,128,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[21,128,1,1],!torch.qint8>
%4179 = torch.aten.int_repr %4178 : !torch.vtensor<[21,128,1,1],!torch.qint8> -> !torch.vtensor<[21,128,1,1],si8>
%4180 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4181 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4182 = torch.aten.item %4180 : !torch.vtensor<[],f32> -> !torch.float
%4183 = torch.aten.item %4181 : !torch.vtensor<[],si8> -> !torch.int
%4184 = torch.aten._make_per_tensor_quantized_tensor %4179, %4182, %4183 : !torch.vtensor<[21,128,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[21,128,1,1],!torch.qint8>
%4185 = torch.aten.dequantize.self %4184 : !torch.vtensor<[21,128,1,1],!torch.qint8> -> !torch.vtensor<[21,128,1,1],f32>
%4186 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4187 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1124 = torch.constant.int 12
%4188 = torch.aten.item %4186 : !torch.vtensor<[],f32> -> !torch.float
%4189 = torch.aten.item %4187 : !torch.vtensor<[],si8> -> !torch.int
%4190 = torch.aten.quantize_per_tensor %130, %4188, %4189, %int12_1124 : !torch.vtensor<[21],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[21],!torch.qint8>
%4191 = torch.aten.int_repr %4190 : !torch.vtensor<[21],!torch.qint8> -> !torch.vtensor<[21],si8>
%4192 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%4193 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4194 = torch.aten.item %4192 : !torch.vtensor<[],f32> -> !torch.float
%4195 = torch.aten.item %4193 : !torch.vtensor<[],si8> -> !torch.int
%4196 = torch.aten._make_per_tensor_quantized_tensor %4191, %4194, %4195 : !torch.vtensor<[21],si8>, !torch.float, !torch.int -> !torch.vtensor<[21],!torch.qint8>
%4197 = torch.aten.dequantize.self %4196 : !torch.vtensor<[21],!torch.qint8> -> !torch.vtensor<[21],f32>
%int0_1125 = torch.constant.int 0
%int0_1126 = torch.constant.int 0
%int1_1127 = torch.constant.int 1
%int1_1128 = torch.constant.int 1
%int1_1129 = torch.constant.int 1
%int1_1130 = torch.constant.int 1
%int0_1131 = torch.constant.int 0
%4198 = torch.prim.ListConstruct %int0_1125, %int0_1126 : (!torch.int, !torch.int) -> !torch.list<int>
%4199 = torch.prim.ListConstruct %int1_1127, %int1_1128 : (!torch.int, !torch.int) -> !torch.list<int>
%4200 = torch.prim.ListConstruct %int1_1129, %int1_1130 : (!torch.int, !torch.int) -> !torch.list<int>
%4201 = torch.prim.ListConstruct %int0_1131, %int0_1131 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1132 = torch.constant.bool false
%int1_1133 = torch.constant.int 1
%4202 = torch.aten.convolution %4132, %4185, %4197, %4200, %4198, %4199, %false_1132, %4201, %int1_1133 : !torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[21,128,1,1],f32>, !torch.vtensor<[21],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[?,21,?,?],f32>
%4203 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%4204 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1134 = torch.constant.int 12
%4205 = torch.aten.item %4203 : !torch.vtensor<[],f32> -> !torch.float
%4206 = torch.aten.item %4204 : !torch.vtensor<[],si8> -> !torch.int
%4207 = torch.aten.quantize_per_tensor %4202, %4205, %4206, %int12_1134 : !torch.vtensor<[?,21,?,?],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[?,21,?,?],!torch.qint8>
%4208 = torch.aten.int_repr %4207 : !torch.vtensor<[?,21,?,?],!torch.qint8> -> !torch.vtensor<[?,21,?,?],si8>
%4209 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%4210 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4211 = torch.aten.item %4209 : !torch.vtensor<[],f32> -> !torch.float
%4212 = torch.aten.item %4210 : !torch.vtensor<[],si8> -> !torch.int
%4213 = torch.aten._make_per_tensor_quantized_tensor %4208, %4211, %4212 : !torch.vtensor<[?,21,?,?],si8>, !torch.float, !torch.int -> !torch.vtensor<[?,21,?,?],!torch.qint8>
%4214 = torch.aten.dequantize.self %4213 : !torch.vtensor<[?,21,?,?],!torch.qint8> -> !torch.vtensor<[?,21,?,?],f32>
%int1_1135 = torch.constant.int 1
%4215 = torch.aten.add.Tensor %4173, %4214, %int1_1135 : !torch.vtensor<[1,21,28,28],f32>, !torch.vtensor<[?,21,?,?],f32>, !torch.int -> !torch.vtensor<[?,21,28,28],f32>
%4216 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%4217 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1136 = torch.constant.int 12
%4218 = torch.aten.item %4216 : !torch.vtensor<[],f32> -> !torch.float
%4219 = torch.aten.item %4217 : !torch.vtensor<[],si8> -> !torch.int
%4220 = torch.aten.quantize_per_tensor %4215, %4218, %4219, %int12_1136 : !torch.vtensor<[?,21,28,28],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[?,21,28,28],!torch.qint8>
%4221 = torch.aten.int_repr %4220 : !torch.vtensor<[?,21,28,28],!torch.qint8> -> !torch.vtensor<[?,21,28,28],si8>
%4222 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%4223 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4224 = torch.aten.item %4222 : !torch.vtensor<[],f32> -> !torch.float
%4225 = torch.aten.item %4223 : !torch.vtensor<[],si8> -> !torch.int
%4226 = torch.aten._make_per_tensor_quantized_tensor %4221, %4224, %4225 : !torch.vtensor<[?,21,28,28],si8>, !torch.float, !torch.int -> !torch.vtensor<[?,21,28,28],!torch.qint8>
%4227 = torch.aten.dequantize.self %4226 : !torch.vtensor<[?,21,28,28],!torch.qint8> -> !torch.vtensor<[?,21,28,28],f32>
%4228 = torch.vtensor.literal(dense<224> : tensor<si64>) : !torch.vtensor<[],si64>
%4229 = torch.vtensor.literal(dense<224> : tensor<si64>) : !torch.vtensor<[],si64>
%4230 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1137 = torch.constant.int 0
%int0_1138 = torch.constant.int 0
%int0_1139 = torch.constant.int 0
%4231 = torch.aten.select.int %4230, %int0_1137, %int0_1139 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4232 = torch.aten.item %4231 : !torch.vtensor<[1],si64> -> !torch.int
%4233 = torch.aten.lt.int %4232, %int0_1137 : !torch.int, !torch.int -> !torch.bool
%4234 = torch.aten.Int.bool %4233 : !torch.bool -> !torch.int
%4235 = torch.aten.mul.int %4234, %int0_1138 : !torch.int, !torch.int -> !torch.int
%4236 = torch.aten.add.int %4232, %4235 : !torch.int, !torch.int -> !torch.int
%4237 = torch.prim.ListConstruct %4236 : (!torch.int) -> !torch.list<int>
%false_1140 = torch.constant.bool false
%none_1141 = torch.constant.none
%4238 = torch.aten.tensor %4237, %none_1141, %none_1141, %false_1140 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1142, %indices_1143 = torch.aten.sort %4238, %int0_1137, %false_1140 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1144 = torch.constant.int 0
%4239 = torch.aten.select.int %values_1142, %int0_1137, %int0_1144 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4240 = torch.aten.item %4239 : !torch.vtensor<[1],si64> -> !torch.int
%4241 = torch.aten.unsqueeze %4228, %4240 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4242 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%int0_1145 = torch.constant.int 0
%int0_1146 = torch.constant.int 0
%int0_1147 = torch.constant.int 0
%4243 = torch.aten.select.int %4242, %int0_1145, %int0_1147 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4244 = torch.aten.item %4243 : !torch.vtensor<[1],si64> -> !torch.int
%4245 = torch.aten.lt.int %4244, %int0_1145 : !torch.int, !torch.int -> !torch.bool
%4246 = torch.aten.Int.bool %4245 : !torch.bool -> !torch.int
%4247 = torch.aten.mul.int %4246, %int0_1146 : !torch.int, !torch.int -> !torch.int
%4248 = torch.aten.add.int %4244, %4247 : !torch.int, !torch.int -> !torch.int
%4249 = torch.prim.ListConstruct %4248 : (!torch.int) -> !torch.list<int>
%false_1148 = torch.constant.bool false
%none_1149 = torch.constant.none
%4250 = torch.aten.tensor %4249, %none_1149, %none_1149, %false_1148 : !torch.list<int>, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[1],si64>
%values_1150, %indices_1151 = torch.aten.sort %4250, %int0_1145, %false_1148 : !torch.vtensor<[1],si64>, !torch.int, !torch.bool -> !torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>
%int0_1152 = torch.constant.int 0
%4251 = torch.aten.select.int %values_1150, %int0_1145, %int0_1152 : !torch.vtensor<[1],si64>, !torch.int, !torch.int -> !torch.vtensor<[1],si64>
%4252 = torch.aten.item %4251 : !torch.vtensor<[1],si64> -> !torch.int
%4253 = torch.aten.unsqueeze %4229, %4252 : !torch.vtensor<[],si64>, !torch.int -> !torch.vtensor<[1],si64>
%4254 = torch.prim.ListConstruct %4241, %4253 : (!torch.vtensor<[1],si64>, !torch.vtensor<[1],si64>) -> !torch.list<vtensor>
%int0_1153 = torch.constant.int 0
%4255 = torch.aten.cat %4254, %int0_1153 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2],si64>
%4256 = torch.aten._shape_as_tensor %4227 : !torch.vtensor<[?,21,28,28],f32> -> !torch.vtensor<[4],si64>
%4257 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4258 = torch.vtensor.literal(dense<0> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%4259 = torch.vtensor.literal(dense<2> : tensor<1xsi64>) : !torch.vtensor<[1],si64>
%none_1154 = torch.constant.none
%int1_1155 = torch.constant.int 1
%4260 = torch.prim.ListConstruct %int1_1155 : (!torch.int) -> !torch.list<int>
%4261 = torch.aten.ones %4260, %none_1154, %none_1154, %none_1154, %none_1154 : !torch.list<int>, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1],si64>
%int0_1156 = torch.constant.int 0
%int0_1157 = torch.constant.int 0
%4262 = torch.prim.NumToTensor.Scalar %int0_1157 : !torch.int -> !torch.vtensor<[1],si64>
%4263 = torch.aten.index_select %4258, %int0_1156, %4262 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4264 = torch.aten.item %4263 : !torch.vtensor<[1],si64> -> !torch.int
%4265 = torch.aten.index_select %4259, %int0_1156, %4262 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4266 = torch.aten.item %4265 : !torch.vtensor<[1],si64> -> !torch.int
%4267 = torch.aten.index_select %4257, %int0_1156, %4262 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4268 = torch.aten.item %4267 : !torch.vtensor<[1],si64> -> !torch.int
%4269 = torch.aten.index_select %4261, %int0_1156, %4262 : !torch.vtensor<[1],si64>, !torch.int, !torch.vtensor<[1],si64> -> !torch.vtensor<[1],si64>
%4270 = torch.aten.item %4269 : !torch.vtensor<[1],si64> -> !torch.int
%4271 = torch.aten.slice.Tensor %4256, %4268, %4264, %4266, %4270 : !torch.vtensor<[4],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
%int4_1158 = torch.constant.int 4
%none_1159 = torch.constant.none
%false_1160 = torch.constant.bool false
%4272 = torch.aten.to.dtype %4255, %int4_1158, %false_1160, %false_1160, %none_1159 : !torch.vtensor<[2],si64>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[2],si64>
%4273 = torch.prim.ListConstruct %4271, %4272 : (!torch.vtensor<[2],si64>, !torch.vtensor<[2],si64>) -> !torch.list<vtensor>
%int0_1161 = torch.constant.int 0
%4274 = torch.aten.cat %4273, %int0_1161 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4],si64>
%4275 = torch.operator "onnx.Resize"(%4227, %none, %none, %4274) {torch.onnx.coordinate_transformation_mode = "half_pixel", torch.onnx.cubic_coeff_a = -7.500000e-01 : f32, torch.onnx.mode = "linear", torch.onnx.nearest_mode = "floor"} : (!torch.vtensor<[?,21,28,28],f32>, !torch.none, !torch.none, !torch.vtensor<[4],si64>) -> !torch.vtensor<[?,?,?,?],f32>
%4276 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%4277 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%int12_1162 = torch.constant.int 12
%4278 = torch.aten.item %4276 : !torch.vtensor<[],f32> -> !torch.float
%4279 = torch.aten.item %4277 : !torch.vtensor<[],si8> -> !torch.int
%4280 = torch.aten.quantize_per_tensor %4275, %4278, %4279, %int12_1162 : !torch.vtensor<[?,?,?,?],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[?,?,?,?],!torch.qint8>
%4281 = torch.aten.int_repr %4280 : !torch.vtensor<[?,?,?,?],!torch.qint8> -> !torch.vtensor<[?,?,?,?],si8>
%4282 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%4283 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%4284 = torch.aten.item %4282 : !torch.vtensor<[],f32> -> !torch.float
%4285 = torch.aten.item %4283 : !torch.vtensor<[],si8> -> !torch.int
%4286 = torch.aten._make_per_tensor_quantized_tensor %4281, %4284, %4285 : !torch.vtensor<[?,?,?,?],si8>, !torch.float, !torch.int -> !torch.vtensor<[1,21,224,224],!torch.qint8>
%4287 = torch.aten.dequantize.self %4286 : !torch.vtensor<[1,21,224,224],!torch.qint8> -> !torch.vtensor<[1,21,224,224],f32>
return %4287 : !torch.vtensor<[1,21,224,224],f32>
}
}
// NOTE: trailing GitHub Gist page boilerplate ("Sign up for free to join this conversation...") removed — it is web-page residue, not part of the MLIR source, and would break MLIR parsing after the module's closing brace.