Created April 19, 2024 22:31
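The torch.onnx_meta attributes on torch_jit below (opset 15, producer "pytorch" 1.12.1) indicate this torch-dialect IR was obtained by importing an ONNX export of a quantized model through torch-mlir's ONNX frontend and then lowering the ONNX operators to torch/aten ops. A minimal reproduction sketch in Python, assuming a local model.onnx and an installed torch-mlir with the ONNX importer; the file names and the two tool invocations are assumptions, not recorded in this gist:

import subprocess

# Assumed workflow: import the ONNX protobuf into torch-onnx dialect IR
# (torch.operator ops), then lower those ops to the torch/aten ops shown below.
# "model.onnx" and the output paths are placeholders.
subprocess.run(["python", "-m", "torch_mlir.tools.import_onnx",
                "model.onnx", "-o", "model.torch_onnx.mlir"], check=True)
subprocess.run(["torch-mlir-opt", "--convert-torch-onnx-to-torch",
                "model.torch_onnx.mlir", "-o", "model.torch.mlir"], check=True)

Each QuantizeLinear/DequantizeLinear pair in the source model appears below as a torch.aten.quantize_per_tensor / int_repr / _make_per_tensor_quantized_tensor / dequantize.self sequence around the corresponding convolution.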
module {
func.func @torch_jit(%arg0: !torch.vtensor<[32,3,224,224],f32>) -> !torch.vtensor<[32,1000],f32> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 15 : si64, torch.onnx_meta.producer_name = "pytorch", torch.onnx_meta.producer_version = "1.12.1"} {
%0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x3x3x3xf32>) : !torch.vtensor<[32,3,3,3],f32>
%1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32x32x3x3xf32>) : !torch.vtensor<[32,32,3,3],f32>
%3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<32xf32>) : !torch.vtensor<[32],f32>
%4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x32x3x3xf32>) : !torch.vtensor<[64,32,3,3],f32>
%5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x160x1x1xf32>) : !torch.vtensor<[64,160,1,1],f32>
%9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x160x1x1xf32>) : !torch.vtensor<[64,160,1,1],f32>
%13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x1x7xf32>) : !torch.vtensor<[64,64,1,7],f32>
%15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x64x7x1xf32>) : !torch.vtensor<[64,64,7,1],f32>
%17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x192x3x3xf32>) : !torch.vtensor<[192,192,3,3],f32>
%21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x384x1x1xf32>) : !torch.vtensor<[96,384,1,1],f32>
%23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x384x1x1xf32>) : !torch.vtensor<[64,384,1,1],f32>
%25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x384x1x1xf32>) : !torch.vtensor<[64,384,1,1],f32>
%29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x96x3x3xf32>) : !torch.vtensor<[96,96,3,3],f32>
%33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x384x1x1xf32>) : !torch.vtensor<[96,384,1,1],f32>
%35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x384x1x1xf32>) : !torch.vtensor<[96,384,1,1],f32>
%37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x384x1x1xf32>) : !torch.vtensor<[64,384,1,1],f32>
%39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x384x1x1xf32>) : !torch.vtensor<[64,384,1,1],f32>
%43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x96x3x3xf32>) : !torch.vtensor<[96,96,3,3],f32>
%47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x384x1x1xf32>) : !torch.vtensor<[96,384,1,1],f32>
%49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x384x1x1xf32>) : !torch.vtensor<[96,384,1,1],f32>
%51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x384x1x1xf32>) : !torch.vtensor<[64,384,1,1],f32>
%53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x384x1x1xf32>) : !torch.vtensor<[64,384,1,1],f32>
%57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x96x3x3xf32>) : !torch.vtensor<[96,96,3,3],f32>
%61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x384x1x1xf32>) : !torch.vtensor<[96,384,1,1],f32>
%63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x384x1x1xf32>) : !torch.vtensor<[96,384,1,1],f32>
%65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x384x1x1xf32>) : !torch.vtensor<[64,384,1,1],f32>
%67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64x384x1x1xf32>) : !torch.vtensor<[64,384,1,1],f32>
%71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<64xf32>) : !torch.vtensor<[64],f32>
%72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x64x3x3xf32>) : !torch.vtensor<[96,64,3,3],f32>
%73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x96x3x3xf32>) : !torch.vtensor<[96,96,3,3],f32>
%75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96x384x1x1xf32>) : !torch.vtensor<[96,384,1,1],f32>
%77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<96xf32>) : !torch.vtensor<[96],f32>
%78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x384x3x3xf32>) : !torch.vtensor<[384,384,3,3],f32>
%79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x384x1x1xf32>) : !torch.vtensor<[192,384,1,1],f32>
%81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x3x3xf32>) : !torch.vtensor<[224,192,3,3],f32>
%83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x3x3xf32>) : !torch.vtensor<[256,224,3,3],f32>
%85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1024x1x1xf32>) : !torch.vtensor<[384,1024,1,1],f32>
%87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x7x1xf32>) : !torch.vtensor<[256,224,7,1],f32>
%93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x192x7x1xf32>) : !torch.vtensor<[192,192,7,1],f32>
%97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x224x7x1xf32>) : !torch.vtensor<[224,224,7,1],f32>
%101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x1x7xf32>) : !torch.vtensor<[256,224,1,7],f32>
%103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x1024x1x1xf32>) : !torch.vtensor<[128,1024,1,1],f32>
%105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1024x1x1xf32>) : !torch.vtensor<[384,1024,1,1],f32>
%107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x7x1xf32>) : !torch.vtensor<[256,224,7,1],f32>
%113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x192x7x1xf32>) : !torch.vtensor<[192,192,7,1],f32>
%117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x224x7x1xf32>) : !torch.vtensor<[224,224,7,1],f32>
%121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x1x7xf32>) : !torch.vtensor<[256,224,1,7],f32>
%123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x1024x1x1xf32>) : !torch.vtensor<[128,1024,1,1],f32>
%125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1024x1x1xf32>) : !torch.vtensor<[384,1024,1,1],f32>
%127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%131 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%132 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x7x1xf32>) : !torch.vtensor<[256,224,7,1],f32>
%133 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%134 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%135 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%136 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x192x7x1xf32>) : !torch.vtensor<[192,192,7,1],f32>
%137 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%138 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%139 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%140 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x224x7x1xf32>) : !torch.vtensor<[224,224,7,1],f32>
%141 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%142 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x1x7xf32>) : !torch.vtensor<[256,224,1,7],f32>
%143 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%144 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x1024x1x1xf32>) : !torch.vtensor<[128,1024,1,1],f32>
%145 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%146 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1024x1x1xf32>) : !torch.vtensor<[384,1024,1,1],f32>
%147 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%148 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%149 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%150 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%151 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%152 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x7x1xf32>) : !torch.vtensor<[256,224,7,1],f32>
%153 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%154 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%155 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%156 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x192x7x1xf32>) : !torch.vtensor<[192,192,7,1],f32>
%157 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%158 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%159 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%160 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x224x7x1xf32>) : !torch.vtensor<[224,224,7,1],f32>
%161 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%162 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x1x7xf32>) : !torch.vtensor<[256,224,1,7],f32>
%163 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%164 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x1024x1x1xf32>) : !torch.vtensor<[128,1024,1,1],f32>
%165 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%166 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1024x1x1xf32>) : !torch.vtensor<[384,1024,1,1],f32>
%167 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%168 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%169 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%170 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%171 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%172 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x7x1xf32>) : !torch.vtensor<[256,224,7,1],f32>
%173 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%174 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%175 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%176 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x192x7x1xf32>) : !torch.vtensor<[192,192,7,1],f32>
%177 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%178 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%179 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%180 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x224x7x1xf32>) : !torch.vtensor<[224,224,7,1],f32>
%181 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%182 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x1x7xf32>) : !torch.vtensor<[256,224,1,7],f32>
%183 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%184 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x1024x1x1xf32>) : !torch.vtensor<[128,1024,1,1],f32>
%185 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%186 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1024x1x1xf32>) : !torch.vtensor<[384,1024,1,1],f32>
%187 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%188 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%189 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%190 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%191 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%192 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x7x1xf32>) : !torch.vtensor<[256,224,7,1],f32>
%193 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%194 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%195 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%196 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x192x7x1xf32>) : !torch.vtensor<[192,192,7,1],f32>
%197 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%198 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%199 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%200 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x224x7x1xf32>) : !torch.vtensor<[224,224,7,1],f32>
%201 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%202 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x1x7xf32>) : !torch.vtensor<[256,224,1,7],f32>
%203 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%204 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x1024x1x1xf32>) : !torch.vtensor<[128,1024,1,1],f32>
%205 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%206 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1024x1x1xf32>) : !torch.vtensor<[384,1024,1,1],f32>
%207 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%208 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%209 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%210 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%211 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%212 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x7x1xf32>) : !torch.vtensor<[256,224,7,1],f32>
%213 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%214 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%215 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%216 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x192x7x1xf32>) : !torch.vtensor<[192,192,7,1],f32>
%217 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%218 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x192x1x7xf32>) : !torch.vtensor<[224,192,1,7],f32>
%219 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%220 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224x224x7x1xf32>) : !torch.vtensor<[224,224,7,1],f32>
%221 = torch.vtensor.literal(dense_resource<__elided__> : tensor<224xf32>) : !torch.vtensor<[224],f32>
%222 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x224x1x7xf32>) : !torch.vtensor<[256,224,1,7],f32>
%223 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%224 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128x1024x1x1xf32>) : !torch.vtensor<[128,1024,1,1],f32>
%225 = torch.vtensor.literal(dense_resource<__elided__> : tensor<128xf32>) : !torch.vtensor<[128],f32>
%226 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x1024x1x1xf32>) : !torch.vtensor<[192,1024,1,1],f32>
%227 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%228 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192x192x3x3xf32>) : !torch.vtensor<[192,192,3,3],f32>
%229 = torch.vtensor.literal(dense_resource<__elided__> : tensor<192xf32>) : !torch.vtensor<[192],f32>
%230 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1024x1x1xf32>) : !torch.vtensor<[256,1024,1,1],f32>
%231 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%232 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x256x1x7xf32>) : !torch.vtensor<[256,256,1,7],f32>
%233 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%234 = torch.vtensor.literal(dense_resource<__elided__> : tensor<320x256x7x1xf32>) : !torch.vtensor<[320,256,7,1],f32>
%235 = torch.vtensor.literal(dense_resource<__elided__> : tensor<320xf32>) : !torch.vtensor<[320],f32>
%236 = torch.vtensor.literal(dense_resource<__elided__> : tensor<320x320x3x3xf32>) : !torch.vtensor<[320,320,3,3],f32>
%237 = torch.vtensor.literal(dense_resource<__elided__> : tensor<320xf32>) : !torch.vtensor<[320],f32>
%238 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1536x1x1xf32>) : !torch.vtensor<[256,1536,1,1],f32>
%239 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%240 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1536x1x1xf32>) : !torch.vtensor<[384,1536,1,1],f32>
%241 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%242 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x384x1x3xf32>) : !torch.vtensor<[256,384,1,3],f32>
%243 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%244 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x384x3x1xf32>) : !torch.vtensor<[256,384,3,1],f32>
%245 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%246 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1536x1x1xf32>) : !torch.vtensor<[384,1536,1,1],f32>
%247 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%248 = torch.vtensor.literal(dense_resource<__elided__> : tensor<448x384x3x1xf32>) : !torch.vtensor<[448,384,3,1],f32>
%249 = torch.vtensor.literal(dense_resource<__elided__> : tensor<448xf32>) : !torch.vtensor<[448],f32>
%250 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x448x1x3xf32>) : !torch.vtensor<[512,448,1,3],f32>
%251 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%252 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x1x3xf32>) : !torch.vtensor<[256,512,1,3],f32>
%253 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%254 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x1xf32>) : !torch.vtensor<[256,512,3,1],f32>
%255 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%256 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1536x1x1xf32>) : !torch.vtensor<[256,1536,1,1],f32>
%257 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%258 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1536x1x1xf32>) : !torch.vtensor<[256,1536,1,1],f32>
%259 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%260 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1536x1x1xf32>) : !torch.vtensor<[384,1536,1,1],f32>
%261 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%262 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x384x1x3xf32>) : !torch.vtensor<[256,384,1,3],f32>
%263 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%264 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x384x3x1xf32>) : !torch.vtensor<[256,384,3,1],f32>
%265 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%266 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1536x1x1xf32>) : !torch.vtensor<[384,1536,1,1],f32>
%267 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%268 = torch.vtensor.literal(dense_resource<__elided__> : tensor<448x384x3x1xf32>) : !torch.vtensor<[448,384,3,1],f32>
%269 = torch.vtensor.literal(dense_resource<__elided__> : tensor<448xf32>) : !torch.vtensor<[448],f32>
%270 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x448x1x3xf32>) : !torch.vtensor<[512,448,1,3],f32>
%271 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%272 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x1x3xf32>) : !torch.vtensor<[256,512,1,3],f32>
%273 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%274 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x1xf32>) : !torch.vtensor<[256,512,3,1],f32>
%275 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%276 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1536x1x1xf32>) : !torch.vtensor<[256,1536,1,1],f32>
%277 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%278 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1536x1x1xf32>) : !torch.vtensor<[256,1536,1,1],f32>
%279 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%280 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1536x1x1xf32>) : !torch.vtensor<[384,1536,1,1],f32>
%281 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%282 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x384x1x3xf32>) : !torch.vtensor<[256,384,1,3],f32>
%283 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%284 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x384x3x1xf32>) : !torch.vtensor<[256,384,3,1],f32>
%285 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%286 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384x1536x1x1xf32>) : !torch.vtensor<[384,1536,1,1],f32>
%287 = torch.vtensor.literal(dense_resource<__elided__> : tensor<384xf32>) : !torch.vtensor<[384],f32>
%288 = torch.vtensor.literal(dense_resource<__elided__> : tensor<448x384x3x1xf32>) : !torch.vtensor<[448,384,3,1],f32>
%289 = torch.vtensor.literal(dense_resource<__elided__> : tensor<448xf32>) : !torch.vtensor<[448],f32>
%290 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512x448x1x3xf32>) : !torch.vtensor<[512,448,1,3],f32>
%291 = torch.vtensor.literal(dense_resource<__elided__> : tensor<512xf32>) : !torch.vtensor<[512],f32>
%292 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x1x3xf32>) : !torch.vtensor<[256,512,1,3],f32>
%293 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%294 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x512x3x1xf32>) : !torch.vtensor<[256,512,3,1],f32>
%295 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%296 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256x1536x1x1xf32>) : !torch.vtensor<[256,1536,1,1],f32>
%297 = torch.vtensor.literal(dense_resource<__elided__> : tensor<256xf32>) : !torch.vtensor<[256],f32>
%298 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1000x1536xf32>) : !torch.vtensor<[1000,1536],f32>
%299 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1000xf32>) : !torch.vtensor<[1000],f32>
%300 = torch.vtensor.literal(dense<6.250000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%301 = torch.vtensor.literal(dense<0> : tensor<si8>) : !torch.vtensor<[],si8>
%302 = torch.vtensor.literal(dense<1.562500e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%303 = torch.vtensor.literal(dense<1.250000e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%304 = torch.vtensor.literal(dense<7.812500e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%305 = torch.vtensor.literal(dense<3.906250e-03> : tensor<f32>) : !torch.vtensor<[],f32>
%306 = torch.vtensor.literal(dense<3.125000e-02> : tensor<f32>) : !torch.vtensor<[],f32>
%307 = torch.vtensor.literal(dense<0.001953125> : tensor<f32>) : !torch.vtensor<[],f32>
%308 = torch.vtensor.literal(dense<9.843750e-01> : tensor<f32>) : !torch.vtensor<[],f32>
%309 = torch.vtensor.literal(dense<0.9765625> : tensor<f32>) : !torch.vtensor<[],f32>
%310 = torch.vtensor.literal(dense<9.765625E-4> : tensor<f32>) : !torch.vtensor<[],f32>
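// QDQ pattern: each activation and weight below is quantized per-tensor to qint8 (torch dtype 12) with one of the scales above and zero point 0, then immediately dequantized back to f32 before the convolution.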
%none = torch.constant.none
%int12 = torch.constant.int 12
%311 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%312 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%313 = torch.aten.quantize_per_tensor %arg0, %311, %312, %int12 : !torch.vtensor<[32,3,224,224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,3,224,224],!torch.qint8>
%314 = torch.aten.int_repr %313 : !torch.vtensor<[32,3,224,224],!torch.qint8> -> !torch.vtensor<[32,3,224,224],si8>
%315 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%316 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%317 = torch.aten._make_per_tensor_quantized_tensor %314, %315, %316 : !torch.vtensor<[32,3,224,224],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,3,224,224],!torch.qint8>
%318 = torch.aten.dequantize.self %317 : !torch.vtensor<[32,3,224,224],!torch.qint8> -> !torch.vtensor<[32,3,224,224],f32>
%int12_0 = torch.constant.int 12
%319 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float
%320 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%321 = torch.aten.quantize_per_tensor %0, %319, %320, %int12_0 : !torch.vtensor<[32,3,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,3,3,3],!torch.qint8>
%322 = torch.aten.int_repr %321 : !torch.vtensor<[32,3,3,3],!torch.qint8> -> !torch.vtensor<[32,3,3,3],si8>
%323 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float
%324 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%325 = torch.aten._make_per_tensor_quantized_tensor %322, %323, %324 : !torch.vtensor<[32,3,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,3,3,3],!torch.qint8>
%326 = torch.aten.dequantize.self %325 : !torch.vtensor<[32,3,3,3],!torch.qint8> -> !torch.vtensor<[32,3,3,3],f32>
%int12_1 = torch.constant.int 12
%327 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%328 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%329 = torch.aten.quantize_per_tensor %1, %327, %328, %int12_1 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%330 = torch.aten.int_repr %329 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%331 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%332 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%333 = torch.aten._make_per_tensor_quantized_tensor %330, %331, %332 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%334 = torch.aten.dequantize.self %333 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int0 = torch.constant.int 0
%int0_2 = torch.constant.int 0
%int1 = torch.constant.int 1
%int1_3 = torch.constant.int 1
%int2 = torch.constant.int 2
%int2_4 = torch.constant.int 2
%int0_5 = torch.constant.int 0
%335 = torch.prim.ListConstruct %int0, %int0_2 : (!torch.int, !torch.int) -> !torch.list<int>
%336 = torch.prim.ListConstruct %int1, %int1_3 : (!torch.int, !torch.int) -> !torch.list<int>
%337 = torch.prim.ListConstruct %int2, %int2_4 : (!torch.int, !torch.int) -> !torch.list<int>
%338 = torch.prim.ListConstruct %int0_5, %int0_5 : (!torch.int, !torch.int) -> !torch.list<int>
%false = torch.constant.bool false
%int1_6 = torch.constant.int 1
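// Stem convolution 1: 3x3, stride 2, no padding: [32,3,224,224] -> [32,32,111,111], followed by ReLU.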
%339 = torch.aten.convolution %318, %326, %334, %337, %335, %336, %false, %338, %int1_6 : !torch.vtensor<[32,3,224,224],f32>, !torch.vtensor<[32,3,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,32,111,111],f32>
%340 = torch.aten.relu %339 : !torch.vtensor<[32,32,111,111],f32> -> !torch.vtensor<[32,32,111,111],f32>
%int12_7 = torch.constant.int 12
%341 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%342 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%343 = torch.aten.quantize_per_tensor %340, %341, %342, %int12_7 : !torch.vtensor<[32,32,111,111],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,111,111],!torch.qint8>
%344 = torch.aten.int_repr %343 : !torch.vtensor<[32,32,111,111],!torch.qint8> -> !torch.vtensor<[32,32,111,111],si8>
%345 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%346 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%347 = torch.aten._make_per_tensor_quantized_tensor %344, %345, %346 : !torch.vtensor<[32,32,111,111],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,111,111],!torch.qint8>
%348 = torch.aten.dequantize.self %347 : !torch.vtensor<[32,32,111,111],!torch.qint8> -> !torch.vtensor<[32,32,111,111],f32>
%int12_8 = torch.constant.int 12
%349 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float
%350 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%351 = torch.aten.quantize_per_tensor %2, %349, %350, %int12_8 : !torch.vtensor<[32,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%352 = torch.aten.int_repr %351 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],si8>
%353 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float
%354 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%355 = torch.aten._make_per_tensor_quantized_tensor %352, %353, %354 : !torch.vtensor<[32,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,3,3],!torch.qint8>
%356 = torch.aten.dequantize.self %355 : !torch.vtensor<[32,32,3,3],!torch.qint8> -> !torch.vtensor<[32,32,3,3],f32>
%int12_9 = torch.constant.int 12
%357 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%358 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%359 = torch.aten.quantize_per_tensor %3, %357, %358, %int12_9 : !torch.vtensor<[32],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%360 = torch.aten.int_repr %359 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],si8>
%361 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%362 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%363 = torch.aten._make_per_tensor_quantized_tensor %360, %361, %362 : !torch.vtensor<[32],si8>, !torch.float, !torch.int -> !torch.vtensor<[32],!torch.qint8>
%364 = torch.aten.dequantize.self %363 : !torch.vtensor<[32],!torch.qint8> -> !torch.vtensor<[32],f32>
%int0_10 = torch.constant.int 0
%int0_11 = torch.constant.int 0
%int1_12 = torch.constant.int 1
%int1_13 = torch.constant.int 1
%int1_14 = torch.constant.int 1
%int1_15 = torch.constant.int 1
%int0_16 = torch.constant.int 0
%365 = torch.prim.ListConstruct %int0_10, %int0_11 : (!torch.int, !torch.int) -> !torch.list<int>
%366 = torch.prim.ListConstruct %int1_12, %int1_13 : (!torch.int, !torch.int) -> !torch.list<int>
%367 = torch.prim.ListConstruct %int1_14, %int1_15 : (!torch.int, !torch.int) -> !torch.list<int>
%368 = torch.prim.ListConstruct %int0_16, %int0_16 : (!torch.int, !torch.int) -> !torch.list<int>
%false_17 = torch.constant.bool false
%int1_18 = torch.constant.int 1
%369 = torch.aten.convolution %348, %356, %364, %367, %365, %366, %false_17, %368, %int1_18 : !torch.vtensor<[32,32,111,111],f32>, !torch.vtensor<[32,32,3,3],f32>, !torch.vtensor<[32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,32,109,109],f32>
%370 = torch.aten.relu %369 : !torch.vtensor<[32,32,109,109],f32> -> !torch.vtensor<[32,32,109,109],f32>
%int12_19 = torch.constant.int 12
%371 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%372 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%373 = torch.aten.quantize_per_tensor %370, %371, %372, %int12_19 : !torch.vtensor<[32,32,109,109],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,32,109,109],!torch.qint8>
%374 = torch.aten.int_repr %373 : !torch.vtensor<[32,32,109,109],!torch.qint8> -> !torch.vtensor<[32,32,109,109],si8>
%375 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%376 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%377 = torch.aten._make_per_tensor_quantized_tensor %374, %375, %376 : !torch.vtensor<[32,32,109,109],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,32,109,109],!torch.qint8>
%378 = torch.aten.dequantize.self %377 : !torch.vtensor<[32,32,109,109],!torch.qint8> -> !torch.vtensor<[32,32,109,109],f32>
%int12_20 = torch.constant.int 12
%379 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float
%380 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%381 = torch.aten.quantize_per_tensor %4, %379, %380, %int12_20 : !torch.vtensor<[64,32,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,32,3,3],!torch.qint8>
%382 = torch.aten.int_repr %381 : !torch.vtensor<[64,32,3,3],!torch.qint8> -> !torch.vtensor<[64,32,3,3],si8>
%383 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float
%384 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%385 = torch.aten._make_per_tensor_quantized_tensor %382, %383, %384 : !torch.vtensor<[64,32,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,32,3,3],!torch.qint8>
%386 = torch.aten.dequantize.self %385 : !torch.vtensor<[64,32,3,3],!torch.qint8> -> !torch.vtensor<[64,32,3,3],f32>
%int12_21 = torch.constant.int 12
%387 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%388 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%389 = torch.aten.quantize_per_tensor %5, %387, %388, %int12_21 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%390 = torch.aten.int_repr %389 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%391 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%392 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%393 = torch.aten._make_per_tensor_quantized_tensor %390, %391, %392 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%394 = torch.aten.dequantize.self %393 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int1_22 = torch.constant.int 1
%int1_23 = torch.constant.int 1
%int1_24 = torch.constant.int 1
%int1_25 = torch.constant.int 1
%int1_26 = torch.constant.int 1
%int1_27 = torch.constant.int 1
%int0_28 = torch.constant.int 0
%395 = torch.prim.ListConstruct %int1_22, %int1_23 : (!torch.int, !torch.int) -> !torch.list<int>
%396 = torch.prim.ListConstruct %int1_24, %int1_25 : (!torch.int, !torch.int) -> !torch.list<int>
%397 = torch.prim.ListConstruct %int1_26, %int1_27 : (!torch.int, !torch.int) -> !torch.list<int>
%398 = torch.prim.ListConstruct %int0_28, %int0_28 : (!torch.int, !torch.int) -> !torch.list<int>
%false_29 = torch.constant.bool false
%int1_30 = torch.constant.int 1
%399 = torch.aten.convolution %378, %386, %394, %397, %395, %396, %false_29, %398, %int1_30 : !torch.vtensor<[32,32,109,109],f32>, !torch.vtensor<[64,32,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,109,109],f32>
%400 = torch.aten.relu %399 : !torch.vtensor<[32,64,109,109],f32> -> !torch.vtensor<[32,64,109,109],f32>
%int12_31 = torch.constant.int 12
%401 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%402 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%403 = torch.aten.quantize_per_tensor %400, %401, %402, %int12_31 : !torch.vtensor<[32,64,109,109],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,109,109],!torch.qint8>
%404 = torch.aten.int_repr %403 : !torch.vtensor<[32,64,109,109],!torch.qint8> -> !torch.vtensor<[32,64,109,109],si8>
%405 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%406 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%407 = torch.aten._make_per_tensor_quantized_tensor %404, %405, %406 : !torch.vtensor<[32,64,109,109],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,109,109],!torch.qint8>
%408 = torch.aten.dequantize.self %407 : !torch.vtensor<[32,64,109,109],!torch.qint8> -> !torch.vtensor<[32,64,109,109],f32>
%int3 = torch.constant.int 3
%int3_32 = torch.constant.int 3
%409 = torch.prim.ListConstruct %int3, %int3_32 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_33 = torch.constant.int 0
%int0_34 = torch.constant.int 0
%410 = torch.prim.ListConstruct %int0_33, %int0_34 : (!torch.int, !torch.int) -> !torch.list<int>
%int2_35 = torch.constant.int 2
%int2_36 = torch.constant.int 2
%411 = torch.prim.ListConstruct %int2_35, %int2_36 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_37 = torch.constant.int 1
%int1_38 = torch.constant.int 1
%412 = torch.prim.ListConstruct %int1_37, %int1_38 : (!torch.int, !torch.int) -> !torch.list<int>
%false_39 = torch.constant.bool false
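// 3x3 max pool, stride 2: [32,64,109,109] -> [32,64,54,54].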
%413 = torch.aten.max_pool2d %408, %409, %411, %410, %412, %false_39 : !torch.vtensor<[32,64,109,109],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[32,64,54,54],f32>
%int12_40 = torch.constant.int 12
%414 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float
%415 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%416 = torch.aten.quantize_per_tensor %6, %414, %415, %int12_40 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8>
%417 = torch.aten.int_repr %416 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8>
%418 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float
%419 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%420 = torch.aten._make_per_tensor_quantized_tensor %417, %418, %419 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8>
%421 = torch.aten.dequantize.self %420 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32>
%int12_41 = torch.constant.int 12
%422 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%423 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%424 = torch.aten.quantize_per_tensor %7, %422, %423, %int12_41 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8>
%425 = torch.aten.int_repr %424 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8>
%426 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%427 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%428 = torch.aten._make_per_tensor_quantized_tensor %425, %426, %427 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8>
%429 = torch.aten.dequantize.self %428 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32>
%int0_42 = torch.constant.int 0
%int0_43 = torch.constant.int 0
%int1_44 = torch.constant.int 1
%int1_45 = torch.constant.int 1
%int2_46 = torch.constant.int 2
%int2_47 = torch.constant.int 2
%int0_48 = torch.constant.int 0
%430 = torch.prim.ListConstruct %int0_42, %int0_43 : (!torch.int, !torch.int) -> !torch.list<int>
%431 = torch.prim.ListConstruct %int1_44, %int1_45 : (!torch.int, !torch.int) -> !torch.list<int>
%432 = torch.prim.ListConstruct %int2_46, %int2_47 : (!torch.int, !torch.int) -> !torch.list<int>
%433 = torch.prim.ListConstruct %int0_48, %int0_48 : (!torch.int, !torch.int) -> !torch.list<int>
%false_49 = torch.constant.bool false
%int1_50 = torch.constant.int 1
%434 = torch.aten.convolution %408, %421, %429, %432, %430, %431, %false_49, %433, %int1_50 : !torch.vtensor<[32,64,109,109],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,54,54],f32>
%435 = torch.aten.relu %434 : !torch.vtensor<[32,96,54,54],f32> -> !torch.vtensor<[32,96,54,54],f32>
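// Concatenate the max-pooled branch and the 96-channel conv branch along channels (dim 1) to form the 160-channel feature map.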
%436 = torch.prim.ListConstruct %413, %435 : (!torch.vtensor<[32,64,54,54],f32>, !torch.vtensor<[32,96,54,54],f32>) -> !torch.list<vtensor>
%int1_51 = torch.constant.int 1
%437 = torch.aten.cat %436, %int1_51 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,160,54,54],f32>
%int12_52 = torch.constant.int 12
%438 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%439 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%440 = torch.aten.quantize_per_tensor %437, %438, %439, %int12_52 : !torch.vtensor<[32,160,54,54],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,160,54,54],!torch.qint8>
%441 = torch.aten.int_repr %440 : !torch.vtensor<[32,160,54,54],!torch.qint8> -> !torch.vtensor<[32,160,54,54],si8>
%442 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float
%443 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%444 = torch.aten._make_per_tensor_quantized_tensor %441, %442, %443 : !torch.vtensor<[32,160,54,54],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,160,54,54],!torch.qint8>
%445 = torch.aten.dequantize.self %444 : !torch.vtensor<[32,160,54,54],!torch.qint8> -> !torch.vtensor<[32,160,54,54],f32>
%int12_53 = torch.constant.int 12
%446 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float
%447 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%448 = torch.aten.quantize_per_tensor %8, %446, %447, %int12_53 : !torch.vtensor<[64,160,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,160,1,1],!torch.qint8>
%449 = torch.aten.int_repr %448 : !torch.vtensor<[64,160,1,1],!torch.qint8> -> !torch.vtensor<[64,160,1,1],si8>
%450 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float
%451 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%452 = torch.aten._make_per_tensor_quantized_tensor %449, %450, %451 : !torch.vtensor<[64,160,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,160,1,1],!torch.qint8>
%453 = torch.aten.dequantize.self %452 : !torch.vtensor<[64,160,1,1],!torch.qint8> -> !torch.vtensor<[64,160,1,1],f32>
%int12_54 = torch.constant.int 12
%454 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float
%455 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%456 = torch.aten.quantize_per_tensor %9, %454, %455, %int12_54 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%457 = torch.aten.int_repr %456 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8>
%458 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float
%459 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%460 = torch.aten._make_per_tensor_quantized_tensor %457, %458, %459 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8>
%461 = torch.aten.dequantize.self %460 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32>
%int0_55 = torch.constant.int 0
%int0_56 = torch.constant.int 0
%int1_57 = torch.constant.int 1
%int1_58 = torch.constant.int 1
%int1_59 = torch.constant.int 1
%int1_60 = torch.constant.int 1
%int0_61 = torch.constant.int 0
%462 = torch.prim.ListConstruct %int0_55, %int0_56 : (!torch.int, !torch.int) -> !torch.list<int>
%463 = torch.prim.ListConstruct %int1_57, %int1_58 : (!torch.int, !torch.int) -> !torch.list<int>
%464 = torch.prim.ListConstruct %int1_59, %int1_60 : (!torch.int, !torch.int) -> !torch.list<int>
%465 = torch.prim.ListConstruct %int0_61, %int0_61 : (!torch.int, !torch.int) -> !torch.list<int>
%false_62 = torch.constant.bool false
%int1_63 = torch.constant.int 1
%466 = torch.aten.convolution %445, %453, %461, %464, %462, %463, %false_62, %465, %int1_63 : !torch.vtensor<[32,160,54,54],f32>, !torch.vtensor<[64,160,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,54,54],f32>
%467 = torch.aten.relu %466 : !torch.vtensor<[32,64,54,54],f32> -> !torch.vtensor<[32,64,54,54],f32>
%int12_64 = torch.constant.int 12
%468 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%469 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%470 = torch.aten.quantize_per_tensor %467, %468, %469, %int12_64 : !torch.vtensor<[32,64,54,54],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,54,54],!torch.qint8>
%471 = torch.aten.int_repr %470 : !torch.vtensor<[32,64,54,54],!torch.qint8> -> !torch.vtensor<[32,64,54,54],si8>
%472 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%473 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%474 = torch.aten._make_per_tensor_quantized_tensor %471, %472, %473 : !torch.vtensor<[32,64,54,54],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,54,54],!torch.qint8>
%475 = torch.aten.dequantize.self %474 : !torch.vtensor<[32,64,54,54],!torch.qint8> -> !torch.vtensor<[32,64,54,54],f32>
%int12_65 = torch.constant.int 12
%476 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float
%477 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%478 = torch.aten.quantize_per_tensor %10, %476, %477, %int12_65 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8>
%479 = torch.aten.int_repr %478 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8>
%480 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float
%481 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%482 = torch.aten._make_per_tensor_quantized_tensor %479, %480, %481 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8>
%483 = torch.aten.dequantize.self %482 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32>
%int12_66 = torch.constant.int 12
%484 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%485 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%486 = torch.aten.quantize_per_tensor %11, %484, %485, %int12_66 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8>
%487 = torch.aten.int_repr %486 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8>
%488 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float
%489 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int
%490 = torch.aten._make_per_tensor_quantized_tensor %487, %488, %489 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8>
| %491 = torch.aten.dequantize.self %490 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_67 = torch.constant.int 0 | |
| %int0_68 = torch.constant.int 0 | |
| %int1_69 = torch.constant.int 1 | |
| %int1_70 = torch.constant.int 1 | |
| %int1_71 = torch.constant.int 1 | |
| %int1_72 = torch.constant.int 1 | |
| %int0_73 = torch.constant.int 0 | |
| %492 = torch.prim.ListConstruct %int0_67, %int0_68 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %493 = torch.prim.ListConstruct %int1_69, %int1_70 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %494 = torch.prim.ListConstruct %int1_71, %int1_72 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %495 = torch.prim.ListConstruct %int0_73, %int0_73 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_74 = torch.constant.bool false | |
| %int1_75 = torch.constant.int 1 | |
| %496 = torch.aten.convolution %475, %483, %491, %494, %492, %493, %false_74, %495, %int1_75 : !torch.vtensor<[32,64,54,54],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,52,52],f32> | |
| %497 = torch.aten.relu %496 : !torch.vtensor<[32,96,52,52],f32> -> !torch.vtensor<[32,96,52,52],f32> | |
| %int12_76 = torch.constant.int 12 | |
| %498 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %499 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %500 = torch.aten.quantize_per_tensor %12, %498, %499, %int12_76 : !torch.vtensor<[64,160,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,160,1,1],!torch.qint8> | |
| %501 = torch.aten.int_repr %500 : !torch.vtensor<[64,160,1,1],!torch.qint8> -> !torch.vtensor<[64,160,1,1],si8> | |
| %502 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %503 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %504 = torch.aten._make_per_tensor_quantized_tensor %501, %502, %503 : !torch.vtensor<[64,160,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,160,1,1],!torch.qint8> | |
| %505 = torch.aten.dequantize.self %504 : !torch.vtensor<[64,160,1,1],!torch.qint8> -> !torch.vtensor<[64,160,1,1],f32> | |
| %int12_77 = torch.constant.int 12 | |
| %506 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %507 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %508 = torch.aten.quantize_per_tensor %13, %506, %507, %int12_77 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %509 = torch.aten.int_repr %508 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %510 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %511 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %512 = torch.aten._make_per_tensor_quantized_tensor %509, %510, %511 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %513 = torch.aten.dequantize.self %512 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_78 = torch.constant.int 0 | |
| %int0_79 = torch.constant.int 0 | |
| %int1_80 = torch.constant.int 1 | |
| %int1_81 = torch.constant.int 1 | |
| %int1_82 = torch.constant.int 1 | |
| %int1_83 = torch.constant.int 1 | |
| %int0_84 = torch.constant.int 0 | |
| %514 = torch.prim.ListConstruct %int0_78, %int0_79 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %515 = torch.prim.ListConstruct %int1_80, %int1_81 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %516 = torch.prim.ListConstruct %int1_82, %int1_83 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %517 = torch.prim.ListConstruct %int0_84, %int0_84 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_85 = torch.constant.bool false | |
| %int1_86 = torch.constant.int 1 | |
| %518 = torch.aten.convolution %445, %505, %513, %516, %514, %515, %false_85, %517, %int1_86 : !torch.vtensor<[32,160,54,54],f32>, !torch.vtensor<[64,160,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,54,54],f32> | |
| %519 = torch.aten.relu %518 : !torch.vtensor<[32,64,54,54],f32> -> !torch.vtensor<[32,64,54,54],f32> | |
| %int12_87 = torch.constant.int 12 | |
| %520 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %521 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %522 = torch.aten.quantize_per_tensor %519, %520, %521, %int12_87 : !torch.vtensor<[32,64,54,54],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,54,54],!torch.qint8> | |
| %523 = torch.aten.int_repr %522 : !torch.vtensor<[32,64,54,54],!torch.qint8> -> !torch.vtensor<[32,64,54,54],si8> | |
| %524 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %525 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %526 = torch.aten._make_per_tensor_quantized_tensor %523, %524, %525 : !torch.vtensor<[32,64,54,54],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,54,54],!torch.qint8> | |
| %527 = torch.aten.dequantize.self %526 : !torch.vtensor<[32,64,54,54],!torch.qint8> -> !torch.vtensor<[32,64,54,54],f32> | |
| %int12_88 = torch.constant.int 12 | |
| %528 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %529 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %530 = torch.aten.quantize_per_tensor %14, %528, %529, %int12_88 : !torch.vtensor<[64,64,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,1,7],!torch.qint8> | |
| %531 = torch.aten.int_repr %530 : !torch.vtensor<[64,64,1,7],!torch.qint8> -> !torch.vtensor<[64,64,1,7],si8> | |
| %532 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %533 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %534 = torch.aten._make_per_tensor_quantized_tensor %531, %532, %533 : !torch.vtensor<[64,64,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,1,7],!torch.qint8> | |
| %535 = torch.aten.dequantize.self %534 : !torch.vtensor<[64,64,1,7],!torch.qint8> -> !torch.vtensor<[64,64,1,7],f32> | |
| %int12_89 = torch.constant.int 12 | |
| %536 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %537 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %538 = torch.aten.quantize_per_tensor %15, %536, %537, %int12_89 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %539 = torch.aten.int_repr %538 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %540 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %541 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %542 = torch.aten._make_per_tensor_quantized_tensor %539, %540, %541 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %543 = torch.aten.dequantize.self %542 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_90 = torch.constant.int 0 | |
| %int3_91 = torch.constant.int 3 | |
| %int1_92 = torch.constant.int 1 | |
| %int1_93 = torch.constant.int 1 | |
| %int1_94 = torch.constant.int 1 | |
| %int1_95 = torch.constant.int 1 | |
| %int0_96 = torch.constant.int 0 | |
| %544 = torch.prim.ListConstruct %int0_90, %int3_91 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %545 = torch.prim.ListConstruct %int1_92, %int1_93 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %546 = torch.prim.ListConstruct %int1_94, %int1_95 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %547 = torch.prim.ListConstruct %int0_96, %int0_96 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_97 = torch.constant.bool false | |
| %int1_98 = torch.constant.int 1 | |
| %548 = torch.aten.convolution %527, %535, %543, %546, %544, %545, %false_97, %547, %int1_98 : !torch.vtensor<[32,64,54,54],f32>, !torch.vtensor<[64,64,1,7],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,54,54],f32> | |
| %549 = torch.aten.relu %548 : !torch.vtensor<[32,64,54,54],f32> -> !torch.vtensor<[32,64,54,54],f32> | |
| %int12_99 = torch.constant.int 12 | |
| %550 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %551 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %552 = torch.aten.quantize_per_tensor %549, %550, %551, %int12_99 : !torch.vtensor<[32,64,54,54],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,54,54],!torch.qint8> | |
| %553 = torch.aten.int_repr %552 : !torch.vtensor<[32,64,54,54],!torch.qint8> -> !torch.vtensor<[32,64,54,54],si8> | |
| %554 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %555 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %556 = torch.aten._make_per_tensor_quantized_tensor %553, %554, %555 : !torch.vtensor<[32,64,54,54],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,54,54],!torch.qint8> | |
| %557 = torch.aten.dequantize.self %556 : !torch.vtensor<[32,64,54,54],!torch.qint8> -> !torch.vtensor<[32,64,54,54],f32> | |
| %int12_100 = torch.constant.int 12 | |
| %558 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %559 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %560 = torch.aten.quantize_per_tensor %16, %558, %559, %int12_100 : !torch.vtensor<[64,64,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,64,7,1],!torch.qint8> | |
| %561 = torch.aten.int_repr %560 : !torch.vtensor<[64,64,7,1],!torch.qint8> -> !torch.vtensor<[64,64,7,1],si8> | |
| %562 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %563 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %564 = torch.aten._make_per_tensor_quantized_tensor %561, %562, %563 : !torch.vtensor<[64,64,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,64,7,1],!torch.qint8> | |
| %565 = torch.aten.dequantize.self %564 : !torch.vtensor<[64,64,7,1],!torch.qint8> -> !torch.vtensor<[64,64,7,1],f32> | |
| %int12_101 = torch.constant.int 12 | |
| %566 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %567 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %568 = torch.aten.quantize_per_tensor %17, %566, %567, %int12_101 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %569 = torch.aten.int_repr %568 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %570 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %571 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %572 = torch.aten._make_per_tensor_quantized_tensor %569, %570, %571 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %573 = torch.aten.dequantize.self %572 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int3_102 = torch.constant.int 3 | |
| %int0_103 = torch.constant.int 0 | |
| %int1_104 = torch.constant.int 1 | |
| %int1_105 = torch.constant.int 1 | |
| %int1_106 = torch.constant.int 1 | |
| %int1_107 = torch.constant.int 1 | |
| %int0_108 = torch.constant.int 0 | |
| %574 = torch.prim.ListConstruct %int3_102, %int0_103 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %575 = torch.prim.ListConstruct %int1_104, %int1_105 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %576 = torch.prim.ListConstruct %int1_106, %int1_107 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %577 = torch.prim.ListConstruct %int0_108, %int0_108 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_109 = torch.constant.bool false | |
| %int1_110 = torch.constant.int 1 | |
| %578 = torch.aten.convolution %557, %565, %573, %576, %574, %575, %false_109, %577, %int1_110 : !torch.vtensor<[32,64,54,54],f32>, !torch.vtensor<[64,64,7,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,54,54],f32> | |
| %579 = torch.aten.relu %578 : !torch.vtensor<[32,64,54,54],f32> -> !torch.vtensor<[32,64,54,54],f32> | |
| %int12_111 = torch.constant.int 12 | |
| %580 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %581 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %582 = torch.aten.quantize_per_tensor %579, %580, %581, %int12_111 : !torch.vtensor<[32,64,54,54],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,54,54],!torch.qint8> | |
| %583 = torch.aten.int_repr %582 : !torch.vtensor<[32,64,54,54],!torch.qint8> -> !torch.vtensor<[32,64,54,54],si8> | |
| %584 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %585 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %586 = torch.aten._make_per_tensor_quantized_tensor %583, %584, %585 : !torch.vtensor<[32,64,54,54],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,54,54],!torch.qint8> | |
| %587 = torch.aten.dequantize.self %586 : !torch.vtensor<[32,64,54,54],!torch.qint8> -> !torch.vtensor<[32,64,54,54],f32> | |
| %int12_112 = torch.constant.int 12 | |
| %588 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %589 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %590 = torch.aten.quantize_per_tensor %18, %588, %589, %int12_112 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %591 = torch.aten.int_repr %590 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8> | |
| %592 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %593 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %594 = torch.aten._make_per_tensor_quantized_tensor %591, %592, %593 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %595 = torch.aten.dequantize.self %594 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32> | |
| %int12_113 = torch.constant.int 12 | |
| %596 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %597 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %598 = torch.aten.quantize_per_tensor %19, %596, %597, %int12_113 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %599 = torch.aten.int_repr %598 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %600 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %601 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %602 = torch.aten._make_per_tensor_quantized_tensor %599, %600, %601 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %603 = torch.aten.dequantize.self %602 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_114 = torch.constant.int 0 | |
| %int0_115 = torch.constant.int 0 | |
| %int1_116 = torch.constant.int 1 | |
| %int1_117 = torch.constant.int 1 | |
| %int1_118 = torch.constant.int 1 | |
| %int1_119 = torch.constant.int 1 | |
| %int0_120 = torch.constant.int 0 | |
| %604 = torch.prim.ListConstruct %int0_114, %int0_115 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %605 = torch.prim.ListConstruct %int1_116, %int1_117 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %606 = torch.prim.ListConstruct %int1_118, %int1_119 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %607 = torch.prim.ListConstruct %int0_120, %int0_120 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_121 = torch.constant.bool false | |
| %int1_122 = torch.constant.int 1 | |
| %608 = torch.aten.convolution %587, %595, %603, %606, %604, %605, %false_121, %607, %int1_122 : !torch.vtensor<[32,64,54,54],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,52,52],f32> | |
| %609 = torch.aten.relu %608 : !torch.vtensor<[32,96,52,52],f32> -> !torch.vtensor<[32,96,52,52],f32> | |
| %610 = torch.prim.ListConstruct %497, %609 : (!torch.vtensor<[32,96,52,52],f32>, !torch.vtensor<[32,96,52,52],f32>) -> !torch.list<vtensor> | |
| %int1_123 = torch.constant.int 1 | |
| %611 = torch.aten.cat %610, %int1_123 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,192,52,52],f32> | |
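| // The [32,192,52,52] concatenation is requantized and downsampled by a stride-2 3x3 conv (192->192) and, in parallel, a 3x3 stride-2 max-pool; the two [32,192,25,25] results are concatenated to [32,384,25,25]. | |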
| %int12_124 = torch.constant.int 12 | |
| %612 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %613 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %614 = torch.aten.quantize_per_tensor %611, %612, %613, %int12_124 : !torch.vtensor<[32,192,52,52],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,52,52],!torch.qint8> | |
| %615 = torch.aten.int_repr %614 : !torch.vtensor<[32,192,52,52],!torch.qint8> -> !torch.vtensor<[32,192,52,52],si8> | |
| %616 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %617 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %618 = torch.aten._make_per_tensor_quantized_tensor %615, %616, %617 : !torch.vtensor<[32,192,52,52],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,52,52],!torch.qint8> | |
| %619 = torch.aten.dequantize.self %618 : !torch.vtensor<[32,192,52,52],!torch.qint8> -> !torch.vtensor<[32,192,52,52],f32> | |
| %int12_125 = torch.constant.int 12 | |
| %620 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %621 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %622 = torch.aten.quantize_per_tensor %20, %620, %621, %int12_125 : !torch.vtensor<[192,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,192,3,3],!torch.qint8> | |
| %623 = torch.aten.int_repr %622 : !torch.vtensor<[192,192,3,3],!torch.qint8> -> !torch.vtensor<[192,192,3,3],si8> | |
| %624 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %625 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %626 = torch.aten._make_per_tensor_quantized_tensor %623, %624, %625 : !torch.vtensor<[192,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,192,3,3],!torch.qint8> | |
| %627 = torch.aten.dequantize.self %626 : !torch.vtensor<[192,192,3,3],!torch.qint8> -> !torch.vtensor<[192,192,3,3],f32> | |
| %int12_126 = torch.constant.int 12 | |
| %628 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %629 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %630 = torch.aten.quantize_per_tensor %21, %628, %629, %int12_126 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %631 = torch.aten.int_repr %630 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %632 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %633 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %634 = torch.aten._make_per_tensor_quantized_tensor %631, %632, %633 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %635 = torch.aten.dequantize.self %634 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_127 = torch.constant.int 0 | |
| %int0_128 = torch.constant.int 0 | |
| %int1_129 = torch.constant.int 1 | |
| %int1_130 = torch.constant.int 1 | |
| %int2_131 = torch.constant.int 2 | |
| %int2_132 = torch.constant.int 2 | |
| %int0_133 = torch.constant.int 0 | |
| %636 = torch.prim.ListConstruct %int0_127, %int0_128 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %637 = torch.prim.ListConstruct %int1_129, %int1_130 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %638 = torch.prim.ListConstruct %int2_131, %int2_132 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %639 = torch.prim.ListConstruct %int0_133, %int0_133 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_134 = torch.constant.bool false | |
| %int1_135 = torch.constant.int 1 | |
| %640 = torch.aten.convolution %619, %627, %635, %638, %636, %637, %false_134, %639, %int1_135 : !torch.vtensor<[32,192,52,52],f32>, !torch.vtensor<[192,192,3,3],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,25,25],f32> | |
| %641 = torch.aten.relu %640 : !torch.vtensor<[32,192,25,25],f32> -> !torch.vtensor<[32,192,25,25],f32> | |
| %int3_136 = torch.constant.int 3 | |
| %int3_137 = torch.constant.int 3 | |
| %642 = torch.prim.ListConstruct %int3_136, %int3_137 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %int0_138 = torch.constant.int 0 | |
| %int0_139 = torch.constant.int 0 | |
| %643 = torch.prim.ListConstruct %int0_138, %int0_139 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %int2_140 = torch.constant.int 2 | |
| %int2_141 = torch.constant.int 2 | |
| %644 = torch.prim.ListConstruct %int2_140, %int2_141 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %int1_142 = torch.constant.int 1 | |
| %int1_143 = torch.constant.int 1 | |
| %645 = torch.prim.ListConstruct %int1_142, %int1_143 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_144 = torch.constant.bool false | |
| %646 = torch.aten.max_pool2d %619, %642, %644, %643, %645, %false_144 : !torch.vtensor<[32,192,52,52],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[32,192,25,25],f32> | |
| %647 = torch.prim.ListConstruct %641, %646 : (!torch.vtensor<[32,192,25,25],f32>, !torch.vtensor<[32,192,25,25],f32>) -> !torch.list<vtensor> | |
| %int1_145 = torch.constant.int 1 | |
| %648 = torch.aten.cat %647, %int1_145 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,384,25,25],f32> | |
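| // Four parallel branches operate on the requantized [32,384,25,25] tensor: a 1x1 conv (384->96); a 1x1 conv (384->64) followed by a 3x3 conv (64->96); a 1x1 conv (384->64) followed by two 3x3 convs (64->96, 96->96); and an average-pool branch ending in a 1x1 conv (384->96). Their outputs are concatenated back to 384 channels. | |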
| %int12_146 = torch.constant.int 12 | |
| %649 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %650 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %651 = torch.aten.quantize_per_tensor %648, %649, %650, %int12_146 : !torch.vtensor<[32,384,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %652 = torch.aten.int_repr %651 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],si8> | |
| %653 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %654 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %655 = torch.aten._make_per_tensor_quantized_tensor %652, %653, %654 : !torch.vtensor<[32,384,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %656 = torch.aten.dequantize.self %655 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_147 = torch.constant.int 12 | |
| %657 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %658 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %659 = torch.aten.quantize_per_tensor %22, %657, %658, %int12_147 : !torch.vtensor<[96,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %660 = torch.aten.int_repr %659 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],si8> | |
| %661 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %662 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %663 = torch.aten._make_per_tensor_quantized_tensor %660, %661, %662 : !torch.vtensor<[96,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %664 = torch.aten.dequantize.self %663 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],f32> | |
| %int12_148 = torch.constant.int 12 | |
| %665 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %666 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %667 = torch.aten.quantize_per_tensor %23, %665, %666, %int12_148 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %668 = torch.aten.int_repr %667 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %669 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %670 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %671 = torch.aten._make_per_tensor_quantized_tensor %668, %669, %670 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %672 = torch.aten.dequantize.self %671 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_149 = torch.constant.int 0 | |
| %int0_150 = torch.constant.int 0 | |
| %int1_151 = torch.constant.int 1 | |
| %int1_152 = torch.constant.int 1 | |
| %int1_153 = torch.constant.int 1 | |
| %int1_154 = torch.constant.int 1 | |
| %int0_155 = torch.constant.int 0 | |
| %673 = torch.prim.ListConstruct %int0_149, %int0_150 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %674 = torch.prim.ListConstruct %int1_151, %int1_152 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %675 = torch.prim.ListConstruct %int1_153, %int1_154 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %676 = torch.prim.ListConstruct %int0_155, %int0_155 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_156 = torch.constant.bool false | |
| %int1_157 = torch.constant.int 1 | |
| %677 = torch.aten.convolution %656, %664, %672, %675, %673, %674, %false_156, %676, %int1_157 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[96,384,1,1],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %678 = torch.aten.relu %677 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_158 = torch.constant.int 12 | |
| %679 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %680 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %681 = torch.aten.quantize_per_tensor %24, %679, %680, %int12_158 : !torch.vtensor<[64,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %682 = torch.aten.int_repr %681 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],si8> | |
| %683 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %684 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %685 = torch.aten._make_per_tensor_quantized_tensor %682, %683, %684 : !torch.vtensor<[64,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %686 = torch.aten.dequantize.self %685 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],f32> | |
| %int12_159 = torch.constant.int 12 | |
| %687 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %688 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %689 = torch.aten.quantize_per_tensor %25, %687, %688, %int12_159 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %690 = torch.aten.int_repr %689 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %691 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %692 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %693 = torch.aten._make_per_tensor_quantized_tensor %690, %691, %692 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %694 = torch.aten.dequantize.self %693 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_160 = torch.constant.int 0 | |
| %int0_161 = torch.constant.int 0 | |
| %int1_162 = torch.constant.int 1 | |
| %int1_163 = torch.constant.int 1 | |
| %int1_164 = torch.constant.int 1 | |
| %int1_165 = torch.constant.int 1 | |
| %int0_166 = torch.constant.int 0 | |
| %695 = torch.prim.ListConstruct %int0_160, %int0_161 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %696 = torch.prim.ListConstruct %int1_162, %int1_163 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %697 = torch.prim.ListConstruct %int1_164, %int1_165 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %698 = torch.prim.ListConstruct %int0_166, %int0_166 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_167 = torch.constant.bool false | |
| %int1_168 = torch.constant.int 1 | |
| %699 = torch.aten.convolution %656, %686, %694, %697, %695, %696, %false_167, %698, %int1_168 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[64,384,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,25,25],f32> | |
| %700 = torch.aten.relu %699 : !torch.vtensor<[32,64,25,25],f32> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_169 = torch.constant.int 12 | |
| %701 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %702 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %703 = torch.aten.quantize_per_tensor %700, %701, %702, %int12_169 : !torch.vtensor<[32,64,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %704 = torch.aten.int_repr %703 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],si8> | |
| %705 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %706 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %707 = torch.aten._make_per_tensor_quantized_tensor %704, %705, %706 : !torch.vtensor<[32,64,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %708 = torch.aten.dequantize.self %707 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_170 = torch.constant.int 12 | |
| %709 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %710 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %711 = torch.aten.quantize_per_tensor %26, %709, %710, %int12_170 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %712 = torch.aten.int_repr %711 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8> | |
| %713 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %714 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %715 = torch.aten._make_per_tensor_quantized_tensor %712, %713, %714 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %716 = torch.aten.dequantize.self %715 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32> | |
| %int12_171 = torch.constant.int 12 | |
| %717 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %718 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %719 = torch.aten.quantize_per_tensor %27, %717, %718, %int12_171 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %720 = torch.aten.int_repr %719 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %721 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %722 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %723 = torch.aten._make_per_tensor_quantized_tensor %720, %721, %722 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %724 = torch.aten.dequantize.self %723 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_172 = torch.constant.int 1 | |
| %int1_173 = torch.constant.int 1 | |
| %int1_174 = torch.constant.int 1 | |
| %int1_175 = torch.constant.int 1 | |
| %int1_176 = torch.constant.int 1 | |
| %int1_177 = torch.constant.int 1 | |
| %int0_178 = torch.constant.int 0 | |
| %725 = torch.prim.ListConstruct %int1_172, %int1_173 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %726 = torch.prim.ListConstruct %int1_174, %int1_175 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %727 = torch.prim.ListConstruct %int1_176, %int1_177 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %728 = torch.prim.ListConstruct %int0_178, %int0_178 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_179 = torch.constant.bool false | |
| %int1_180 = torch.constant.int 1 | |
| %729 = torch.aten.convolution %708, %716, %724, %727, %725, %726, %false_179, %728, %int1_180 : !torch.vtensor<[32,64,25,25],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %730 = torch.aten.relu %729 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_181 = torch.constant.int 12 | |
| %731 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %732 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %733 = torch.aten.quantize_per_tensor %28, %731, %732, %int12_181 : !torch.vtensor<[64,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %734 = torch.aten.int_repr %733 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],si8> | |
| %735 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %736 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %737 = torch.aten._make_per_tensor_quantized_tensor %734, %735, %736 : !torch.vtensor<[64,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %738 = torch.aten.dequantize.self %737 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],f32> | |
| %int12_182 = torch.constant.int 12 | |
| %739 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %740 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %741 = torch.aten.quantize_per_tensor %29, %739, %740, %int12_182 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %742 = torch.aten.int_repr %741 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %743 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %744 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %745 = torch.aten._make_per_tensor_quantized_tensor %742, %743, %744 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %746 = torch.aten.dequantize.self %745 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_183 = torch.constant.int 0 | |
| %int0_184 = torch.constant.int 0 | |
| %int1_185 = torch.constant.int 1 | |
| %int1_186 = torch.constant.int 1 | |
| %int1_187 = torch.constant.int 1 | |
| %int1_188 = torch.constant.int 1 | |
| %int0_189 = torch.constant.int 0 | |
| %747 = torch.prim.ListConstruct %int0_183, %int0_184 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %748 = torch.prim.ListConstruct %int1_185, %int1_186 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %749 = torch.prim.ListConstruct %int1_187, %int1_188 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %750 = torch.prim.ListConstruct %int0_189, %int0_189 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_190 = torch.constant.bool false | |
| %int1_191 = torch.constant.int 1 | |
| %751 = torch.aten.convolution %656, %738, %746, %749, %747, %748, %false_190, %750, %int1_191 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[64,384,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,25,25],f32> | |
| %752 = torch.aten.relu %751 : !torch.vtensor<[32,64,25,25],f32> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_192 = torch.constant.int 12 | |
| %753 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %754 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %755 = torch.aten.quantize_per_tensor %752, %753, %754, %int12_192 : !torch.vtensor<[32,64,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %756 = torch.aten.int_repr %755 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],si8> | |
| %757 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %758 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %759 = torch.aten._make_per_tensor_quantized_tensor %756, %757, %758 : !torch.vtensor<[32,64,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %760 = torch.aten.dequantize.self %759 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_193 = torch.constant.int 12 | |
| %761 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %762 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %763 = torch.aten.quantize_per_tensor %30, %761, %762, %int12_193 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %764 = torch.aten.int_repr %763 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8> | |
| %765 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %766 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %767 = torch.aten._make_per_tensor_quantized_tensor %764, %765, %766 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %768 = torch.aten.dequantize.self %767 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32> | |
| %int12_194 = torch.constant.int 12 | |
| %769 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %770 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %771 = torch.aten.quantize_per_tensor %31, %769, %770, %int12_194 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %772 = torch.aten.int_repr %771 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %773 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %774 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %775 = torch.aten._make_per_tensor_quantized_tensor %772, %773, %774 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %776 = torch.aten.dequantize.self %775 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_195 = torch.constant.int 1 | |
| %int1_196 = torch.constant.int 1 | |
| %int1_197 = torch.constant.int 1 | |
| %int1_198 = torch.constant.int 1 | |
| %int1_199 = torch.constant.int 1 | |
| %int1_200 = torch.constant.int 1 | |
| %int0_201 = torch.constant.int 0 | |
| %777 = torch.prim.ListConstruct %int1_195, %int1_196 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %778 = torch.prim.ListConstruct %int1_197, %int1_198 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %779 = torch.prim.ListConstruct %int1_199, %int1_200 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %780 = torch.prim.ListConstruct %int0_201, %int0_201 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_202 = torch.constant.bool false | |
| %int1_203 = torch.constant.int 1 | |
| %781 = torch.aten.convolution %760, %768, %776, %779, %777, %778, %false_202, %780, %int1_203 : !torch.vtensor<[32,64,25,25],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %782 = torch.aten.relu %781 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_204 = torch.constant.int 12 | |
| %783 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %784 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %785 = torch.aten.quantize_per_tensor %782, %783, %784, %int12_204 : !torch.vtensor<[32,96,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,25,25],!torch.qint8> | |
| %786 = torch.aten.int_repr %785 : !torch.vtensor<[32,96,25,25],!torch.qint8> -> !torch.vtensor<[32,96,25,25],si8> | |
| %787 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %788 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %789 = torch.aten._make_per_tensor_quantized_tensor %786, %787, %788 : !torch.vtensor<[32,96,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,25,25],!torch.qint8> | |
| %790 = torch.aten.dequantize.self %789 : !torch.vtensor<[32,96,25,25],!torch.qint8> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_205 = torch.constant.int 12 | |
| %791 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %792 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %793 = torch.aten.quantize_per_tensor %32, %791, %792, %int12_205 : !torch.vtensor<[96,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,96,3,3],!torch.qint8> | |
| %794 = torch.aten.int_repr %793 : !torch.vtensor<[96,96,3,3],!torch.qint8> -> !torch.vtensor<[96,96,3,3],si8> | |
| %795 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %796 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %797 = torch.aten._make_per_tensor_quantized_tensor %794, %795, %796 : !torch.vtensor<[96,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,96,3,3],!torch.qint8> | |
| %798 = torch.aten.dequantize.self %797 : !torch.vtensor<[96,96,3,3],!torch.qint8> -> !torch.vtensor<[96,96,3,3],f32> | |
| %int12_206 = torch.constant.int 12 | |
| %799 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %800 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %801 = torch.aten.quantize_per_tensor %33, %799, %800, %int12_206 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %802 = torch.aten.int_repr %801 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %803 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %804 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %805 = torch.aten._make_per_tensor_quantized_tensor %802, %803, %804 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %806 = torch.aten.dequantize.self %805 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_207 = torch.constant.int 1 | |
| %int1_208 = torch.constant.int 1 | |
| %int1_209 = torch.constant.int 1 | |
| %int1_210 = torch.constant.int 1 | |
| %int1_211 = torch.constant.int 1 | |
| %int1_212 = torch.constant.int 1 | |
| %int0_213 = torch.constant.int 0 | |
| %807 = torch.prim.ListConstruct %int1_207, %int1_208 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %808 = torch.prim.ListConstruct %int1_209, %int1_210 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %809 = torch.prim.ListConstruct %int1_211, %int1_212 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %810 = torch.prim.ListConstruct %int0_213, %int0_213 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_214 = torch.constant.bool false | |
| %int1_215 = torch.constant.int 1 | |
| %811 = torch.aten.convolution %790, %798, %806, %809, %807, %808, %false_214, %810, %int1_215 : !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[96,96,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %812 = torch.aten.relu %811 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
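| // Average-pool branch: a 3x3 avg-pool with stride 1 and padding 1 over the [32,384,25,25] input, multiplied elementwise by the scalar tensor %308, then requantized and passed to a 1x1 conv (384->96). | |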
| %int3_216 = torch.constant.int 3 | |
| %int3_217 = torch.constant.int 3 | |
| %int1_218 = torch.constant.int 1 | |
| %int1_219 = torch.constant.int 1 | |
| %int1_220 = torch.constant.int 1 | |
| %int1_221 = torch.constant.int 1 | |
| %int1_222 = torch.constant.int 1 | |
| %int1_223 = torch.constant.int 1 | |
| %813 = torch.prim.ListConstruct %int3_216, %int3_217 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %814 = torch.prim.ListConstruct %int1_218, %int1_219, %int1_220, %int1_221 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %815 = torch.prim.ListConstruct %int1_222, %int1_223 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_224 = torch.constant.bool false | |
| %false_225 = torch.constant.bool false | |
| %none_226 = torch.constant.none | |
| %816 = torch.aten.avg_pool2d %656, %813, %815, %814, %false_224, %false_225, %none_226 : !torch.vtensor<[32,384,25,25],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,384,25,25],f32> | |
| %817 = torch.aten.mul.Tensor %816, %308 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_227 = torch.constant.int 12 | |
| %818 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %819 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %820 = torch.aten.quantize_per_tensor %817, %818, %819, %int12_227 : !torch.vtensor<[32,384,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %821 = torch.aten.int_repr %820 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],si8> | |
| %822 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %823 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %824 = torch.aten._make_per_tensor_quantized_tensor %821, %822, %823 : !torch.vtensor<[32,384,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %825 = torch.aten.dequantize.self %824 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_228 = torch.constant.int 12 | |
| %826 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %827 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %828 = torch.aten.quantize_per_tensor %34, %826, %827, %int12_228 : !torch.vtensor<[96,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %829 = torch.aten.int_repr %828 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],si8> | |
| %830 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %831 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %832 = torch.aten._make_per_tensor_quantized_tensor %829, %830, %831 : !torch.vtensor<[96,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %833 = torch.aten.dequantize.self %832 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],f32> | |
| %int12_229 = torch.constant.int 12 | |
| %834 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %835 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %836 = torch.aten.quantize_per_tensor %35, %834, %835, %int12_229 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %837 = torch.aten.int_repr %836 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %838 = torch.aten.item %303 : !torch.vtensor<[],f32> -> !torch.float | |
| %839 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %840 = torch.aten._make_per_tensor_quantized_tensor %837, %838, %839 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %841 = torch.aten.dequantize.self %840 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_230 = torch.constant.int 0 | |
| %int0_231 = torch.constant.int 0 | |
| %int1_232 = torch.constant.int 1 | |
| %int1_233 = torch.constant.int 1 | |
| %int1_234 = torch.constant.int 1 | |
| %int1_235 = torch.constant.int 1 | |
| %int0_236 = torch.constant.int 0 | |
| %842 = torch.prim.ListConstruct %int0_230, %int0_231 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %843 = torch.prim.ListConstruct %int1_232, %int1_233 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %844 = torch.prim.ListConstruct %int1_234, %int1_235 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %845 = torch.prim.ListConstruct %int0_236, %int0_236 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_237 = torch.constant.bool false | |
| %int1_238 = torch.constant.int 1 | |
| %846 = torch.aten.convolution %825, %833, %841, %844, %842, %843, %false_237, %845, %int1_238 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[96,384,1,1],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %847 = torch.aten.relu %846 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %848 = torch.prim.ListConstruct %678, %730, %812, %847 : (!torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>) -> !torch.list<vtensor> | |
| %int1_239 = torch.constant.int 1 | |
| %849 = torch.aten.cat %848, %int1_239 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,384,25,25],f32> | |
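| // The concatenated [32,384,25,25] output is requantized and the next set of branches begins below (a 1x1 conv to 96 channels and a 1x1 conv to 64 channels are the first two). | |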
| %int12_240 = torch.constant.int 12 | |
| %850 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %851 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %852 = torch.aten.quantize_per_tensor %849, %850, %851, %int12_240 : !torch.vtensor<[32,384,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %853 = torch.aten.int_repr %852 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],si8> | |
| %854 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %855 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %856 = torch.aten._make_per_tensor_quantized_tensor %853, %854, %855 : !torch.vtensor<[32,384,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %857 = torch.aten.dequantize.self %856 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_241 = torch.constant.int 12 | |
| %858 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %859 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %860 = torch.aten.quantize_per_tensor %36, %858, %859, %int12_241 : !torch.vtensor<[96,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %861 = torch.aten.int_repr %860 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],si8> | |
| %862 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %863 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %864 = torch.aten._make_per_tensor_quantized_tensor %861, %862, %863 : !torch.vtensor<[96,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %865 = torch.aten.dequantize.self %864 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],f32> | |
| %int12_242 = torch.constant.int 12 | |
| %866 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %867 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %868 = torch.aten.quantize_per_tensor %37, %866, %867, %int12_242 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %869 = torch.aten.int_repr %868 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %870 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %871 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %872 = torch.aten._make_per_tensor_quantized_tensor %869, %870, %871 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %873 = torch.aten.dequantize.self %872 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_243 = torch.constant.int 0 | |
| %int0_244 = torch.constant.int 0 | |
| %int1_245 = torch.constant.int 1 | |
| %int1_246 = torch.constant.int 1 | |
| %int1_247 = torch.constant.int 1 | |
| %int1_248 = torch.constant.int 1 | |
| %int0_249 = torch.constant.int 0 | |
| %874 = torch.prim.ListConstruct %int0_243, %int0_244 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %875 = torch.prim.ListConstruct %int1_245, %int1_246 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %876 = torch.prim.ListConstruct %int1_247, %int1_248 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %877 = torch.prim.ListConstruct %int0_249, %int0_249 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_250 = torch.constant.bool false | |
| %int1_251 = torch.constant.int 1 | |
| %878 = torch.aten.convolution %857, %865, %873, %876, %874, %875, %false_250, %877, %int1_251 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[96,384,1,1],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %879 = torch.aten.relu %878 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
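| // Branch 2: 1x1 convolution (384 -> 64) with ReLU, then a 3x3 convolution (64 -> 96) with ReLU; the weights, biases, and the intermediate activation each get their own QDQ round trip. | |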
| %int12_252 = torch.constant.int 12 | |
| %880 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %881 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %882 = torch.aten.quantize_per_tensor %38, %880, %881, %int12_252 : !torch.vtensor<[64,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %883 = torch.aten.int_repr %882 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],si8> | |
| %884 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %885 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %886 = torch.aten._make_per_tensor_quantized_tensor %883, %884, %885 : !torch.vtensor<[64,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %887 = torch.aten.dequantize.self %886 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],f32> | |
| %int12_253 = torch.constant.int 12 | |
| %888 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %889 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %890 = torch.aten.quantize_per_tensor %39, %888, %889, %int12_253 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %891 = torch.aten.int_repr %890 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %892 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %893 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %894 = torch.aten._make_per_tensor_quantized_tensor %891, %892, %893 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %895 = torch.aten.dequantize.self %894 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_254 = torch.constant.int 0 | |
| %int0_255 = torch.constant.int 0 | |
| %int1_256 = torch.constant.int 1 | |
| %int1_257 = torch.constant.int 1 | |
| %int1_258 = torch.constant.int 1 | |
| %int1_259 = torch.constant.int 1 | |
| %int0_260 = torch.constant.int 0 | |
| %896 = torch.prim.ListConstruct %int0_254, %int0_255 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %897 = torch.prim.ListConstruct %int1_256, %int1_257 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %898 = torch.prim.ListConstruct %int1_258, %int1_259 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %899 = torch.prim.ListConstruct %int0_260, %int0_260 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_261 = torch.constant.bool false | |
| %int1_262 = torch.constant.int 1 | |
| %900 = torch.aten.convolution %857, %887, %895, %898, %896, %897, %false_261, %899, %int1_262 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[64,384,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,25,25],f32> | |
| %901 = torch.aten.relu %900 : !torch.vtensor<[32,64,25,25],f32> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_263 = torch.constant.int 12 | |
| %902 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %903 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %904 = torch.aten.quantize_per_tensor %901, %902, %903, %int12_263 : !torch.vtensor<[32,64,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %905 = torch.aten.int_repr %904 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],si8> | |
| %906 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %907 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %908 = torch.aten._make_per_tensor_quantized_tensor %905, %906, %907 : !torch.vtensor<[32,64,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %909 = torch.aten.dequantize.self %908 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_264 = torch.constant.int 12 | |
| %910 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %911 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %912 = torch.aten.quantize_per_tensor %40, %910, %911, %int12_264 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %913 = torch.aten.int_repr %912 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8> | |
| %914 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %915 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %916 = torch.aten._make_per_tensor_quantized_tensor %913, %914, %915 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %917 = torch.aten.dequantize.self %916 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32> | |
| %int12_265 = torch.constant.int 12 | |
| %918 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %919 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %920 = torch.aten.quantize_per_tensor %41, %918, %919, %int12_265 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %921 = torch.aten.int_repr %920 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %922 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %923 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %924 = torch.aten._make_per_tensor_quantized_tensor %921, %922, %923 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %925 = torch.aten.dequantize.self %924 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_266 = torch.constant.int 1 | |
| %int1_267 = torch.constant.int 1 | |
| %int1_268 = torch.constant.int 1 | |
| %int1_269 = torch.constant.int 1 | |
| %int1_270 = torch.constant.int 1 | |
| %int1_271 = torch.constant.int 1 | |
| %int0_272 = torch.constant.int 0 | |
| %926 = torch.prim.ListConstruct %int1_266, %int1_267 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %927 = torch.prim.ListConstruct %int1_268, %int1_269 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %928 = torch.prim.ListConstruct %int1_270, %int1_271 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %929 = torch.prim.ListConstruct %int0_272, %int0_272 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_273 = torch.constant.bool false | |
| %int1_274 = torch.constant.int 1 | |
| %930 = torch.aten.convolution %909, %917, %925, %928, %926, %927, %false_273, %929, %int1_274 : !torch.vtensor<[32,64,25,25],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %931 = torch.aten.relu %930 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
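| // Branch 3: 1x1 convolution (384 -> 64), then two 3x3 convolutions (64 -> 96 and 96 -> 96), each followed by ReLU and interleaved with QDQ steps. | |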
| %int12_275 = torch.constant.int 12 | |
| %932 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %933 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %934 = torch.aten.quantize_per_tensor %42, %932, %933, %int12_275 : !torch.vtensor<[64,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %935 = torch.aten.int_repr %934 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],si8> | |
| %936 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %937 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %938 = torch.aten._make_per_tensor_quantized_tensor %935, %936, %937 : !torch.vtensor<[64,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %939 = torch.aten.dequantize.self %938 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],f32> | |
| %int12_276 = torch.constant.int 12 | |
| %940 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %941 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %942 = torch.aten.quantize_per_tensor %43, %940, %941, %int12_276 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %943 = torch.aten.int_repr %942 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %944 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %945 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %946 = torch.aten._make_per_tensor_quantized_tensor %943, %944, %945 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %947 = torch.aten.dequantize.self %946 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_277 = torch.constant.int 0 | |
| %int0_278 = torch.constant.int 0 | |
| %int1_279 = torch.constant.int 1 | |
| %int1_280 = torch.constant.int 1 | |
| %int1_281 = torch.constant.int 1 | |
| %int1_282 = torch.constant.int 1 | |
| %int0_283 = torch.constant.int 0 | |
| %948 = torch.prim.ListConstruct %int0_277, %int0_278 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %949 = torch.prim.ListConstruct %int1_279, %int1_280 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %950 = torch.prim.ListConstruct %int1_281, %int1_282 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %951 = torch.prim.ListConstruct %int0_283, %int0_283 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_284 = torch.constant.bool false | |
| %int1_285 = torch.constant.int 1 | |
| %952 = torch.aten.convolution %857, %939, %947, %950, %948, %949, %false_284, %951, %int1_285 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[64,384,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,25,25],f32> | |
| %953 = torch.aten.relu %952 : !torch.vtensor<[32,64,25,25],f32> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_286 = torch.constant.int 12 | |
| %954 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %955 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %956 = torch.aten.quantize_per_tensor %953, %954, %955, %int12_286 : !torch.vtensor<[32,64,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %957 = torch.aten.int_repr %956 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],si8> | |
| %958 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %959 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %960 = torch.aten._make_per_tensor_quantized_tensor %957, %958, %959 : !torch.vtensor<[32,64,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %961 = torch.aten.dequantize.self %960 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_287 = torch.constant.int 12 | |
| %962 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %963 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %964 = torch.aten.quantize_per_tensor %44, %962, %963, %int12_287 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %965 = torch.aten.int_repr %964 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8> | |
| %966 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %967 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %968 = torch.aten._make_per_tensor_quantized_tensor %965, %966, %967 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %969 = torch.aten.dequantize.self %968 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32> | |
| %int12_288 = torch.constant.int 12 | |
| %970 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %971 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %972 = torch.aten.quantize_per_tensor %45, %970, %971, %int12_288 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %973 = torch.aten.int_repr %972 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %974 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %975 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %976 = torch.aten._make_per_tensor_quantized_tensor %973, %974, %975 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %977 = torch.aten.dequantize.self %976 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_289 = torch.constant.int 1 | |
| %int1_290 = torch.constant.int 1 | |
| %int1_291 = torch.constant.int 1 | |
| %int1_292 = torch.constant.int 1 | |
| %int1_293 = torch.constant.int 1 | |
| %int1_294 = torch.constant.int 1 | |
| %int0_295 = torch.constant.int 0 | |
| %978 = torch.prim.ListConstruct %int1_289, %int1_290 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %979 = torch.prim.ListConstruct %int1_291, %int1_292 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %980 = torch.prim.ListConstruct %int1_293, %int1_294 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %981 = torch.prim.ListConstruct %int0_295, %int0_295 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_296 = torch.constant.bool false | |
| %int1_297 = torch.constant.int 1 | |
| %982 = torch.aten.convolution %961, %969, %977, %980, %978, %979, %false_296, %981, %int1_297 : !torch.vtensor<[32,64,25,25],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %983 = torch.aten.relu %982 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_298 = torch.constant.int 12 | |
| %984 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %985 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %986 = torch.aten.quantize_per_tensor %983, %984, %985, %int12_298 : !torch.vtensor<[32,96,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,25,25],!torch.qint8> | |
| %987 = torch.aten.int_repr %986 : !torch.vtensor<[32,96,25,25],!torch.qint8> -> !torch.vtensor<[32,96,25,25],si8> | |
| %988 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %989 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %990 = torch.aten._make_per_tensor_quantized_tensor %987, %988, %989 : !torch.vtensor<[32,96,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,25,25],!torch.qint8> | |
| %991 = torch.aten.dequantize.self %990 : !torch.vtensor<[32,96,25,25],!torch.qint8> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_299 = torch.constant.int 12 | |
| %992 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %993 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %994 = torch.aten.quantize_per_tensor %46, %992, %993, %int12_299 : !torch.vtensor<[96,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,96,3,3],!torch.qint8> | |
| %995 = torch.aten.int_repr %994 : !torch.vtensor<[96,96,3,3],!torch.qint8> -> !torch.vtensor<[96,96,3,3],si8> | |
| %996 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %997 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %998 = torch.aten._make_per_tensor_quantized_tensor %995, %996, %997 : !torch.vtensor<[96,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,96,3,3],!torch.qint8> | |
| %999 = torch.aten.dequantize.self %998 : !torch.vtensor<[96,96,3,3],!torch.qint8> -> !torch.vtensor<[96,96,3,3],f32> | |
| %int12_300 = torch.constant.int 12 | |
| %1000 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1001 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1002 = torch.aten.quantize_per_tensor %47, %1000, %1001, %int12_300 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1003 = torch.aten.int_repr %1002 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1004 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1005 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1006 = torch.aten._make_per_tensor_quantized_tensor %1003, %1004, %1005 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1007 = torch.aten.dequantize.self %1006 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_301 = torch.constant.int 1 | |
| %int1_302 = torch.constant.int 1 | |
| %int1_303 = torch.constant.int 1 | |
| %int1_304 = torch.constant.int 1 | |
| %int1_305 = torch.constant.int 1 | |
| %int1_306 = torch.constant.int 1 | |
| %int0_307 = torch.constant.int 0 | |
| %1008 = torch.prim.ListConstruct %int1_301, %int1_302 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1009 = torch.prim.ListConstruct %int1_303, %int1_304 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1010 = torch.prim.ListConstruct %int1_305, %int1_306 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1011 = torch.prim.ListConstruct %int0_307, %int0_307 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_308 = torch.constant.bool false | |
| %int1_309 = torch.constant.int 1 | |
| %1012 = torch.aten.convolution %991, %999, %1007, %1010, %1008, %1009, %false_308, %1011, %int1_309 : !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[96,96,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1013 = torch.aten.relu %1012 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
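| // Branch 4: 3x3 average pooling (stride 1) over the shared [32,384,25,25] input, an element-wise multiply by a scalar constant, then a QDQ round trip and a 1x1 convolution (384 -> 96) with ReLU. | |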
| %int3_310 = torch.constant.int 3 | |
| %int3_311 = torch.constant.int 3 | |
| %int1_312 = torch.constant.int 1 | |
| %int1_313 = torch.constant.int 1 | |
| %int1_314 = torch.constant.int 1 | |
| %int1_315 = torch.constant.int 1 | |
| %int1_316 = torch.constant.int 1 | |
| %int1_317 = torch.constant.int 1 | |
| %1014 = torch.prim.ListConstruct %int3_310, %int3_311 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1015 = torch.prim.ListConstruct %int1_312, %int1_313, %int1_314, %int1_315 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1016 = torch.prim.ListConstruct %int1_316, %int1_317 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_318 = torch.constant.bool false | |
| %false_319 = torch.constant.bool false | |
| %none_320 = torch.constant.none | |
| %1017 = torch.aten.avg_pool2d %857, %1014, %1016, %1015, %false_318, %false_319, %none_320 : !torch.vtensor<[32,384,25,25],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,384,25,25],f32> | |
| %1018 = torch.aten.mul.Tensor %1017, %308 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_321 = torch.constant.int 12 | |
| %1019 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1020 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1021 = torch.aten.quantize_per_tensor %1018, %1019, %1020, %int12_321 : !torch.vtensor<[32,384,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1022 = torch.aten.int_repr %1021 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],si8> | |
| %1023 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1024 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1025 = torch.aten._make_per_tensor_quantized_tensor %1022, %1023, %1024 : !torch.vtensor<[32,384,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1026 = torch.aten.dequantize.self %1025 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_322 = torch.constant.int 12 | |
| %1027 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1028 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1029 = torch.aten.quantize_per_tensor %48, %1027, %1028, %int12_322 : !torch.vtensor<[96,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1030 = torch.aten.int_repr %1029 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],si8> | |
| %1031 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1032 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1033 = torch.aten._make_per_tensor_quantized_tensor %1030, %1031, %1032 : !torch.vtensor<[96,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1034 = torch.aten.dequantize.self %1033 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],f32> | |
| %int12_323 = torch.constant.int 12 | |
| %1035 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1036 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1037 = torch.aten.quantize_per_tensor %49, %1035, %1036, %int12_323 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1038 = torch.aten.int_repr %1037 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1039 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1040 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1041 = torch.aten._make_per_tensor_quantized_tensor %1038, %1039, %1040 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1042 = torch.aten.dequantize.self %1041 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_324 = torch.constant.int 0 | |
| %int0_325 = torch.constant.int 0 | |
| %int1_326 = torch.constant.int 1 | |
| %int1_327 = torch.constant.int 1 | |
| %int1_328 = torch.constant.int 1 | |
| %int1_329 = torch.constant.int 1 | |
| %int0_330 = torch.constant.int 0 | |
| %1043 = torch.prim.ListConstruct %int0_324, %int0_325 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1044 = torch.prim.ListConstruct %int1_326, %int1_327 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1045 = torch.prim.ListConstruct %int1_328, %int1_329 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1046 = torch.prim.ListConstruct %int0_330, %int0_330 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_331 = torch.constant.bool false | |
| %int1_332 = torch.constant.int 1 | |
| %1047 = torch.aten.convolution %1026, %1034, %1042, %1045, %1043, %1044, %false_331, %1046, %int1_332 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[96,384,1,1],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1048 = torch.aten.relu %1047 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
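| // The four [32,96,25,25] branch outputs are concatenated along dim 1 back to [32,384,25,25]. | |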
| %1049 = torch.prim.ListConstruct %879, %931, %1013, %1048 : (!torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>) -> !torch.list<vtensor> | |
| %int1_333 = torch.constant.int 1 | |
| %1050 = torch.aten.cat %1049, %int1_333 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_334 = torch.constant.int 12 | |
| %1051 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1052 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1053 = torch.aten.quantize_per_tensor %1050, %1051, %1052, %int12_334 : !torch.vtensor<[32,384,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1054 = torch.aten.int_repr %1053 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],si8> | |
| %1055 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1056 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1057 = torch.aten._make_per_tensor_quantized_tensor %1054, %1055, %1056 : !torch.vtensor<[32,384,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1058 = torch.aten.dequantize.self %1057 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],f32> | |
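| // The same four-branch block repeats below on the re-quantized concat result %1058, using the next set of weight/bias literals. | |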
| %int12_335 = torch.constant.int 12 | |
| %1059 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1060 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1061 = torch.aten.quantize_per_tensor %50, %1059, %1060, %int12_335 : !torch.vtensor<[96,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1062 = torch.aten.int_repr %1061 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],si8> | |
| %1063 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1064 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1065 = torch.aten._make_per_tensor_quantized_tensor %1062, %1063, %1064 : !torch.vtensor<[96,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1066 = torch.aten.dequantize.self %1065 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],f32> | |
| %int12_336 = torch.constant.int 12 | |
| %1067 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1068 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1069 = torch.aten.quantize_per_tensor %51, %1067, %1068, %int12_336 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1070 = torch.aten.int_repr %1069 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1071 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1072 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1073 = torch.aten._make_per_tensor_quantized_tensor %1070, %1071, %1072 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1074 = torch.aten.dequantize.self %1073 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_337 = torch.constant.int 0 | |
| %int0_338 = torch.constant.int 0 | |
| %int1_339 = torch.constant.int 1 | |
| %int1_340 = torch.constant.int 1 | |
| %int1_341 = torch.constant.int 1 | |
| %int1_342 = torch.constant.int 1 | |
| %int0_343 = torch.constant.int 0 | |
| %1075 = torch.prim.ListConstruct %int0_337, %int0_338 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1076 = torch.prim.ListConstruct %int1_339, %int1_340 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1077 = torch.prim.ListConstruct %int1_341, %int1_342 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1078 = torch.prim.ListConstruct %int0_343, %int0_343 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_344 = torch.constant.bool false | |
| %int1_345 = torch.constant.int 1 | |
| %1079 = torch.aten.convolution %1058, %1066, %1074, %1077, %1075, %1076, %false_344, %1078, %int1_345 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[96,384,1,1],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1080 = torch.aten.relu %1079 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_346 = torch.constant.int 12 | |
| %1081 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1082 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1083 = torch.aten.quantize_per_tensor %52, %1081, %1082, %int12_346 : !torch.vtensor<[64,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %1084 = torch.aten.int_repr %1083 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],si8> | |
| %1085 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1086 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1087 = torch.aten._make_per_tensor_quantized_tensor %1084, %1085, %1086 : !torch.vtensor<[64,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %1088 = torch.aten.dequantize.self %1087 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],f32> | |
| %int12_347 = torch.constant.int 12 | |
| %1089 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1090 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1091 = torch.aten.quantize_per_tensor %53, %1089, %1090, %int12_347 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %1092 = torch.aten.int_repr %1091 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %1093 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1094 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1095 = torch.aten._make_per_tensor_quantized_tensor %1092, %1093, %1094 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %1096 = torch.aten.dequantize.self %1095 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_348 = torch.constant.int 0 | |
| %int0_349 = torch.constant.int 0 | |
| %int1_350 = torch.constant.int 1 | |
| %int1_351 = torch.constant.int 1 | |
| %int1_352 = torch.constant.int 1 | |
| %int1_353 = torch.constant.int 1 | |
| %int0_354 = torch.constant.int 0 | |
| %1097 = torch.prim.ListConstruct %int0_348, %int0_349 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1098 = torch.prim.ListConstruct %int1_350, %int1_351 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1099 = torch.prim.ListConstruct %int1_352, %int1_353 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1100 = torch.prim.ListConstruct %int0_354, %int0_354 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_355 = torch.constant.bool false | |
| %int1_356 = torch.constant.int 1 | |
| %1101 = torch.aten.convolution %1058, %1088, %1096, %1099, %1097, %1098, %false_355, %1100, %int1_356 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[64,384,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,25,25],f32> | |
| %1102 = torch.aten.relu %1101 : !torch.vtensor<[32,64,25,25],f32> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_357 = torch.constant.int 12 | |
| %1103 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1104 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1105 = torch.aten.quantize_per_tensor %1102, %1103, %1104, %int12_357 : !torch.vtensor<[32,64,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %1106 = torch.aten.int_repr %1105 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],si8> | |
| %1107 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1108 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1109 = torch.aten._make_per_tensor_quantized_tensor %1106, %1107, %1108 : !torch.vtensor<[32,64,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %1110 = torch.aten.dequantize.self %1109 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_358 = torch.constant.int 12 | |
| %1111 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1112 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1113 = torch.aten.quantize_per_tensor %54, %1111, %1112, %int12_358 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %1114 = torch.aten.int_repr %1113 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8> | |
| %1115 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1116 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1117 = torch.aten._make_per_tensor_quantized_tensor %1114, %1115, %1116 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %1118 = torch.aten.dequantize.self %1117 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32> | |
| %int12_359 = torch.constant.int 12 | |
| %1119 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1120 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1121 = torch.aten.quantize_per_tensor %55, %1119, %1120, %int12_359 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1122 = torch.aten.int_repr %1121 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1123 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1124 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1125 = torch.aten._make_per_tensor_quantized_tensor %1122, %1123, %1124 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1126 = torch.aten.dequantize.self %1125 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_360 = torch.constant.int 1 | |
| %int1_361 = torch.constant.int 1 | |
| %int1_362 = torch.constant.int 1 | |
| %int1_363 = torch.constant.int 1 | |
| %int1_364 = torch.constant.int 1 | |
| %int1_365 = torch.constant.int 1 | |
| %int0_366 = torch.constant.int 0 | |
| %1127 = torch.prim.ListConstruct %int1_360, %int1_361 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1128 = torch.prim.ListConstruct %int1_362, %int1_363 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1129 = torch.prim.ListConstruct %int1_364, %int1_365 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1130 = torch.prim.ListConstruct %int0_366, %int0_366 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_367 = torch.constant.bool false | |
| %int1_368 = torch.constant.int 1 | |
| %1131 = torch.aten.convolution %1110, %1118, %1126, %1129, %1127, %1128, %false_367, %1130, %int1_368 : !torch.vtensor<[32,64,25,25],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1132 = torch.aten.relu %1131 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_369 = torch.constant.int 12 | |
| %1133 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1134 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1135 = torch.aten.quantize_per_tensor %56, %1133, %1134, %int12_369 : !torch.vtensor<[64,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %1136 = torch.aten.int_repr %1135 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],si8> | |
| %1137 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1138 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1139 = torch.aten._make_per_tensor_quantized_tensor %1136, %1137, %1138 : !torch.vtensor<[64,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %1140 = torch.aten.dequantize.self %1139 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],f32> | |
| %int12_370 = torch.constant.int 12 | |
| %1141 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1142 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1143 = torch.aten.quantize_per_tensor %57, %1141, %1142, %int12_370 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %1144 = torch.aten.int_repr %1143 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %1145 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1146 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1147 = torch.aten._make_per_tensor_quantized_tensor %1144, %1145, %1146 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %1148 = torch.aten.dequantize.self %1147 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_371 = torch.constant.int 0 | |
| %int0_372 = torch.constant.int 0 | |
| %int1_373 = torch.constant.int 1 | |
| %int1_374 = torch.constant.int 1 | |
| %int1_375 = torch.constant.int 1 | |
| %int1_376 = torch.constant.int 1 | |
| %int0_377 = torch.constant.int 0 | |
| %1149 = torch.prim.ListConstruct %int0_371, %int0_372 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1150 = torch.prim.ListConstruct %int1_373, %int1_374 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1151 = torch.prim.ListConstruct %int1_375, %int1_376 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1152 = torch.prim.ListConstruct %int0_377, %int0_377 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_378 = torch.constant.bool false | |
| %int1_379 = torch.constant.int 1 | |
| %1153 = torch.aten.convolution %1058, %1140, %1148, %1151, %1149, %1150, %false_378, %1152, %int1_379 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[64,384,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,25,25],f32> | |
| %1154 = torch.aten.relu %1153 : !torch.vtensor<[32,64,25,25],f32> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_380 = torch.constant.int 12 | |
| %1155 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1156 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1157 = torch.aten.quantize_per_tensor %1154, %1155, %1156, %int12_380 : !torch.vtensor<[32,64,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %1158 = torch.aten.int_repr %1157 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],si8> | |
| %1159 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1160 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1161 = torch.aten._make_per_tensor_quantized_tensor %1158, %1159, %1160 : !torch.vtensor<[32,64,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %1162 = torch.aten.dequantize.self %1161 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_381 = torch.constant.int 12 | |
| %1163 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1164 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1165 = torch.aten.quantize_per_tensor %58, %1163, %1164, %int12_381 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %1166 = torch.aten.int_repr %1165 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8> | |
| %1167 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1168 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1169 = torch.aten._make_per_tensor_quantized_tensor %1166, %1167, %1168 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %1170 = torch.aten.dequantize.self %1169 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32> | |
| %int12_382 = torch.constant.int 12 | |
| %1171 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1172 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1173 = torch.aten.quantize_per_tensor %59, %1171, %1172, %int12_382 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1174 = torch.aten.int_repr %1173 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1175 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1176 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1177 = torch.aten._make_per_tensor_quantized_tensor %1174, %1175, %1176 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1178 = torch.aten.dequantize.self %1177 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_383 = torch.constant.int 1 | |
| %int1_384 = torch.constant.int 1 | |
| %int1_385 = torch.constant.int 1 | |
| %int1_386 = torch.constant.int 1 | |
| %int1_387 = torch.constant.int 1 | |
| %int1_388 = torch.constant.int 1 | |
| %int0_389 = torch.constant.int 0 | |
| %1179 = torch.prim.ListConstruct %int1_383, %int1_384 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1180 = torch.prim.ListConstruct %int1_385, %int1_386 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1181 = torch.prim.ListConstruct %int1_387, %int1_388 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1182 = torch.prim.ListConstruct %int0_389, %int0_389 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_390 = torch.constant.bool false | |
| %int1_391 = torch.constant.int 1 | |
| %1183 = torch.aten.convolution %1162, %1170, %1178, %1181, %1179, %1180, %false_390, %1182, %int1_391 : !torch.vtensor<[32,64,25,25],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1184 = torch.aten.relu %1183 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_392 = torch.constant.int 12 | |
| %1185 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1186 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1187 = torch.aten.quantize_per_tensor %1184, %1185, %1186, %int12_392 : !torch.vtensor<[32,96,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,25,25],!torch.qint8> | |
| %1188 = torch.aten.int_repr %1187 : !torch.vtensor<[32,96,25,25],!torch.qint8> -> !torch.vtensor<[32,96,25,25],si8> | |
| %1189 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1190 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1191 = torch.aten._make_per_tensor_quantized_tensor %1188, %1189, %1190 : !torch.vtensor<[32,96,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,25,25],!torch.qint8> | |
| %1192 = torch.aten.dequantize.self %1191 : !torch.vtensor<[32,96,25,25],!torch.qint8> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_393 = torch.constant.int 12 | |
| %1193 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1194 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1195 = torch.aten.quantize_per_tensor %60, %1193, %1194, %int12_393 : !torch.vtensor<[96,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,96,3,3],!torch.qint8> | |
| %1196 = torch.aten.int_repr %1195 : !torch.vtensor<[96,96,3,3],!torch.qint8> -> !torch.vtensor<[96,96,3,3],si8> | |
| %1197 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1198 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1199 = torch.aten._make_per_tensor_quantized_tensor %1196, %1197, %1198 : !torch.vtensor<[96,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,96,3,3],!torch.qint8> | |
| %1200 = torch.aten.dequantize.self %1199 : !torch.vtensor<[96,96,3,3],!torch.qint8> -> !torch.vtensor<[96,96,3,3],f32> | |
| %int12_394 = torch.constant.int 12 | |
| %1201 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1202 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1203 = torch.aten.quantize_per_tensor %61, %1201, %1202, %int12_394 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1204 = torch.aten.int_repr %1203 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1205 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1206 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1207 = torch.aten._make_per_tensor_quantized_tensor %1204, %1205, %1206 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1208 = torch.aten.dequantize.self %1207 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_395 = torch.constant.int 1 | |
| %int1_396 = torch.constant.int 1 | |
| %int1_397 = torch.constant.int 1 | |
| %int1_398 = torch.constant.int 1 | |
| %int1_399 = torch.constant.int 1 | |
| %int1_400 = torch.constant.int 1 | |
| %int0_401 = torch.constant.int 0 | |
| %1209 = torch.prim.ListConstruct %int1_395, %int1_396 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1210 = torch.prim.ListConstruct %int1_397, %int1_398 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1211 = torch.prim.ListConstruct %int1_399, %int1_400 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1212 = torch.prim.ListConstruct %int0_401, %int0_401 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_402 = torch.constant.bool false | |
| %int1_403 = torch.constant.int 1 | |
| %1213 = torch.aten.convolution %1192, %1200, %1208, %1211, %1209, %1210, %false_402, %1212, %int1_403 : !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[96,96,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1214 = torch.aten.relu %1213 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int3_404 = torch.constant.int 3 | |
| %int3_405 = torch.constant.int 3 | |
| %int1_406 = torch.constant.int 1 | |
| %int1_407 = torch.constant.int 1 | |
| %int1_408 = torch.constant.int 1 | |
| %int1_409 = torch.constant.int 1 | |
| %int1_410 = torch.constant.int 1 | |
| %int1_411 = torch.constant.int 1 | |
| %1215 = torch.prim.ListConstruct %int3_404, %int3_405 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1216 = torch.prim.ListConstruct %int1_406, %int1_407, %int1_408, %int1_409 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1217 = torch.prim.ListConstruct %int1_410, %int1_411 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_412 = torch.constant.bool false | |
| %false_413 = torch.constant.bool false | |
| %none_414 = torch.constant.none | |
| %1218 = torch.aten.avg_pool2d %1058, %1215, %1217, %1216, %false_412, %false_413, %none_414 : !torch.vtensor<[32,384,25,25],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,384,25,25],f32> | |
| %1219 = torch.aten.mul.Tensor %1218, %308 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_415 = torch.constant.int 12 | |
| %1220 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1221 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1222 = torch.aten.quantize_per_tensor %1219, %1220, %1221, %int12_415 : !torch.vtensor<[32,384,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1223 = torch.aten.int_repr %1222 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],si8> | |
| %1224 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1225 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1226 = torch.aten._make_per_tensor_quantized_tensor %1223, %1224, %1225 : !torch.vtensor<[32,384,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1227 = torch.aten.dequantize.self %1226 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_416 = torch.constant.int 12 | |
| %1228 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1229 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1230 = torch.aten.quantize_per_tensor %62, %1228, %1229, %int12_416 : !torch.vtensor<[96,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1231 = torch.aten.int_repr %1230 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],si8> | |
| %1232 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1233 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1234 = torch.aten._make_per_tensor_quantized_tensor %1231, %1232, %1233 : !torch.vtensor<[96,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1235 = torch.aten.dequantize.self %1234 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],f32> | |
| %int12_417 = torch.constant.int 12 | |
| %1236 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1237 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1238 = torch.aten.quantize_per_tensor %63, %1236, %1237, %int12_417 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1239 = torch.aten.int_repr %1238 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1240 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1241 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1242 = torch.aten._make_per_tensor_quantized_tensor %1239, %1240, %1241 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1243 = torch.aten.dequantize.self %1242 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_418 = torch.constant.int 0 | |
| %int0_419 = torch.constant.int 0 | |
| %int1_420 = torch.constant.int 1 | |
| %int1_421 = torch.constant.int 1 | |
| %int1_422 = torch.constant.int 1 | |
| %int1_423 = torch.constant.int 1 | |
| %int0_424 = torch.constant.int 0 | |
| %1244 = torch.prim.ListConstruct %int0_418, %int0_419 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1245 = torch.prim.ListConstruct %int1_420, %int1_421 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1246 = torch.prim.ListConstruct %int1_422, %int1_423 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1247 = torch.prim.ListConstruct %int0_424, %int0_424 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_425 = torch.constant.bool false | |
| %int1_426 = torch.constant.int 1 | |
| %1248 = torch.aten.convolution %1227, %1235, %1243, %1246, %1244, %1245, %false_425, %1247, %int1_426 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[96,384,1,1],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1249 = torch.aten.relu %1248 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %1250 = torch.prim.ListConstruct %1080, %1132, %1214, %1249 : (!torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>) -> !torch.list<vtensor> | |
| %int1_427 = torch.constant.int 1 | |
| %1251 = torch.aten.cat %1250, %int1_427 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_428 = torch.constant.int 12 | |
| %1252 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1253 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1254 = torch.aten.quantize_per_tensor %1251, %1252, %1253, %int12_428 : !torch.vtensor<[32,384,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1255 = torch.aten.int_repr %1254 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],si8> | |
| %1256 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1257 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1258 = torch.aten._make_per_tensor_quantized_tensor %1255, %1256, %1257 : !torch.vtensor<[32,384,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1259 = torch.aten.dequantize.self %1258 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],f32> | |
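| // A further repetition of the block begins here, consuming the dequantized concat result %1259. | |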
| %int12_429 = torch.constant.int 12 | |
| %1260 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1261 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1262 = torch.aten.quantize_per_tensor %64, %1260, %1261, %int12_429 : !torch.vtensor<[96,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1263 = torch.aten.int_repr %1262 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],si8> | |
| %1264 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1265 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1266 = torch.aten._make_per_tensor_quantized_tensor %1263, %1264, %1265 : !torch.vtensor<[96,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1267 = torch.aten.dequantize.self %1266 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],f32> | |
| %int12_430 = torch.constant.int 12 | |
| %1268 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1269 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1270 = torch.aten.quantize_per_tensor %65, %1268, %1269, %int12_430 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1271 = torch.aten.int_repr %1270 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1272 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1273 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1274 = torch.aten._make_per_tensor_quantized_tensor %1271, %1272, %1273 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1275 = torch.aten.dequantize.self %1274 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_431 = torch.constant.int 0 | |
| %int0_432 = torch.constant.int 0 | |
| %int1_433 = torch.constant.int 1 | |
| %int1_434 = torch.constant.int 1 | |
| %int1_435 = torch.constant.int 1 | |
| %int1_436 = torch.constant.int 1 | |
| %int0_437 = torch.constant.int 0 | |
| %1276 = torch.prim.ListConstruct %int0_431, %int0_432 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1277 = torch.prim.ListConstruct %int1_433, %int1_434 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1278 = torch.prim.ListConstruct %int1_435, %int1_436 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1279 = torch.prim.ListConstruct %int0_437, %int0_437 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_438 = torch.constant.bool false | |
| %int1_439 = torch.constant.int 1 | |
| %1280 = torch.aten.convolution %1259, %1267, %1275, %1278, %1276, %1277, %false_438, %1279, %int1_439 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[96,384,1,1],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1281 = torch.aten.relu %1280 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_440 = torch.constant.int 12 | |
| %1282 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1283 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1284 = torch.aten.quantize_per_tensor %66, %1282, %1283, %int12_440 : !torch.vtensor<[64,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %1285 = torch.aten.int_repr %1284 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],si8> | |
| %1286 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1287 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1288 = torch.aten._make_per_tensor_quantized_tensor %1285, %1286, %1287 : !torch.vtensor<[64,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %1289 = torch.aten.dequantize.self %1288 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],f32> | |
| %int12_441 = torch.constant.int 12 | |
| %1290 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1291 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1292 = torch.aten.quantize_per_tensor %67, %1290, %1291, %int12_441 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %1293 = torch.aten.int_repr %1292 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %1294 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1295 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1296 = torch.aten._make_per_tensor_quantized_tensor %1293, %1294, %1295 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %1297 = torch.aten.dequantize.self %1296 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_442 = torch.constant.int 0 | |
| %int0_443 = torch.constant.int 0 | |
| %int1_444 = torch.constant.int 1 | |
| %int1_445 = torch.constant.int 1 | |
| %int1_446 = torch.constant.int 1 | |
| %int1_447 = torch.constant.int 1 | |
| %int0_448 = torch.constant.int 0 | |
| %1298 = torch.prim.ListConstruct %int0_442, %int0_443 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1299 = torch.prim.ListConstruct %int1_444, %int1_445 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1300 = torch.prim.ListConstruct %int1_446, %int1_447 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1301 = torch.prim.ListConstruct %int0_448, %int0_448 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_449 = torch.constant.bool false | |
| %int1_450 = torch.constant.int 1 | |
| %1302 = torch.aten.convolution %1259, %1289, %1297, %1300, %1298, %1299, %false_449, %1301, %int1_450 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[64,384,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,25,25],f32> | |
| %1303 = torch.aten.relu %1302 : !torch.vtensor<[32,64,25,25],f32> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_451 = torch.constant.int 12 | |
| %1304 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1305 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1306 = torch.aten.quantize_per_tensor %1303, %1304, %1305, %int12_451 : !torch.vtensor<[32,64,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %1307 = torch.aten.int_repr %1306 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],si8> | |
| %1308 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1309 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1310 = torch.aten._make_per_tensor_quantized_tensor %1307, %1308, %1309 : !torch.vtensor<[32,64,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %1311 = torch.aten.dequantize.self %1310 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_452 = torch.constant.int 12 | |
| %1312 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1313 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1314 = torch.aten.quantize_per_tensor %68, %1312, %1313, %int12_452 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %1315 = torch.aten.int_repr %1314 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8> | |
| %1316 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1317 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1318 = torch.aten._make_per_tensor_quantized_tensor %1315, %1316, %1317 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %1319 = torch.aten.dequantize.self %1318 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32> | |
| %int12_453 = torch.constant.int 12 | |
| %1320 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1321 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1322 = torch.aten.quantize_per_tensor %69, %1320, %1321, %int12_453 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1323 = torch.aten.int_repr %1322 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1324 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1325 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1326 = torch.aten._make_per_tensor_quantized_tensor %1323, %1324, %1325 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1327 = torch.aten.dequantize.self %1326 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_454 = torch.constant.int 1 | |
| %int1_455 = torch.constant.int 1 | |
| %int1_456 = torch.constant.int 1 | |
| %int1_457 = torch.constant.int 1 | |
| %int1_458 = torch.constant.int 1 | |
| %int1_459 = torch.constant.int 1 | |
| %int0_460 = torch.constant.int 0 | |
| %1328 = torch.prim.ListConstruct %int1_454, %int1_455 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1329 = torch.prim.ListConstruct %int1_456, %int1_457 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1330 = torch.prim.ListConstruct %int1_458, %int1_459 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1331 = torch.prim.ListConstruct %int0_460, %int0_460 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_461 = torch.constant.bool false | |
| %int1_462 = torch.constant.int 1 | |
| %1332 = torch.aten.convolution %1311, %1319, %1327, %1330, %1328, %1329, %false_461, %1331, %int1_462 : !torch.vtensor<[32,64,25,25],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1333 = torch.aten.relu %1332 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
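| // Branch 2: 1x1 convolution to 64 channels, a requantize/dequantize of the activation, then a 3x3 convolution | |
| // (padding 1) up to 96 channels; %1333 feeds the same concatenation at %1452. | |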
| %int12_463 = torch.constant.int 12 | |
| %1334 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1335 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1336 = torch.aten.quantize_per_tensor %70, %1334, %1335, %int12_463 : !torch.vtensor<[64,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %1337 = torch.aten.int_repr %1336 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],si8> | |
| %1338 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1339 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1340 = torch.aten._make_per_tensor_quantized_tensor %1337, %1338, %1339 : !torch.vtensor<[64,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[64,384,1,1],!torch.qint8> | |
| %1341 = torch.aten.dequantize.self %1340 : !torch.vtensor<[64,384,1,1],!torch.qint8> -> !torch.vtensor<[64,384,1,1],f32> | |
| %int12_464 = torch.constant.int 12 | |
| %1342 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1343 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1344 = torch.aten.quantize_per_tensor %71, %1342, %1343, %int12_464 : !torch.vtensor<[64],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %1345 = torch.aten.int_repr %1344 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],si8> | |
| %1346 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1347 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1348 = torch.aten._make_per_tensor_quantized_tensor %1345, %1346, %1347 : !torch.vtensor<[64],si8>, !torch.float, !torch.int -> !torch.vtensor<[64],!torch.qint8> | |
| %1349 = torch.aten.dequantize.self %1348 : !torch.vtensor<[64],!torch.qint8> -> !torch.vtensor<[64],f32> | |
| %int0_465 = torch.constant.int 0 | |
| %int0_466 = torch.constant.int 0 | |
| %int1_467 = torch.constant.int 1 | |
| %int1_468 = torch.constant.int 1 | |
| %int1_469 = torch.constant.int 1 | |
| %int1_470 = torch.constant.int 1 | |
| %int0_471 = torch.constant.int 0 | |
| %1350 = torch.prim.ListConstruct %int0_465, %int0_466 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1351 = torch.prim.ListConstruct %int1_467, %int1_468 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1352 = torch.prim.ListConstruct %int1_469, %int1_470 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1353 = torch.prim.ListConstruct %int0_471, %int0_471 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_472 = torch.constant.bool false | |
| %int1_473 = torch.constant.int 1 | |
| %1354 = torch.aten.convolution %1259, %1341, %1349, %1352, %1350, %1351, %false_472, %1353, %int1_473 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[64,384,1,1],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,64,25,25],f32> | |
| %1355 = torch.aten.relu %1354 : !torch.vtensor<[32,64,25,25],f32> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_474 = torch.constant.int 12 | |
| %1356 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1357 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1358 = torch.aten.quantize_per_tensor %1355, %1356, %1357, %int12_474 : !torch.vtensor<[32,64,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %1359 = torch.aten.int_repr %1358 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],si8> | |
| %1360 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1361 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1362 = torch.aten._make_per_tensor_quantized_tensor %1359, %1360, %1361 : !torch.vtensor<[32,64,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,64,25,25],!torch.qint8> | |
| %1363 = torch.aten.dequantize.self %1362 : !torch.vtensor<[32,64,25,25],!torch.qint8> -> !torch.vtensor<[32,64,25,25],f32> | |
| %int12_475 = torch.constant.int 12 | |
| %1364 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1365 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1366 = torch.aten.quantize_per_tensor %72, %1364, %1365, %int12_475 : !torch.vtensor<[96,64,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %1367 = torch.aten.int_repr %1366 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],si8> | |
| %1368 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1369 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1370 = torch.aten._make_per_tensor_quantized_tensor %1367, %1368, %1369 : !torch.vtensor<[96,64,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,64,3,3],!torch.qint8> | |
| %1371 = torch.aten.dequantize.self %1370 : !torch.vtensor<[96,64,3,3],!torch.qint8> -> !torch.vtensor<[96,64,3,3],f32> | |
| %int12_476 = torch.constant.int 12 | |
| %1372 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1373 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1374 = torch.aten.quantize_per_tensor %73, %1372, %1373, %int12_476 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1375 = torch.aten.int_repr %1374 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1376 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1377 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1378 = torch.aten._make_per_tensor_quantized_tensor %1375, %1376, %1377 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1379 = torch.aten.dequantize.self %1378 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_477 = torch.constant.int 1 | |
| %int1_478 = torch.constant.int 1 | |
| %int1_479 = torch.constant.int 1 | |
| %int1_480 = torch.constant.int 1 | |
| %int1_481 = torch.constant.int 1 | |
| %int1_482 = torch.constant.int 1 | |
| %int0_483 = torch.constant.int 0 | |
| %1380 = torch.prim.ListConstruct %int1_477, %int1_478 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1381 = torch.prim.ListConstruct %int1_479, %int1_480 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1382 = torch.prim.ListConstruct %int1_481, %int1_482 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1383 = torch.prim.ListConstruct %int0_483, %int0_483 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_484 = torch.constant.bool false | |
| %int1_485 = torch.constant.int 1 | |
| %1384 = torch.aten.convolution %1363, %1371, %1379, %1382, %1380, %1381, %false_484, %1383, %int1_485 : !torch.vtensor<[32,64,25,25],f32>, !torch.vtensor<[96,64,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1385 = torch.aten.relu %1384 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_486 = torch.constant.int 12 | |
| %1386 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1387 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1388 = torch.aten.quantize_per_tensor %1385, %1386, %1387, %int12_486 : !torch.vtensor<[32,96,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,96,25,25],!torch.qint8> | |
| %1389 = torch.aten.int_repr %1388 : !torch.vtensor<[32,96,25,25],!torch.qint8> -> !torch.vtensor<[32,96,25,25],si8> | |
| %1390 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1391 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1392 = torch.aten._make_per_tensor_quantized_tensor %1389, %1390, %1391 : !torch.vtensor<[32,96,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,96,25,25],!torch.qint8> | |
| %1393 = torch.aten.dequantize.self %1392 : !torch.vtensor<[32,96,25,25],!torch.qint8> -> !torch.vtensor<[32,96,25,25],f32> | |
| %int12_487 = torch.constant.int 12 | |
| %1394 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1395 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1396 = torch.aten.quantize_per_tensor %74, %1394, %1395, %int12_487 : !torch.vtensor<[96,96,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,96,3,3],!torch.qint8> | |
| %1397 = torch.aten.int_repr %1396 : !torch.vtensor<[96,96,3,3],!torch.qint8> -> !torch.vtensor<[96,96,3,3],si8> | |
| %1398 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1399 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1400 = torch.aten._make_per_tensor_quantized_tensor %1397, %1398, %1399 : !torch.vtensor<[96,96,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,96,3,3],!torch.qint8> | |
| %1401 = torch.aten.dequantize.self %1400 : !torch.vtensor<[96,96,3,3],!torch.qint8> -> !torch.vtensor<[96,96,3,3],f32> | |
| %int12_488 = torch.constant.int 12 | |
| %1402 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1403 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1404 = torch.aten.quantize_per_tensor %75, %1402, %1403, %int12_488 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1405 = torch.aten.int_repr %1404 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1406 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1407 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1408 = torch.aten._make_per_tensor_quantized_tensor %1405, %1406, %1407 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1409 = torch.aten.dequantize.self %1408 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int1_489 = torch.constant.int 1 | |
| %int1_490 = torch.constant.int 1 | |
| %int1_491 = torch.constant.int 1 | |
| %int1_492 = torch.constant.int 1 | |
| %int1_493 = torch.constant.int 1 | |
| %int1_494 = torch.constant.int 1 | |
| %int0_495 = torch.constant.int 0 | |
| %1410 = torch.prim.ListConstruct %int1_489, %int1_490 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1411 = torch.prim.ListConstruct %int1_491, %int1_492 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1412 = torch.prim.ListConstruct %int1_493, %int1_494 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1413 = torch.prim.ListConstruct %int0_495, %int0_495 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_496 = torch.constant.bool false | |
| %int1_497 = torch.constant.int 1 | |
| %1414 = torch.aten.convolution %1393, %1401, %1409, %1412, %1410, %1411, %false_496, %1413, %int1_497 : !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[96,96,3,3],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1415 = torch.aten.relu %1414 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
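| // Branch 3: 1x1 convolution to 64 channels followed by two 3x3 convolutions (padding 1) up to 96 channels, | |
| // with an activation QDQ pair between the convolutions; %1415 is the third concatenated output. | |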
| %int3_498 = torch.constant.int 3 | |
| %int3_499 = torch.constant.int 3 | |
| %int1_500 = torch.constant.int 1 | |
| %int1_501 = torch.constant.int 1 | |
| %int1_502 = torch.constant.int 1 | |
| %int1_503 = torch.constant.int 1 | |
| %int1_504 = torch.constant.int 1 | |
| %int1_505 = torch.constant.int 1 | |
| %1416 = torch.prim.ListConstruct %int3_498, %int3_499 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1417 = torch.prim.ListConstruct %int1_500, %int1_501, %int1_502, %int1_503 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1418 = torch.prim.ListConstruct %int1_504, %int1_505 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_506 = torch.constant.bool false | |
| %false_507 = torch.constant.bool false | |
| %none_508 = torch.constant.none | |
| %1419 = torch.aten.avg_pool2d %1259, %1416, %1418, %1417, %false_506, %false_507, %none_508 : !torch.vtensor<[32,384,25,25],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,384,25,25],f32> | |
| %1420 = torch.aten.mul.Tensor %1419, %308 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,384,25,25],f32> | |
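| // Branch 4: 3x3 average pooling (stride 1, padding 1) over the same input, followed by an elementwise multiply | |
| // by the constant %308 (presumably a rescaling factor introduced by the pooling lowering); the result is | |
| // requantized below before a 1x1 convolution. | |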
| %int12_509 = torch.constant.int 12 | |
| %1421 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1422 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1423 = torch.aten.quantize_per_tensor %1420, %1421, %1422, %int12_509 : !torch.vtensor<[32,384,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1424 = torch.aten.int_repr %1423 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],si8> | |
| %1425 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1426 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1427 = torch.aten._make_per_tensor_quantized_tensor %1424, %1425, %1426 : !torch.vtensor<[32,384,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1428 = torch.aten.dequantize.self %1427 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_510 = torch.constant.int 12 | |
| %1429 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1430 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1431 = torch.aten.quantize_per_tensor %76, %1429, %1430, %int12_510 : !torch.vtensor<[96,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1432 = torch.aten.int_repr %1431 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],si8> | |
| %1433 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1434 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1435 = torch.aten._make_per_tensor_quantized_tensor %1432, %1433, %1434 : !torch.vtensor<[96,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[96,384,1,1],!torch.qint8> | |
| %1436 = torch.aten.dequantize.self %1435 : !torch.vtensor<[96,384,1,1],!torch.qint8> -> !torch.vtensor<[96,384,1,1],f32> | |
| %int12_511 = torch.constant.int 12 | |
| %1437 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1438 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1439 = torch.aten.quantize_per_tensor %77, %1437, %1438, %int12_511 : !torch.vtensor<[96],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1440 = torch.aten.int_repr %1439 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],si8> | |
| %1441 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1442 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1443 = torch.aten._make_per_tensor_quantized_tensor %1440, %1441, %1442 : !torch.vtensor<[96],si8>, !torch.float, !torch.int -> !torch.vtensor<[96],!torch.qint8> | |
| %1444 = torch.aten.dequantize.self %1443 : !torch.vtensor<[96],!torch.qint8> -> !torch.vtensor<[96],f32> | |
| %int0_512 = torch.constant.int 0 | |
| %int0_513 = torch.constant.int 0 | |
| %int1_514 = torch.constant.int 1 | |
| %int1_515 = torch.constant.int 1 | |
| %int1_516 = torch.constant.int 1 | |
| %int1_517 = torch.constant.int 1 | |
| %int0_518 = torch.constant.int 0 | |
| %1445 = torch.prim.ListConstruct %int0_512, %int0_513 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1446 = torch.prim.ListConstruct %int1_514, %int1_515 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1447 = torch.prim.ListConstruct %int1_516, %int1_517 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1448 = torch.prim.ListConstruct %int0_518, %int0_518 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_519 = torch.constant.bool false | |
| %int1_520 = torch.constant.int 1 | |
| %1449 = torch.aten.convolution %1428, %1436, %1444, %1447, %1445, %1446, %false_519, %1448, %int1_520 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[96,384,1,1],f32>, !torch.vtensor<[96],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,96,25,25],f32> | |
| %1450 = torch.aten.relu %1449 : !torch.vtensor<[32,96,25,25],f32> -> !torch.vtensor<[32,96,25,25],f32> | |
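| // The pooled branch ends with a 1x1 convolution to 96 channels plus ReLU, completing the fourth branch output. | |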
| %1451 = torch.prim.ListConstruct %1281, %1333, %1415, %1450 : (!torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>, !torch.vtensor<[32,96,25,25],f32>) -> !torch.list<vtensor> | |
| %int1_521 = torch.constant.int 1 | |
| %1452 = torch.aten.cat %1451, %int1_521 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,384,25,25],f32> | |
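| // The four 96-channel branch outputs are concatenated along dim 1 into [32,384,25,25]. The shapes match an | |
| // Inception-A-style block of Inception-v4; the block name is an inference from the shapes, not recorded here. | |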
| %int12_522 = torch.constant.int 12 | |
| %1453 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1454 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1455 = torch.aten.quantize_per_tensor %1452, %1453, %1454, %int12_522 : !torch.vtensor<[32,384,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1456 = torch.aten.int_repr %1455 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],si8> | |
| %1457 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1458 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1459 = torch.aten._make_per_tensor_quantized_tensor %1456, %1457, %1458 : !torch.vtensor<[32,384,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,25,25],!torch.qint8> | |
| %1460 = torch.aten.dequantize.self %1459 : !torch.vtensor<[32,384,25,25],!torch.qint8> -> !torch.vtensor<[32,384,25,25],f32> | |
| %int12_523 = torch.constant.int 12 | |
| %1461 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %1462 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1463 = torch.aten.quantize_per_tensor %78, %1461, %1462, %int12_523 : !torch.vtensor<[384,384,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,384,3,3],!torch.qint8> | |
| %1464 = torch.aten.int_repr %1463 : !torch.vtensor<[384,384,3,3],!torch.qint8> -> !torch.vtensor<[384,384,3,3],si8> | |
| %1465 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %1466 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1467 = torch.aten._make_per_tensor_quantized_tensor %1464, %1465, %1466 : !torch.vtensor<[384,384,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,384,3,3],!torch.qint8> | |
| %1468 = torch.aten.dequantize.self %1467 : !torch.vtensor<[384,384,3,3],!torch.qint8> -> !torch.vtensor<[384,384,3,3],f32> | |
| %int12_524 = torch.constant.int 12 | |
| %1469 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1470 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1471 = torch.aten.quantize_per_tensor %79, %1469, %1470, %int12_524 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %1472 = torch.aten.int_repr %1471 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %1473 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1474 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1475 = torch.aten._make_per_tensor_quantized_tensor %1472, %1473, %1474 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %1476 = torch.aten.dequantize.self %1475 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_525 = torch.constant.int 0 | |
| %int0_526 = torch.constant.int 0 | |
| %int1_527 = torch.constant.int 1 | |
| %int1_528 = torch.constant.int 1 | |
| %int2_529 = torch.constant.int 2 | |
| %int2_530 = torch.constant.int 2 | |
| %int0_531 = torch.constant.int 0 | |
| %1477 = torch.prim.ListConstruct %int0_525, %int0_526 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1478 = torch.prim.ListConstruct %int1_527, %int1_528 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1479 = torch.prim.ListConstruct %int2_529, %int2_530 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1480 = torch.prim.ListConstruct %int0_531, %int0_531 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_532 = torch.constant.bool false | |
| %int1_533 = torch.constant.int 1 | |
| %1481 = torch.aten.convolution %1460, %1468, %1476, %1479, %1477, %1478, %false_532, %1480, %int1_533 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[384,384,3,3],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,12,12],f32> | |
| %1482 = torch.aten.relu %1481 : !torch.vtensor<[32,384,12,12],f32> -> !torch.vtensor<[32,384,12,12],f32> | |
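| // Grid-reduction branch 1: 3x3 convolution with stride 2 and no padding, shrinking 25x25 to 12x12 at 384 channels. | |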
| %int12_534 = torch.constant.int 12 | |
| %1483 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1484 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1485 = torch.aten.quantize_per_tensor %80, %1483, %1484, %int12_534 : !torch.vtensor<[192,384,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,384,1,1],!torch.qint8> | |
| %1486 = torch.aten.int_repr %1485 : !torch.vtensor<[192,384,1,1],!torch.qint8> -> !torch.vtensor<[192,384,1,1],si8> | |
| %1487 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1488 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1489 = torch.aten._make_per_tensor_quantized_tensor %1486, %1487, %1488 : !torch.vtensor<[192,384,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,384,1,1],!torch.qint8> | |
| %1490 = torch.aten.dequantize.self %1489 : !torch.vtensor<[192,384,1,1],!torch.qint8> -> !torch.vtensor<[192,384,1,1],f32> | |
| %int12_535 = torch.constant.int 12 | |
| %1491 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1492 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1493 = torch.aten.quantize_per_tensor %81, %1491, %1492, %int12_535 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1494 = torch.aten.int_repr %1493 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %1495 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1496 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1497 = torch.aten._make_per_tensor_quantized_tensor %1494, %1495, %1496 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1498 = torch.aten.dequantize.self %1497 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_536 = torch.constant.int 0 | |
| %int0_537 = torch.constant.int 0 | |
| %int1_538 = torch.constant.int 1 | |
| %int1_539 = torch.constant.int 1 | |
| %int1_540 = torch.constant.int 1 | |
| %int1_541 = torch.constant.int 1 | |
| %int0_542 = torch.constant.int 0 | |
| %1499 = torch.prim.ListConstruct %int0_536, %int0_537 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1500 = torch.prim.ListConstruct %int1_538, %int1_539 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1501 = torch.prim.ListConstruct %int1_540, %int1_541 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1502 = torch.prim.ListConstruct %int0_542, %int0_542 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_543 = torch.constant.bool false | |
| %int1_544 = torch.constant.int 1 | |
| %1503 = torch.aten.convolution %1460, %1490, %1498, %1501, %1499, %1500, %false_543, %1502, %int1_544 : !torch.vtensor<[32,384,25,25],f32>, !torch.vtensor<[192,384,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,25,25],f32> | |
| %1504 = torch.aten.relu %1503 : !torch.vtensor<[32,192,25,25],f32> -> !torch.vtensor<[32,192,25,25],f32> | |
| %int12_545 = torch.constant.int 12 | |
| %1505 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1506 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1507 = torch.aten.quantize_per_tensor %1504, %1505, %1506, %int12_545 : !torch.vtensor<[32,192,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,25,25],!torch.qint8> | |
| %1508 = torch.aten.int_repr %1507 : !torch.vtensor<[32,192,25,25],!torch.qint8> -> !torch.vtensor<[32,192,25,25],si8> | |
| %1509 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1510 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1511 = torch.aten._make_per_tensor_quantized_tensor %1508, %1509, %1510 : !torch.vtensor<[32,192,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,25,25],!torch.qint8> | |
| %1512 = torch.aten.dequantize.self %1511 : !torch.vtensor<[32,192,25,25],!torch.qint8> -> !torch.vtensor<[32,192,25,25],f32> | |
| %int12_546 = torch.constant.int 12 | |
| %1513 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %1514 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1515 = torch.aten.quantize_per_tensor %82, %1513, %1514, %int12_546 : !torch.vtensor<[224,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,3,3],!torch.qint8> | |
| %1516 = torch.aten.int_repr %1515 : !torch.vtensor<[224,192,3,3],!torch.qint8> -> !torch.vtensor<[224,192,3,3],si8> | |
| %1517 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %1518 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1519 = torch.aten._make_per_tensor_quantized_tensor %1516, %1517, %1518 : !torch.vtensor<[224,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,3,3],!torch.qint8> | |
| %1520 = torch.aten.dequantize.self %1519 : !torch.vtensor<[224,192,3,3],!torch.qint8> -> !torch.vtensor<[224,192,3,3],f32> | |
| %int12_547 = torch.constant.int 12 | |
| %1521 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1522 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1523 = torch.aten.quantize_per_tensor %83, %1521, %1522, %int12_547 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1524 = torch.aten.int_repr %1523 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %1525 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1526 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1527 = torch.aten._make_per_tensor_quantized_tensor %1524, %1525, %1526 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1528 = torch.aten.dequantize.self %1527 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int1_548 = torch.constant.int 1 | |
| %int1_549 = torch.constant.int 1 | |
| %int1_550 = torch.constant.int 1 | |
| %int1_551 = torch.constant.int 1 | |
| %int1_552 = torch.constant.int 1 | |
| %int1_553 = torch.constant.int 1 | |
| %int0_554 = torch.constant.int 0 | |
| %1529 = torch.prim.ListConstruct %int1_548, %int1_549 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1530 = torch.prim.ListConstruct %int1_550, %int1_551 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1531 = torch.prim.ListConstruct %int1_552, %int1_553 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1532 = torch.prim.ListConstruct %int0_554, %int0_554 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_555 = torch.constant.bool false | |
| %int1_556 = torch.constant.int 1 | |
| %1533 = torch.aten.convolution %1512, %1520, %1528, %1531, %1529, %1530, %false_555, %1532, %int1_556 : !torch.vtensor<[32,192,25,25],f32>, !torch.vtensor<[224,192,3,3],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,25,25],f32> | |
| %1534 = torch.aten.relu %1533 : !torch.vtensor<[32,224,25,25],f32> -> !torch.vtensor<[32,224,25,25],f32> | |
| %int12_557 = torch.constant.int 12 | |
| %1535 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1536 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1537 = torch.aten.quantize_per_tensor %1534, %1535, %1536, %int12_557 : !torch.vtensor<[32,224,25,25],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,25,25],!torch.qint8> | |
| %1538 = torch.aten.int_repr %1537 : !torch.vtensor<[32,224,25,25],!torch.qint8> -> !torch.vtensor<[32,224,25,25],si8> | |
| %1539 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1540 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1541 = torch.aten._make_per_tensor_quantized_tensor %1538, %1539, %1540 : !torch.vtensor<[32,224,25,25],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,25,25],!torch.qint8> | |
| %1542 = torch.aten.dequantize.self %1541 : !torch.vtensor<[32,224,25,25],!torch.qint8> -> !torch.vtensor<[32,224,25,25],f32> | |
| %int12_558 = torch.constant.int 12 | |
| %1543 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1544 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1545 = torch.aten.quantize_per_tensor %84, %1543, %1544, %int12_558 : !torch.vtensor<[256,224,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,3,3],!torch.qint8> | |
| %1546 = torch.aten.int_repr %1545 : !torch.vtensor<[256,224,3,3],!torch.qint8> -> !torch.vtensor<[256,224,3,3],si8> | |
| %1547 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1548 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1549 = torch.aten._make_per_tensor_quantized_tensor %1546, %1547, %1548 : !torch.vtensor<[256,224,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,3,3],!torch.qint8> | |
| %1550 = torch.aten.dequantize.self %1549 : !torch.vtensor<[256,224,3,3],!torch.qint8> -> !torch.vtensor<[256,224,3,3],f32> | |
| %int12_559 = torch.constant.int 12 | |
| %1551 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1552 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1553 = torch.aten.quantize_per_tensor %85, %1551, %1552, %int12_559 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %1554 = torch.aten.int_repr %1553 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %1555 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1556 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1557 = torch.aten._make_per_tensor_quantized_tensor %1554, %1555, %1556 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %1558 = torch.aten.dequantize.self %1557 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_560 = torch.constant.int 0 | |
| %int0_561 = torch.constant.int 0 | |
| %int1_562 = torch.constant.int 1 | |
| %int1_563 = torch.constant.int 1 | |
| %int2_564 = torch.constant.int 2 | |
| %int2_565 = torch.constant.int 2 | |
| %int0_566 = torch.constant.int 0 | |
| %1559 = torch.prim.ListConstruct %int0_560, %int0_561 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1560 = torch.prim.ListConstruct %int1_562, %int1_563 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1561 = torch.prim.ListConstruct %int2_564, %int2_565 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1562 = torch.prim.ListConstruct %int0_566, %int0_566 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_567 = torch.constant.bool false | |
| %int1_568 = torch.constant.int 1 | |
| %1563 = torch.aten.convolution %1542, %1550, %1558, %1561, %1559, %1560, %false_567, %1562, %int1_568 : !torch.vtensor<[32,224,25,25],f32>, !torch.vtensor<[256,224,3,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %1564 = torch.aten.relu %1563 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
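| // Grid-reduction branch 2: 1x1 conv to 192, 3x3 conv (padding 1) to 224, then a 3x3 conv with stride 2 and no | |
| // padding down to 12x12 at 256 channels, with activation QDQ pairs between the convolutions. | |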
| %int3_569 = torch.constant.int 3 | |
| %int3_570 = torch.constant.int 3 | |
| %1565 = torch.prim.ListConstruct %int3_569, %int3_570 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %int0_571 = torch.constant.int 0 | |
| %int0_572 = torch.constant.int 0 | |
| %1566 = torch.prim.ListConstruct %int0_571, %int0_572 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %int2_573 = torch.constant.int 2 | |
| %int2_574 = torch.constant.int 2 | |
| %1567 = torch.prim.ListConstruct %int2_573, %int2_574 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %int1_575 = torch.constant.int 1 | |
| %int1_576 = torch.constant.int 1 | |
| %1568 = torch.prim.ListConstruct %int1_575, %int1_576 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_577 = torch.constant.bool false | |
| %1569 = torch.aten.max_pool2d %1460, %1565, %1567, %1566, %1568, %false_577 : !torch.vtensor<[32,384,25,25],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[32,384,12,12],f32> | |
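| // Grid-reduction branch 3: 3x3 max pooling with stride 2 on the same [32,384,25,25] input; note it is applied | |
| // directly to the dequantized float tensor, with no QDQ pair of its own. | |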
| %1570 = torch.prim.ListConstruct %1482, %1564, %1569 : (!torch.vtensor<[32,384,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,384,12,12],f32>) -> !torch.list<vtensor> | |
| %int1_578 = torch.constant.int 1 | |
| %1571 = torch.aten.cat %1570, %int1_578 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1024,12,12],f32> | |
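| // The three reduction branches (384 + 256 + 384 channels) are concatenated into [32,1024,12,12], consistent | |
| // with a Reduction-A-style block; as above, the naming is inferred from the shapes only. | |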
| %int12_579 = torch.constant.int 12 | |
| %1572 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1573 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1574 = torch.aten.quantize_per_tensor %1571, %1572, %1573, %int12_579 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %1575 = torch.aten.int_repr %1574 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %1576 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1577 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1578 = torch.aten._make_per_tensor_quantized_tensor %1575, %1576, %1577 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %1579 = torch.aten.dequantize.self %1578 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_580 = torch.constant.int 12 | |
| %1580 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1581 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1582 = torch.aten.quantize_per_tensor %86, %1580, %1581, %int12_580 : !torch.vtensor<[384,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %1583 = torch.aten.int_repr %1582 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],si8> | |
| %1584 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1585 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1586 = torch.aten._make_per_tensor_quantized_tensor %1583, %1584, %1585 : !torch.vtensor<[384,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %1587 = torch.aten.dequantize.self %1586 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],f32> | |
| %int12_581 = torch.constant.int 12 | |
| %1588 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1589 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1590 = torch.aten.quantize_per_tensor %87, %1588, %1589, %int12_581 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %1591 = torch.aten.int_repr %1590 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %1592 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1593 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1594 = torch.aten._make_per_tensor_quantized_tensor %1591, %1592, %1593 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %1595 = torch.aten.dequantize.self %1594 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_582 = torch.constant.int 0 | |
| %int0_583 = torch.constant.int 0 | |
| %int1_584 = torch.constant.int 1 | |
| %int1_585 = torch.constant.int 1 | |
| %int1_586 = torch.constant.int 1 | |
| %int1_587 = torch.constant.int 1 | |
| %int0_588 = torch.constant.int 0 | |
| %1596 = torch.prim.ListConstruct %int0_582, %int0_583 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1597 = torch.prim.ListConstruct %int1_584, %int1_585 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1598 = torch.prim.ListConstruct %int1_586, %int1_587 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1599 = torch.prim.ListConstruct %int0_588, %int0_588 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_589 = torch.constant.bool false | |
| %int1_590 = torch.constant.int 1 | |
| %1600 = torch.aten.convolution %1579, %1587, %1595, %1598, %1596, %1597, %false_589, %1599, %int1_590 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[384,1024,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,12,12],f32> | |
| %1601 = torch.aten.relu %1600 : !torch.vtensor<[32,384,12,12],f32> -> !torch.vtensor<[32,384,12,12],f32> | |
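| // Next block, branch 1: 1x1 convolution of the 1024-channel map down to 384 channels, plus ReLU. | |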
| %int12_591 = torch.constant.int 12 | |
| %1602 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1603 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1604 = torch.aten.quantize_per_tensor %88, %1602, %1603, %int12_591 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %1605 = torch.aten.int_repr %1604 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %1606 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1607 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1608 = torch.aten._make_per_tensor_quantized_tensor %1605, %1606, %1607 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %1609 = torch.aten.dequantize.self %1608 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_592 = torch.constant.int 12 | |
| %1610 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1611 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1612 = torch.aten.quantize_per_tensor %89, %1610, %1611, %int12_592 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1613 = torch.aten.int_repr %1612 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %1614 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1615 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1616 = torch.aten._make_per_tensor_quantized_tensor %1613, %1614, %1615 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1617 = torch.aten.dequantize.self %1616 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_593 = torch.constant.int 0 | |
| %int0_594 = torch.constant.int 0 | |
| %int1_595 = torch.constant.int 1 | |
| %int1_596 = torch.constant.int 1 | |
| %int1_597 = torch.constant.int 1 | |
| %int1_598 = torch.constant.int 1 | |
| %int0_599 = torch.constant.int 0 | |
| %1618 = torch.prim.ListConstruct %int0_593, %int0_594 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1619 = torch.prim.ListConstruct %int1_595, %int1_596 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1620 = torch.prim.ListConstruct %int1_597, %int1_598 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1621 = torch.prim.ListConstruct %int0_599, %int0_599 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_600 = torch.constant.bool false | |
| %int1_601 = torch.constant.int 1 | |
| %1622 = torch.aten.convolution %1579, %1609, %1617, %1620, %1618, %1619, %false_600, %1621, %int1_601 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %1623 = torch.aten.relu %1622 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_602 = torch.constant.int 12 | |
| %1624 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1625 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1626 = torch.aten.quantize_per_tensor %1623, %1624, %1625, %int12_602 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %1627 = torch.aten.int_repr %1626 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %1628 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1629 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1630 = torch.aten._make_per_tensor_quantized_tensor %1627, %1628, %1629 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %1631 = torch.aten.dequantize.self %1630 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_603 = torch.constant.int 12 | |
| %1632 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1633 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1634 = torch.aten.quantize_per_tensor %90, %1632, %1633, %int12_603 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %1635 = torch.aten.int_repr %1634 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %1636 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1637 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1638 = torch.aten._make_per_tensor_quantized_tensor %1635, %1636, %1637 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %1639 = torch.aten.dequantize.self %1638 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_604 = torch.constant.int 12 | |
| %1640 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1641 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1642 = torch.aten.quantize_per_tensor %91, %1640, %1641, %int12_604 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1643 = torch.aten.int_repr %1642 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %1644 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1645 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1646 = torch.aten._make_per_tensor_quantized_tensor %1643, %1644, %1645 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1647 = torch.aten.dequantize.self %1646 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_605 = torch.constant.int 0 | |
| %int3_606 = torch.constant.int 3 | |
| %int1_607 = torch.constant.int 1 | |
| %int1_608 = torch.constant.int 1 | |
| %int1_609 = torch.constant.int 1 | |
| %int1_610 = torch.constant.int 1 | |
| %int0_611 = torch.constant.int 0 | |
| %1648 = torch.prim.ListConstruct %int0_605, %int3_606 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1649 = torch.prim.ListConstruct %int1_607, %int1_608 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1650 = torch.prim.ListConstruct %int1_609, %int1_610 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1651 = torch.prim.ListConstruct %int0_611, %int0_611 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_612 = torch.constant.bool false | |
| %int1_613 = torch.constant.int 1 | |
| %1652 = torch.aten.convolution %1631, %1639, %1647, %1650, %1648, %1649, %false_612, %1651, %int1_613 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %1653 = torch.aten.relu %1652 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_614 = torch.constant.int 12 | |
| %1654 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1655 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1656 = torch.aten.quantize_per_tensor %1653, %1654, %1655, %int12_614 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %1657 = torch.aten.int_repr %1656 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %1658 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1659 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1660 = torch.aten._make_per_tensor_quantized_tensor %1657, %1658, %1659 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %1661 = torch.aten.dequantize.self %1660 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_615 = torch.constant.int 12 | |
| %1662 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1663 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1664 = torch.aten.quantize_per_tensor %92, %1662, %1663, %int12_615 : !torch.vtensor<[256,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %1665 = torch.aten.int_repr %1664 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],si8> | |
| %1666 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1667 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1668 = torch.aten._make_per_tensor_quantized_tensor %1665, %1666, %1667 : !torch.vtensor<[256,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %1669 = torch.aten.dequantize.self %1668 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],f32> | |
| %int12_616 = torch.constant.int 12 | |
| %1670 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1671 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1672 = torch.aten.quantize_per_tensor %93, %1670, %1671, %int12_616 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %1673 = torch.aten.int_repr %1672 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %1674 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1675 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1676 = torch.aten._make_per_tensor_quantized_tensor %1673, %1674, %1675 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %1677 = torch.aten.dequantize.self %1676 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int3_617 = torch.constant.int 3 | |
| %int0_618 = torch.constant.int 0 | |
| %int1_619 = torch.constant.int 1 | |
| %int1_620 = torch.constant.int 1 | |
| %int1_621 = torch.constant.int 1 | |
| %int1_622 = torch.constant.int 1 | |
| %int0_623 = torch.constant.int 0 | |
| %1678 = torch.prim.ListConstruct %int3_617, %int0_618 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1679 = torch.prim.ListConstruct %int1_619, %int1_620 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1680 = torch.prim.ListConstruct %int1_621, %int1_622 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1681 = torch.prim.ListConstruct %int0_623, %int0_623 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_624 = torch.constant.bool false | |
| %int1_625 = torch.constant.int 1 | |
| %1682 = torch.aten.convolution %1661, %1669, %1677, %1680, %1678, %1679, %false_624, %1681, %int1_625 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,7,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %1683 = torch.aten.relu %1682 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
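| // Branch with factorized 7x7 convolutions: 1x1 conv to 192, then a 1x7 conv (padding 0,3) to 224 and a 7x1 conv | |
| // (padding 3,0) to 256, each keeping the 12x12 spatial size, with ReLU and activation QDQ between stages. | |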
| %int12_626 = torch.constant.int 12 | |
| %1684 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %1685 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1686 = torch.aten.quantize_per_tensor %94, %1684, %1685, %int12_626 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %1687 = torch.aten.int_repr %1686 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %1688 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %1689 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1690 = torch.aten._make_per_tensor_quantized_tensor %1687, %1688, %1689 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %1691 = torch.aten.dequantize.self %1690 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_627 = torch.constant.int 12 | |
| %1692 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1693 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1694 = torch.aten.quantize_per_tensor %95, %1692, %1693, %int12_627 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1695 = torch.aten.int_repr %1694 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %1696 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1697 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1698 = torch.aten._make_per_tensor_quantized_tensor %1695, %1696, %1697 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1699 = torch.aten.dequantize.self %1698 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_628 = torch.constant.int 0 | |
| %int0_629 = torch.constant.int 0 | |
| %int1_630 = torch.constant.int 1 | |
| %int1_631 = torch.constant.int 1 | |
| %int1_632 = torch.constant.int 1 | |
| %int1_633 = torch.constant.int 1 | |
| %int0_634 = torch.constant.int 0 | |
| %1700 = torch.prim.ListConstruct %int0_628, %int0_629 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1701 = torch.prim.ListConstruct %int1_630, %int1_631 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1702 = torch.prim.ListConstruct %int1_632, %int1_633 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1703 = torch.prim.ListConstruct %int0_634, %int0_634 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_635 = torch.constant.bool false | |
| %int1_636 = torch.constant.int 1 | |
| %1704 = torch.aten.convolution %1579, %1691, %1699, %1702, %1700, %1701, %false_635, %1703, %int1_636 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %1705 = torch.aten.relu %1704 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_637 = torch.constant.int 12 | |
| %1706 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1707 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1708 = torch.aten.quantize_per_tensor %1705, %1706, %1707, %int12_637 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %1709 = torch.aten.int_repr %1708 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %1710 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1711 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1712 = torch.aten._make_per_tensor_quantized_tensor %1709, %1710, %1711 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %1713 = torch.aten.dequantize.self %1712 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_638 = torch.constant.int 12 | |
| %1714 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1715 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1716 = torch.aten.quantize_per_tensor %96, %1714, %1715, %int12_638 : !torch.vtensor<[192,192,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %1717 = torch.aten.int_repr %1716 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],si8> | |
| %1718 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1719 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1720 = torch.aten._make_per_tensor_quantized_tensor %1717, %1718, %1719 : !torch.vtensor<[192,192,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %1721 = torch.aten.dequantize.self %1720 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],f32> | |
| %int12_639 = torch.constant.int 12 | |
| %1722 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1723 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1724 = torch.aten.quantize_per_tensor %97, %1722, %1723, %int12_639 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1725 = torch.aten.int_repr %1724 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %1726 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1727 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1728 = torch.aten._make_per_tensor_quantized_tensor %1725, %1726, %1727 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1729 = torch.aten.dequantize.self %1728 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int3_640 = torch.constant.int 3 | |
| %int0_641 = torch.constant.int 0 | |
| %int1_642 = torch.constant.int 1 | |
| %int1_643 = torch.constant.int 1 | |
| %int1_644 = torch.constant.int 1 | |
| %int1_645 = torch.constant.int 1 | |
| %int0_646 = torch.constant.int 0 | |
| %1730 = torch.prim.ListConstruct %int3_640, %int0_641 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1731 = torch.prim.ListConstruct %int1_642, %int1_643 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1732 = torch.prim.ListConstruct %int1_644, %int1_645 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1733 = torch.prim.ListConstruct %int0_646, %int0_646 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_647 = torch.constant.bool false | |
| %int1_648 = torch.constant.int 1 | |
| %1734 = torch.aten.convolution %1713, %1721, %1729, %1732, %1730, %1731, %false_647, %1733, %int1_648 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[192,192,7,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %1735 = torch.aten.relu %1734 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_649 = torch.constant.int 12 | |
| %1736 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1737 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1738 = torch.aten.quantize_per_tensor %1735, %1736, %1737, %int12_649 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %1739 = torch.aten.int_repr %1738 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %1740 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1741 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1742 = torch.aten._make_per_tensor_quantized_tensor %1739, %1740, %1741 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %1743 = torch.aten.dequantize.self %1742 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_650 = torch.constant.int 12 | |
| %1744 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1745 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1746 = torch.aten.quantize_per_tensor %98, %1744, %1745, %int12_650 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %1747 = torch.aten.int_repr %1746 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %1748 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1749 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1750 = torch.aten._make_per_tensor_quantized_tensor %1747, %1748, %1749 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %1751 = torch.aten.dequantize.self %1750 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_651 = torch.constant.int 12 | |
| %1752 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1753 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1754 = torch.aten.quantize_per_tensor %99, %1752, %1753, %int12_651 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1755 = torch.aten.int_repr %1754 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %1756 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1757 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1758 = torch.aten._make_per_tensor_quantized_tensor %1755, %1756, %1757 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1759 = torch.aten.dequantize.self %1758 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_652 = torch.constant.int 0 | |
| %int3_653 = torch.constant.int 3 | |
| %int1_654 = torch.constant.int 1 | |
| %int1_655 = torch.constant.int 1 | |
| %int1_656 = torch.constant.int 1 | |
| %int1_657 = torch.constant.int 1 | |
| %int0_658 = torch.constant.int 0 | |
| %1760 = torch.prim.ListConstruct %int0_652, %int3_653 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1761 = torch.prim.ListConstruct %int1_654, %int1_655 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1762 = torch.prim.ListConstruct %int1_656, %int1_657 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1763 = torch.prim.ListConstruct %int0_658, %int0_658 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_659 = torch.constant.bool false | |
| %int1_660 = torch.constant.int 1 | |
| %1764 = torch.aten.convolution %1743, %1751, %1759, %1762, %1760, %1761, %false_659, %1763, %int1_660 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %1765 = torch.aten.relu %1764 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_661 = torch.constant.int 12 | |
| %1766 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1767 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1768 = torch.aten.quantize_per_tensor %1765, %1766, %1767, %int12_661 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %1769 = torch.aten.int_repr %1768 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %1770 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1771 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1772 = torch.aten._make_per_tensor_quantized_tensor %1769, %1770, %1771 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %1773 = torch.aten.dequantize.self %1772 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_662 = torch.constant.int 12 | |
| %1774 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1775 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1776 = torch.aten.quantize_per_tensor %100, %1774, %1775, %int12_662 : !torch.vtensor<[224,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %1777 = torch.aten.int_repr %1776 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],si8> | |
| %1778 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1779 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1780 = torch.aten._make_per_tensor_quantized_tensor %1777, %1778, %1779 : !torch.vtensor<[224,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %1781 = torch.aten.dequantize.self %1780 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],f32> | |
| %int12_663 = torch.constant.int 12 | |
| %1782 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1783 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1784 = torch.aten.quantize_per_tensor %101, %1782, %1783, %int12_663 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1785 = torch.aten.int_repr %1784 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %1786 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1787 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1788 = torch.aten._make_per_tensor_quantized_tensor %1785, %1786, %1787 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1789 = torch.aten.dequantize.self %1788 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int3_664 = torch.constant.int 3 | |
| %int0_665 = torch.constant.int 0 | |
| %int1_666 = torch.constant.int 1 | |
| %int1_667 = torch.constant.int 1 | |
| %int1_668 = torch.constant.int 1 | |
| %int1_669 = torch.constant.int 1 | |
| %int0_670 = torch.constant.int 0 | |
| %1790 = torch.prim.ListConstruct %int3_664, %int0_665 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1791 = torch.prim.ListConstruct %int1_666, %int1_667 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1792 = torch.prim.ListConstruct %int1_668, %int1_669 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1793 = torch.prim.ListConstruct %int0_670, %int0_670 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_671 = torch.constant.bool false | |
| %int1_672 = torch.constant.int 1 | |
| %1794 = torch.aten.convolution %1773, %1781, %1789, %1792, %1790, %1791, %false_671, %1793, %int1_672 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[224,224,7,1],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %1795 = torch.aten.relu %1794 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_673 = torch.constant.int 12 | |
| %1796 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1797 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1798 = torch.aten.quantize_per_tensor %1795, %1796, %1797, %int12_673 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %1799 = torch.aten.int_repr %1798 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %1800 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1801 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1802 = torch.aten._make_per_tensor_quantized_tensor %1799, %1800, %1801 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %1803 = torch.aten.dequantize.self %1802 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_674 = torch.constant.int 12 | |
| %1804 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1805 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1806 = torch.aten.quantize_per_tensor %102, %1804, %1805, %int12_674 : !torch.vtensor<[256,224,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %1807 = torch.aten.int_repr %1806 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],si8> | |
| %1808 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1809 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1810 = torch.aten._make_per_tensor_quantized_tensor %1807, %1808, %1809 : !torch.vtensor<[256,224,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %1811 = torch.aten.dequantize.self %1810 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],f32> | |
| %int12_675 = torch.constant.int 12 | |
| %1812 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1813 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1814 = torch.aten.quantize_per_tensor %103, %1812, %1813, %int12_675 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %1815 = torch.aten.int_repr %1814 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %1816 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1817 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1818 = torch.aten._make_per_tensor_quantized_tensor %1815, %1816, %1817 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %1819 = torch.aten.dequantize.self %1818 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_676 = torch.constant.int 0 | |
| %int3_677 = torch.constant.int 3 | |
| %int1_678 = torch.constant.int 1 | |
| %int1_679 = torch.constant.int 1 | |
| %int1_680 = torch.constant.int 1 | |
| %int1_681 = torch.constant.int 1 | |
| %int0_682 = torch.constant.int 0 | |
| %1820 = torch.prim.ListConstruct %int0_676, %int3_677 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1821 = torch.prim.ListConstruct %int1_678, %int1_679 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1822 = torch.prim.ListConstruct %int1_680, %int1_681 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1823 = torch.prim.ListConstruct %int0_682, %int0_682 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_683 = torch.constant.bool false | |
| %int1_684 = torch.constant.int 1 | |
| %1824 = torch.aten.convolution %1803, %1811, %1819, %1822, %1820, %1821, %false_683, %1823, %int1_684 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,1,7],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %1825 = torch.aten.relu %1824 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
| %int3_685 = torch.constant.int 3 | |
| %int3_686 = torch.constant.int 3 | |
| %int1_687 = torch.constant.int 1 | |
| %int1_688 = torch.constant.int 1 | |
| %int1_689 = torch.constant.int 1 | |
| %int1_690 = torch.constant.int 1 | |
| %int1_691 = torch.constant.int 1 | |
| %int1_692 = torch.constant.int 1 | |
| %1826 = torch.prim.ListConstruct %int3_685, %int3_686 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1827 = torch.prim.ListConstruct %int1_687, %int1_688, %int1_689, %int1_690 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1828 = torch.prim.ListConstruct %int1_691, %int1_692 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_693 = torch.constant.bool false | |
| %false_694 = torch.constant.bool false | |
| %none_695 = torch.constant.none | |
| %1829 = torch.aten.avg_pool2d %1579, %1826, %1828, %1827, %false_693, %false_694, %none_695 : !torch.vtensor<[32,1024,12,12],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1024,12,12],f32> | |
| %1830 = torch.aten.mul.Tensor %1829, %308 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_696 = torch.constant.int 12 | |
| %1831 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1832 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1833 = torch.aten.quantize_per_tensor %1830, %1831, %1832, %int12_696 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %1834 = torch.aten.int_repr %1833 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %1835 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1836 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1837 = torch.aten._make_per_tensor_quantized_tensor %1834, %1835, %1836 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %1838 = torch.aten.dequantize.self %1837 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_697 = torch.constant.int 12 | |
| %1839 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1840 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1841 = torch.aten.quantize_per_tensor %104, %1839, %1840, %int12_697 : !torch.vtensor<[128,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %1842 = torch.aten.int_repr %1841 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],si8> | |
| %1843 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1844 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1845 = torch.aten._make_per_tensor_quantized_tensor %1842, %1843, %1844 : !torch.vtensor<[128,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %1846 = torch.aten.dequantize.self %1845 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],f32> | |
| %int12_698 = torch.constant.int 12 | |
| %1847 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1848 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1849 = torch.aten.quantize_per_tensor %105, %1847, %1848, %int12_698 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %1850 = torch.aten.int_repr %1849 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8> | |
| %1851 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1852 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1853 = torch.aten._make_per_tensor_quantized_tensor %1850, %1851, %1852 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %1854 = torch.aten.dequantize.self %1853 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32> | |
| %int0_699 = torch.constant.int 0 | |
| %int0_700 = torch.constant.int 0 | |
| %int1_701 = torch.constant.int 1 | |
| %int1_702 = torch.constant.int 1 | |
| %int1_703 = torch.constant.int 1 | |
| %int1_704 = torch.constant.int 1 | |
| %int0_705 = torch.constant.int 0 | |
| %1855 = torch.prim.ListConstruct %int0_699, %int0_700 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1856 = torch.prim.ListConstruct %int1_701, %int1_702 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1857 = torch.prim.ListConstruct %int1_703, %int1_704 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1858 = torch.prim.ListConstruct %int0_705, %int0_705 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_706 = torch.constant.bool false | |
| %int1_707 = torch.constant.int 1 | |
| %1859 = torch.aten.convolution %1838, %1846, %1854, %1857, %1855, %1856, %false_706, %1858, %int1_707 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[128,1024,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,12,12],f32> | |
| %1860 = torch.aten.relu %1859 : !torch.vtensor<[32,128,12,12],f32> -> !torch.vtensor<[32,128,12,12],f32> | |
| %1861 = torch.prim.ListConstruct %1601, %1683, %1825, %1860 : (!torch.vtensor<[32,384,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,128,12,12],f32>) -> !torch.list<vtensor> | |
| %int1_708 = torch.constant.int 1 | |
| %1862 = torch.aten.cat %1861, %int1_708 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_709 = torch.constant.int 12 | |
| %1863 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1864 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1865 = torch.aten.quantize_per_tensor %1862, %1863, %1864, %int12_709 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %1866 = torch.aten.int_repr %1865 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %1867 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %1868 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1869 = torch.aten._make_per_tensor_quantized_tensor %1866, %1867, %1868 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %1870 = torch.aten.dequantize.self %1869 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_710 = torch.constant.int 12 | |
| %1871 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1872 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1873 = torch.aten.quantize_per_tensor %106, %1871, %1872, %int12_710 : !torch.vtensor<[384,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %1874 = torch.aten.int_repr %1873 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],si8> | |
| %1875 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1876 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1877 = torch.aten._make_per_tensor_quantized_tensor %1874, %1875, %1876 : !torch.vtensor<[384,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %1878 = torch.aten.dequantize.self %1877 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],f32> | |
| %int12_711 = torch.constant.int 12 | |
| %1879 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1880 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1881 = torch.aten.quantize_per_tensor %107, %1879, %1880, %int12_711 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %1882 = torch.aten.int_repr %1881 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %1883 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1884 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1885 = torch.aten._make_per_tensor_quantized_tensor %1882, %1883, %1884 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %1886 = torch.aten.dequantize.self %1885 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_712 = torch.constant.int 0 | |
| %int0_713 = torch.constant.int 0 | |
| %int1_714 = torch.constant.int 1 | |
| %int1_715 = torch.constant.int 1 | |
| %int1_716 = torch.constant.int 1 | |
| %int1_717 = torch.constant.int 1 | |
| %int0_718 = torch.constant.int 0 | |
| %1887 = torch.prim.ListConstruct %int0_712, %int0_713 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1888 = torch.prim.ListConstruct %int1_714, %int1_715 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1889 = torch.prim.ListConstruct %int1_716, %int1_717 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1890 = torch.prim.ListConstruct %int0_718, %int0_718 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_719 = torch.constant.bool false | |
| %int1_720 = torch.constant.int 1 | |
| %1891 = torch.aten.convolution %1870, %1878, %1886, %1889, %1887, %1888, %false_719, %1890, %int1_720 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[384,1024,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,12,12],f32> | |
| %1892 = torch.aten.relu %1891 : !torch.vtensor<[32,384,12,12],f32> -> !torch.vtensor<[32,384,12,12],f32> | |
| %int12_721 = torch.constant.int 12 | |
| %1893 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1894 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1895 = torch.aten.quantize_per_tensor %108, %1893, %1894, %int12_721 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %1896 = torch.aten.int_repr %1895 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %1897 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1898 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1899 = torch.aten._make_per_tensor_quantized_tensor %1896, %1897, %1898 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %1900 = torch.aten.dequantize.self %1899 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_722 = torch.constant.int 12 | |
| %1901 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1902 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1903 = torch.aten.quantize_per_tensor %109, %1901, %1902, %int12_722 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1904 = torch.aten.int_repr %1903 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %1905 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %1906 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1907 = torch.aten._make_per_tensor_quantized_tensor %1904, %1905, %1906 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1908 = torch.aten.dequantize.self %1907 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_723 = torch.constant.int 0 | |
| %int0_724 = torch.constant.int 0 | |
| %int1_725 = torch.constant.int 1 | |
| %int1_726 = torch.constant.int 1 | |
| %int1_727 = torch.constant.int 1 | |
| %int1_728 = torch.constant.int 1 | |
| %int0_729 = torch.constant.int 0 | |
| %1909 = torch.prim.ListConstruct %int0_723, %int0_724 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1910 = torch.prim.ListConstruct %int1_725, %int1_726 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1911 = torch.prim.ListConstruct %int1_727, %int1_728 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1912 = torch.prim.ListConstruct %int0_729, %int0_729 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_730 = torch.constant.bool false | |
| %int1_731 = torch.constant.int 1 | |
| %1913 = torch.aten.convolution %1870, %1900, %1908, %1911, %1909, %1910, %false_730, %1912, %int1_731 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %1914 = torch.aten.relu %1913 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_732 = torch.constant.int 12 | |
| %1915 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1916 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1917 = torch.aten.quantize_per_tensor %1914, %1915, %1916, %int12_732 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %1918 = torch.aten.int_repr %1917 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %1919 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1920 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1921 = torch.aten._make_per_tensor_quantized_tensor %1918, %1919, %1920 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %1922 = torch.aten.dequantize.self %1921 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_733 = torch.constant.int 12 | |
| %1923 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1924 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1925 = torch.aten.quantize_per_tensor %110, %1923, %1924, %int12_733 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %1926 = torch.aten.int_repr %1925 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %1927 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1928 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1929 = torch.aten._make_per_tensor_quantized_tensor %1926, %1927, %1928 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %1930 = torch.aten.dequantize.self %1929 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_734 = torch.constant.int 12 | |
| %1931 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1932 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1933 = torch.aten.quantize_per_tensor %111, %1931, %1932, %int12_734 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1934 = torch.aten.int_repr %1933 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %1935 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1936 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1937 = torch.aten._make_per_tensor_quantized_tensor %1934, %1935, %1936 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %1938 = torch.aten.dequantize.self %1937 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_735 = torch.constant.int 0 | |
| %int3_736 = torch.constant.int 3 | |
| %int1_737 = torch.constant.int 1 | |
| %int1_738 = torch.constant.int 1 | |
| %int1_739 = torch.constant.int 1 | |
| %int1_740 = torch.constant.int 1 | |
| %int0_741 = torch.constant.int 0 | |
| %1939 = torch.prim.ListConstruct %int0_735, %int3_736 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1940 = torch.prim.ListConstruct %int1_737, %int1_738 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1941 = torch.prim.ListConstruct %int1_739, %int1_740 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1942 = torch.prim.ListConstruct %int0_741, %int0_741 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_742 = torch.constant.bool false | |
| %int1_743 = torch.constant.int 1 | |
| %1943 = torch.aten.convolution %1922, %1930, %1938, %1941, %1939, %1940, %false_742, %1942, %int1_743 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %1944 = torch.aten.relu %1943 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_744 = torch.constant.int 12 | |
| %1945 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1946 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1947 = torch.aten.quantize_per_tensor %1944, %1945, %1946, %int12_744 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %1948 = torch.aten.int_repr %1947 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %1949 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1950 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1951 = torch.aten._make_per_tensor_quantized_tensor %1948, %1949, %1950 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %1952 = torch.aten.dequantize.self %1951 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_745 = torch.constant.int 12 | |
| %1953 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %1954 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1955 = torch.aten.quantize_per_tensor %112, %1953, %1954, %int12_745 : !torch.vtensor<[256,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %1956 = torch.aten.int_repr %1955 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],si8> | |
| %1957 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %1958 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1959 = torch.aten._make_per_tensor_quantized_tensor %1956, %1957, %1958 : !torch.vtensor<[256,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %1960 = torch.aten.dequantize.self %1959 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],f32> | |
| %int12_746 = torch.constant.int 12 | |
| %1961 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1962 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1963 = torch.aten.quantize_per_tensor %113, %1961, %1962, %int12_746 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %1964 = torch.aten.int_repr %1963 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %1965 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1966 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1967 = torch.aten._make_per_tensor_quantized_tensor %1964, %1965, %1966 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %1968 = torch.aten.dequantize.self %1967 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int3_747 = torch.constant.int 3 | |
| %int0_748 = torch.constant.int 0 | |
| %int1_749 = torch.constant.int 1 | |
| %int1_750 = torch.constant.int 1 | |
| %int1_751 = torch.constant.int 1 | |
| %int1_752 = torch.constant.int 1 | |
| %int0_753 = torch.constant.int 0 | |
| %1969 = torch.prim.ListConstruct %int3_747, %int0_748 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1970 = torch.prim.ListConstruct %int1_749, %int1_750 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1971 = torch.prim.ListConstruct %int1_751, %int1_752 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1972 = torch.prim.ListConstruct %int0_753, %int0_753 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_754 = torch.constant.bool false | |
| %int1_755 = torch.constant.int 1 | |
| %1973 = torch.aten.convolution %1952, %1960, %1968, %1971, %1969, %1970, %false_754, %1972, %int1_755 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,7,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %1974 = torch.aten.relu %1973 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
| %int12_756 = torch.constant.int 12 | |
| %1975 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1976 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1977 = torch.aten.quantize_per_tensor %114, %1975, %1976, %int12_756 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %1978 = torch.aten.int_repr %1977 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %1979 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %1980 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1981 = torch.aten._make_per_tensor_quantized_tensor %1978, %1979, %1980 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %1982 = torch.aten.dequantize.self %1981 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_757 = torch.constant.int 12 | |
| %1983 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1984 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1985 = torch.aten.quantize_per_tensor %115, %1983, %1984, %int12_757 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1986 = torch.aten.int_repr %1985 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %1987 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %1988 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1989 = torch.aten._make_per_tensor_quantized_tensor %1986, %1987, %1988 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %1990 = torch.aten.dequantize.self %1989 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_758 = torch.constant.int 0 | |
| %int0_759 = torch.constant.int 0 | |
| %int1_760 = torch.constant.int 1 | |
| %int1_761 = torch.constant.int 1 | |
| %int1_762 = torch.constant.int 1 | |
| %int1_763 = torch.constant.int 1 | |
| %int0_764 = torch.constant.int 0 | |
| %1991 = torch.prim.ListConstruct %int0_758, %int0_759 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1992 = torch.prim.ListConstruct %int1_760, %int1_761 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1993 = torch.prim.ListConstruct %int1_762, %int1_763 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %1994 = torch.prim.ListConstruct %int0_764, %int0_764 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_765 = torch.constant.bool false | |
| %int1_766 = torch.constant.int 1 | |
| %1995 = torch.aten.convolution %1870, %1982, %1990, %1993, %1991, %1992, %false_765, %1994, %int1_766 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %1996 = torch.aten.relu %1995 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_767 = torch.constant.int 12 | |
| %1997 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %1998 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %1999 = torch.aten.quantize_per_tensor %1996, %1997, %1998, %int12_767 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2000 = torch.aten.int_repr %1999 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2001 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2002 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2003 = torch.aten._make_per_tensor_quantized_tensor %2000, %2001, %2002 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2004 = torch.aten.dequantize.self %2003 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_768 = torch.constant.int 12 | |
| %2005 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2006 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2007 = torch.aten.quantize_per_tensor %116, %2005, %2006, %int12_768 : !torch.vtensor<[192,192,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %2008 = torch.aten.int_repr %2007 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],si8> | |
| %2009 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2010 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2011 = torch.aten._make_per_tensor_quantized_tensor %2008, %2009, %2010 : !torch.vtensor<[192,192,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %2012 = torch.aten.dequantize.self %2011 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],f32> | |
| %int12_769 = torch.constant.int 12 | |
| %2013 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2014 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2015 = torch.aten.quantize_per_tensor %117, %2013, %2014, %int12_769 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2016 = torch.aten.int_repr %2015 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2017 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2018 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2019 = torch.aten._make_per_tensor_quantized_tensor %2016, %2017, %2018 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2020 = torch.aten.dequantize.self %2019 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int3_770 = torch.constant.int 3 | |
| %int0_771 = torch.constant.int 0 | |
| %int1_772 = torch.constant.int 1 | |
| %int1_773 = torch.constant.int 1 | |
| %int1_774 = torch.constant.int 1 | |
| %int1_775 = torch.constant.int 1 | |
| %int0_776 = torch.constant.int 0 | |
| %2021 = torch.prim.ListConstruct %int3_770, %int0_771 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2022 = torch.prim.ListConstruct %int1_772, %int1_773 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2023 = torch.prim.ListConstruct %int1_774, %int1_775 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2024 = torch.prim.ListConstruct %int0_776, %int0_776 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_777 = torch.constant.bool false | |
| %int1_778 = torch.constant.int 1 | |
| %2025 = torch.aten.convolution %2004, %2012, %2020, %2023, %2021, %2022, %false_777, %2024, %int1_778 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[192,192,7,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2026 = torch.aten.relu %2025 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_779 = torch.constant.int 12 | |
| %2027 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2028 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2029 = torch.aten.quantize_per_tensor %2026, %2027, %2028, %int12_779 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2030 = torch.aten.int_repr %2029 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2031 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2032 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2033 = torch.aten._make_per_tensor_quantized_tensor %2030, %2031, %2032 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2034 = torch.aten.dequantize.self %2033 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_780 = torch.constant.int 12 | |
| %2035 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2036 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2037 = torch.aten.quantize_per_tensor %118, %2035, %2036, %int12_780 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2038 = torch.aten.int_repr %2037 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %2039 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2040 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2041 = torch.aten._make_per_tensor_quantized_tensor %2038, %2039, %2040 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2042 = torch.aten.dequantize.self %2041 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_781 = torch.constant.int 12 | |
| %2043 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2044 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2045 = torch.aten.quantize_per_tensor %119, %2043, %2044, %int12_781 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2046 = torch.aten.int_repr %2045 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2047 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2048 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2049 = torch.aten._make_per_tensor_quantized_tensor %2046, %2047, %2048 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2050 = torch.aten.dequantize.self %2049 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_782 = torch.constant.int 0 | |
| %int3_783 = torch.constant.int 3 | |
| %int1_784 = torch.constant.int 1 | |
| %int1_785 = torch.constant.int 1 | |
| %int1_786 = torch.constant.int 1 | |
| %int1_787 = torch.constant.int 1 | |
| %int0_788 = torch.constant.int 0 | |
| %2051 = torch.prim.ListConstruct %int0_782, %int3_783 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2052 = torch.prim.ListConstruct %int1_784, %int1_785 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2053 = torch.prim.ListConstruct %int1_786, %int1_787 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2054 = torch.prim.ListConstruct %int0_788, %int0_788 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_789 = torch.constant.bool false | |
| %int1_790 = torch.constant.int 1 | |
| %2055 = torch.aten.convolution %2034, %2042, %2050, %2053, %2051, %2052, %false_789, %2054, %int1_790 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2056 = torch.aten.relu %2055 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_791 = torch.constant.int 12 | |
| %2057 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2058 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2059 = torch.aten.quantize_per_tensor %2056, %2057, %2058, %int12_791 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2060 = torch.aten.int_repr %2059 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2061 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2062 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2063 = torch.aten._make_per_tensor_quantized_tensor %2060, %2061, %2062 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2064 = torch.aten.dequantize.self %2063 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_792 = torch.constant.int 12 | |
| %2065 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2066 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2067 = torch.aten.quantize_per_tensor %120, %2065, %2066, %int12_792 : !torch.vtensor<[224,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %2068 = torch.aten.int_repr %2067 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],si8> | |
| %2069 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2070 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2071 = torch.aten._make_per_tensor_quantized_tensor %2068, %2069, %2070 : !torch.vtensor<[224,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %2072 = torch.aten.dequantize.self %2071 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],f32> | |
| %int12_793 = torch.constant.int 12 | |
| %2073 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2074 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2075 = torch.aten.quantize_per_tensor %121, %2073, %2074, %int12_793 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2076 = torch.aten.int_repr %2075 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2077 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2078 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2079 = torch.aten._make_per_tensor_quantized_tensor %2076, %2077, %2078 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2080 = torch.aten.dequantize.self %2079 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int3_794 = torch.constant.int 3 | |
| %int0_795 = torch.constant.int 0 | |
| %int1_796 = torch.constant.int 1 | |
| %int1_797 = torch.constant.int 1 | |
| %int1_798 = torch.constant.int 1 | |
| %int1_799 = torch.constant.int 1 | |
| %int0_800 = torch.constant.int 0 | |
| %2081 = torch.prim.ListConstruct %int3_794, %int0_795 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2082 = torch.prim.ListConstruct %int1_796, %int1_797 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2083 = torch.prim.ListConstruct %int1_798, %int1_799 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2084 = torch.prim.ListConstruct %int0_800, %int0_800 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_801 = torch.constant.bool false | |
| %int1_802 = torch.constant.int 1 | |
| %2085 = torch.aten.convolution %2064, %2072, %2080, %2083, %2081, %2082, %false_801, %2084, %int1_802 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[224,224,7,1],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2086 = torch.aten.relu %2085 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_803 = torch.constant.int 12 | |
| %2087 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2088 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2089 = torch.aten.quantize_per_tensor %2086, %2087, %2088, %int12_803 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2090 = torch.aten.int_repr %2089 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2091 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2092 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2093 = torch.aten._make_per_tensor_quantized_tensor %2090, %2091, %2092 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2094 = torch.aten.dequantize.self %2093 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_804 = torch.constant.int 12 | |
| %2095 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2096 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2097 = torch.aten.quantize_per_tensor %122, %2095, %2096, %int12_804 : !torch.vtensor<[256,224,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %2098 = torch.aten.int_repr %2097 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],si8> | |
| %2099 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2100 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2101 = torch.aten._make_per_tensor_quantized_tensor %2098, %2099, %2100 : !torch.vtensor<[256,224,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %2102 = torch.aten.dequantize.self %2101 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],f32> | |
| %int12_805 = torch.constant.int 12 | |
| %2103 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2104 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2105 = torch.aten.quantize_per_tensor %123, %2103, %2104, %int12_805 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2106 = torch.aten.int_repr %2105 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %2107 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2108 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2109 = torch.aten._make_per_tensor_quantized_tensor %2106, %2107, %2108 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2110 = torch.aten.dequantize.self %2109 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_806 = torch.constant.int 0 | |
| %int3_807 = torch.constant.int 3 | |
| %int1_808 = torch.constant.int 1 | |
| %int1_809 = torch.constant.int 1 | |
| %int1_810 = torch.constant.int 1 | |
| %int1_811 = torch.constant.int 1 | |
| %int0_812 = torch.constant.int 0 | |
| %2111 = torch.prim.ListConstruct %int0_806, %int3_807 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2112 = torch.prim.ListConstruct %int1_808, %int1_809 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2113 = torch.prim.ListConstruct %int1_810, %int1_811 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2114 = torch.prim.ListConstruct %int0_812, %int0_812 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_813 = torch.constant.bool false | |
| %int1_814 = torch.constant.int 1 | |
| %2115 = torch.aten.convolution %2094, %2102, %2110, %2113, %2111, %2112, %false_813, %2114, %int1_814 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,1,7],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %2116 = torch.aten.relu %2115 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
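| // Pooling branch: 3x3 avg_pool2d (stride 1, padding 1) over the 1024-channel tensor %1870, scaled elementwise by the constant %308, then QDQ and a 1x1 convolution down to 128 channels. | |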
| %int3_815 = torch.constant.int 3 | |
| %int3_816 = torch.constant.int 3 | |
| %int1_817 = torch.constant.int 1 | |
| %int1_818 = torch.constant.int 1 | |
| %int1_819 = torch.constant.int 1 | |
| %int1_820 = torch.constant.int 1 | |
| %int1_821 = torch.constant.int 1 | |
| %int1_822 = torch.constant.int 1 | |
| %2117 = torch.prim.ListConstruct %int3_815, %int3_816 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2118 = torch.prim.ListConstruct %int1_817, %int1_818, %int1_819, %int1_820 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2119 = torch.prim.ListConstruct %int1_821, %int1_822 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_823 = torch.constant.bool false | |
| %false_824 = torch.constant.bool false | |
| %none_825 = torch.constant.none | |
| %2120 = torch.aten.avg_pool2d %1870, %2117, %2119, %2118, %false_823, %false_824, %none_825 : !torch.vtensor<[32,1024,12,12],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1024,12,12],f32> | |
| %2121 = torch.aten.mul.Tensor %2120, %308 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_826 = torch.constant.int 12 | |
| %2122 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2123 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2124 = torch.aten.quantize_per_tensor %2121, %2122, %2123, %int12_826 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2125 = torch.aten.int_repr %2124 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %2126 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2127 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2128 = torch.aten._make_per_tensor_quantized_tensor %2125, %2126, %2127 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2129 = torch.aten.dequantize.self %2128 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_827 = torch.constant.int 12 | |
| %2130 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2131 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2132 = torch.aten.quantize_per_tensor %124, %2130, %2131, %int12_827 : !torch.vtensor<[128,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %2133 = torch.aten.int_repr %2132 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],si8> | |
| %2134 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2135 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2136 = torch.aten._make_per_tensor_quantized_tensor %2133, %2134, %2135 : !torch.vtensor<[128,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %2137 = torch.aten.dequantize.self %2136 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],f32> | |
| %int12_828 = torch.constant.int 12 | |
| %2138 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2139 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2140 = torch.aten.quantize_per_tensor %125, %2138, %2139, %int12_828 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %2141 = torch.aten.int_repr %2140 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8> | |
| %2142 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2143 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2144 = torch.aten._make_per_tensor_quantized_tensor %2141, %2142, %2143 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %2145 = torch.aten.dequantize.self %2144 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32> | |
| %int0_829 = torch.constant.int 0 | |
| %int0_830 = torch.constant.int 0 | |
| %int1_831 = torch.constant.int 1 | |
| %int1_832 = torch.constant.int 1 | |
| %int1_833 = torch.constant.int 1 | |
| %int1_834 = torch.constant.int 1 | |
| %int0_835 = torch.constant.int 0 | |
| %2146 = torch.prim.ListConstruct %int0_829, %int0_830 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2147 = torch.prim.ListConstruct %int1_831, %int1_832 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2148 = torch.prim.ListConstruct %int1_833, %int1_834 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2149 = torch.prim.ListConstruct %int0_835, %int0_835 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_836 = torch.constant.bool false | |
| %int1_837 = torch.constant.int 1 | |
| %2150 = torch.aten.convolution %2129, %2137, %2145, %2148, %2146, %2147, %false_836, %2149, %int1_837 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[128,1024,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,12,12],f32> | |
| %2151 = torch.aten.relu %2150 : !torch.vtensor<[32,128,12,12],f32> -> !torch.vtensor<[32,128,12,12],f32> | |
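| // Concatenate the four branch outputs (384 + 256 + 256 + 128 channels) along dim 1 into the 1024-channel block output. | |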
| %2152 = torch.prim.ListConstruct %1892, %1974, %2116, %2151 : (!torch.vtensor<[32,384,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,128,12,12],f32>) -> !torch.list<vtensor> | |
| %int1_838 = torch.constant.int 1 | |
| %2153 = torch.aten.cat %2152, %int1_838 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1024,12,12],f32> | |
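| // The same quantized block structure now repeats on the concatenated result: QDQ of %2153, then a 1x1 convolution from 1024 to 384 channels as the first of four parallel paths. | |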
| %int12_839 = torch.constant.int 12 | |
| %2154 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2155 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2156 = torch.aten.quantize_per_tensor %2153, %2154, %2155, %int12_839 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2157 = torch.aten.int_repr %2156 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %2158 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2159 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2160 = torch.aten._make_per_tensor_quantized_tensor %2157, %2158, %2159 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2161 = torch.aten.dequantize.self %2160 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_840 = torch.constant.int 12 | |
| %2162 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2163 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2164 = torch.aten.quantize_per_tensor %126, %2162, %2163, %int12_840 : !torch.vtensor<[384,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %2165 = torch.aten.int_repr %2164 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],si8> | |
| %2166 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2167 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2168 = torch.aten._make_per_tensor_quantized_tensor %2165, %2166, %2167 : !torch.vtensor<[384,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %2169 = torch.aten.dequantize.self %2168 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],f32> | |
| %int12_841 = torch.constant.int 12 | |
| %2170 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2171 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2172 = torch.aten.quantize_per_tensor %127, %2170, %2171, %int12_841 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %2173 = torch.aten.int_repr %2172 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %2174 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2175 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2176 = torch.aten._make_per_tensor_quantized_tensor %2173, %2174, %2175 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %2177 = torch.aten.dequantize.self %2176 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_842 = torch.constant.int 0 | |
| %int0_843 = torch.constant.int 0 | |
| %int1_844 = torch.constant.int 1 | |
| %int1_845 = torch.constant.int 1 | |
| %int1_846 = torch.constant.int 1 | |
| %int1_847 = torch.constant.int 1 | |
| %int0_848 = torch.constant.int 0 | |
| %2178 = torch.prim.ListConstruct %int0_842, %int0_843 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2179 = torch.prim.ListConstruct %int1_844, %int1_845 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2180 = torch.prim.ListConstruct %int1_846, %int1_847 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2181 = torch.prim.ListConstruct %int0_848, %int0_848 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_849 = torch.constant.bool false | |
| %int1_850 = torch.constant.int 1 | |
| %2182 = torch.aten.convolution %2161, %2169, %2177, %2180, %2178, %2179, %false_849, %2181, %int1_850 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[384,1024,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,12,12],f32> | |
| %2183 = torch.aten.relu %2182 : !torch.vtensor<[32,384,12,12],f32> -> !torch.vtensor<[32,384,12,12],f32> | |
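| // Second path: 1x1 convolution to 192 channels, then a 1x7 convolution (padding (0,3)) to 224 channels and a 7x1 convolution (padding (3,0)) to 256 channels. | |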
| %int12_851 = torch.constant.int 12 | |
| %2184 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2185 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2186 = torch.aten.quantize_per_tensor %128, %2184, %2185, %int12_851 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2187 = torch.aten.int_repr %2186 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %2188 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2189 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2190 = torch.aten._make_per_tensor_quantized_tensor %2187, %2188, %2189 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2191 = torch.aten.dequantize.self %2190 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_852 = torch.constant.int 12 | |
| %2192 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2193 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2194 = torch.aten.quantize_per_tensor %129, %2192, %2193, %int12_852 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2195 = torch.aten.int_repr %2194 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2196 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2197 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2198 = torch.aten._make_per_tensor_quantized_tensor %2195, %2196, %2197 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2199 = torch.aten.dequantize.self %2198 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_853 = torch.constant.int 0 | |
| %int0_854 = torch.constant.int 0 | |
| %int1_855 = torch.constant.int 1 | |
| %int1_856 = torch.constant.int 1 | |
| %int1_857 = torch.constant.int 1 | |
| %int1_858 = torch.constant.int 1 | |
| %int0_859 = torch.constant.int 0 | |
| %2200 = torch.prim.ListConstruct %int0_853, %int0_854 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2201 = torch.prim.ListConstruct %int1_855, %int1_856 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2202 = torch.prim.ListConstruct %int1_857, %int1_858 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2203 = torch.prim.ListConstruct %int0_859, %int0_859 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_860 = torch.constant.bool false | |
| %int1_861 = torch.constant.int 1 | |
| %2204 = torch.aten.convolution %2161, %2191, %2199, %2202, %2200, %2201, %false_860, %2203, %int1_861 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2205 = torch.aten.relu %2204 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_862 = torch.constant.int 12 | |
| %2206 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2207 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2208 = torch.aten.quantize_per_tensor %2205, %2206, %2207, %int12_862 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2209 = torch.aten.int_repr %2208 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2210 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2211 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2212 = torch.aten._make_per_tensor_quantized_tensor %2209, %2210, %2211 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2213 = torch.aten.dequantize.self %2212 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_863 = torch.constant.int 12 | |
| %2214 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2215 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2216 = torch.aten.quantize_per_tensor %130, %2214, %2215, %int12_863 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2217 = torch.aten.int_repr %2216 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %2218 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2219 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2220 = torch.aten._make_per_tensor_quantized_tensor %2217, %2218, %2219 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2221 = torch.aten.dequantize.self %2220 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_864 = torch.constant.int 12 | |
| %2222 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2223 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2224 = torch.aten.quantize_per_tensor %131, %2222, %2223, %int12_864 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2225 = torch.aten.int_repr %2224 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2226 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2227 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2228 = torch.aten._make_per_tensor_quantized_tensor %2225, %2226, %2227 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2229 = torch.aten.dequantize.self %2228 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_865 = torch.constant.int 0 | |
| %int3_866 = torch.constant.int 3 | |
| %int1_867 = torch.constant.int 1 | |
| %int1_868 = torch.constant.int 1 | |
| %int1_869 = torch.constant.int 1 | |
| %int1_870 = torch.constant.int 1 | |
| %int0_871 = torch.constant.int 0 | |
| %2230 = torch.prim.ListConstruct %int0_865, %int3_866 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2231 = torch.prim.ListConstruct %int1_867, %int1_868 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2232 = torch.prim.ListConstruct %int1_869, %int1_870 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2233 = torch.prim.ListConstruct %int0_871, %int0_871 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_872 = torch.constant.bool false | |
| %int1_873 = torch.constant.int 1 | |
| %2234 = torch.aten.convolution %2213, %2221, %2229, %2232, %2230, %2231, %false_872, %2233, %int1_873 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2235 = torch.aten.relu %2234 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_874 = torch.constant.int 12 | |
| %2236 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2237 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2238 = torch.aten.quantize_per_tensor %2235, %2236, %2237, %int12_874 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2239 = torch.aten.int_repr %2238 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2240 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2241 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2242 = torch.aten._make_per_tensor_quantized_tensor %2239, %2240, %2241 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2243 = torch.aten.dequantize.self %2242 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_875 = torch.constant.int 12 | |
| %2244 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2245 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2246 = torch.aten.quantize_per_tensor %132, %2244, %2245, %int12_875 : !torch.vtensor<[256,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %2247 = torch.aten.int_repr %2246 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],si8> | |
| %2248 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2249 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2250 = torch.aten._make_per_tensor_quantized_tensor %2247, %2248, %2249 : !torch.vtensor<[256,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %2251 = torch.aten.dequantize.self %2250 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],f32> | |
| %int12_876 = torch.constant.int 12 | |
| %2252 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2253 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2254 = torch.aten.quantize_per_tensor %133, %2252, %2253, %int12_876 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2255 = torch.aten.int_repr %2254 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %2256 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2257 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2258 = torch.aten._make_per_tensor_quantized_tensor %2255, %2256, %2257 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2259 = torch.aten.dequantize.self %2258 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int3_877 = torch.constant.int 3 | |
| %int0_878 = torch.constant.int 0 | |
| %int1_879 = torch.constant.int 1 | |
| %int1_880 = torch.constant.int 1 | |
| %int1_881 = torch.constant.int 1 | |
| %int1_882 = torch.constant.int 1 | |
| %int0_883 = torch.constant.int 0 | |
| %2260 = torch.prim.ListConstruct %int3_877, %int0_878 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2261 = torch.prim.ListConstruct %int1_879, %int1_880 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2262 = torch.prim.ListConstruct %int1_881, %int1_882 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2263 = torch.prim.ListConstruct %int0_883, %int0_883 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_884 = torch.constant.bool false | |
| %int1_885 = torch.constant.int 1 | |
| %2264 = torch.aten.convolution %2243, %2251, %2259, %2262, %2260, %2261, %false_884, %2263, %int1_885 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,7,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %2265 = torch.aten.relu %2264 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
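| // Third path: 1x1 convolution to 192 channels followed by alternating 7x1 / 1x7 / 7x1 / 1x7 convolutions (192 -> 192 -> 224 -> 224 -> 256 channels), each with its own weight/bias QDQ and activation re-quantization. | |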
| %int12_886 = torch.constant.int 12 | |
| %2266 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2267 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2268 = torch.aten.quantize_per_tensor %134, %2266, %2267, %int12_886 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2269 = torch.aten.int_repr %2268 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %2270 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2271 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2272 = torch.aten._make_per_tensor_quantized_tensor %2269, %2270, %2271 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2273 = torch.aten.dequantize.self %2272 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_887 = torch.constant.int 12 | |
| %2274 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2275 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2276 = torch.aten.quantize_per_tensor %135, %2274, %2275, %int12_887 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2277 = torch.aten.int_repr %2276 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2278 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2279 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2280 = torch.aten._make_per_tensor_quantized_tensor %2277, %2278, %2279 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2281 = torch.aten.dequantize.self %2280 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_888 = torch.constant.int 0 | |
| %int0_889 = torch.constant.int 0 | |
| %int1_890 = torch.constant.int 1 | |
| %int1_891 = torch.constant.int 1 | |
| %int1_892 = torch.constant.int 1 | |
| %int1_893 = torch.constant.int 1 | |
| %int0_894 = torch.constant.int 0 | |
| %2282 = torch.prim.ListConstruct %int0_888, %int0_889 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2283 = torch.prim.ListConstruct %int1_890, %int1_891 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2284 = torch.prim.ListConstruct %int1_892, %int1_893 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2285 = torch.prim.ListConstruct %int0_894, %int0_894 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_895 = torch.constant.bool false | |
| %int1_896 = torch.constant.int 1 | |
| %2286 = torch.aten.convolution %2161, %2273, %2281, %2284, %2282, %2283, %false_895, %2285, %int1_896 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2287 = torch.aten.relu %2286 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_897 = torch.constant.int 12 | |
| %2288 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2289 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2290 = torch.aten.quantize_per_tensor %2287, %2288, %2289, %int12_897 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2291 = torch.aten.int_repr %2290 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2292 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2293 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2294 = torch.aten._make_per_tensor_quantized_tensor %2291, %2292, %2293 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2295 = torch.aten.dequantize.self %2294 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_898 = torch.constant.int 12 | |
| %2296 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2297 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2298 = torch.aten.quantize_per_tensor %136, %2296, %2297, %int12_898 : !torch.vtensor<[192,192,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %2299 = torch.aten.int_repr %2298 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],si8> | |
| %2300 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2301 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2302 = torch.aten._make_per_tensor_quantized_tensor %2299, %2300, %2301 : !torch.vtensor<[192,192,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %2303 = torch.aten.dequantize.self %2302 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],f32> | |
| %int12_899 = torch.constant.int 12 | |
| %2304 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2305 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2306 = torch.aten.quantize_per_tensor %137, %2304, %2305, %int12_899 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2307 = torch.aten.int_repr %2306 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2308 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2309 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2310 = torch.aten._make_per_tensor_quantized_tensor %2307, %2308, %2309 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2311 = torch.aten.dequantize.self %2310 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int3_900 = torch.constant.int 3 | |
| %int0_901 = torch.constant.int 0 | |
| %int1_902 = torch.constant.int 1 | |
| %int1_903 = torch.constant.int 1 | |
| %int1_904 = torch.constant.int 1 | |
| %int1_905 = torch.constant.int 1 | |
| %int0_906 = torch.constant.int 0 | |
| %2312 = torch.prim.ListConstruct %int3_900, %int0_901 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2313 = torch.prim.ListConstruct %int1_902, %int1_903 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2314 = torch.prim.ListConstruct %int1_904, %int1_905 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2315 = torch.prim.ListConstruct %int0_906, %int0_906 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_907 = torch.constant.bool false | |
| %int1_908 = torch.constant.int 1 | |
| %2316 = torch.aten.convolution %2295, %2303, %2311, %2314, %2312, %2313, %false_907, %2315, %int1_908 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[192,192,7,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2317 = torch.aten.relu %2316 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_909 = torch.constant.int 12 | |
| %2318 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %2319 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2320 = torch.aten.quantize_per_tensor %2317, %2318, %2319, %int12_909 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2321 = torch.aten.int_repr %2320 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2322 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %2323 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2324 = torch.aten._make_per_tensor_quantized_tensor %2321, %2322, %2323 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2325 = torch.aten.dequantize.self %2324 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_910 = torch.constant.int 12 | |
| %2326 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2327 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2328 = torch.aten.quantize_per_tensor %138, %2326, %2327, %int12_910 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2329 = torch.aten.int_repr %2328 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %2330 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2331 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2332 = torch.aten._make_per_tensor_quantized_tensor %2329, %2330, %2331 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2333 = torch.aten.dequantize.self %2332 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_911 = torch.constant.int 12 | |
| %2334 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2335 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2336 = torch.aten.quantize_per_tensor %139, %2334, %2335, %int12_911 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2337 = torch.aten.int_repr %2336 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2338 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2339 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2340 = torch.aten._make_per_tensor_quantized_tensor %2337, %2338, %2339 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2341 = torch.aten.dequantize.self %2340 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_912 = torch.constant.int 0 | |
| %int3_913 = torch.constant.int 3 | |
| %int1_914 = torch.constant.int 1 | |
| %int1_915 = torch.constant.int 1 | |
| %int1_916 = torch.constant.int 1 | |
| %int1_917 = torch.constant.int 1 | |
| %int0_918 = torch.constant.int 0 | |
| %2342 = torch.prim.ListConstruct %int0_912, %int3_913 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2343 = torch.prim.ListConstruct %int1_914, %int1_915 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2344 = torch.prim.ListConstruct %int1_916, %int1_917 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2345 = torch.prim.ListConstruct %int0_918, %int0_918 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_919 = torch.constant.bool false | |
| %int1_920 = torch.constant.int 1 | |
| %2346 = torch.aten.convolution %2325, %2333, %2341, %2344, %2342, %2343, %false_919, %2345, %int1_920 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2347 = torch.aten.relu %2346 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_921 = torch.constant.int 12 | |
| %2348 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2349 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2350 = torch.aten.quantize_per_tensor %2347, %2348, %2349, %int12_921 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2351 = torch.aten.int_repr %2350 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2352 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2353 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2354 = torch.aten._make_per_tensor_quantized_tensor %2351, %2352, %2353 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2355 = torch.aten.dequantize.self %2354 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_922 = torch.constant.int 12 | |
| %2356 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2357 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2358 = torch.aten.quantize_per_tensor %140, %2356, %2357, %int12_922 : !torch.vtensor<[224,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %2359 = torch.aten.int_repr %2358 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],si8> | |
| %2360 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2361 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2362 = torch.aten._make_per_tensor_quantized_tensor %2359, %2360, %2361 : !torch.vtensor<[224,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %2363 = torch.aten.dequantize.self %2362 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],f32> | |
| %int12_923 = torch.constant.int 12 | |
| %2364 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2365 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2366 = torch.aten.quantize_per_tensor %141, %2364, %2365, %int12_923 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2367 = torch.aten.int_repr %2366 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2368 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2369 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2370 = torch.aten._make_per_tensor_quantized_tensor %2367, %2368, %2369 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2371 = torch.aten.dequantize.self %2370 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int3_924 = torch.constant.int 3 | |
| %int0_925 = torch.constant.int 0 | |
| %int1_926 = torch.constant.int 1 | |
| %int1_927 = torch.constant.int 1 | |
| %int1_928 = torch.constant.int 1 | |
| %int1_929 = torch.constant.int 1 | |
| %int0_930 = torch.constant.int 0 | |
| %2372 = torch.prim.ListConstruct %int3_924, %int0_925 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2373 = torch.prim.ListConstruct %int1_926, %int1_927 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2374 = torch.prim.ListConstruct %int1_928, %int1_929 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2375 = torch.prim.ListConstruct %int0_930, %int0_930 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_931 = torch.constant.bool false | |
| %int1_932 = torch.constant.int 1 | |
| %2376 = torch.aten.convolution %2355, %2363, %2371, %2374, %2372, %2373, %false_931, %2375, %int1_932 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[224,224,7,1],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2377 = torch.aten.relu %2376 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_933 = torch.constant.int 12 | |
| %2378 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2379 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2380 = torch.aten.quantize_per_tensor %2377, %2378, %2379, %int12_933 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2381 = torch.aten.int_repr %2380 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2382 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2383 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2384 = torch.aten._make_per_tensor_quantized_tensor %2381, %2382, %2383 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2385 = torch.aten.dequantize.self %2384 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_934 = torch.constant.int 12 | |
| %2386 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2387 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2388 = torch.aten.quantize_per_tensor %142, %2386, %2387, %int12_934 : !torch.vtensor<[256,224,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %2389 = torch.aten.int_repr %2388 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],si8> | |
| %2390 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2391 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2392 = torch.aten._make_per_tensor_quantized_tensor %2389, %2390, %2391 : !torch.vtensor<[256,224,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %2393 = torch.aten.dequantize.self %2392 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],f32> | |
| %int12_935 = torch.constant.int 12 | |
| %2394 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2395 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2396 = torch.aten.quantize_per_tensor %143, %2394, %2395, %int12_935 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2397 = torch.aten.int_repr %2396 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %2398 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2399 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2400 = torch.aten._make_per_tensor_quantized_tensor %2397, %2398, %2399 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2401 = torch.aten.dequantize.self %2400 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_936 = torch.constant.int 0 | |
| %int3_937 = torch.constant.int 3 | |
| %int1_938 = torch.constant.int 1 | |
| %int1_939 = torch.constant.int 1 | |
| %int1_940 = torch.constant.int 1 | |
| %int1_941 = torch.constant.int 1 | |
| %int0_942 = torch.constant.int 0 | |
| %2402 = torch.prim.ListConstruct %int0_936, %int3_937 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2403 = torch.prim.ListConstruct %int1_938, %int1_939 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2404 = torch.prim.ListConstruct %int1_940, %int1_941 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2405 = torch.prim.ListConstruct %int0_942, %int0_942 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_943 = torch.constant.bool false | |
| %int1_944 = torch.constant.int 1 | |
| %2406 = torch.aten.convolution %2385, %2393, %2401, %2404, %2402, %2403, %false_943, %2405, %int1_944 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,1,7],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %2407 = torch.aten.relu %2406 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
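| // Fourth path: 3x3 avg_pool2d (stride 1, padding 1) over %2161, scaled by %308, then a 1x1 convolution down to 128 channels. | |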
| %int3_945 = torch.constant.int 3 | |
| %int3_946 = torch.constant.int 3 | |
| %int1_947 = torch.constant.int 1 | |
| %int1_948 = torch.constant.int 1 | |
| %int1_949 = torch.constant.int 1 | |
| %int1_950 = torch.constant.int 1 | |
| %int1_951 = torch.constant.int 1 | |
| %int1_952 = torch.constant.int 1 | |
| %2408 = torch.prim.ListConstruct %int3_945, %int3_946 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2409 = torch.prim.ListConstruct %int1_947, %int1_948, %int1_949, %int1_950 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2410 = torch.prim.ListConstruct %int1_951, %int1_952 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_953 = torch.constant.bool false | |
| %false_954 = torch.constant.bool false | |
| %none_955 = torch.constant.none | |
| %2411 = torch.aten.avg_pool2d %2161, %2408, %2410, %2409, %false_953, %false_954, %none_955 : !torch.vtensor<[32,1024,12,12],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1024,12,12],f32> | |
| %2412 = torch.aten.mul.Tensor %2411, %308 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_956 = torch.constant.int 12 | |
| %2413 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2414 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2415 = torch.aten.quantize_per_tensor %2412, %2413, %2414, %int12_956 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2416 = torch.aten.int_repr %2415 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %2417 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2418 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2419 = torch.aten._make_per_tensor_quantized_tensor %2416, %2417, %2418 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2420 = torch.aten.dequantize.self %2419 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_957 = torch.constant.int 12 | |
| %2421 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2422 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2423 = torch.aten.quantize_per_tensor %144, %2421, %2422, %int12_957 : !torch.vtensor<[128,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %2424 = torch.aten.int_repr %2423 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],si8> | |
| %2425 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2426 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2427 = torch.aten._make_per_tensor_quantized_tensor %2424, %2425, %2426 : !torch.vtensor<[128,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %2428 = torch.aten.dequantize.self %2427 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],f32> | |
| %int12_958 = torch.constant.int 12 | |
| %2429 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2430 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2431 = torch.aten.quantize_per_tensor %145, %2429, %2430, %int12_958 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %2432 = torch.aten.int_repr %2431 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8> | |
| %2433 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2434 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2435 = torch.aten._make_per_tensor_quantized_tensor %2432, %2433, %2434 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %2436 = torch.aten.dequantize.self %2435 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32> | |
| %int0_959 = torch.constant.int 0 | |
| %int0_960 = torch.constant.int 0 | |
| %int1_961 = torch.constant.int 1 | |
| %int1_962 = torch.constant.int 1 | |
| %int1_963 = torch.constant.int 1 | |
| %int1_964 = torch.constant.int 1 | |
| %int0_965 = torch.constant.int 0 | |
| %2437 = torch.prim.ListConstruct %int0_959, %int0_960 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2438 = torch.prim.ListConstruct %int1_961, %int1_962 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2439 = torch.prim.ListConstruct %int1_963, %int1_964 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2440 = torch.prim.ListConstruct %int0_965, %int0_965 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_966 = torch.constant.bool false | |
| %int1_967 = torch.constant.int 1 | |
| %2441 = torch.aten.convolution %2420, %2428, %2436, %2439, %2437, %2438, %false_966, %2440, %int1_967 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[128,1024,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,12,12],f32> | |
| %2442 = torch.aten.relu %2441 : !torch.vtensor<[32,128,12,12],f32> -> !torch.vtensor<[32,128,12,12],f32> | |
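| // Concatenate the four path outputs (384 + 256 + 256 + 128 channels) along dim 1; the QDQ'd result feeds the next repetition of the block. | |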
| %2443 = torch.prim.ListConstruct %2183, %2265, %2407, %2442 : (!torch.vtensor<[32,384,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,128,12,12],f32>) -> !torch.list<vtensor> | |
| %int1_968 = torch.constant.int 1 | |
| %2444 = torch.aten.cat %2443, %int1_968 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_969 = torch.constant.int 12 | |
| %2445 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2446 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2447 = torch.aten.quantize_per_tensor %2444, %2445, %2446, %int12_969 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2448 = torch.aten.int_repr %2447 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %2449 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2450 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2451 = torch.aten._make_per_tensor_quantized_tensor %2448, %2449, %2450 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2452 = torch.aten.dequantize.self %2451 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
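| // Next repetition: 1x1 convolution from 1024 to 384 channels on the re-quantized concat output %2452. | |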
| %int12_970 = torch.constant.int 12 | |
| %2453 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2454 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2455 = torch.aten.quantize_per_tensor %146, %2453, %2454, %int12_970 : !torch.vtensor<[384,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %2456 = torch.aten.int_repr %2455 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],si8> | |
| %2457 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2458 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2459 = torch.aten._make_per_tensor_quantized_tensor %2456, %2457, %2458 : !torch.vtensor<[384,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %2460 = torch.aten.dequantize.self %2459 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],f32> | |
| %int12_971 = torch.constant.int 12 | |
| %2461 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2462 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2463 = torch.aten.quantize_per_tensor %147, %2461, %2462, %int12_971 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %2464 = torch.aten.int_repr %2463 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %2465 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2466 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2467 = torch.aten._make_per_tensor_quantized_tensor %2464, %2465, %2466 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %2468 = torch.aten.dequantize.self %2467 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_972 = torch.constant.int 0 | |
| %int0_973 = torch.constant.int 0 | |
| %int1_974 = torch.constant.int 1 | |
| %int1_975 = torch.constant.int 1 | |
| %int1_976 = torch.constant.int 1 | |
| %int1_977 = torch.constant.int 1 | |
| %int0_978 = torch.constant.int 0 | |
| %2469 = torch.prim.ListConstruct %int0_972, %int0_973 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2470 = torch.prim.ListConstruct %int1_974, %int1_975 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2471 = torch.prim.ListConstruct %int1_976, %int1_977 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2472 = torch.prim.ListConstruct %int0_978, %int0_978 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_979 = torch.constant.bool false | |
| %int1_980 = torch.constant.int 1 | |
| %2473 = torch.aten.convolution %2452, %2460, %2468, %2471, %2469, %2470, %false_979, %2472, %int1_980 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[384,1024,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,12,12],f32> | |
| %2474 = torch.aten.relu %2473 : !torch.vtensor<[32,384,12,12],f32> -> !torch.vtensor<[32,384,12,12],f32> | |
| %int12_981 = torch.constant.int 12 | |
| %2475 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2476 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2477 = torch.aten.quantize_per_tensor %148, %2475, %2476, %int12_981 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2478 = torch.aten.int_repr %2477 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %2479 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2480 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2481 = torch.aten._make_per_tensor_quantized_tensor %2478, %2479, %2480 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2482 = torch.aten.dequantize.self %2481 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_982 = torch.constant.int 12 | |
| %2483 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2484 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2485 = torch.aten.quantize_per_tensor %149, %2483, %2484, %int12_982 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2486 = torch.aten.int_repr %2485 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2487 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2488 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2489 = torch.aten._make_per_tensor_quantized_tensor %2486, %2487, %2488 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2490 = torch.aten.dequantize.self %2489 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_983 = torch.constant.int 0 | |
| %int0_984 = torch.constant.int 0 | |
| %int1_985 = torch.constant.int 1 | |
| %int1_986 = torch.constant.int 1 | |
| %int1_987 = torch.constant.int 1 | |
| %int1_988 = torch.constant.int 1 | |
| %int0_989 = torch.constant.int 0 | |
| %2491 = torch.prim.ListConstruct %int0_983, %int0_984 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2492 = torch.prim.ListConstruct %int1_985, %int1_986 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2493 = torch.prim.ListConstruct %int1_987, %int1_988 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2494 = torch.prim.ListConstruct %int0_989, %int0_989 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_990 = torch.constant.bool false | |
| %int1_991 = torch.constant.int 1 | |
| %2495 = torch.aten.convolution %2452, %2482, %2490, %2493, %2491, %2492, %false_990, %2494, %int1_991 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2496 = torch.aten.relu %2495 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_992 = torch.constant.int 12 | |
| %2497 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2498 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2499 = torch.aten.quantize_per_tensor %2496, %2497, %2498, %int12_992 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2500 = torch.aten.int_repr %2499 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2501 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2502 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2503 = torch.aten._make_per_tensor_quantized_tensor %2500, %2501, %2502 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2504 = torch.aten.dequantize.self %2503 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_993 = torch.constant.int 12 | |
| %2505 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2506 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2507 = torch.aten.quantize_per_tensor %150, %2505, %2506, %int12_993 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2508 = torch.aten.int_repr %2507 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %2509 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2510 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2511 = torch.aten._make_per_tensor_quantized_tensor %2508, %2509, %2510 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2512 = torch.aten.dequantize.self %2511 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_994 = torch.constant.int 12 | |
| %2513 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2514 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2515 = torch.aten.quantize_per_tensor %151, %2513, %2514, %int12_994 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2516 = torch.aten.int_repr %2515 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2517 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2518 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2519 = torch.aten._make_per_tensor_quantized_tensor %2516, %2517, %2518 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2520 = torch.aten.dequantize.self %2519 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_995 = torch.constant.int 0 | |
| %int3_996 = torch.constant.int 3 | |
| %int1_997 = torch.constant.int 1 | |
| %int1_998 = torch.constant.int 1 | |
| %int1_999 = torch.constant.int 1 | |
| %int1_1000 = torch.constant.int 1 | |
| %int0_1001 = torch.constant.int 0 | |
| %2521 = torch.prim.ListConstruct %int0_995, %int3_996 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2522 = torch.prim.ListConstruct %int1_997, %int1_998 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2523 = torch.prim.ListConstruct %int1_999, %int1_1000 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2524 = torch.prim.ListConstruct %int0_1001, %int0_1001 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1002 = torch.constant.bool false | |
| %int1_1003 = torch.constant.int 1 | |
| %2525 = torch.aten.convolution %2504, %2512, %2520, %2523, %2521, %2522, %false_1002, %2524, %int1_1003 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2526 = torch.aten.relu %2525 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1004 = torch.constant.int 12 | |
| %2527 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2528 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2529 = torch.aten.quantize_per_tensor %2526, %2527, %2528, %int12_1004 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2530 = torch.aten.int_repr %2529 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2531 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2532 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2533 = torch.aten._make_per_tensor_quantized_tensor %2530, %2531, %2532 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2534 = torch.aten.dequantize.self %2533 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1005 = torch.constant.int 12 | |
| %2535 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2536 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2537 = torch.aten.quantize_per_tensor %152, %2535, %2536, %int12_1005 : !torch.vtensor<[256,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %2538 = torch.aten.int_repr %2537 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],si8> | |
| %2539 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2540 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2541 = torch.aten._make_per_tensor_quantized_tensor %2538, %2539, %2540 : !torch.vtensor<[256,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %2542 = torch.aten.dequantize.self %2541 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],f32> | |
| %int12_1006 = torch.constant.int 12 | |
| %2543 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2544 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2545 = torch.aten.quantize_per_tensor %153, %2543, %2544, %int12_1006 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2546 = torch.aten.int_repr %2545 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %2547 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2548 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2549 = torch.aten._make_per_tensor_quantized_tensor %2546, %2547, %2548 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2550 = torch.aten.dequantize.self %2549 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int3_1007 = torch.constant.int 3 | |
| %int0_1008 = torch.constant.int 0 | |
| %int1_1009 = torch.constant.int 1 | |
| %int1_1010 = torch.constant.int 1 | |
| %int1_1011 = torch.constant.int 1 | |
| %int1_1012 = torch.constant.int 1 | |
| %int0_1013 = torch.constant.int 0 | |
| %2551 = torch.prim.ListConstruct %int3_1007, %int0_1008 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2552 = torch.prim.ListConstruct %int1_1009, %int1_1010 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2553 = torch.prim.ListConstruct %int1_1011, %int1_1012 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2554 = torch.prim.ListConstruct %int0_1013, %int0_1013 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1014 = torch.constant.bool false | |
| %int1_1015 = torch.constant.int 1 | |
| %2555 = torch.aten.convolution %2534, %2542, %2550, %2553, %2551, %2552, %false_1014, %2554, %int1_1015 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,7,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %2556 = torch.aten.relu %2555 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
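| // End of the 1x1 -> 1x7 -> 7x1 branch over %2452: 192 -> 224 -> 256 channels, with the spatial size held at 12x12 | |
| // by the asymmetric paddings (0,3) and (3,0). A longer factorized-7x7 branch over the same input starts below. | |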
| %int12_1016 = torch.constant.int 12 | |
| %2557 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2558 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2559 = torch.aten.quantize_per_tensor %154, %2557, %2558, %int12_1016 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2560 = torch.aten.int_repr %2559 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %2561 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2562 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2563 = torch.aten._make_per_tensor_quantized_tensor %2560, %2561, %2562 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2564 = torch.aten.dequantize.self %2563 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_1017 = torch.constant.int 12 | |
| %2565 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2566 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2567 = torch.aten.quantize_per_tensor %155, %2565, %2566, %int12_1017 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2568 = torch.aten.int_repr %2567 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2569 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2570 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2571 = torch.aten._make_per_tensor_quantized_tensor %2568, %2569, %2570 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2572 = torch.aten.dequantize.self %2571 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_1018 = torch.constant.int 0 | |
| %int0_1019 = torch.constant.int 0 | |
| %int1_1020 = torch.constant.int 1 | |
| %int1_1021 = torch.constant.int 1 | |
| %int1_1022 = torch.constant.int 1 | |
| %int1_1023 = torch.constant.int 1 | |
| %int0_1024 = torch.constant.int 0 | |
| %2573 = torch.prim.ListConstruct %int0_1018, %int0_1019 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2574 = torch.prim.ListConstruct %int1_1020, %int1_1021 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2575 = torch.prim.ListConstruct %int1_1022, %int1_1023 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2576 = torch.prim.ListConstruct %int0_1024, %int0_1024 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1025 = torch.constant.bool false | |
| %int1_1026 = torch.constant.int 1 | |
| %2577 = torch.aten.convolution %2452, %2564, %2572, %2575, %2573, %2574, %false_1025, %2576, %int1_1026 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2578 = torch.aten.relu %2577 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1027 = torch.constant.int 12 | |
| %2579 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2580 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2581 = torch.aten.quantize_per_tensor %2578, %2579, %2580, %int12_1027 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2582 = torch.aten.int_repr %2581 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2583 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2584 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2585 = torch.aten._make_per_tensor_quantized_tensor %2582, %2583, %2584 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2586 = torch.aten.dequantize.self %2585 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1028 = torch.constant.int 12 | |
| %2587 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2588 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2589 = torch.aten.quantize_per_tensor %156, %2587, %2588, %int12_1028 : !torch.vtensor<[192,192,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %2590 = torch.aten.int_repr %2589 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],si8> | |
| %2591 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2592 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2593 = torch.aten._make_per_tensor_quantized_tensor %2590, %2591, %2592 : !torch.vtensor<[192,192,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %2594 = torch.aten.dequantize.self %2593 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],f32> | |
| %int12_1029 = torch.constant.int 12 | |
| %2595 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2596 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2597 = torch.aten.quantize_per_tensor %157, %2595, %2596, %int12_1029 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2598 = torch.aten.int_repr %2597 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2599 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2600 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2601 = torch.aten._make_per_tensor_quantized_tensor %2598, %2599, %2600 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2602 = torch.aten.dequantize.self %2601 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int3_1030 = torch.constant.int 3 | |
| %int0_1031 = torch.constant.int 0 | |
| %int1_1032 = torch.constant.int 1 | |
| %int1_1033 = torch.constant.int 1 | |
| %int1_1034 = torch.constant.int 1 | |
| %int1_1035 = torch.constant.int 1 | |
| %int0_1036 = torch.constant.int 0 | |
| %2603 = torch.prim.ListConstruct %int3_1030, %int0_1031 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2604 = torch.prim.ListConstruct %int1_1032, %int1_1033 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2605 = torch.prim.ListConstruct %int1_1034, %int1_1035 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2606 = torch.prim.ListConstruct %int0_1036, %int0_1036 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1037 = torch.constant.bool false | |
| %int1_1038 = torch.constant.int 1 | |
| %2607 = torch.aten.convolution %2586, %2594, %2602, %2605, %2603, %2604, %false_1037, %2606, %int1_1038 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[192,192,7,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2608 = torch.aten.relu %2607 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1039 = torch.constant.int 12 | |
| %2609 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2610 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2611 = torch.aten.quantize_per_tensor %2608, %2609, %2610, %int12_1039 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2612 = torch.aten.int_repr %2611 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2613 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2614 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2615 = torch.aten._make_per_tensor_quantized_tensor %2612, %2613, %2614 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2616 = torch.aten.dequantize.self %2615 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1040 = torch.constant.int 12 | |
| %2617 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2618 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2619 = torch.aten.quantize_per_tensor %158, %2617, %2618, %int12_1040 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2620 = torch.aten.int_repr %2619 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %2621 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2622 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2623 = torch.aten._make_per_tensor_quantized_tensor %2620, %2621, %2622 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2624 = torch.aten.dequantize.self %2623 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_1041 = torch.constant.int 12 | |
| %2625 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2626 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2627 = torch.aten.quantize_per_tensor %159, %2625, %2626, %int12_1041 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2628 = torch.aten.int_repr %2627 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2629 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2630 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2631 = torch.aten._make_per_tensor_quantized_tensor %2628, %2629, %2630 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2632 = torch.aten.dequantize.self %2631 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_1042 = torch.constant.int 0 | |
| %int3_1043 = torch.constant.int 3 | |
| %int1_1044 = torch.constant.int 1 | |
| %int1_1045 = torch.constant.int 1 | |
| %int1_1046 = torch.constant.int 1 | |
| %int1_1047 = torch.constant.int 1 | |
| %int0_1048 = torch.constant.int 0 | |
| %2633 = torch.prim.ListConstruct %int0_1042, %int3_1043 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2634 = torch.prim.ListConstruct %int1_1044, %int1_1045 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2635 = torch.prim.ListConstruct %int1_1046, %int1_1047 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2636 = torch.prim.ListConstruct %int0_1048, %int0_1048 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1049 = torch.constant.bool false | |
| %int1_1050 = torch.constant.int 1 | |
| %2637 = torch.aten.convolution %2616, %2624, %2632, %2635, %2633, %2634, %false_1049, %2636, %int1_1050 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2638 = torch.aten.relu %2637 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1051 = torch.constant.int 12 | |
| %2639 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2640 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2641 = torch.aten.quantize_per_tensor %2638, %2639, %2640, %int12_1051 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2642 = torch.aten.int_repr %2641 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2643 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2644 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2645 = torch.aten._make_per_tensor_quantized_tensor %2642, %2643, %2644 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2646 = torch.aten.dequantize.self %2645 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1052 = torch.constant.int 12 | |
| %2647 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2648 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2649 = torch.aten.quantize_per_tensor %160, %2647, %2648, %int12_1052 : !torch.vtensor<[224,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %2650 = torch.aten.int_repr %2649 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],si8> | |
| %2651 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2652 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2653 = torch.aten._make_per_tensor_quantized_tensor %2650, %2651, %2652 : !torch.vtensor<[224,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %2654 = torch.aten.dequantize.self %2653 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],f32> | |
| %int12_1053 = torch.constant.int 12 | |
| %2655 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2656 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2657 = torch.aten.quantize_per_tensor %161, %2655, %2656, %int12_1053 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2658 = torch.aten.int_repr %2657 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2659 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %2660 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2661 = torch.aten._make_per_tensor_quantized_tensor %2658, %2659, %2660 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2662 = torch.aten.dequantize.self %2661 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int3_1054 = torch.constant.int 3 | |
| %int0_1055 = torch.constant.int 0 | |
| %int1_1056 = torch.constant.int 1 | |
| %int1_1057 = torch.constant.int 1 | |
| %int1_1058 = torch.constant.int 1 | |
| %int1_1059 = torch.constant.int 1 | |
| %int0_1060 = torch.constant.int 0 | |
| %2663 = torch.prim.ListConstruct %int3_1054, %int0_1055 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2664 = torch.prim.ListConstruct %int1_1056, %int1_1057 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2665 = torch.prim.ListConstruct %int1_1058, %int1_1059 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2666 = torch.prim.ListConstruct %int0_1060, %int0_1060 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1061 = torch.constant.bool false | |
| %int1_1062 = torch.constant.int 1 | |
| %2667 = torch.aten.convolution %2646, %2654, %2662, %2665, %2663, %2664, %false_1061, %2666, %int1_1062 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[224,224,7,1],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2668 = torch.aten.relu %2667 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1063 = torch.constant.int 12 | |
| %2669 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2670 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2671 = torch.aten.quantize_per_tensor %2668, %2669, %2670, %int12_1063 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2672 = torch.aten.int_repr %2671 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2673 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2674 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2675 = torch.aten._make_per_tensor_quantized_tensor %2672, %2673, %2674 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2676 = torch.aten.dequantize.self %2675 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1064 = torch.constant.int 12 | |
| %2677 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2678 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2679 = torch.aten.quantize_per_tensor %162, %2677, %2678, %int12_1064 : !torch.vtensor<[256,224,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %2680 = torch.aten.int_repr %2679 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],si8> | |
| %2681 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2682 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2683 = torch.aten._make_per_tensor_quantized_tensor %2680, %2681, %2682 : !torch.vtensor<[256,224,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %2684 = torch.aten.dequantize.self %2683 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],f32> | |
| %int12_1065 = torch.constant.int 12 | |
| %2685 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2686 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2687 = torch.aten.quantize_per_tensor %163, %2685, %2686, %int12_1065 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2688 = torch.aten.int_repr %2687 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %2689 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2690 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2691 = torch.aten._make_per_tensor_quantized_tensor %2688, %2689, %2690 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2692 = torch.aten.dequantize.self %2691 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1066 = torch.constant.int 0 | |
| %int3_1067 = torch.constant.int 3 | |
| %int1_1068 = torch.constant.int 1 | |
| %int1_1069 = torch.constant.int 1 | |
| %int1_1070 = torch.constant.int 1 | |
| %int1_1071 = torch.constant.int 1 | |
| %int0_1072 = torch.constant.int 0 | |
| %2693 = torch.prim.ListConstruct %int0_1066, %int3_1067 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2694 = torch.prim.ListConstruct %int1_1068, %int1_1069 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2695 = torch.prim.ListConstruct %int1_1070, %int1_1071 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2696 = torch.prim.ListConstruct %int0_1072, %int0_1072 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1073 = torch.constant.bool false | |
| %int1_1074 = torch.constant.int 1 | |
| %2697 = torch.aten.convolution %2676, %2684, %2692, %2695, %2693, %2694, %false_1073, %2696, %int1_1074 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,1,7],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %2698 = torch.aten.relu %2697 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
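| // End of the 1x1 -> 7x1 -> 1x7 -> 7x1 -> 1x7 branch over %2452 (output 32x256x12x12). The final branch, a 3x3 | |
| // average pool followed by a 1x1 convolution, comes next. | |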
| %int3_1075 = torch.constant.int 3 | |
| %int3_1076 = torch.constant.int 3 | |
| %int1_1077 = torch.constant.int 1 | |
| %int1_1078 = torch.constant.int 1 | |
| %int1_1079 = torch.constant.int 1 | |
| %int1_1080 = torch.constant.int 1 | |
| %int1_1081 = torch.constant.int 1 | |
| %int1_1082 = torch.constant.int 1 | |
| %2699 = torch.prim.ListConstruct %int3_1075, %int3_1076 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2700 = torch.prim.ListConstruct %int1_1077, %int1_1078, %int1_1079, %int1_1080 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2701 = torch.prim.ListConstruct %int1_1081, %int1_1082 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1083 = torch.constant.bool false | |
| %false_1084 = torch.constant.bool false | |
| %none_1085 = torch.constant.none | |
| %2702 = torch.aten.avg_pool2d %2452, %2699, %2701, %2700, %false_1083, %false_1084, %none_1085 : !torch.vtensor<[32,1024,12,12],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1024,12,12],f32> | |
| %2703 = torch.aten.mul.Tensor %2702, %308 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1024,12,12],f32> | |
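| // 3x3 average pooling with stride 1 and padding 1 on every side keeps %2452 at 32x1024x12x12; the result is | |
| // scaled by the scalar constant %308, re-quantized, and reduced to 128 channels by the 1x1 convolution below. | |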
| %int12_1086 = torch.constant.int 12 | |
| %2704 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2705 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2706 = torch.aten.quantize_per_tensor %2703, %2704, %2705, %int12_1086 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2707 = torch.aten.int_repr %2706 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %2708 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2709 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2710 = torch.aten._make_per_tensor_quantized_tensor %2707, %2708, %2709 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2711 = torch.aten.dequantize.self %2710 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_1087 = torch.constant.int 12 | |
| %2712 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2713 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2714 = torch.aten.quantize_per_tensor %164, %2712, %2713, %int12_1087 : !torch.vtensor<[128,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %2715 = torch.aten.int_repr %2714 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],si8> | |
| %2716 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2717 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2718 = torch.aten._make_per_tensor_quantized_tensor %2715, %2716, %2717 : !torch.vtensor<[128,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %2719 = torch.aten.dequantize.self %2718 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],f32> | |
| %int12_1088 = torch.constant.int 12 | |
| %2720 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2721 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2722 = torch.aten.quantize_per_tensor %165, %2720, %2721, %int12_1088 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %2723 = torch.aten.int_repr %2722 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8> | |
| %2724 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2725 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2726 = torch.aten._make_per_tensor_quantized_tensor %2723, %2724, %2725 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %2727 = torch.aten.dequantize.self %2726 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32> | |
| %int0_1089 = torch.constant.int 0 | |
| %int0_1090 = torch.constant.int 0 | |
| %int1_1091 = torch.constant.int 1 | |
| %int1_1092 = torch.constant.int 1 | |
| %int1_1093 = torch.constant.int 1 | |
| %int1_1094 = torch.constant.int 1 | |
| %int0_1095 = torch.constant.int 0 | |
| %2728 = torch.prim.ListConstruct %int0_1089, %int0_1090 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2729 = torch.prim.ListConstruct %int1_1091, %int1_1092 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2730 = torch.prim.ListConstruct %int1_1093, %int1_1094 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2731 = torch.prim.ListConstruct %int0_1095, %int0_1095 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1096 = torch.constant.bool false | |
| %int1_1097 = torch.constant.int 1 | |
| %2732 = torch.aten.convolution %2711, %2719, %2727, %2730, %2728, %2729, %false_1096, %2731, %int1_1097 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[128,1024,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,12,12],f32> | |
| %2733 = torch.aten.relu %2732 : !torch.vtensor<[32,128,12,12],f32> -> !torch.vtensor<[32,128,12,12],f32> | |
| %2734 = torch.prim.ListConstruct %2474, %2556, %2698, %2733 : (!torch.vtensor<[32,384,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,128,12,12],f32>) -> !torch.list<vtensor> | |
| %int1_1098 = torch.constant.int 1 | |
| %2735 = torch.aten.cat %2734, %int1_1098 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1024,12,12],f32> | |
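| // The four branch outputs (384 + 256 + 256 + 128 channels) are concatenated along dim 1, reproducing the | |
| // 32x1024x12x12 shape; this branch-and-concatenate pattern is consistent with an Inception-style block. | |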
| %int12_1099 = torch.constant.int 12 | |
| %2736 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2737 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2738 = torch.aten.quantize_per_tensor %2735, %2736, %2737, %int12_1099 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2739 = torch.aten.int_repr %2738 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %2740 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2741 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2742 = torch.aten._make_per_tensor_quantized_tensor %2739, %2740, %2741 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2743 = torch.aten.dequantize.self %2742 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_1100 = torch.constant.int 12 | |
| %2744 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2745 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2746 = torch.aten.quantize_per_tensor %166, %2744, %2745, %int12_1100 : !torch.vtensor<[384,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %2747 = torch.aten.int_repr %2746 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],si8> | |
| %2748 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2749 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2750 = torch.aten._make_per_tensor_quantized_tensor %2747, %2748, %2749 : !torch.vtensor<[384,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %2751 = torch.aten.dequantize.self %2750 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],f32> | |
| %int12_1101 = torch.constant.int 12 | |
| %2752 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2753 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2754 = torch.aten.quantize_per_tensor %167, %2752, %2753, %int12_1101 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %2755 = torch.aten.int_repr %2754 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %2756 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2757 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2758 = torch.aten._make_per_tensor_quantized_tensor %2755, %2756, %2757 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %2759 = torch.aten.dequantize.self %2758 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_1102 = torch.constant.int 0 | |
| %int0_1103 = torch.constant.int 0 | |
| %int1_1104 = torch.constant.int 1 | |
| %int1_1105 = torch.constant.int 1 | |
| %int1_1106 = torch.constant.int 1 | |
| %int1_1107 = torch.constant.int 1 | |
| %int0_1108 = torch.constant.int 0 | |
| %2760 = torch.prim.ListConstruct %int0_1102, %int0_1103 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2761 = torch.prim.ListConstruct %int1_1104, %int1_1105 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2762 = torch.prim.ListConstruct %int1_1106, %int1_1107 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2763 = torch.prim.ListConstruct %int0_1108, %int0_1108 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1109 = torch.constant.bool false | |
| %int1_1110 = torch.constant.int 1 | |
| %2764 = torch.aten.convolution %2743, %2751, %2759, %2762, %2760, %2761, %false_1109, %2763, %int1_1110 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[384,1024,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,12,12],f32> | |
| %2765 = torch.aten.relu %2764 : !torch.vtensor<[32,384,12,12],f32> -> !torch.vtensor<[32,384,12,12],f32> | |
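| // The next block operates on the concatenated tensor %2743; this 1x1 convolution to 384 channels is its first | |
| // branch. | |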
| %int12_1111 = torch.constant.int 12 | |
| %2766 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2767 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2768 = torch.aten.quantize_per_tensor %168, %2766, %2767, %int12_1111 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2769 = torch.aten.int_repr %2768 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %2770 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2771 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2772 = torch.aten._make_per_tensor_quantized_tensor %2769, %2770, %2771 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2773 = torch.aten.dequantize.self %2772 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_1112 = torch.constant.int 12 | |
| %2774 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2775 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2776 = torch.aten.quantize_per_tensor %169, %2774, %2775, %int12_1112 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2777 = torch.aten.int_repr %2776 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2778 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2779 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2780 = torch.aten._make_per_tensor_quantized_tensor %2777, %2778, %2779 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2781 = torch.aten.dequantize.self %2780 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_1113 = torch.constant.int 0 | |
| %int0_1114 = torch.constant.int 0 | |
| %int1_1115 = torch.constant.int 1 | |
| %int1_1116 = torch.constant.int 1 | |
| %int1_1117 = torch.constant.int 1 | |
| %int1_1118 = torch.constant.int 1 | |
| %int0_1119 = torch.constant.int 0 | |
| %2782 = torch.prim.ListConstruct %int0_1113, %int0_1114 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2783 = torch.prim.ListConstruct %int1_1115, %int1_1116 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2784 = torch.prim.ListConstruct %int1_1117, %int1_1118 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2785 = torch.prim.ListConstruct %int0_1119, %int0_1119 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1120 = torch.constant.bool false | |
| %int1_1121 = torch.constant.int 1 | |
| %2786 = torch.aten.convolution %2743, %2773, %2781, %2784, %2782, %2783, %false_1120, %2785, %int1_1121 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2787 = torch.aten.relu %2786 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1122 = torch.constant.int 12 | |
| %2788 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2789 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2790 = torch.aten.quantize_per_tensor %2787, %2788, %2789, %int12_1122 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2791 = torch.aten.int_repr %2790 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2792 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2793 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2794 = torch.aten._make_per_tensor_quantized_tensor %2791, %2792, %2793 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2795 = torch.aten.dequantize.self %2794 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1123 = torch.constant.int 12 | |
| %2796 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2797 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2798 = torch.aten.quantize_per_tensor %170, %2796, %2797, %int12_1123 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2799 = torch.aten.int_repr %2798 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %2800 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2801 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2802 = torch.aten._make_per_tensor_quantized_tensor %2799, %2800, %2801 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2803 = torch.aten.dequantize.self %2802 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_1124 = torch.constant.int 12 | |
| %2804 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2805 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2806 = torch.aten.quantize_per_tensor %171, %2804, %2805, %int12_1124 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2807 = torch.aten.int_repr %2806 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2808 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2809 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2810 = torch.aten._make_per_tensor_quantized_tensor %2807, %2808, %2809 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2811 = torch.aten.dequantize.self %2810 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_1125 = torch.constant.int 0 | |
| %int3_1126 = torch.constant.int 3 | |
| %int1_1127 = torch.constant.int 1 | |
| %int1_1128 = torch.constant.int 1 | |
| %int1_1129 = torch.constant.int 1 | |
| %int1_1130 = torch.constant.int 1 | |
| %int0_1131 = torch.constant.int 0 | |
| %2812 = torch.prim.ListConstruct %int0_1125, %int3_1126 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2813 = torch.prim.ListConstruct %int1_1127, %int1_1128 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2814 = torch.prim.ListConstruct %int1_1129, %int1_1130 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2815 = torch.prim.ListConstruct %int0_1131, %int0_1131 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1132 = torch.constant.bool false | |
| %int1_1133 = torch.constant.int 1 | |
| %2816 = torch.aten.convolution %2795, %2803, %2811, %2814, %2812, %2813, %false_1132, %2815, %int1_1133 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2817 = torch.aten.relu %2816 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1134 = torch.constant.int 12 | |
| %2818 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2819 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2820 = torch.aten.quantize_per_tensor %2817, %2818, %2819, %int12_1134 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2821 = torch.aten.int_repr %2820 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2822 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2823 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2824 = torch.aten._make_per_tensor_quantized_tensor %2821, %2822, %2823 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2825 = torch.aten.dequantize.self %2824 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1135 = torch.constant.int 12 | |
| %2826 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2827 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2828 = torch.aten.quantize_per_tensor %172, %2826, %2827, %int12_1135 : !torch.vtensor<[256,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %2829 = torch.aten.int_repr %2828 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],si8> | |
| %2830 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2831 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2832 = torch.aten._make_per_tensor_quantized_tensor %2829, %2830, %2831 : !torch.vtensor<[256,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %2833 = torch.aten.dequantize.self %2832 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],f32> | |
| %int12_1136 = torch.constant.int 12 | |
| %2834 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2835 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2836 = torch.aten.quantize_per_tensor %173, %2834, %2835, %int12_1136 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2837 = torch.aten.int_repr %2836 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %2838 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2839 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2840 = torch.aten._make_per_tensor_quantized_tensor %2837, %2838, %2839 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2841 = torch.aten.dequantize.self %2840 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int3_1137 = torch.constant.int 3 | |
| %int0_1138 = torch.constant.int 0 | |
| %int1_1139 = torch.constant.int 1 | |
| %int1_1140 = torch.constant.int 1 | |
| %int1_1141 = torch.constant.int 1 | |
| %int1_1142 = torch.constant.int 1 | |
| %int0_1143 = torch.constant.int 0 | |
| %2842 = torch.prim.ListConstruct %int3_1137, %int0_1138 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2843 = torch.prim.ListConstruct %int1_1139, %int1_1140 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2844 = torch.prim.ListConstruct %int1_1141, %int1_1142 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2845 = torch.prim.ListConstruct %int0_1143, %int0_1143 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1144 = torch.constant.bool false | |
| %int1_1145 = torch.constant.int 1 | |
| %2846 = torch.aten.convolution %2825, %2833, %2841, %2844, %2842, %2843, %false_1144, %2845, %int1_1145 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,7,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %2847 = torch.aten.relu %2846 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
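| // End of this block's 1x1 -> 1x7 -> 7x1 branch over %2743 (output 32x256x12x12); another factorized branch over | |
| // %2743 follows. | |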
| %int12_1146 = torch.constant.int 12 | |
| %2848 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2849 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2850 = torch.aten.quantize_per_tensor %174, %2848, %2849, %int12_1146 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2851 = torch.aten.int_repr %2850 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %2852 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2853 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2854 = torch.aten._make_per_tensor_quantized_tensor %2851, %2852, %2853 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %2855 = torch.aten.dequantize.self %2854 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_1147 = torch.constant.int 12 | |
| %2856 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2857 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2858 = torch.aten.quantize_per_tensor %175, %2856, %2857, %int12_1147 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2859 = torch.aten.int_repr %2858 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2860 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2861 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2862 = torch.aten._make_per_tensor_quantized_tensor %2859, %2860, %2861 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2863 = torch.aten.dequantize.self %2862 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_1148 = torch.constant.int 0 | |
| %int0_1149 = torch.constant.int 0 | |
| %int1_1150 = torch.constant.int 1 | |
| %int1_1151 = torch.constant.int 1 | |
| %int1_1152 = torch.constant.int 1 | |
| %int1_1153 = torch.constant.int 1 | |
| %int0_1154 = torch.constant.int 0 | |
| %2864 = torch.prim.ListConstruct %int0_1148, %int0_1149 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2865 = torch.prim.ListConstruct %int1_1150, %int1_1151 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2866 = torch.prim.ListConstruct %int1_1152, %int1_1153 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2867 = torch.prim.ListConstruct %int0_1154, %int0_1154 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1155 = torch.constant.bool false | |
| %int1_1156 = torch.constant.int 1 | |
| %2868 = torch.aten.convolution %2743, %2855, %2863, %2866, %2864, %2865, %false_1155, %2867, %int1_1156 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2869 = torch.aten.relu %2868 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1157 = torch.constant.int 12 | |
| %2870 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %2871 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2872 = torch.aten.quantize_per_tensor %2869, %2870, %2871, %int12_1157 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2873 = torch.aten.int_repr %2872 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2874 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %2875 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2876 = torch.aten._make_per_tensor_quantized_tensor %2873, %2874, %2875 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2877 = torch.aten.dequantize.self %2876 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1158 = torch.constant.int 12 | |
| %2878 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2879 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2880 = torch.aten.quantize_per_tensor %176, %2878, %2879, %int12_1158 : !torch.vtensor<[192,192,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %2881 = torch.aten.int_repr %2880 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],si8> | |
| %2882 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %2883 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2884 = torch.aten._make_per_tensor_quantized_tensor %2881, %2882, %2883 : !torch.vtensor<[192,192,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %2885 = torch.aten.dequantize.self %2884 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],f32> | |
| %int12_1159 = torch.constant.int 12 | |
| %2886 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2887 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2888 = torch.aten.quantize_per_tensor %177, %2886, %2887, %int12_1159 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2889 = torch.aten.int_repr %2888 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %2890 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2891 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2892 = torch.aten._make_per_tensor_quantized_tensor %2889, %2890, %2891 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %2893 = torch.aten.dequantize.self %2892 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int3_1160 = torch.constant.int 3 | |
| %int0_1161 = torch.constant.int 0 | |
| %int1_1162 = torch.constant.int 1 | |
| %int1_1163 = torch.constant.int 1 | |
| %int1_1164 = torch.constant.int 1 | |
| %int1_1165 = torch.constant.int 1 | |
| %int0_1166 = torch.constant.int 0 | |
| %2894 = torch.prim.ListConstruct %int3_1160, %int0_1161 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2895 = torch.prim.ListConstruct %int1_1162, %int1_1163 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2896 = torch.prim.ListConstruct %int1_1164, %int1_1165 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2897 = torch.prim.ListConstruct %int0_1166, %int0_1166 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1167 = torch.constant.bool false | |
| %int1_1168 = torch.constant.int 1 | |
| %2898 = torch.aten.convolution %2877, %2885, %2893, %2896, %2894, %2895, %false_1167, %2897, %int1_1168 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[192,192,7,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %2899 = torch.aten.relu %2898 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1169 = torch.constant.int 12 | |
| %2900 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %2901 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2902 = torch.aten.quantize_per_tensor %2899, %2900, %2901, %int12_1169 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2903 = torch.aten.int_repr %2902 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %2904 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %2905 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2906 = torch.aten._make_per_tensor_quantized_tensor %2903, %2904, %2905 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %2907 = torch.aten.dequantize.self %2906 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1170 = torch.constant.int 12 | |
| %2908 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2909 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2910 = torch.aten.quantize_per_tensor %178, %2908, %2909, %int12_1170 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2911 = torch.aten.int_repr %2910 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %2912 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2913 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2914 = torch.aten._make_per_tensor_quantized_tensor %2911, %2912, %2913 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %2915 = torch.aten.dequantize.self %2914 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_1171 = torch.constant.int 12 | |
| %2916 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2917 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2918 = torch.aten.quantize_per_tensor %179, %2916, %2917, %int12_1171 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2919 = torch.aten.int_repr %2918 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2920 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2921 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2922 = torch.aten._make_per_tensor_quantized_tensor %2919, %2920, %2921 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2923 = torch.aten.dequantize.self %2922 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_1172 = torch.constant.int 0 | |
| %int3_1173 = torch.constant.int 3 | |
| %int1_1174 = torch.constant.int 1 | |
| %int1_1175 = torch.constant.int 1 | |
| %int1_1176 = torch.constant.int 1 | |
| %int1_1177 = torch.constant.int 1 | |
| %int0_1178 = torch.constant.int 0 | |
| %2924 = torch.prim.ListConstruct %int0_1172, %int3_1173 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2925 = torch.prim.ListConstruct %int1_1174, %int1_1175 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2926 = torch.prim.ListConstruct %int1_1176, %int1_1177 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2927 = torch.prim.ListConstruct %int0_1178, %int0_1178 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1179 = torch.constant.bool false | |
| %int1_1180 = torch.constant.int 1 | |
| %2928 = torch.aten.convolution %2907, %2915, %2923, %2926, %2924, %2925, %false_1179, %2927, %int1_1180 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2929 = torch.aten.relu %2928 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1181 = torch.constant.int 12 | |
| %2930 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2931 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2932 = torch.aten.quantize_per_tensor %2929, %2930, %2931, %int12_1181 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2933 = torch.aten.int_repr %2932 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2934 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2935 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2936 = torch.aten._make_per_tensor_quantized_tensor %2933, %2934, %2935 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2937 = torch.aten.dequantize.self %2936 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1182 = torch.constant.int 12 | |
| %2938 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2939 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2940 = torch.aten.quantize_per_tensor %180, %2938, %2939, %int12_1182 : !torch.vtensor<[224,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %2941 = torch.aten.int_repr %2940 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],si8> | |
| %2942 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2943 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2944 = torch.aten._make_per_tensor_quantized_tensor %2941, %2942, %2943 : !torch.vtensor<[224,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %2945 = torch.aten.dequantize.self %2944 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],f32> | |
| %int12_1183 = torch.constant.int 12 | |
| %2946 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2947 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2948 = torch.aten.quantize_per_tensor %181, %2946, %2947, %int12_1183 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2949 = torch.aten.int_repr %2948 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %2950 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %2951 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2952 = torch.aten._make_per_tensor_quantized_tensor %2949, %2950, %2951 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %2953 = torch.aten.dequantize.self %2952 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int3_1184 = torch.constant.int 3 | |
| %int0_1185 = torch.constant.int 0 | |
| %int1_1186 = torch.constant.int 1 | |
| %int1_1187 = torch.constant.int 1 | |
| %int1_1188 = torch.constant.int 1 | |
| %int1_1189 = torch.constant.int 1 | |
| %int0_1190 = torch.constant.int 0 | |
| %2954 = torch.prim.ListConstruct %int3_1184, %int0_1185 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2955 = torch.prim.ListConstruct %int1_1186, %int1_1187 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2956 = torch.prim.ListConstruct %int1_1188, %int1_1189 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2957 = torch.prim.ListConstruct %int0_1190, %int0_1190 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1191 = torch.constant.bool false | |
| %int1_1192 = torch.constant.int 1 | |
| %2958 = torch.aten.convolution %2937, %2945, %2953, %2956, %2954, %2955, %false_1191, %2957, %int1_1192 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[224,224,7,1],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %2959 = torch.aten.relu %2958 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1193 = torch.constant.int 12 | |
| %2960 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %2961 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2962 = torch.aten.quantize_per_tensor %2959, %2960, %2961, %int12_1193 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2963 = torch.aten.int_repr %2962 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %2964 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %2965 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2966 = torch.aten._make_per_tensor_quantized_tensor %2963, %2964, %2965 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %2967 = torch.aten.dequantize.self %2966 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1194 = torch.constant.int 12 | |
| %2968 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2969 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2970 = torch.aten.quantize_per_tensor %182, %2968, %2969, %int12_1194 : !torch.vtensor<[256,224,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %2971 = torch.aten.int_repr %2970 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],si8> | |
| %2972 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %2973 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2974 = torch.aten._make_per_tensor_quantized_tensor %2971, %2972, %2973 : !torch.vtensor<[256,224,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %2975 = torch.aten.dequantize.self %2974 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],f32> | |
| %int12_1195 = torch.constant.int 12 | |
| %2976 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2977 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2978 = torch.aten.quantize_per_tensor %183, %2976, %2977, %int12_1195 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2979 = torch.aten.int_repr %2978 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %2980 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2981 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2982 = torch.aten._make_per_tensor_quantized_tensor %2979, %2980, %2981 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %2983 = torch.aten.dequantize.self %2982 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1196 = torch.constant.int 0 | |
| %int3_1197 = torch.constant.int 3 | |
| %int1_1198 = torch.constant.int 1 | |
| %int1_1199 = torch.constant.int 1 | |
| %int1_1200 = torch.constant.int 1 | |
| %int1_1201 = torch.constant.int 1 | |
| %int0_1202 = torch.constant.int 0 | |
| %2984 = torch.prim.ListConstruct %int0_1196, %int3_1197 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2985 = torch.prim.ListConstruct %int1_1198, %int1_1199 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2986 = torch.prim.ListConstruct %int1_1200, %int1_1201 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2987 = torch.prim.ListConstruct %int0_1202, %int0_1202 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1203 = torch.constant.bool false | |
| %int1_1204 = torch.constant.int 1 | |
| %2988 = torch.aten.convolution %2967, %2975, %2983, %2986, %2984, %2985, %false_1203, %2987, %int1_1204 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,1,7],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %2989 = torch.aten.relu %2988 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
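| // %2989 closes the factorized-7x7 branch visible above: 7x1 -> 1x7 -> 7x1 -> 1x7 convolutions | |
| // ending at 256 channels. The ops below form the pooling branch of the same block: a 3x3 average | |
| // pool over the 1024-channel map %2743 followed by a 1x1 convolution down to 128 channels. | |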
| %int3_1205 = torch.constant.int 3 | |
| %int3_1206 = torch.constant.int 3 | |
| %int1_1207 = torch.constant.int 1 | |
| %int1_1208 = torch.constant.int 1 | |
| %int1_1209 = torch.constant.int 1 | |
| %int1_1210 = torch.constant.int 1 | |
| %int1_1211 = torch.constant.int 1 | |
| %int1_1212 = torch.constant.int 1 | |
| %2990 = torch.prim.ListConstruct %int3_1205, %int3_1206 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %2991 = torch.prim.ListConstruct %int1_1207, %int1_1208, %int1_1209, %int1_1210 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2992 = torch.prim.ListConstruct %int1_1211, %int1_1212 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1213 = torch.constant.bool false | |
| %false_1214 = torch.constant.bool false | |
| %none_1215 = torch.constant.none | |
| %2993 = torch.aten.avg_pool2d %2743, %2990, %2992, %2991, %false_1213, %false_1214, %none_1215 : !torch.vtensor<[32,1024,12,12],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1024,12,12],f32> | |
| %2994 = torch.aten.mul.Tensor %2993, %308 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1024,12,12],f32> | |
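| // The 3x3 average pool above uses stride 1 and count_include_pad = false; note that its padding | |
| // operand (%2991) is a four-element list while the kernel and stride lists have two elements. The | |
| // pooled result is then multiplied element-wise by the splat f32 literal %308, which appears to be | |
| // a rescaling constant baked in by the exporter (not verified here). | |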
| %int12_1216 = torch.constant.int 12 | |
| %2995 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %2996 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %2997 = torch.aten.quantize_per_tensor %2994, %2995, %2996, %int12_1216 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %2998 = torch.aten.int_repr %2997 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %2999 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3000 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3001 = torch.aten._make_per_tensor_quantized_tensor %2998, %2999, %3000 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3002 = torch.aten.dequantize.self %3001 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_1217 = torch.constant.int 12 | |
| %3003 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3004 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3005 = torch.aten.quantize_per_tensor %184, %3003, %3004, %int12_1217 : !torch.vtensor<[128,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %3006 = torch.aten.int_repr %3005 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],si8> | |
| %3007 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3008 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3009 = torch.aten._make_per_tensor_quantized_tensor %3006, %3007, %3008 : !torch.vtensor<[128,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %3010 = torch.aten.dequantize.self %3009 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],f32> | |
| %int12_1218 = torch.constant.int 12 | |
| %3011 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3012 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3013 = torch.aten.quantize_per_tensor %185, %3011, %3012, %int12_1218 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %3014 = torch.aten.int_repr %3013 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8> | |
| %3015 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3016 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3017 = torch.aten._make_per_tensor_quantized_tensor %3014, %3015, %3016 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %3018 = torch.aten.dequantize.self %3017 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32> | |
| %int0_1219 = torch.constant.int 0 | |
| %int0_1220 = torch.constant.int 0 | |
| %int1_1221 = torch.constant.int 1 | |
| %int1_1222 = torch.constant.int 1 | |
| %int1_1223 = torch.constant.int 1 | |
| %int1_1224 = torch.constant.int 1 | |
| %int0_1225 = torch.constant.int 0 | |
| %3019 = torch.prim.ListConstruct %int0_1219, %int0_1220 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3020 = torch.prim.ListConstruct %int1_1221, %int1_1222 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3021 = torch.prim.ListConstruct %int1_1223, %int1_1224 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3022 = torch.prim.ListConstruct %int0_1225, %int0_1225 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1226 = torch.constant.bool false | |
| %int1_1227 = torch.constant.int 1 | |
| %3023 = torch.aten.convolution %3002, %3010, %3018, %3021, %3019, %3020, %false_1226, %3022, %int1_1227 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[128,1024,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,12,12],f32> | |
| %3024 = torch.aten.relu %3023 : !torch.vtensor<[32,128,12,12],f32> -> !torch.vtensor<[32,128,12,12],f32> | |
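| // %3024 is the pooling-branch output (128 channels); the four branch results are gathered below. | |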
| %3025 = torch.prim.ListConstruct %2765, %2847, %2989, %3024 : (!torch.vtensor<[32,384,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,128,12,12],f32>) -> !torch.list<vtensor> | |
| %int1_1228 = torch.constant.int 1 | |
| %3026 = torch.aten.cat %3025, %int1_1228 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1024,12,12],f32> | |
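| // End of one four-branch block: %3026 concatenates 384 + 256 + 256 + 128 = 1024 channels along | |
| // dim 1 into [32,1024,12,12], and is re-quantized/dequantized before entering the next block. | |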
| %int12_1229 = torch.constant.int 12 | |
| %3027 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3028 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3029 = torch.aten.quantize_per_tensor %3026, %3027, %3028, %int12_1229 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3030 = torch.aten.int_repr %3029 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %3031 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3032 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3033 = torch.aten._make_per_tensor_quantized_tensor %3030, %3031, %3032 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3034 = torch.aten.dequantize.self %3033 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
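| // %3034 is the shared 1024-channel input of the next four branches: the 1x1 convolutions at %3055, | |
| // %3077 and %3159, and the average-pool path starting at %3284. | |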
| %int12_1230 = torch.constant.int 12 | |
| %3035 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3036 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3037 = torch.aten.quantize_per_tensor %186, %3035, %3036, %int12_1230 : !torch.vtensor<[384,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %3038 = torch.aten.int_repr %3037 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],si8> | |
| %3039 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3040 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3041 = torch.aten._make_per_tensor_quantized_tensor %3038, %3039, %3040 : !torch.vtensor<[384,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %3042 = torch.aten.dequantize.self %3041 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],f32> | |
| %int12_1231 = torch.constant.int 12 | |
| %3043 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3044 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3045 = torch.aten.quantize_per_tensor %187, %3043, %3044, %int12_1231 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %3046 = torch.aten.int_repr %3045 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %3047 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3048 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3049 = torch.aten._make_per_tensor_quantized_tensor %3046, %3047, %3048 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %3050 = torch.aten.dequantize.self %3049 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_1232 = torch.constant.int 0 | |
| %int0_1233 = torch.constant.int 0 | |
| %int1_1234 = torch.constant.int 1 | |
| %int1_1235 = torch.constant.int 1 | |
| %int1_1236 = torch.constant.int 1 | |
| %int1_1237 = torch.constant.int 1 | |
| %int0_1238 = torch.constant.int 0 | |
| %3051 = torch.prim.ListConstruct %int0_1232, %int0_1233 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3052 = torch.prim.ListConstruct %int1_1234, %int1_1235 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3053 = torch.prim.ListConstruct %int1_1236, %int1_1237 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3054 = torch.prim.ListConstruct %int0_1238, %int0_1238 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1239 = torch.constant.bool false | |
| %int1_1240 = torch.constant.int 1 | |
| %3055 = torch.aten.convolution %3034, %3042, %3050, %3053, %3051, %3052, %false_1239, %3054, %int1_1240 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[384,1024,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,12,12],f32> | |
| %3056 = torch.aten.relu %3055 : !torch.vtensor<[32,384,12,12],f32> -> !torch.vtensor<[32,384,12,12],f32> | |
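| // Branch output %3056: single 1x1 convolution, 1024 -> 384 channels. | |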
| %int12_1241 = torch.constant.int 12 | |
| %3057 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3058 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3059 = torch.aten.quantize_per_tensor %188, %3057, %3058, %int12_1241 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3060 = torch.aten.int_repr %3059 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %3061 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3062 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3063 = torch.aten._make_per_tensor_quantized_tensor %3060, %3061, %3062 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3064 = torch.aten.dequantize.self %3063 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_1242 = torch.constant.int 12 | |
| %3065 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3066 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3067 = torch.aten.quantize_per_tensor %189, %3065, %3066, %int12_1242 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3068 = torch.aten.int_repr %3067 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %3069 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3070 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3071 = torch.aten._make_per_tensor_quantized_tensor %3068, %3069, %3070 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3072 = torch.aten.dequantize.self %3071 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_1243 = torch.constant.int 0 | |
| %int0_1244 = torch.constant.int 0 | |
| %int1_1245 = torch.constant.int 1 | |
| %int1_1246 = torch.constant.int 1 | |
| %int1_1247 = torch.constant.int 1 | |
| %int1_1248 = torch.constant.int 1 | |
| %int0_1249 = torch.constant.int 0 | |
| %3073 = torch.prim.ListConstruct %int0_1243, %int0_1244 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3074 = torch.prim.ListConstruct %int1_1245, %int1_1246 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3075 = torch.prim.ListConstruct %int1_1247, %int1_1248 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3076 = torch.prim.ListConstruct %int0_1249, %int0_1249 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1250 = torch.constant.bool false | |
| %int1_1251 = torch.constant.int 1 | |
| %3077 = torch.aten.convolution %3034, %3064, %3072, %3075, %3073, %3074, %false_1250, %3076, %int1_1251 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %3078 = torch.aten.relu %3077 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1252 = torch.constant.int 12 | |
| %3079 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3080 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3081 = torch.aten.quantize_per_tensor %3078, %3079, %3080, %int12_1252 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3082 = torch.aten.int_repr %3081 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %3083 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3084 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3085 = torch.aten._make_per_tensor_quantized_tensor %3082, %3083, %3084 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3086 = torch.aten.dequantize.self %3085 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1253 = torch.constant.int 12 | |
| %3087 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3088 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3089 = torch.aten.quantize_per_tensor %190, %3087, %3088, %int12_1253 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %3090 = torch.aten.int_repr %3089 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %3091 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3092 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3093 = torch.aten._make_per_tensor_quantized_tensor %3090, %3091, %3092 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %3094 = torch.aten.dequantize.self %3093 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_1254 = torch.constant.int 12 | |
| %3095 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3096 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3097 = torch.aten.quantize_per_tensor %191, %3095, %3096, %int12_1254 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3098 = torch.aten.int_repr %3097 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %3099 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3100 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3101 = torch.aten._make_per_tensor_quantized_tensor %3098, %3099, %3100 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3102 = torch.aten.dequantize.self %3101 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_1255 = torch.constant.int 0 | |
| %int3_1256 = torch.constant.int 3 | |
| %int1_1257 = torch.constant.int 1 | |
| %int1_1258 = torch.constant.int 1 | |
| %int1_1259 = torch.constant.int 1 | |
| %int1_1260 = torch.constant.int 1 | |
| %int0_1261 = torch.constant.int 0 | |
| %3103 = torch.prim.ListConstruct %int0_1255, %int3_1256 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3104 = torch.prim.ListConstruct %int1_1257, %int1_1258 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3105 = torch.prim.ListConstruct %int1_1259, %int1_1260 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3106 = torch.prim.ListConstruct %int0_1261, %int0_1261 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1262 = torch.constant.bool false | |
| %int1_1263 = torch.constant.int 1 | |
| %3107 = torch.aten.convolution %3086, %3094, %3102, %3105, %3103, %3104, %false_1262, %3106, %int1_1263 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %3108 = torch.aten.relu %3107 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1264 = torch.constant.int 12 | |
| %3109 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3110 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3111 = torch.aten.quantize_per_tensor %3108, %3109, %3110, %int12_1264 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3112 = torch.aten.int_repr %3111 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %3113 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3114 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3115 = torch.aten._make_per_tensor_quantized_tensor %3112, %3113, %3114 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3116 = torch.aten.dequantize.self %3115 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1265 = torch.constant.int 12 | |
| %3117 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3118 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3119 = torch.aten.quantize_per_tensor %192, %3117, %3118, %int12_1265 : !torch.vtensor<[256,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %3120 = torch.aten.int_repr %3119 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],si8> | |
| %3121 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3122 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3123 = torch.aten._make_per_tensor_quantized_tensor %3120, %3121, %3122 : !torch.vtensor<[256,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %3124 = torch.aten.dequantize.self %3123 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],f32> | |
| %int12_1266 = torch.constant.int 12 | |
| %3125 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3126 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3127 = torch.aten.quantize_per_tensor %193, %3125, %3126, %int12_1266 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3128 = torch.aten.int_repr %3127 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3129 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3130 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3131 = torch.aten._make_per_tensor_quantized_tensor %3128, %3129, %3130 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3132 = torch.aten.dequantize.self %3131 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int3_1267 = torch.constant.int 3 | |
| %int0_1268 = torch.constant.int 0 | |
| %int1_1269 = torch.constant.int 1 | |
| %int1_1270 = torch.constant.int 1 | |
| %int1_1271 = torch.constant.int 1 | |
| %int1_1272 = torch.constant.int 1 | |
| %int0_1273 = torch.constant.int 0 | |
| %3133 = torch.prim.ListConstruct %int3_1267, %int0_1268 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3134 = torch.prim.ListConstruct %int1_1269, %int1_1270 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3135 = torch.prim.ListConstruct %int1_1271, %int1_1272 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3136 = torch.prim.ListConstruct %int0_1273, %int0_1273 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1274 = torch.constant.bool false | |
| %int1_1275 = torch.constant.int 1 | |
| %3137 = torch.aten.convolution %3116, %3124, %3132, %3135, %3133, %3134, %false_1274, %3136, %int1_1275 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,7,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %3138 = torch.aten.relu %3137 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
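| // Branch output %3138: 1x1 (192) -> 1x7 (224) -> 7x1 (256) convolution stack. | |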
| %int12_1276 = torch.constant.int 12 | |
| %3139 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3140 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3141 = torch.aten.quantize_per_tensor %194, %3139, %3140, %int12_1276 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3142 = torch.aten.int_repr %3141 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %3143 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3144 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3145 = torch.aten._make_per_tensor_quantized_tensor %3142, %3143, %3144 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3146 = torch.aten.dequantize.self %3145 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_1277 = torch.constant.int 12 | |
| %3147 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3148 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3149 = torch.aten.quantize_per_tensor %195, %3147, %3148, %int12_1277 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3150 = torch.aten.int_repr %3149 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %3151 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3152 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3153 = torch.aten._make_per_tensor_quantized_tensor %3150, %3151, %3152 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3154 = torch.aten.dequantize.self %3153 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_1278 = torch.constant.int 0 | |
| %int0_1279 = torch.constant.int 0 | |
| %int1_1280 = torch.constant.int 1 | |
| %int1_1281 = torch.constant.int 1 | |
| %int1_1282 = torch.constant.int 1 | |
| %int1_1283 = torch.constant.int 1 | |
| %int0_1284 = torch.constant.int 0 | |
| %3155 = torch.prim.ListConstruct %int0_1278, %int0_1279 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3156 = torch.prim.ListConstruct %int1_1280, %int1_1281 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3157 = torch.prim.ListConstruct %int1_1282, %int1_1283 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3158 = torch.prim.ListConstruct %int0_1284, %int0_1284 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1285 = torch.constant.bool false | |
| %int1_1286 = torch.constant.int 1 | |
| %3159 = torch.aten.convolution %3034, %3146, %3154, %3157, %3155, %3156, %false_1285, %3158, %int1_1286 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %3160 = torch.aten.relu %3159 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1287 = torch.constant.int 12 | |
| %3161 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3162 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3163 = torch.aten.quantize_per_tensor %3160, %3161, %3162, %int12_1287 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3164 = torch.aten.int_repr %3163 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %3165 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3166 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3167 = torch.aten._make_per_tensor_quantized_tensor %3164, %3165, %3166 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3168 = torch.aten.dequantize.self %3167 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1288 = torch.constant.int 12 | |
| %3169 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3170 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3171 = torch.aten.quantize_per_tensor %196, %3169, %3170, %int12_1288 : !torch.vtensor<[192,192,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %3172 = torch.aten.int_repr %3171 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],si8> | |
| %3173 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3174 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3175 = torch.aten._make_per_tensor_quantized_tensor %3172, %3173, %3174 : !torch.vtensor<[192,192,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %3176 = torch.aten.dequantize.self %3175 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],f32> | |
| %int12_1289 = torch.constant.int 12 | |
| %3177 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3178 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3179 = torch.aten.quantize_per_tensor %197, %3177, %3178, %int12_1289 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3180 = torch.aten.int_repr %3179 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %3181 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3182 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3183 = torch.aten._make_per_tensor_quantized_tensor %3180, %3181, %3182 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3184 = torch.aten.dequantize.self %3183 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int3_1290 = torch.constant.int 3 | |
| %int0_1291 = torch.constant.int 0 | |
| %int1_1292 = torch.constant.int 1 | |
| %int1_1293 = torch.constant.int 1 | |
| %int1_1294 = torch.constant.int 1 | |
| %int1_1295 = torch.constant.int 1 | |
| %int0_1296 = torch.constant.int 0 | |
| %3185 = torch.prim.ListConstruct %int3_1290, %int0_1291 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3186 = torch.prim.ListConstruct %int1_1292, %int1_1293 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3187 = torch.prim.ListConstruct %int1_1294, %int1_1295 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3188 = torch.prim.ListConstruct %int0_1296, %int0_1296 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1297 = torch.constant.bool false | |
| %int1_1298 = torch.constant.int 1 | |
| %3189 = torch.aten.convolution %3168, %3176, %3184, %3187, %3185, %3186, %false_1297, %3188, %int1_1298 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[192,192,7,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %3190 = torch.aten.relu %3189 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1299 = torch.constant.int 12 | |
| %3191 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3192 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3193 = torch.aten.quantize_per_tensor %3190, %3191, %3192, %int12_1299 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3194 = torch.aten.int_repr %3193 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %3195 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3196 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3197 = torch.aten._make_per_tensor_quantized_tensor %3194, %3195, %3196 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3198 = torch.aten.dequantize.self %3197 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1300 = torch.constant.int 12 | |
| %3199 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3200 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3201 = torch.aten.quantize_per_tensor %198, %3199, %3200, %int12_1300 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %3202 = torch.aten.int_repr %3201 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %3203 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3204 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3205 = torch.aten._make_per_tensor_quantized_tensor %3202, %3203, %3204 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %3206 = torch.aten.dequantize.self %3205 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_1301 = torch.constant.int 12 | |
| %3207 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3208 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3209 = torch.aten.quantize_per_tensor %199, %3207, %3208, %int12_1301 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3210 = torch.aten.int_repr %3209 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %3211 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3212 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3213 = torch.aten._make_per_tensor_quantized_tensor %3210, %3211, %3212 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3214 = torch.aten.dequantize.self %3213 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_1302 = torch.constant.int 0 | |
| %int3_1303 = torch.constant.int 3 | |
| %int1_1304 = torch.constant.int 1 | |
| %int1_1305 = torch.constant.int 1 | |
| %int1_1306 = torch.constant.int 1 | |
| %int1_1307 = torch.constant.int 1 | |
| %int0_1308 = torch.constant.int 0 | |
| %3215 = torch.prim.ListConstruct %int0_1302, %int3_1303 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3216 = torch.prim.ListConstruct %int1_1304, %int1_1305 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3217 = torch.prim.ListConstruct %int1_1306, %int1_1307 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3218 = torch.prim.ListConstruct %int0_1308, %int0_1308 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1309 = torch.constant.bool false | |
| %int1_1310 = torch.constant.int 1 | |
| %3219 = torch.aten.convolution %3198, %3206, %3214, %3217, %3215, %3216, %false_1309, %3218, %int1_1310 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %3220 = torch.aten.relu %3219 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1311 = torch.constant.int 12 | |
| %3221 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3222 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3223 = torch.aten.quantize_per_tensor %3220, %3221, %3222, %int12_1311 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3224 = torch.aten.int_repr %3223 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %3225 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3226 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3227 = torch.aten._make_per_tensor_quantized_tensor %3224, %3225, %3226 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3228 = torch.aten.dequantize.self %3227 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1312 = torch.constant.int 12 | |
| %3229 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3230 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3231 = torch.aten.quantize_per_tensor %200, %3229, %3230, %int12_1312 : !torch.vtensor<[224,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %3232 = torch.aten.int_repr %3231 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],si8> | |
| %3233 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3234 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3235 = torch.aten._make_per_tensor_quantized_tensor %3232, %3233, %3234 : !torch.vtensor<[224,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %3236 = torch.aten.dequantize.self %3235 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],f32> | |
| %int12_1313 = torch.constant.int 12 | |
| %3237 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3238 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3239 = torch.aten.quantize_per_tensor %201, %3237, %3238, %int12_1313 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3240 = torch.aten.int_repr %3239 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %3241 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3242 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3243 = torch.aten._make_per_tensor_quantized_tensor %3240, %3241, %3242 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3244 = torch.aten.dequantize.self %3243 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int3_1314 = torch.constant.int 3 | |
| %int0_1315 = torch.constant.int 0 | |
| %int1_1316 = torch.constant.int 1 | |
| %int1_1317 = torch.constant.int 1 | |
| %int1_1318 = torch.constant.int 1 | |
| %int1_1319 = torch.constant.int 1 | |
| %int0_1320 = torch.constant.int 0 | |
| %3245 = torch.prim.ListConstruct %int3_1314, %int0_1315 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3246 = torch.prim.ListConstruct %int1_1316, %int1_1317 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3247 = torch.prim.ListConstruct %int1_1318, %int1_1319 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3248 = torch.prim.ListConstruct %int0_1320, %int0_1320 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1321 = torch.constant.bool false | |
| %int1_1322 = torch.constant.int 1 | |
| %3249 = torch.aten.convolution %3228, %3236, %3244, %3247, %3245, %3246, %false_1321, %3248, %int1_1322 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[224,224,7,1],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %3250 = torch.aten.relu %3249 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1323 = torch.constant.int 12 | |
| %3251 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3252 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3253 = torch.aten.quantize_per_tensor %3250, %3251, %3252, %int12_1323 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3254 = torch.aten.int_repr %3253 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %3255 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3256 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3257 = torch.aten._make_per_tensor_quantized_tensor %3254, %3255, %3256 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3258 = torch.aten.dequantize.self %3257 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1324 = torch.constant.int 12 | |
| %3259 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3260 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3261 = torch.aten.quantize_per_tensor %202, %3259, %3260, %int12_1324 : !torch.vtensor<[256,224,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %3262 = torch.aten.int_repr %3261 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],si8> | |
| %3263 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3264 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3265 = torch.aten._make_per_tensor_quantized_tensor %3262, %3263, %3264 : !torch.vtensor<[256,224,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %3266 = torch.aten.dequantize.self %3265 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],f32> | |
| %int12_1325 = torch.constant.int 12 | |
| %3267 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3268 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3269 = torch.aten.quantize_per_tensor %203, %3267, %3268, %int12_1325 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3270 = torch.aten.int_repr %3269 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3271 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3272 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3273 = torch.aten._make_per_tensor_quantized_tensor %3270, %3271, %3272 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3274 = torch.aten.dequantize.self %3273 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1326 = torch.constant.int 0 | |
| %int3_1327 = torch.constant.int 3 | |
| %int1_1328 = torch.constant.int 1 | |
| %int1_1329 = torch.constant.int 1 | |
| %int1_1330 = torch.constant.int 1 | |
| %int1_1331 = torch.constant.int 1 | |
| %int0_1332 = torch.constant.int 0 | |
| %3275 = torch.prim.ListConstruct %int0_1326, %int3_1327 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3276 = torch.prim.ListConstruct %int1_1328, %int1_1329 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3277 = torch.prim.ListConstruct %int1_1330, %int1_1331 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3278 = torch.prim.ListConstruct %int0_1332, %int0_1332 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1333 = torch.constant.bool false | |
| %int1_1334 = torch.constant.int 1 | |
| %3279 = torch.aten.convolution %3258, %3266, %3274, %3277, %3275, %3276, %false_1333, %3278, %int1_1334 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,1,7],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %3280 = torch.aten.relu %3279 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
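| // Branch output %3280: 1x1 (192) -> 7x1 (192) -> 1x7 (224) -> 7x1 (224) -> 1x7 (256) stack. | |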
| %int3_1335 = torch.constant.int 3 | |
| %int3_1336 = torch.constant.int 3 | |
| %int1_1337 = torch.constant.int 1 | |
| %int1_1338 = torch.constant.int 1 | |
| %int1_1339 = torch.constant.int 1 | |
| %int1_1340 = torch.constant.int 1 | |
| %int1_1341 = torch.constant.int 1 | |
| %int1_1342 = torch.constant.int 1 | |
| %3281 = torch.prim.ListConstruct %int3_1335, %int3_1336 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3282 = torch.prim.ListConstruct %int1_1337, %int1_1338, %int1_1339, %int1_1340 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %3283 = torch.prim.ListConstruct %int1_1341, %int1_1342 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1343 = torch.constant.bool false | |
| %false_1344 = torch.constant.bool false | |
| %none_1345 = torch.constant.none | |
| %3284 = torch.aten.avg_pool2d %3034, %3281, %3283, %3282, %false_1343, %false_1344, %none_1345 : !torch.vtensor<[32,1024,12,12],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1024,12,12],f32> | |
| %3285 = torch.aten.mul.Tensor %3284, %308 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_1346 = torch.constant.int 12 | |
| %3286 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3287 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3288 = torch.aten.quantize_per_tensor %3285, %3286, %3287, %int12_1346 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3289 = torch.aten.int_repr %3288 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %3290 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3291 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3292 = torch.aten._make_per_tensor_quantized_tensor %3289, %3290, %3291 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3293 = torch.aten.dequantize.self %3292 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_1347 = torch.constant.int 12 | |
| %3294 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3295 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3296 = torch.aten.quantize_per_tensor %204, %3294, %3295, %int12_1347 : !torch.vtensor<[128,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %3297 = torch.aten.int_repr %3296 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],si8> | |
| %3298 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3299 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3300 = torch.aten._make_per_tensor_quantized_tensor %3297, %3298, %3299 : !torch.vtensor<[128,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %3301 = torch.aten.dequantize.self %3300 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],f32> | |
| %int12_1348 = torch.constant.int 12 | |
| %3302 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3303 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3304 = torch.aten.quantize_per_tensor %205, %3302, %3303, %int12_1348 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %3305 = torch.aten.int_repr %3304 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8> | |
| %3306 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3307 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3308 = torch.aten._make_per_tensor_quantized_tensor %3305, %3306, %3307 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %3309 = torch.aten.dequantize.self %3308 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32> | |
| %int0_1349 = torch.constant.int 0 | |
| %int0_1350 = torch.constant.int 0 | |
| %int1_1351 = torch.constant.int 1 | |
| %int1_1352 = torch.constant.int 1 | |
| %int1_1353 = torch.constant.int 1 | |
| %int1_1354 = torch.constant.int 1 | |
| %int0_1355 = torch.constant.int 0 | |
| %3310 = torch.prim.ListConstruct %int0_1349, %int0_1350 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3311 = torch.prim.ListConstruct %int1_1351, %int1_1352 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3312 = torch.prim.ListConstruct %int1_1353, %int1_1354 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3313 = torch.prim.ListConstruct %int0_1355, %int0_1355 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1356 = torch.constant.bool false | |
| %int1_1357 = torch.constant.int 1 | |
| %3314 = torch.aten.convolution %3293, %3301, %3309, %3312, %3310, %3311, %false_1356, %3313, %int1_1357 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[128,1024,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,12,12],f32> | |
| %3315 = torch.aten.relu %3314 : !torch.vtensor<[32,128,12,12],f32> -> !torch.vtensor<[32,128,12,12],f32> | |
| %3316 = torch.prim.ListConstruct %3056, %3138, %3280, %3315 : (!torch.vtensor<[32,384,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,128,12,12],f32>) -> !torch.list<vtensor> | |
| %int1_1358 = torch.constant.int 1 | |
| %3317 = torch.aten.cat %3316, %int1_1358 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1024,12,12],f32> | |
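| // %3317 concatenates the four branch outputs along dim 1 (384 + 256 + 256 + 128 = 1024 channels at 12x12), | |
| // closing one block of what appears to be an Inception-style network before the result is re-quantized. | |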
| %int12_1359 = torch.constant.int 12 | |
| %3318 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3319 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3320 = torch.aten.quantize_per_tensor %3317, %3318, %3319, %int12_1359 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3321 = torch.aten.int_repr %3320 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %3322 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3323 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3324 = torch.aten._make_per_tensor_quantized_tensor %3321, %3322, %3323 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3325 = torch.aten.dequantize.self %3324 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_1360 = torch.constant.int 12 | |
| %3326 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3327 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3328 = torch.aten.quantize_per_tensor %206, %3326, %3327, %int12_1360 : !torch.vtensor<[384,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %3329 = torch.aten.int_repr %3328 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],si8> | |
| %3330 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3331 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3332 = torch.aten._make_per_tensor_quantized_tensor %3329, %3330, %3331 : !torch.vtensor<[384,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1024,1,1],!torch.qint8> | |
| %3333 = torch.aten.dequantize.self %3332 : !torch.vtensor<[384,1024,1,1],!torch.qint8> -> !torch.vtensor<[384,1024,1,1],f32> | |
| %int12_1361 = torch.constant.int 12 | |
| %3334 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3335 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3336 = torch.aten.quantize_per_tensor %207, %3334, %3335, %int12_1361 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %3337 = torch.aten.int_repr %3336 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %3338 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3339 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3340 = torch.aten._make_per_tensor_quantized_tensor %3337, %3338, %3339 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %3341 = torch.aten.dequantize.self %3340 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_1362 = torch.constant.int 0 | |
| %int0_1363 = torch.constant.int 0 | |
| %int1_1364 = torch.constant.int 1 | |
| %int1_1365 = torch.constant.int 1 | |
| %int1_1366 = torch.constant.int 1 | |
| %int1_1367 = torch.constant.int 1 | |
| %int0_1368 = torch.constant.int 0 | |
| %3342 = torch.prim.ListConstruct %int0_1362, %int0_1363 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3343 = torch.prim.ListConstruct %int1_1364, %int1_1365 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3344 = torch.prim.ListConstruct %int1_1366, %int1_1367 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3345 = torch.prim.ListConstruct %int0_1368, %int0_1368 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1369 = torch.constant.bool false | |
| %int1_1370 = torch.constant.int 1 | |
| %3346 = torch.aten.convolution %3325, %3333, %3341, %3344, %3342, %3343, %false_1369, %3345, %int1_1370 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[384,1024,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,12,12],f32> | |
| %3347 = torch.aten.relu %3346 : !torch.vtensor<[32,384,12,12],f32> -> !torch.vtensor<[32,384,12,12],f32> | |
| %int12_1371 = torch.constant.int 12 | |
| %3348 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3349 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3350 = torch.aten.quantize_per_tensor %208, %3348, %3349, %int12_1371 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3351 = torch.aten.int_repr %3350 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %3352 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3353 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3354 = torch.aten._make_per_tensor_quantized_tensor %3351, %3352, %3353 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3355 = torch.aten.dequantize.self %3354 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_1372 = torch.constant.int 12 | |
| %3356 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3357 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3358 = torch.aten.quantize_per_tensor %209, %3356, %3357, %int12_1372 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3359 = torch.aten.int_repr %3358 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %3360 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3361 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3362 = torch.aten._make_per_tensor_quantized_tensor %3359, %3360, %3361 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3363 = torch.aten.dequantize.self %3362 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_1373 = torch.constant.int 0 | |
| %int0_1374 = torch.constant.int 0 | |
| %int1_1375 = torch.constant.int 1 | |
| %int1_1376 = torch.constant.int 1 | |
| %int1_1377 = torch.constant.int 1 | |
| %int1_1378 = torch.constant.int 1 | |
| %int0_1379 = torch.constant.int 0 | |
| %3364 = torch.prim.ListConstruct %int0_1373, %int0_1374 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3365 = torch.prim.ListConstruct %int1_1375, %int1_1376 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3366 = torch.prim.ListConstruct %int1_1377, %int1_1378 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3367 = torch.prim.ListConstruct %int0_1379, %int0_1379 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1380 = torch.constant.bool false | |
| %int1_1381 = torch.constant.int 1 | |
| %3368 = torch.aten.convolution %3325, %3355, %3363, %3366, %3364, %3365, %false_1380, %3367, %int1_1381 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %3369 = torch.aten.relu %3368 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1382 = torch.constant.int 12 | |
| %3370 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3371 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3372 = torch.aten.quantize_per_tensor %3369, %3370, %3371, %int12_1382 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3373 = torch.aten.int_repr %3372 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %3374 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3375 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3376 = torch.aten._make_per_tensor_quantized_tensor %3373, %3374, %3375 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3377 = torch.aten.dequantize.self %3376 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1383 = torch.constant.int 12 | |
| %3378 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3379 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3380 = torch.aten.quantize_per_tensor %210, %3378, %3379, %int12_1383 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %3381 = torch.aten.int_repr %3380 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %3382 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3383 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3384 = torch.aten._make_per_tensor_quantized_tensor %3381, %3382, %3383 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %3385 = torch.aten.dequantize.self %3384 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_1384 = torch.constant.int 12 | |
| %3386 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3387 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3388 = torch.aten.quantize_per_tensor %211, %3386, %3387, %int12_1384 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3389 = torch.aten.int_repr %3388 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %3390 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3391 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3392 = torch.aten._make_per_tensor_quantized_tensor %3389, %3390, %3391 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3393 = torch.aten.dequantize.self %3392 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_1385 = torch.constant.int 0 | |
| %int3_1386 = torch.constant.int 3 | |
| %int1_1387 = torch.constant.int 1 | |
| %int1_1388 = torch.constant.int 1 | |
| %int1_1389 = torch.constant.int 1 | |
| %int1_1390 = torch.constant.int 1 | |
| %int0_1391 = torch.constant.int 0 | |
| %3394 = torch.prim.ListConstruct %int0_1385, %int3_1386 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3395 = torch.prim.ListConstruct %int1_1387, %int1_1388 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3396 = torch.prim.ListConstruct %int1_1389, %int1_1390 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3397 = torch.prim.ListConstruct %int0_1391, %int0_1391 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1392 = torch.constant.bool false | |
| %int1_1393 = torch.constant.int 1 | |
| %3398 = torch.aten.convolution %3377, %3385, %3393, %3396, %3394, %3395, %false_1392, %3397, %int1_1393 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %3399 = torch.aten.relu %3398 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
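| // The 1x7 convolution above pads by (0, 3) so the 12x12 spatial size is preserved; the 7x1 convolution | |
| // that follows pads by (3, 0) for the same reason (factorized asymmetric kernels). | |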
| %int12_1394 = torch.constant.int 12 | |
| %3400 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3401 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3402 = torch.aten.quantize_per_tensor %3399, %3400, %3401, %int12_1394 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3403 = torch.aten.int_repr %3402 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %3404 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3405 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3406 = torch.aten._make_per_tensor_quantized_tensor %3403, %3404, %3405 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3407 = torch.aten.dequantize.self %3406 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1395 = torch.constant.int 12 | |
| %3408 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3409 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3410 = torch.aten.quantize_per_tensor %212, %3408, %3409, %int12_1395 : !torch.vtensor<[256,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %3411 = torch.aten.int_repr %3410 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],si8> | |
| %3412 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3413 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3414 = torch.aten._make_per_tensor_quantized_tensor %3411, %3412, %3413 : !torch.vtensor<[256,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,7,1],!torch.qint8> | |
| %3415 = torch.aten.dequantize.self %3414 : !torch.vtensor<[256,224,7,1],!torch.qint8> -> !torch.vtensor<[256,224,7,1],f32> | |
| %int12_1396 = torch.constant.int 12 | |
| %3416 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3417 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3418 = torch.aten.quantize_per_tensor %213, %3416, %3417, %int12_1396 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3419 = torch.aten.int_repr %3418 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3420 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3421 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3422 = torch.aten._make_per_tensor_quantized_tensor %3419, %3420, %3421 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3423 = torch.aten.dequantize.self %3422 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int3_1397 = torch.constant.int 3 | |
| %int0_1398 = torch.constant.int 0 | |
| %int1_1399 = torch.constant.int 1 | |
| %int1_1400 = torch.constant.int 1 | |
| %int1_1401 = torch.constant.int 1 | |
| %int1_1402 = torch.constant.int 1 | |
| %int0_1403 = torch.constant.int 0 | |
| %3424 = torch.prim.ListConstruct %int3_1397, %int0_1398 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3425 = torch.prim.ListConstruct %int1_1399, %int1_1400 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3426 = torch.prim.ListConstruct %int1_1401, %int1_1402 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3427 = torch.prim.ListConstruct %int0_1403, %int0_1403 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1404 = torch.constant.bool false | |
| %int1_1405 = torch.constant.int 1 | |
| %3428 = torch.aten.convolution %3407, %3415, %3423, %3426, %3424, %3425, %false_1404, %3427, %int1_1405 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,7,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %3429 = torch.aten.relu %3428 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
| %int12_1406 = torch.constant.int 12 | |
| %3430 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3431 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3432 = torch.aten.quantize_per_tensor %214, %3430, %3431, %int12_1406 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3433 = torch.aten.int_repr %3432 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %3434 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3435 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3436 = torch.aten._make_per_tensor_quantized_tensor %3433, %3434, %3435 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3437 = torch.aten.dequantize.self %3436 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_1407 = torch.constant.int 12 | |
| %3438 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3439 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3440 = torch.aten.quantize_per_tensor %215, %3438, %3439, %int12_1407 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3441 = torch.aten.int_repr %3440 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %3442 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3443 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3444 = torch.aten._make_per_tensor_quantized_tensor %3441, %3442, %3443 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3445 = torch.aten.dequantize.self %3444 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_1408 = torch.constant.int 0 | |
| %int0_1409 = torch.constant.int 0 | |
| %int1_1410 = torch.constant.int 1 | |
| %int1_1411 = torch.constant.int 1 | |
| %int1_1412 = torch.constant.int 1 | |
| %int1_1413 = torch.constant.int 1 | |
| %int0_1414 = torch.constant.int 0 | |
| %3446 = torch.prim.ListConstruct %int0_1408, %int0_1409 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3447 = torch.prim.ListConstruct %int1_1410, %int1_1411 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3448 = torch.prim.ListConstruct %int1_1412, %int1_1413 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3449 = torch.prim.ListConstruct %int0_1414, %int0_1414 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1415 = torch.constant.bool false | |
| %int1_1416 = torch.constant.int 1 | |
| %3450 = torch.aten.convolution %3325, %3437, %3445, %3448, %3446, %3447, %false_1415, %3449, %int1_1416 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %3451 = torch.aten.relu %3450 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1417 = torch.constant.int 12 | |
| %3452 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3453 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3454 = torch.aten.quantize_per_tensor %3451, %3452, %3453, %int12_1417 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3455 = torch.aten.int_repr %3454 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %3456 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3457 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3458 = torch.aten._make_per_tensor_quantized_tensor %3455, %3456, %3457 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3459 = torch.aten.dequantize.self %3458 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1418 = torch.constant.int 12 | |
| %3460 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3461 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3462 = torch.aten.quantize_per_tensor %216, %3460, %3461, %int12_1418 : !torch.vtensor<[192,192,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %3463 = torch.aten.int_repr %3462 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],si8> | |
| %3464 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3465 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3466 = torch.aten._make_per_tensor_quantized_tensor %3463, %3464, %3465 : !torch.vtensor<[192,192,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,192,7,1],!torch.qint8> | |
| %3467 = torch.aten.dequantize.self %3466 : !torch.vtensor<[192,192,7,1],!torch.qint8> -> !torch.vtensor<[192,192,7,1],f32> | |
| %int12_1419 = torch.constant.int 12 | |
| %3468 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3469 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3470 = torch.aten.quantize_per_tensor %217, %3468, %3469, %int12_1419 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3471 = torch.aten.int_repr %3470 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %3472 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3473 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3474 = torch.aten._make_per_tensor_quantized_tensor %3471, %3472, %3473 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3475 = torch.aten.dequantize.self %3474 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int3_1420 = torch.constant.int 3 | |
| %int0_1421 = torch.constant.int 0 | |
| %int1_1422 = torch.constant.int 1 | |
| %int1_1423 = torch.constant.int 1 | |
| %int1_1424 = torch.constant.int 1 | |
| %int1_1425 = torch.constant.int 1 | |
| %int0_1426 = torch.constant.int 0 | |
| %3476 = torch.prim.ListConstruct %int3_1420, %int0_1421 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3477 = torch.prim.ListConstruct %int1_1422, %int1_1423 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3478 = torch.prim.ListConstruct %int1_1424, %int1_1425 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3479 = torch.prim.ListConstruct %int0_1426, %int0_1426 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1427 = torch.constant.bool false | |
| %int1_1428 = torch.constant.int 1 | |
| %3480 = torch.aten.convolution %3459, %3467, %3475, %3478, %3476, %3477, %false_1427, %3479, %int1_1428 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[192,192,7,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %3481 = torch.aten.relu %3480 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1429 = torch.constant.int 12 | |
| %3482 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3483 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3484 = torch.aten.quantize_per_tensor %3481, %3482, %3483, %int12_1429 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3485 = torch.aten.int_repr %3484 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %3486 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %3487 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3488 = torch.aten._make_per_tensor_quantized_tensor %3485, %3486, %3487 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3489 = torch.aten.dequantize.self %3488 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1430 = torch.constant.int 12 | |
| %3490 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3491 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3492 = torch.aten.quantize_per_tensor %218, %3490, %3491, %int12_1430 : !torch.vtensor<[224,192,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %3493 = torch.aten.int_repr %3492 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],si8> | |
| %3494 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3495 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3496 = torch.aten._make_per_tensor_quantized_tensor %3493, %3494, %3495 : !torch.vtensor<[224,192,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,192,1,7],!torch.qint8> | |
| %3497 = torch.aten.dequantize.self %3496 : !torch.vtensor<[224,192,1,7],!torch.qint8> -> !torch.vtensor<[224,192,1,7],f32> | |
| %int12_1431 = torch.constant.int 12 | |
| %3498 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3499 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3500 = torch.aten.quantize_per_tensor %219, %3498, %3499, %int12_1431 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3501 = torch.aten.int_repr %3500 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %3502 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3503 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3504 = torch.aten._make_per_tensor_quantized_tensor %3501, %3502, %3503 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3505 = torch.aten.dequantize.self %3504 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int0_1432 = torch.constant.int 0 | |
| %int3_1433 = torch.constant.int 3 | |
| %int1_1434 = torch.constant.int 1 | |
| %int1_1435 = torch.constant.int 1 | |
| %int1_1436 = torch.constant.int 1 | |
| %int1_1437 = torch.constant.int 1 | |
| %int0_1438 = torch.constant.int 0 | |
| %3506 = torch.prim.ListConstruct %int0_1432, %int3_1433 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3507 = torch.prim.ListConstruct %int1_1434, %int1_1435 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3508 = torch.prim.ListConstruct %int1_1436, %int1_1437 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3509 = torch.prim.ListConstruct %int0_1438, %int0_1438 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1439 = torch.constant.bool false | |
| %int1_1440 = torch.constant.int 1 | |
| %3510 = torch.aten.convolution %3489, %3497, %3505, %3508, %3506, %3507, %false_1439, %3509, %int1_1440 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[224,192,1,7],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %3511 = torch.aten.relu %3510 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1441 = torch.constant.int 12 | |
| %3512 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3513 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3514 = torch.aten.quantize_per_tensor %3511, %3512, %3513, %int12_1441 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3515 = torch.aten.int_repr %3514 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %3516 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3517 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3518 = torch.aten._make_per_tensor_quantized_tensor %3515, %3516, %3517 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3519 = torch.aten.dequantize.self %3518 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1442 = torch.constant.int 12 | |
| %3520 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3521 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3522 = torch.aten.quantize_per_tensor %220, %3520, %3521, %int12_1442 : !torch.vtensor<[224,224,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %3523 = torch.aten.int_repr %3522 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],si8> | |
| %3524 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3525 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3526 = torch.aten._make_per_tensor_quantized_tensor %3523, %3524, %3525 : !torch.vtensor<[224,224,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[224,224,7,1],!torch.qint8> | |
| %3527 = torch.aten.dequantize.self %3526 : !torch.vtensor<[224,224,7,1],!torch.qint8> -> !torch.vtensor<[224,224,7,1],f32> | |
| %int12_1443 = torch.constant.int 12 | |
| %3528 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3529 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3530 = torch.aten.quantize_per_tensor %221, %3528, %3529, %int12_1443 : !torch.vtensor<[224],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3531 = torch.aten.int_repr %3530 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],si8> | |
| %3532 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3533 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3534 = torch.aten._make_per_tensor_quantized_tensor %3531, %3532, %3533 : !torch.vtensor<[224],si8>, !torch.float, !torch.int -> !torch.vtensor<[224],!torch.qint8> | |
| %3535 = torch.aten.dequantize.self %3534 : !torch.vtensor<[224],!torch.qint8> -> !torch.vtensor<[224],f32> | |
| %int3_1444 = torch.constant.int 3 | |
| %int0_1445 = torch.constant.int 0 | |
| %int1_1446 = torch.constant.int 1 | |
| %int1_1447 = torch.constant.int 1 | |
| %int1_1448 = torch.constant.int 1 | |
| %int1_1449 = torch.constant.int 1 | |
| %int0_1450 = torch.constant.int 0 | |
| %3536 = torch.prim.ListConstruct %int3_1444, %int0_1445 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3537 = torch.prim.ListConstruct %int1_1446, %int1_1447 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3538 = torch.prim.ListConstruct %int1_1448, %int1_1449 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3539 = torch.prim.ListConstruct %int0_1450, %int0_1450 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1451 = torch.constant.bool false | |
| %int1_1452 = torch.constant.int 1 | |
| %3540 = torch.aten.convolution %3519, %3527, %3535, %3538, %3536, %3537, %false_1451, %3539, %int1_1452 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[224,224,7,1],f32>, !torch.vtensor<[224],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,224,12,12],f32> | |
| %3541 = torch.aten.relu %3540 : !torch.vtensor<[32,224,12,12],f32> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1453 = torch.constant.int 12 | |
| %3542 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3543 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3544 = torch.aten.quantize_per_tensor %3541, %3542, %3543, %int12_1453 : !torch.vtensor<[32,224,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3545 = torch.aten.int_repr %3544 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],si8> | |
| %3546 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3547 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3548 = torch.aten._make_per_tensor_quantized_tensor %3545, %3546, %3547 : !torch.vtensor<[32,224,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,224,12,12],!torch.qint8> | |
| %3549 = torch.aten.dequantize.self %3548 : !torch.vtensor<[32,224,12,12],!torch.qint8> -> !torch.vtensor<[32,224,12,12],f32> | |
| %int12_1454 = torch.constant.int 12 | |
| %3550 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3551 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3552 = torch.aten.quantize_per_tensor %222, %3550, %3551, %int12_1454 : !torch.vtensor<[256,224,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %3553 = torch.aten.int_repr %3552 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],si8> | |
| %3554 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3555 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3556 = torch.aten._make_per_tensor_quantized_tensor %3553, %3554, %3555 : !torch.vtensor<[256,224,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,224,1,7],!torch.qint8> | |
| %3557 = torch.aten.dequantize.self %3556 : !torch.vtensor<[256,224,1,7],!torch.qint8> -> !torch.vtensor<[256,224,1,7],f32> | |
| %int12_1455 = torch.constant.int 12 | |
| %3558 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3559 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3560 = torch.aten.quantize_per_tensor %223, %3558, %3559, %int12_1455 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3561 = torch.aten.int_repr %3560 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3562 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3563 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3564 = torch.aten._make_per_tensor_quantized_tensor %3561, %3562, %3563 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3565 = torch.aten.dequantize.self %3564 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1456 = torch.constant.int 0 | |
| %int3_1457 = torch.constant.int 3 | |
| %int1_1458 = torch.constant.int 1 | |
| %int1_1459 = torch.constant.int 1 | |
| %int1_1460 = torch.constant.int 1 | |
| %int1_1461 = torch.constant.int 1 | |
| %int0_1462 = torch.constant.int 0 | |
| %3566 = torch.prim.ListConstruct %int0_1456, %int3_1457 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3567 = torch.prim.ListConstruct %int1_1458, %int1_1459 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3568 = torch.prim.ListConstruct %int1_1460, %int1_1461 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3569 = torch.prim.ListConstruct %int0_1462, %int0_1462 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1463 = torch.constant.bool false | |
| %int1_1464 = torch.constant.int 1 | |
| %3570 = torch.aten.convolution %3549, %3557, %3565, %3568, %3566, %3567, %false_1463, %3569, %int1_1464 : !torch.vtensor<[32,224,12,12],f32>, !torch.vtensor<[256,224,1,7],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %3571 = torch.aten.relu %3570 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
| %int3_1465 = torch.constant.int 3 | |
| %int3_1466 = torch.constant.int 3 | |
| %int1_1467 = torch.constant.int 1 | |
| %int1_1468 = torch.constant.int 1 | |
| %int1_1469 = torch.constant.int 1 | |
| %int1_1470 = torch.constant.int 1 | |
| %int1_1471 = torch.constant.int 1 | |
| %int1_1472 = torch.constant.int 1 | |
| %3572 = torch.prim.ListConstruct %int3_1465, %int3_1466 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3573 = torch.prim.ListConstruct %int1_1467, %int1_1468, %int1_1469, %int1_1470 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %3574 = torch.prim.ListConstruct %int1_1471, %int1_1472 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1473 = torch.constant.bool false | |
| %false_1474 = torch.constant.bool false | |
| %none_1475 = torch.constant.none | |
| %3575 = torch.aten.avg_pool2d %3325, %3572, %3574, %3573, %false_1473, %false_1474, %none_1475 : !torch.vtensor<[32,1024,12,12],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1024,12,12],f32> | |
| %3576 = torch.aten.mul.Tensor %3575, %308 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1024,12,12],f32> | |
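| // The 3x3 average pool above keeps the 12x12 spatial size; its output is then scaled elementwise by the | |
| // scalar f32 literal %308, likely a pooling correction factor introduced by the ONNX AveragePool import. | |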
| %int12_1476 = torch.constant.int 12 | |
| %3577 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3578 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3579 = torch.aten.quantize_per_tensor %3576, %3577, %3578, %int12_1476 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3580 = torch.aten.int_repr %3579 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %3581 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3582 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3583 = torch.aten._make_per_tensor_quantized_tensor %3580, %3581, %3582 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3584 = torch.aten.dequantize.self %3583 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_1477 = torch.constant.int 12 | |
| %3585 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3586 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3587 = torch.aten.quantize_per_tensor %224, %3585, %3586, %int12_1477 : !torch.vtensor<[128,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %3588 = torch.aten.int_repr %3587 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],si8> | |
| %3589 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3590 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3591 = torch.aten._make_per_tensor_quantized_tensor %3588, %3589, %3590 : !torch.vtensor<[128,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[128,1024,1,1],!torch.qint8> | |
| %3592 = torch.aten.dequantize.self %3591 : !torch.vtensor<[128,1024,1,1],!torch.qint8> -> !torch.vtensor<[128,1024,1,1],f32> | |
| %int12_1478 = torch.constant.int 12 | |
| %3593 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3594 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3595 = torch.aten.quantize_per_tensor %225, %3593, %3594, %int12_1478 : !torch.vtensor<[128],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %3596 = torch.aten.int_repr %3595 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],si8> | |
| %3597 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3598 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3599 = torch.aten._make_per_tensor_quantized_tensor %3596, %3597, %3598 : !torch.vtensor<[128],si8>, !torch.float, !torch.int -> !torch.vtensor<[128],!torch.qint8> | |
| %3600 = torch.aten.dequantize.self %3599 : !torch.vtensor<[128],!torch.qint8> -> !torch.vtensor<[128],f32> | |
| %int0_1479 = torch.constant.int 0 | |
| %int0_1480 = torch.constant.int 0 | |
| %int1_1481 = torch.constant.int 1 | |
| %int1_1482 = torch.constant.int 1 | |
| %int1_1483 = torch.constant.int 1 | |
| %int1_1484 = torch.constant.int 1 | |
| %int0_1485 = torch.constant.int 0 | |
| %3601 = torch.prim.ListConstruct %int0_1479, %int0_1480 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3602 = torch.prim.ListConstruct %int1_1481, %int1_1482 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3603 = torch.prim.ListConstruct %int1_1483, %int1_1484 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3604 = torch.prim.ListConstruct %int0_1485, %int0_1485 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1486 = torch.constant.bool false | |
| %int1_1487 = torch.constant.int 1 | |
| %3605 = torch.aten.convolution %3584, %3592, %3600, %3603, %3601, %3602, %false_1486, %3604, %int1_1487 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[128,1024,1,1],f32>, !torch.vtensor<[128],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,128,12,12],f32> | |
| %3606 = torch.aten.relu %3605 : !torch.vtensor<[32,128,12,12],f32> -> !torch.vtensor<[32,128,12,12],f32> | |
| %3607 = torch.prim.ListConstruct %3347, %3429, %3571, %3606 : (!torch.vtensor<[32,384,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[32,128,12,12],f32>) -> !torch.list<vtensor> | |
| %int1_1488 = torch.constant.int 1 | |
| %3608 = torch.aten.cat %3607, %int1_1488 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1024,12,12],f32> | |
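| // Same four-branch concatenation as %3317 above: 384 + 256 + 256 + 128 = 1024 channels at 12x12. | |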
| %int12_1489 = torch.constant.int 12 | |
| %3609 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3610 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3611 = torch.aten.quantize_per_tensor %3608, %3609, %3610, %int12_1489 : !torch.vtensor<[32,1024,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3612 = torch.aten.int_repr %3611 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],si8> | |
| %3613 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3614 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3615 = torch.aten._make_per_tensor_quantized_tensor %3612, %3613, %3614 : !torch.vtensor<[32,1024,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1024,12,12],!torch.qint8> | |
| %3616 = torch.aten.dequantize.self %3615 : !torch.vtensor<[32,1024,12,12],!torch.qint8> -> !torch.vtensor<[32,1024,12,12],f32> | |
| %int12_1490 = torch.constant.int 12 | |
| %3617 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3618 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3619 = torch.aten.quantize_per_tensor %226, %3617, %3618, %int12_1490 : !torch.vtensor<[192,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3620 = torch.aten.int_repr %3619 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],si8> | |
| %3621 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3622 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3623 = torch.aten._make_per_tensor_quantized_tensor %3620, %3621, %3622 : !torch.vtensor<[192,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,1024,1,1],!torch.qint8> | |
| %3624 = torch.aten.dequantize.self %3623 : !torch.vtensor<[192,1024,1,1],!torch.qint8> -> !torch.vtensor<[192,1024,1,1],f32> | |
| %int12_1491 = torch.constant.int 12 | |
| %3625 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3626 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3627 = torch.aten.quantize_per_tensor %227, %3625, %3626, %int12_1491 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3628 = torch.aten.int_repr %3627 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %3629 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3630 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3631 = torch.aten._make_per_tensor_quantized_tensor %3628, %3629, %3630 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3632 = torch.aten.dequantize.self %3631 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_1492 = torch.constant.int 0 | |
| %int0_1493 = torch.constant.int 0 | |
| %int1_1494 = torch.constant.int 1 | |
| %int1_1495 = torch.constant.int 1 | |
| %int1_1496 = torch.constant.int 1 | |
| %int1_1497 = torch.constant.int 1 | |
| %int0_1498 = torch.constant.int 0 | |
| %3633 = torch.prim.ListConstruct %int0_1492, %int0_1493 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3634 = torch.prim.ListConstruct %int1_1494, %int1_1495 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3635 = torch.prim.ListConstruct %int1_1496, %int1_1497 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3636 = torch.prim.ListConstruct %int0_1498, %int0_1498 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1499 = torch.constant.bool false | |
| %int1_1500 = torch.constant.int 1 | |
| %3637 = torch.aten.convolution %3616, %3624, %3632, %3635, %3633, %3634, %false_1499, %3636, %int1_1500 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[192,1024,1,1],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,12,12],f32> | |
| %3638 = torch.aten.relu %3637 : !torch.vtensor<[32,192,12,12],f32> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1501 = torch.constant.int 12 | |
| %3639 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3640 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3641 = torch.aten.quantize_per_tensor %3638, %3639, %3640, %int12_1501 : !torch.vtensor<[32,192,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3642 = torch.aten.int_repr %3641 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],si8> | |
| %3643 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3644 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3645 = torch.aten._make_per_tensor_quantized_tensor %3642, %3643, %3644 : !torch.vtensor<[32,192,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,192,12,12],!torch.qint8> | |
| %3646 = torch.aten.dequantize.self %3645 : !torch.vtensor<[32,192,12,12],!torch.qint8> -> !torch.vtensor<[32,192,12,12],f32> | |
| %int12_1502 = torch.constant.int 12 | |
| %3647 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3648 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3649 = torch.aten.quantize_per_tensor %228, %3647, %3648, %int12_1502 : !torch.vtensor<[192,192,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192,192,3,3],!torch.qint8> | |
| %3650 = torch.aten.int_repr %3649 : !torch.vtensor<[192,192,3,3],!torch.qint8> -> !torch.vtensor<[192,192,3,3],si8> | |
| %3651 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3652 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3653 = torch.aten._make_per_tensor_quantized_tensor %3650, %3651, %3652 : !torch.vtensor<[192,192,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[192,192,3,3],!torch.qint8> | |
| %3654 = torch.aten.dequantize.self %3653 : !torch.vtensor<[192,192,3,3],!torch.qint8> -> !torch.vtensor<[192,192,3,3],f32> | |
| %int12_1503 = torch.constant.int 12 | |
| %3655 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3656 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3657 = torch.aten.quantize_per_tensor %229, %3655, %3656, %int12_1503 : !torch.vtensor<[192],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3658 = torch.aten.int_repr %3657 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],si8> | |
| %3659 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3660 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3661 = torch.aten._make_per_tensor_quantized_tensor %3658, %3659, %3660 : !torch.vtensor<[192],si8>, !torch.float, !torch.int -> !torch.vtensor<[192],!torch.qint8> | |
| %3662 = torch.aten.dequantize.self %3661 : !torch.vtensor<[192],!torch.qint8> -> !torch.vtensor<[192],f32> | |
| %int0_1504 = torch.constant.int 0 | |
| %int0_1505 = torch.constant.int 0 | |
| %int1_1506 = torch.constant.int 1 | |
| %int1_1507 = torch.constant.int 1 | |
| %int2_1508 = torch.constant.int 2 | |
| %int2_1509 = torch.constant.int 2 | |
| %int0_1510 = torch.constant.int 0 | |
| %3663 = torch.prim.ListConstruct %int0_1504, %int0_1505 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3664 = torch.prim.ListConstruct %int1_1506, %int1_1507 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3665 = torch.prim.ListConstruct %int2_1508, %int2_1509 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3666 = torch.prim.ListConstruct %int0_1510, %int0_1510 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1511 = torch.constant.bool false | |
| %int1_1512 = torch.constant.int 1 | |
| %3667 = torch.aten.convolution %3646, %3654, %3662, %3665, %3663, %3664, %false_1511, %3666, %int1_1512 : !torch.vtensor<[32,192,12,12],f32>, !torch.vtensor<[192,192,3,3],f32>, !torch.vtensor<[192],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,192,5,5],f32> | |
| %3668 = torch.aten.relu %3667 : !torch.vtensor<[32,192,5,5],f32> -> !torch.vtensor<[32,192,5,5],f32> | |
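| // This 3x3 convolution uses stride 2 with no padding, so the feature map drops from 12x12 to 5x5 | |
| // ((12 - 3) / 2 + 1 = 5); this looks like the start of a reduction block. | |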
| %int12_1513 = torch.constant.int 12 | |
| %3669 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3670 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3671 = torch.aten.quantize_per_tensor %230, %3669, %3670, %int12_1513 : !torch.vtensor<[256,1024,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8> | |
| %3672 = torch.aten.int_repr %3671 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],si8> | |
| %3673 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3674 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3675 = torch.aten._make_per_tensor_quantized_tensor %3672, %3673, %3674 : !torch.vtensor<[256,1024,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1024,1,1],!torch.qint8> | |
| %3676 = torch.aten.dequantize.self %3675 : !torch.vtensor<[256,1024,1,1],!torch.qint8> -> !torch.vtensor<[256,1024,1,1],f32> | |
| %int12_1514 = torch.constant.int 12 | |
| %3677 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3678 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3679 = torch.aten.quantize_per_tensor %231, %3677, %3678, %int12_1514 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3680 = torch.aten.int_repr %3679 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3681 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3682 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3683 = torch.aten._make_per_tensor_quantized_tensor %3680, %3681, %3682 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3684 = torch.aten.dequantize.self %3683 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1515 = torch.constant.int 0 | |
| %int0_1516 = torch.constant.int 0 | |
| %int1_1517 = torch.constant.int 1 | |
| %int1_1518 = torch.constant.int 1 | |
| %int1_1519 = torch.constant.int 1 | |
| %int1_1520 = torch.constant.int 1 | |
| %int0_1521 = torch.constant.int 0 | |
| %3685 = torch.prim.ListConstruct %int0_1515, %int0_1516 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3686 = torch.prim.ListConstruct %int1_1517, %int1_1518 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3687 = torch.prim.ListConstruct %int1_1519, %int1_1520 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3688 = torch.prim.ListConstruct %int0_1521, %int0_1521 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1522 = torch.constant.bool false | |
| %int1_1523 = torch.constant.int 1 | |
| %3689 = torch.aten.convolution %3616, %3676, %3684, %3687, %3685, %3686, %false_1522, %3688, %int1_1523 : !torch.vtensor<[32,1024,12,12],f32>, !torch.vtensor<[256,1024,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %3690 = torch.aten.relu %3689 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
| %int12_1524 = torch.constant.int 12 | |
| %3691 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3692 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3693 = torch.aten.quantize_per_tensor %3690, %3691, %3692, %int12_1524 : !torch.vtensor<[32,256,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,256,12,12],!torch.qint8> | |
| %3694 = torch.aten.int_repr %3693 : !torch.vtensor<[32,256,12,12],!torch.qint8> -> !torch.vtensor<[32,256,12,12],si8> | |
| %3695 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3696 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3697 = torch.aten._make_per_tensor_quantized_tensor %3694, %3695, %3696 : !torch.vtensor<[32,256,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,256,12,12],!torch.qint8> | |
| %3698 = torch.aten.dequantize.self %3697 : !torch.vtensor<[32,256,12,12],!torch.qint8> -> !torch.vtensor<[32,256,12,12],f32> | |
| %int12_1525 = torch.constant.int 12 | |
| %3699 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3700 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3701 = torch.aten.quantize_per_tensor %232, %3699, %3700, %int12_1525 : !torch.vtensor<[256,256,1,7],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,256,1,7],!torch.qint8> | |
| %3702 = torch.aten.int_repr %3701 : !torch.vtensor<[256,256,1,7],!torch.qint8> -> !torch.vtensor<[256,256,1,7],si8> | |
| %3703 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3704 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3705 = torch.aten._make_per_tensor_quantized_tensor %3702, %3703, %3704 : !torch.vtensor<[256,256,1,7],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,256,1,7],!torch.qint8> | |
| %3706 = torch.aten.dequantize.self %3705 : !torch.vtensor<[256,256,1,7],!torch.qint8> -> !torch.vtensor<[256,256,1,7],f32> | |
| %int12_1526 = torch.constant.int 12 | |
| %3707 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3708 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3709 = torch.aten.quantize_per_tensor %233, %3707, %3708, %int12_1526 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3710 = torch.aten.int_repr %3709 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3711 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3712 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3713 = torch.aten._make_per_tensor_quantized_tensor %3710, %3711, %3712 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3714 = torch.aten.dequantize.self %3713 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1527 = torch.constant.int 0 | |
| %int3_1528 = torch.constant.int 3 | |
| %int1_1529 = torch.constant.int 1 | |
| %int1_1530 = torch.constant.int 1 | |
| %int1_1531 = torch.constant.int 1 | |
| %int1_1532 = torch.constant.int 1 | |
| %int0_1533 = torch.constant.int 0 | |
| %3715 = torch.prim.ListConstruct %int0_1527, %int3_1528 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3716 = torch.prim.ListConstruct %int1_1529, %int1_1530 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3717 = torch.prim.ListConstruct %int1_1531, %int1_1532 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3718 = torch.prim.ListConstruct %int0_1533, %int0_1533 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1534 = torch.constant.bool false | |
| %int1_1535 = torch.constant.int 1 | |
| %3719 = torch.aten.convolution %3698, %3706, %3714, %3717, %3715, %3716, %false_1534, %3718, %int1_1535 : !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[256,256,1,7],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,12,12],f32> | |
| %3720 = torch.aten.relu %3719 : !torch.vtensor<[32,256,12,12],f32> -> !torch.vtensor<[32,256,12,12],f32> | |
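| // Asymmetric pair: the 1x7 convolution above uses padding [0, 3]; the matching 7x1 convolution (padding [3, 0]) follows and grows the channels from 256 to 320. | |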
| %int12_1536 = torch.constant.int 12 | |
| %3721 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3722 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3723 = torch.aten.quantize_per_tensor %3720, %3721, %3722, %int12_1536 : !torch.vtensor<[32,256,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,256,12,12],!torch.qint8> | |
| %3724 = torch.aten.int_repr %3723 : !torch.vtensor<[32,256,12,12],!torch.qint8> -> !torch.vtensor<[32,256,12,12],si8> | |
| %3725 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3726 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3727 = torch.aten._make_per_tensor_quantized_tensor %3724, %3725, %3726 : !torch.vtensor<[32,256,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,256,12,12],!torch.qint8> | |
| %3728 = torch.aten.dequantize.self %3727 : !torch.vtensor<[32,256,12,12],!torch.qint8> -> !torch.vtensor<[32,256,12,12],f32> | |
| %int12_1537 = torch.constant.int 12 | |
| %3729 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3730 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3731 = torch.aten.quantize_per_tensor %234, %3729, %3730, %int12_1537 : !torch.vtensor<[320,256,7,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[320,256,7,1],!torch.qint8> | |
| %3732 = torch.aten.int_repr %3731 : !torch.vtensor<[320,256,7,1],!torch.qint8> -> !torch.vtensor<[320,256,7,1],si8> | |
| %3733 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3734 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3735 = torch.aten._make_per_tensor_quantized_tensor %3732, %3733, %3734 : !torch.vtensor<[320,256,7,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[320,256,7,1],!torch.qint8> | |
| %3736 = torch.aten.dequantize.self %3735 : !torch.vtensor<[320,256,7,1],!torch.qint8> -> !torch.vtensor<[320,256,7,1],f32> | |
| %int12_1538 = torch.constant.int 12 | |
| %3737 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3738 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3739 = torch.aten.quantize_per_tensor %235, %3737, %3738, %int12_1538 : !torch.vtensor<[320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[320],!torch.qint8> | |
| %3740 = torch.aten.int_repr %3739 : !torch.vtensor<[320],!torch.qint8> -> !torch.vtensor<[320],si8> | |
| %3741 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3742 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3743 = torch.aten._make_per_tensor_quantized_tensor %3740, %3741, %3742 : !torch.vtensor<[320],si8>, !torch.float, !torch.int -> !torch.vtensor<[320],!torch.qint8> | |
| %3744 = torch.aten.dequantize.self %3743 : !torch.vtensor<[320],!torch.qint8> -> !torch.vtensor<[320],f32> | |
| %int3_1539 = torch.constant.int 3 | |
| %int0_1540 = torch.constant.int 0 | |
| %int1_1541 = torch.constant.int 1 | |
| %int1_1542 = torch.constant.int 1 | |
| %int1_1543 = torch.constant.int 1 | |
| %int1_1544 = torch.constant.int 1 | |
| %int0_1545 = torch.constant.int 0 | |
| %3745 = torch.prim.ListConstruct %int3_1539, %int0_1540 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3746 = torch.prim.ListConstruct %int1_1541, %int1_1542 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3747 = torch.prim.ListConstruct %int1_1543, %int1_1544 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3748 = torch.prim.ListConstruct %int0_1545, %int0_1545 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1546 = torch.constant.bool false | |
| %int1_1547 = torch.constant.int 1 | |
| %3749 = torch.aten.convolution %3728, %3736, %3744, %3747, %3745, %3746, %false_1546, %3748, %int1_1547 : !torch.vtensor<[32,256,12,12],f32>, !torch.vtensor<[320,256,7,1],f32>, !torch.vtensor<[320],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,320,12,12],f32> | |
| %3750 = torch.aten.relu %3749 : !torch.vtensor<[32,320,12,12],f32> -> !torch.vtensor<[32,320,12,12],f32> | |
| %int12_1548 = torch.constant.int 12 | |
| %3751 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3752 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3753 = torch.aten.quantize_per_tensor %3750, %3751, %3752, %int12_1548 : !torch.vtensor<[32,320,12,12],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,320,12,12],!torch.qint8> | |
| %3754 = torch.aten.int_repr %3753 : !torch.vtensor<[32,320,12,12],!torch.qint8> -> !torch.vtensor<[32,320,12,12],si8> | |
| %3755 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3756 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3757 = torch.aten._make_per_tensor_quantized_tensor %3754, %3755, %3756 : !torch.vtensor<[32,320,12,12],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,320,12,12],!torch.qint8> | |
| %3758 = torch.aten.dequantize.self %3757 : !torch.vtensor<[32,320,12,12],!torch.qint8> -> !torch.vtensor<[32,320,12,12],f32> | |
| %int12_1549 = torch.constant.int 12 | |
| %3759 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3760 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3761 = torch.aten.quantize_per_tensor %236, %3759, %3760, %int12_1549 : !torch.vtensor<[320,320,3,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[320,320,3,3],!torch.qint8> | |
| %3762 = torch.aten.int_repr %3761 : !torch.vtensor<[320,320,3,3],!torch.qint8> -> !torch.vtensor<[320,320,3,3],si8> | |
| %3763 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3764 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3765 = torch.aten._make_per_tensor_quantized_tensor %3762, %3763, %3764 : !torch.vtensor<[320,320,3,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[320,320,3,3],!torch.qint8> | |
| %3766 = torch.aten.dequantize.self %3765 : !torch.vtensor<[320,320,3,3],!torch.qint8> -> !torch.vtensor<[320,320,3,3],f32> | |
| %int12_1550 = torch.constant.int 12 | |
| %3767 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3768 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3769 = torch.aten.quantize_per_tensor %237, %3767, %3768, %int12_1550 : !torch.vtensor<[320],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[320],!torch.qint8> | |
| %3770 = torch.aten.int_repr %3769 : !torch.vtensor<[320],!torch.qint8> -> !torch.vtensor<[320],si8> | |
| %3771 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3772 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3773 = torch.aten._make_per_tensor_quantized_tensor %3770, %3771, %3772 : !torch.vtensor<[320],si8>, !torch.float, !torch.int -> !torch.vtensor<[320],!torch.qint8> | |
| %3774 = torch.aten.dequantize.self %3773 : !torch.vtensor<[320],!torch.qint8> -> !torch.vtensor<[320],f32> | |
| %int0_1551 = torch.constant.int 0 | |
| %int0_1552 = torch.constant.int 0 | |
| %int1_1553 = torch.constant.int 1 | |
| %int1_1554 = torch.constant.int 1 | |
| %int2_1555 = torch.constant.int 2 | |
| %int2_1556 = torch.constant.int 2 | |
| %int0_1557 = torch.constant.int 0 | |
| %3775 = torch.prim.ListConstruct %int0_1551, %int0_1552 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3776 = torch.prim.ListConstruct %int1_1553, %int1_1554 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3777 = torch.prim.ListConstruct %int2_1555, %int2_1556 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3778 = torch.prim.ListConstruct %int0_1557, %int0_1557 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1558 = torch.constant.bool false | |
| %int1_1559 = torch.constant.int 1 | |
| %3779 = torch.aten.convolution %3758, %3766, %3774, %3777, %3775, %3776, %false_1558, %3778, %int1_1559 : !torch.vtensor<[32,320,12,12],f32>, !torch.vtensor<[320,320,3,3],f32>, !torch.vtensor<[320],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,320,5,5],f32> | |
| %3780 = torch.aten.relu %3779 : !torch.vtensor<[32,320,5,5],f32> -> !torch.vtensor<[32,320,5,5],f32> | |
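| // The 3x3 stride-2 convolution above downsamples the 12x12 grid to 5x5; the 3x3 stride-2 max_pool2d below applies the same spatial reduction directly to the 1024-channel input %3616. | |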
| %int3_1560 = torch.constant.int 3 | |
| %int3_1561 = torch.constant.int 3 | |
| %3781 = torch.prim.ListConstruct %int3_1560, %int3_1561 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %int0_1562 = torch.constant.int 0 | |
| %int0_1563 = torch.constant.int 0 | |
| %3782 = torch.prim.ListConstruct %int0_1562, %int0_1563 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %int2_1564 = torch.constant.int 2 | |
| %int2_1565 = torch.constant.int 2 | |
| %3783 = torch.prim.ListConstruct %int2_1564, %int2_1565 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %int1_1566 = torch.constant.int 1 | |
| %int1_1567 = torch.constant.int 1 | |
| %3784 = torch.prim.ListConstruct %int1_1566, %int1_1567 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1568 = torch.constant.bool false | |
| %3785 = torch.aten.max_pool2d %3616, %3781, %3783, %3782, %3784, %false_1568 : !torch.vtensor<[32,1024,12,12],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool -> !torch.vtensor<[32,1024,5,5],f32> | |
| %3786 = torch.prim.ListConstruct %3668, %3780, %3785 : (!torch.vtensor<[32,192,5,5],f32>, !torch.vtensor<[32,320,5,5],f32>, !torch.vtensor<[32,1024,5,5],f32>) -> !torch.list<vtensor> | |
| %int1_1569 = torch.constant.int 1 | |
| %3787 = torch.aten.cat %3786, %int1_1569 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1536,5,5],f32> | |
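| // Channel-wise concatenation of the 192-, 320-, and 1024-channel branches yields the 1536-channel 5x5 feature map (%3787) consumed by all branches below. | |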
| %int12_1570 = torch.constant.int 12 | |
| %3788 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3789 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3790 = torch.aten.quantize_per_tensor %3787, %3788, %3789, %int12_1570 : !torch.vtensor<[32,1536,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %3791 = torch.aten.int_repr %3790 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],si8> | |
| %3792 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3793 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3794 = torch.aten._make_per_tensor_quantized_tensor %3791, %3792, %3793 : !torch.vtensor<[32,1536,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %3795 = torch.aten.dequantize.self %3794 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],f32> | |
| %int12_1571 = torch.constant.int 12 | |
| %3796 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3797 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3798 = torch.aten.quantize_per_tensor %238, %3796, %3797, %int12_1571 : !torch.vtensor<[256,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %3799 = torch.aten.int_repr %3798 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],si8> | |
| %3800 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3801 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3802 = torch.aten._make_per_tensor_quantized_tensor %3799, %3800, %3801 : !torch.vtensor<[256,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %3803 = torch.aten.dequantize.self %3802 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],f32> | |
| %int12_1572 = torch.constant.int 12 | |
| %3804 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3805 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3806 = torch.aten.quantize_per_tensor %239, %3804, %3805, %int12_1572 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3807 = torch.aten.int_repr %3806 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3808 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3809 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3810 = torch.aten._make_per_tensor_quantized_tensor %3807, %3808, %3809 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3811 = torch.aten.dequantize.self %3810 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1573 = torch.constant.int 0 | |
| %int0_1574 = torch.constant.int 0 | |
| %int1_1575 = torch.constant.int 1 | |
| %int1_1576 = torch.constant.int 1 | |
| %int1_1577 = torch.constant.int 1 | |
| %int1_1578 = torch.constant.int 1 | |
| %int0_1579 = torch.constant.int 0 | |
| %3812 = torch.prim.ListConstruct %int0_1573, %int0_1574 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3813 = torch.prim.ListConstruct %int1_1575, %int1_1576 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3814 = torch.prim.ListConstruct %int1_1577, %int1_1578 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3815 = torch.prim.ListConstruct %int0_1579, %int0_1579 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1580 = torch.constant.bool false | |
| %int1_1581 = torch.constant.int 1 | |
| %3816 = torch.aten.convolution %3795, %3803, %3811, %3814, %3812, %3813, %false_1580, %3815, %int1_1581 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[256,1536,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %3817 = torch.aten.relu %3816 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
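| // %3817 is the 1x1 branch (1536 -> 256). The next branches also start from %3795: a 1x1 convolution to 384 channels, followed by parallel 1x3 and 3x1 convolutions that each produce 256 channels. | |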
| %int12_1582 = torch.constant.int 12 | |
| %3818 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3819 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3820 = torch.aten.quantize_per_tensor %240, %3818, %3819, %int12_1582 : !torch.vtensor<[384,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %3821 = torch.aten.int_repr %3820 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],si8> | |
| %3822 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3823 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3824 = torch.aten._make_per_tensor_quantized_tensor %3821, %3822, %3823 : !torch.vtensor<[384,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %3825 = torch.aten.dequantize.self %3824 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],f32> | |
| %int12_1583 = torch.constant.int 12 | |
| %3826 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3827 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3828 = torch.aten.quantize_per_tensor %241, %3826, %3827, %int12_1583 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %3829 = torch.aten.int_repr %3828 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %3830 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3831 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3832 = torch.aten._make_per_tensor_quantized_tensor %3829, %3830, %3831 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %3833 = torch.aten.dequantize.self %3832 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_1584 = torch.constant.int 0 | |
| %int0_1585 = torch.constant.int 0 | |
| %int1_1586 = torch.constant.int 1 | |
| %int1_1587 = torch.constant.int 1 | |
| %int1_1588 = torch.constant.int 1 | |
| %int1_1589 = torch.constant.int 1 | |
| %int0_1590 = torch.constant.int 0 | |
| %3834 = torch.prim.ListConstruct %int0_1584, %int0_1585 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3835 = torch.prim.ListConstruct %int1_1586, %int1_1587 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3836 = torch.prim.ListConstruct %int1_1588, %int1_1589 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3837 = torch.prim.ListConstruct %int0_1590, %int0_1590 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1591 = torch.constant.bool false | |
| %int1_1592 = torch.constant.int 1 | |
| %3838 = torch.aten.convolution %3795, %3825, %3833, %3836, %3834, %3835, %false_1591, %3837, %int1_1592 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[384,1536,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,5,5],f32> | |
| %3839 = torch.aten.relu %3838 : !torch.vtensor<[32,384,5,5],f32> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1593 = torch.constant.int 12 | |
| %3840 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3841 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3842 = torch.aten.quantize_per_tensor %3839, %3840, %3841, %int12_1593 : !torch.vtensor<[32,384,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %3843 = torch.aten.int_repr %3842 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],si8> | |
| %3844 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3845 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3846 = torch.aten._make_per_tensor_quantized_tensor %3843, %3844, %3845 : !torch.vtensor<[32,384,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %3847 = torch.aten.dequantize.self %3846 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1594 = torch.constant.int 12 | |
| %3848 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3849 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3850 = torch.aten.quantize_per_tensor %242, %3848, %3849, %int12_1594 : !torch.vtensor<[256,384,1,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,384,1,3],!torch.qint8> | |
| %3851 = torch.aten.int_repr %3850 : !torch.vtensor<[256,384,1,3],!torch.qint8> -> !torch.vtensor<[256,384,1,3],si8> | |
| %3852 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3853 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3854 = torch.aten._make_per_tensor_quantized_tensor %3851, %3852, %3853 : !torch.vtensor<[256,384,1,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,384,1,3],!torch.qint8> | |
| %3855 = torch.aten.dequantize.self %3854 : !torch.vtensor<[256,384,1,3],!torch.qint8> -> !torch.vtensor<[256,384,1,3],f32> | |
| %int12_1595 = torch.constant.int 12 | |
| %3856 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3857 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3858 = torch.aten.quantize_per_tensor %243, %3856, %3857, %int12_1595 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3859 = torch.aten.int_repr %3858 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3860 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3861 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3862 = torch.aten._make_per_tensor_quantized_tensor %3859, %3860, %3861 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3863 = torch.aten.dequantize.self %3862 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1596 = torch.constant.int 0 | |
| %int1_1597 = torch.constant.int 1 | |
| %int1_1598 = torch.constant.int 1 | |
| %int1_1599 = torch.constant.int 1 | |
| %int1_1600 = torch.constant.int 1 | |
| %int1_1601 = torch.constant.int 1 | |
| %int0_1602 = torch.constant.int 0 | |
| %3864 = torch.prim.ListConstruct %int0_1596, %int1_1597 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3865 = torch.prim.ListConstruct %int1_1598, %int1_1599 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3866 = torch.prim.ListConstruct %int1_1600, %int1_1601 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3867 = torch.prim.ListConstruct %int0_1602, %int0_1602 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1603 = torch.constant.bool false | |
| %int1_1604 = torch.constant.int 1 | |
| %3868 = torch.aten.convolution %3847, %3855, %3863, %3866, %3864, %3865, %false_1603, %3867, %int1_1604 : !torch.vtensor<[32,384,5,5],f32>, !torch.vtensor<[256,384,1,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %3869 = torch.aten.relu %3868 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %int12_1605 = torch.constant.int 12 | |
| %3870 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3871 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3872 = torch.aten.quantize_per_tensor %244, %3870, %3871, %int12_1605 : !torch.vtensor<[256,384,3,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,384,3,1],!torch.qint8> | |
| %3873 = torch.aten.int_repr %3872 : !torch.vtensor<[256,384,3,1],!torch.qint8> -> !torch.vtensor<[256,384,3,1],si8> | |
| %3874 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3875 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3876 = torch.aten._make_per_tensor_quantized_tensor %3873, %3874, %3875 : !torch.vtensor<[256,384,3,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,384,3,1],!torch.qint8> | |
| %3877 = torch.aten.dequantize.self %3876 : !torch.vtensor<[256,384,3,1],!torch.qint8> -> !torch.vtensor<[256,384,3,1],f32> | |
| %int12_1606 = torch.constant.int 12 | |
| %3878 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3879 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3880 = torch.aten.quantize_per_tensor %245, %3878, %3879, %int12_1606 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3881 = torch.aten.int_repr %3880 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3882 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %3883 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3884 = torch.aten._make_per_tensor_quantized_tensor %3881, %3882, %3883 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3885 = torch.aten.dequantize.self %3884 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int1_1607 = torch.constant.int 1 | |
| %int0_1608 = torch.constant.int 0 | |
| %int1_1609 = torch.constant.int 1 | |
| %int1_1610 = torch.constant.int 1 | |
| %int1_1611 = torch.constant.int 1 | |
| %int1_1612 = torch.constant.int 1 | |
| %int0_1613 = torch.constant.int 0 | |
| %3886 = torch.prim.ListConstruct %int1_1607, %int0_1608 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3887 = torch.prim.ListConstruct %int1_1609, %int1_1610 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3888 = torch.prim.ListConstruct %int1_1611, %int1_1612 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3889 = torch.prim.ListConstruct %int0_1613, %int0_1613 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1614 = torch.constant.bool false | |
| %int1_1615 = torch.constant.int 1 | |
| %3890 = torch.aten.convolution %3847, %3877, %3885, %3888, %3886, %3887, %false_1614, %3889, %int1_1615 : !torch.vtensor<[32,384,5,5],f32>, !torch.vtensor<[256,384,3,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %3891 = torch.aten.relu %3890 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %int12_1616 = torch.constant.int 12 | |
| %3892 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3893 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3894 = torch.aten.quantize_per_tensor %246, %3892, %3893, %int12_1616 : !torch.vtensor<[384,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %3895 = torch.aten.int_repr %3894 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],si8> | |
| %3896 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3897 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3898 = torch.aten._make_per_tensor_quantized_tensor %3895, %3896, %3897 : !torch.vtensor<[384,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %3899 = torch.aten.dequantize.self %3898 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],f32> | |
| %int12_1617 = torch.constant.int 12 | |
| %3900 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3901 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3902 = torch.aten.quantize_per_tensor %247, %3900, %3901, %int12_1617 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %3903 = torch.aten.int_repr %3902 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %3904 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3905 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3906 = torch.aten._make_per_tensor_quantized_tensor %3903, %3904, %3905 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %3907 = torch.aten.dequantize.self %3906 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_1618 = torch.constant.int 0 | |
| %int0_1619 = torch.constant.int 0 | |
| %int1_1620 = torch.constant.int 1 | |
| %int1_1621 = torch.constant.int 1 | |
| %int1_1622 = torch.constant.int 1 | |
| %int1_1623 = torch.constant.int 1 | |
| %int0_1624 = torch.constant.int 0 | |
| %3908 = torch.prim.ListConstruct %int0_1618, %int0_1619 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3909 = torch.prim.ListConstruct %int1_1620, %int1_1621 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3910 = torch.prim.ListConstruct %int1_1622, %int1_1623 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3911 = torch.prim.ListConstruct %int0_1624, %int0_1624 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1625 = torch.constant.bool false | |
| %int1_1626 = torch.constant.int 1 | |
| %3912 = torch.aten.convolution %3795, %3899, %3907, %3910, %3908, %3909, %false_1625, %3911, %int1_1626 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[384,1536,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,5,5],f32> | |
| %3913 = torch.aten.relu %3912 : !torch.vtensor<[32,384,5,5],f32> -> !torch.vtensor<[32,384,5,5],f32> | |
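| // Deeper branch from %3795: 1x1 -> 384 above, then 3x1 -> 448 and 1x3 -> 512 below, before splitting into parallel 1x3 and 3x1 convolutions back to 256 channels each. | |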
| %int12_1627 = torch.constant.int 12 | |
| %3914 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3915 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3916 = torch.aten.quantize_per_tensor %3913, %3914, %3915, %int12_1627 : !torch.vtensor<[32,384,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %3917 = torch.aten.int_repr %3916 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],si8> | |
| %3918 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3919 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3920 = torch.aten._make_per_tensor_quantized_tensor %3917, %3918, %3919 : !torch.vtensor<[32,384,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %3921 = torch.aten.dequantize.self %3920 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1628 = torch.constant.int 12 | |
| %3922 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3923 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3924 = torch.aten.quantize_per_tensor %248, %3922, %3923, %int12_1628 : !torch.vtensor<[448,384,3,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[448,384,3,1],!torch.qint8> | |
| %3925 = torch.aten.int_repr %3924 : !torch.vtensor<[448,384,3,1],!torch.qint8> -> !torch.vtensor<[448,384,3,1],si8> | |
| %3926 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3927 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3928 = torch.aten._make_per_tensor_quantized_tensor %3925, %3926, %3927 : !torch.vtensor<[448,384,3,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[448,384,3,1],!torch.qint8> | |
| %3929 = torch.aten.dequantize.self %3928 : !torch.vtensor<[448,384,3,1],!torch.qint8> -> !torch.vtensor<[448,384,3,1],f32> | |
| %int12_1629 = torch.constant.int 12 | |
| %3930 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3931 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3932 = torch.aten.quantize_per_tensor %249, %3930, %3931, %int12_1629 : !torch.vtensor<[448],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[448],!torch.qint8> | |
| %3933 = torch.aten.int_repr %3932 : !torch.vtensor<[448],!torch.qint8> -> !torch.vtensor<[448],si8> | |
| %3934 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %3935 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3936 = torch.aten._make_per_tensor_quantized_tensor %3933, %3934, %3935 : !torch.vtensor<[448],si8>, !torch.float, !torch.int -> !torch.vtensor<[448],!torch.qint8> | |
| %3937 = torch.aten.dequantize.self %3936 : !torch.vtensor<[448],!torch.qint8> -> !torch.vtensor<[448],f32> | |
| %int1_1630 = torch.constant.int 1 | |
| %int0_1631 = torch.constant.int 0 | |
| %int1_1632 = torch.constant.int 1 | |
| %int1_1633 = torch.constant.int 1 | |
| %int1_1634 = torch.constant.int 1 | |
| %int1_1635 = torch.constant.int 1 | |
| %int0_1636 = torch.constant.int 0 | |
| %3938 = torch.prim.ListConstruct %int1_1630, %int0_1631 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3939 = torch.prim.ListConstruct %int1_1632, %int1_1633 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3940 = torch.prim.ListConstruct %int1_1634, %int1_1635 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3941 = torch.prim.ListConstruct %int0_1636, %int0_1636 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1637 = torch.constant.bool false | |
| %int1_1638 = torch.constant.int 1 | |
| %3942 = torch.aten.convolution %3921, %3929, %3937, %3940, %3938, %3939, %false_1637, %3941, %int1_1638 : !torch.vtensor<[32,384,5,5],f32>, !torch.vtensor<[448,384,3,1],f32>, !torch.vtensor<[448],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,448,5,5],f32> | |
| %3943 = torch.aten.relu %3942 : !torch.vtensor<[32,448,5,5],f32> -> !torch.vtensor<[32,448,5,5],f32> | |
| %int12_1639 = torch.constant.int 12 | |
| %3944 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3945 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3946 = torch.aten.quantize_per_tensor %3943, %3944, %3945, %int12_1639 : !torch.vtensor<[32,448,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,448,5,5],!torch.qint8> | |
| %3947 = torch.aten.int_repr %3946 : !torch.vtensor<[32,448,5,5],!torch.qint8> -> !torch.vtensor<[32,448,5,5],si8> | |
| %3948 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3949 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3950 = torch.aten._make_per_tensor_quantized_tensor %3947, %3948, %3949 : !torch.vtensor<[32,448,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,448,5,5],!torch.qint8> | |
| %3951 = torch.aten.dequantize.self %3950 : !torch.vtensor<[32,448,5,5],!torch.qint8> -> !torch.vtensor<[32,448,5,5],f32> | |
| %int12_1640 = torch.constant.int 12 | |
| %3952 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3953 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3954 = torch.aten.quantize_per_tensor %250, %3952, %3953, %int12_1640 : !torch.vtensor<[512,448,1,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,448,1,3],!torch.qint8> | |
| %3955 = torch.aten.int_repr %3954 : !torch.vtensor<[512,448,1,3],!torch.qint8> -> !torch.vtensor<[512,448,1,3],si8> | |
| %3956 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %3957 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3958 = torch.aten._make_per_tensor_quantized_tensor %3955, %3956, %3957 : !torch.vtensor<[512,448,1,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,448,1,3],!torch.qint8> | |
| %3959 = torch.aten.dequantize.self %3958 : !torch.vtensor<[512,448,1,3],!torch.qint8> -> !torch.vtensor<[512,448,1,3],f32> | |
| %int12_1641 = torch.constant.int 12 | |
| %3960 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3961 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3962 = torch.aten.quantize_per_tensor %251, %3960, %3961, %int12_1641 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8> | |
| %3963 = torch.aten.int_repr %3962 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8> | |
| %3964 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3965 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3966 = torch.aten._make_per_tensor_quantized_tensor %3963, %3964, %3965 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8> | |
| %3967 = torch.aten.dequantize.self %3966 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32> | |
| %int0_1642 = torch.constant.int 0 | |
| %int1_1643 = torch.constant.int 1 | |
| %int1_1644 = torch.constant.int 1 | |
| %int1_1645 = torch.constant.int 1 | |
| %int1_1646 = torch.constant.int 1 | |
| %int1_1647 = torch.constant.int 1 | |
| %int0_1648 = torch.constant.int 0 | |
| %3968 = torch.prim.ListConstruct %int0_1642, %int1_1643 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3969 = torch.prim.ListConstruct %int1_1644, %int1_1645 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3970 = torch.prim.ListConstruct %int1_1646, %int1_1647 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3971 = torch.prim.ListConstruct %int0_1648, %int0_1648 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1649 = torch.constant.bool false | |
| %int1_1650 = torch.constant.int 1 | |
| %3972 = torch.aten.convolution %3951, %3959, %3967, %3970, %3968, %3969, %false_1649, %3971, %int1_1650 : !torch.vtensor<[32,448,5,5],f32>, !torch.vtensor<[512,448,1,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,512,5,5],f32> | |
| %3973 = torch.aten.relu %3972 : !torch.vtensor<[32,512,5,5],f32> -> !torch.vtensor<[32,512,5,5],f32> | |
| %int12_1651 = torch.constant.int 12 | |
| %3974 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3975 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3976 = torch.aten.quantize_per_tensor %3973, %3974, %3975, %int12_1651 : !torch.vtensor<[32,512,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %3977 = torch.aten.int_repr %3976 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],si8> | |
| %3978 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3979 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3980 = torch.aten._make_per_tensor_quantized_tensor %3977, %3978, %3979 : !torch.vtensor<[32,512,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %3981 = torch.aten.dequantize.self %3980 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],f32> | |
| %int12_1652 = torch.constant.int 12 | |
| %3982 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3983 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3984 = torch.aten.quantize_per_tensor %252, %3982, %3983, %int12_1652 : !torch.vtensor<[256,512,1,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,1,3],!torch.qint8> | |
| %3985 = torch.aten.int_repr %3984 : !torch.vtensor<[256,512,1,3],!torch.qint8> -> !torch.vtensor<[256,512,1,3],si8> | |
| %3986 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %3987 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3988 = torch.aten._make_per_tensor_quantized_tensor %3985, %3986, %3987 : !torch.vtensor<[256,512,1,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,1,3],!torch.qint8> | |
| %3989 = torch.aten.dequantize.self %3988 : !torch.vtensor<[256,512,1,3],!torch.qint8> -> !torch.vtensor<[256,512,1,3],f32> | |
| %int12_1653 = torch.constant.int 12 | |
| %3990 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3991 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3992 = torch.aten.quantize_per_tensor %253, %3990, %3991, %int12_1653 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3993 = torch.aten.int_repr %3992 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %3994 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %3995 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %3996 = torch.aten._make_per_tensor_quantized_tensor %3993, %3994, %3995 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %3997 = torch.aten.dequantize.self %3996 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1654 = torch.constant.int 0 | |
| %int1_1655 = torch.constant.int 1 | |
| %int1_1656 = torch.constant.int 1 | |
| %int1_1657 = torch.constant.int 1 | |
| %int1_1658 = torch.constant.int 1 | |
| %int1_1659 = torch.constant.int 1 | |
| %int0_1660 = torch.constant.int 0 | |
| %3998 = torch.prim.ListConstruct %int0_1654, %int1_1655 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %3999 = torch.prim.ListConstruct %int1_1656, %int1_1657 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4000 = torch.prim.ListConstruct %int1_1658, %int1_1659 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4001 = torch.prim.ListConstruct %int0_1660, %int0_1660 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1661 = torch.constant.bool false | |
| %int1_1662 = torch.constant.int 1 | |
| %4002 = torch.aten.convolution %3981, %3989, %3997, %4000, %3998, %3999, %false_1661, %4001, %int1_1662 : !torch.vtensor<[32,512,5,5],f32>, !torch.vtensor<[256,512,1,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4003 = torch.aten.relu %4002 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %int12_1663 = torch.constant.int 12 | |
| %4004 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %4005 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4006 = torch.aten.quantize_per_tensor %254, %4004, %4005, %int12_1663 : !torch.vtensor<[256,512,3,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,1],!torch.qint8> | |
| %4007 = torch.aten.int_repr %4006 : !torch.vtensor<[256,512,3,1],!torch.qint8> -> !torch.vtensor<[256,512,3,1],si8> | |
| %4008 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %4009 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4010 = torch.aten._make_per_tensor_quantized_tensor %4007, %4008, %4009 : !torch.vtensor<[256,512,3,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,1],!torch.qint8> | |
| %4011 = torch.aten.dequantize.self %4010 : !torch.vtensor<[256,512,3,1],!torch.qint8> -> !torch.vtensor<[256,512,3,1],f32> | |
| %int12_1664 = torch.constant.int 12 | |
| %4012 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4013 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4014 = torch.aten.quantize_per_tensor %255, %4012, %4013, %int12_1664 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4015 = torch.aten.int_repr %4014 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4016 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4017 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4018 = torch.aten._make_per_tensor_quantized_tensor %4015, %4016, %4017 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4019 = torch.aten.dequantize.self %4018 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int1_1665 = torch.constant.int 1 | |
| %int0_1666 = torch.constant.int 0 | |
| %int1_1667 = torch.constant.int 1 | |
| %int1_1668 = torch.constant.int 1 | |
| %int1_1669 = torch.constant.int 1 | |
| %int1_1670 = torch.constant.int 1 | |
| %int0_1671 = torch.constant.int 0 | |
| %4020 = torch.prim.ListConstruct %int1_1665, %int0_1666 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4021 = torch.prim.ListConstruct %int1_1667, %int1_1668 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4022 = torch.prim.ListConstruct %int1_1669, %int1_1670 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4023 = torch.prim.ListConstruct %int0_1671, %int0_1671 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1672 = torch.constant.bool false | |
| %int1_1673 = torch.constant.int 1 | |
| %4024 = torch.aten.convolution %3981, %4011, %4019, %4022, %4020, %4021, %false_1672, %4023, %int1_1673 : !torch.vtensor<[32,512,5,5],f32>, !torch.vtensor<[256,512,3,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4025 = torch.aten.relu %4024 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %4026 = torch.prim.ListConstruct %4003, %4025 : (!torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>) -> !torch.list<vtensor> | |
| %int1_1674 = torch.constant.int 1 | |
| %4027 = torch.aten.cat %4026, %int1_1674 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,512,5,5],f32> | |
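| // The 1x3 and 3x1 outputs of this branch (%4003, %4025) are concatenated back into a single 512-channel tensor. | |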
| %int12_1675 = torch.constant.int 12 | |
| %4028 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4029 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4030 = torch.aten.quantize_per_tensor %4027, %4028, %4029, %int12_1675 : !torch.vtensor<[32,512,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4031 = torch.aten.int_repr %4030 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],si8> | |
| %4032 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4033 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4034 = torch.aten._make_per_tensor_quantized_tensor %4031, %4032, %4033 : !torch.vtensor<[32,512,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4035 = torch.aten.dequantize.self %4034 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],f32> | |
| %int3_1676 = torch.constant.int 3 | |
| %int3_1677 = torch.constant.int 3 | |
| %int1_1678 = torch.constant.int 1 | |
| %int1_1679 = torch.constant.int 1 | |
| %int1_1680 = torch.constant.int 1 | |
| %int1_1681 = torch.constant.int 1 | |
| %int1_1682 = torch.constant.int 1 | |
| %int1_1683 = torch.constant.int 1 | |
| %4036 = torch.prim.ListConstruct %int3_1676, %int3_1677 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4037 = torch.prim.ListConstruct %int1_1678, %int1_1679, %int1_1680, %int1_1681 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %4038 = torch.prim.ListConstruct %int1_1682, %int1_1683 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1684 = torch.constant.bool false | |
| %false_1685 = torch.constant.bool false | |
| %none_1686 = torch.constant.none | |
| %4039 = torch.aten.avg_pool2d %3795, %4036, %4038, %4037, %false_1684, %false_1685, %none_1686 : !torch.vtensor<[32,1536,5,5],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1536,5,5],f32> | |
| %4040 = torch.aten.mul.Tensor %4039, %308 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1536,5,5],f32> | |
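| // Pooling branch: 3x3 average pooling with stride 1 and padding 1 keeps the 5x5 grid; the result is scaled elementwise by the scalar tensor %308, requantized, and reduced to 256 channels by a 1x1 convolution. | |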
| %int12_1687 = torch.constant.int 12 | |
| %4041 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4042 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4043 = torch.aten.quantize_per_tensor %4040, %4041, %4042, %int12_1687 : !torch.vtensor<[32,1536,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4044 = torch.aten.int_repr %4043 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],si8> | |
| %4045 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4046 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4047 = torch.aten._make_per_tensor_quantized_tensor %4044, %4045, %4046 : !torch.vtensor<[32,1536,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4048 = torch.aten.dequantize.self %4047 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],f32> | |
| %int12_1688 = torch.constant.int 12 | |
| %4049 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %4050 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4051 = torch.aten.quantize_per_tensor %256, %4049, %4050, %int12_1688 : !torch.vtensor<[256,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4052 = torch.aten.int_repr %4051 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],si8> | |
| %4053 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %4054 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4055 = torch.aten._make_per_tensor_quantized_tensor %4052, %4053, %4054 : !torch.vtensor<[256,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4056 = torch.aten.dequantize.self %4055 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],f32> | |
| %int12_1689 = torch.constant.int 12 | |
| %4057 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4058 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4059 = torch.aten.quantize_per_tensor %257, %4057, %4058, %int12_1689 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4060 = torch.aten.int_repr %4059 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4061 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4062 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4063 = torch.aten._make_per_tensor_quantized_tensor %4060, %4061, %4062 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4064 = torch.aten.dequantize.self %4063 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1690 = torch.constant.int 0 | |
| %int0_1691 = torch.constant.int 0 | |
| %int1_1692 = torch.constant.int 1 | |
| %int1_1693 = torch.constant.int 1 | |
| %int1_1694 = torch.constant.int 1 | |
| %int1_1695 = torch.constant.int 1 | |
| %int0_1696 = torch.constant.int 0 | |
| %4065 = torch.prim.ListConstruct %int0_1690, %int0_1691 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4066 = torch.prim.ListConstruct %int1_1692, %int1_1693 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4067 = torch.prim.ListConstruct %int1_1694, %int1_1695 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4068 = torch.prim.ListConstruct %int0_1696, %int0_1696 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1697 = torch.constant.bool false | |
| %int1_1698 = torch.constant.int 1 | |
| %4069 = torch.aten.convolution %4048, %4056, %4064, %4067, %4065, %4066, %false_1697, %4068, %int1_1698 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[256,1536,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4070 = torch.aten.relu %4069 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %4071 = torch.prim.ListConstruct %3817, %3869, %3891, %4035, %4070 : (!torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,512,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>) -> !torch.list<vtensor> | |
| %int1_1699 = torch.constant.int 1 | |
| %4072 = torch.aten.cat %4071, %int1_1699 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1536,5,5],f32> | |
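| // Block output: the five branch results (256 + 256 + 256 + 512 + 256 channels) are concatenated into a 1536-channel map; the same branch structure repeats below starting from %4080. | |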
| %int12_1700 = torch.constant.int 12 | |
| %4073 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4074 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4075 = torch.aten.quantize_per_tensor %4072, %4073, %4074, %int12_1700 : !torch.vtensor<[32,1536,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4076 = torch.aten.int_repr %4075 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],si8> | |
| %4077 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4078 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4079 = torch.aten._make_per_tensor_quantized_tensor %4076, %4077, %4078 : !torch.vtensor<[32,1536,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4080 = torch.aten.dequantize.self %4079 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],f32> | |
| %int12_1701 = torch.constant.int 12 | |
| %4081 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4082 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4083 = torch.aten.quantize_per_tensor %258, %4081, %4082, %int12_1701 : !torch.vtensor<[256,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4084 = torch.aten.int_repr %4083 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],si8> | |
| %4085 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4086 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4087 = torch.aten._make_per_tensor_quantized_tensor %4084, %4085, %4086 : !torch.vtensor<[256,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4088 = torch.aten.dequantize.self %4087 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],f32> | |
| %int12_1702 = torch.constant.int 12 | |
| %4089 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4090 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4091 = torch.aten.quantize_per_tensor %259, %4089, %4090, %int12_1702 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4092 = torch.aten.int_repr %4091 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4093 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4094 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4095 = torch.aten._make_per_tensor_quantized_tensor %4092, %4093, %4094 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4096 = torch.aten.dequantize.self %4095 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1703 = torch.constant.int 0 | |
| %int0_1704 = torch.constant.int 0 | |
| %int1_1705 = torch.constant.int 1 | |
| %int1_1706 = torch.constant.int 1 | |
| %int1_1707 = torch.constant.int 1 | |
| %int1_1708 = torch.constant.int 1 | |
| %int0_1709 = torch.constant.int 0 | |
| %4097 = torch.prim.ListConstruct %int0_1703, %int0_1704 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4098 = torch.prim.ListConstruct %int1_1705, %int1_1706 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4099 = torch.prim.ListConstruct %int1_1707, %int1_1708 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4100 = torch.prim.ListConstruct %int0_1709, %int0_1709 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1710 = torch.constant.bool false | |
| %int1_1711 = torch.constant.int 1 | |
| %4101 = torch.aten.convolution %4080, %4088, %4096, %4099, %4097, %4098, %false_1710, %4100, %int1_1711 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[256,1536,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4102 = torch.aten.relu %4101 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %int12_1712 = torch.constant.int 12 | |
| %4103 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4104 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4105 = torch.aten.quantize_per_tensor %260, %4103, %4104, %int12_1712 : !torch.vtensor<[384,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %4106 = torch.aten.int_repr %4105 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],si8> | |
| %4107 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4108 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4109 = torch.aten._make_per_tensor_quantized_tensor %4106, %4107, %4108 : !torch.vtensor<[384,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %4110 = torch.aten.dequantize.self %4109 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],f32> | |
| %int12_1713 = torch.constant.int 12 | |
| %4111 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4112 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4113 = torch.aten.quantize_per_tensor %261, %4111, %4112, %int12_1713 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %4114 = torch.aten.int_repr %4113 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %4115 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4116 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4117 = torch.aten._make_per_tensor_quantized_tensor %4114, %4115, %4116 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %4118 = torch.aten.dequantize.self %4117 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_1714 = torch.constant.int 0 | |
| %int0_1715 = torch.constant.int 0 | |
| %int1_1716 = torch.constant.int 1 | |
| %int1_1717 = torch.constant.int 1 | |
| %int1_1718 = torch.constant.int 1 | |
| %int1_1719 = torch.constant.int 1 | |
| %int0_1720 = torch.constant.int 0 | |
| %4119 = torch.prim.ListConstruct %int0_1714, %int0_1715 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4120 = torch.prim.ListConstruct %int1_1716, %int1_1717 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4121 = torch.prim.ListConstruct %int1_1718, %int1_1719 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4122 = torch.prim.ListConstruct %int0_1720, %int0_1720 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1721 = torch.constant.bool false | |
| %int1_1722 = torch.constant.int 1 | |
| %4123 = torch.aten.convolution %4080, %4110, %4118, %4121, %4119, %4120, %false_1721, %4122, %int1_1722 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[384,1536,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,5,5],f32> | |
| %4124 = torch.aten.relu %4123 : !torch.vtensor<[32,384,5,5],f32> -> !torch.vtensor<[32,384,5,5],f32> | |
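| // Requantize the 384-channel activation to per-tensor qint8 before the 1x3 / 3x1 pair; paddings [0,1] and [1,0] preserve the 5x5 spatial size. | |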
| %int12_1723 = torch.constant.int 12 | |
| %4125 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4126 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4127 = torch.aten.quantize_per_tensor %4124, %4125, %4126, %int12_1723 : !torch.vtensor<[32,384,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %4128 = torch.aten.int_repr %4127 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],si8> | |
| %4129 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4130 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4131 = torch.aten._make_per_tensor_quantized_tensor %4128, %4129, %4130 : !torch.vtensor<[32,384,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %4132 = torch.aten.dequantize.self %4131 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1724 = torch.constant.int 12 | |
| %4133 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4134 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4135 = torch.aten.quantize_per_tensor %262, %4133, %4134, %int12_1724 : !torch.vtensor<[256,384,1,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,384,1,3],!torch.qint8> | |
| %4136 = torch.aten.int_repr %4135 : !torch.vtensor<[256,384,1,3],!torch.qint8> -> !torch.vtensor<[256,384,1,3],si8> | |
| %4137 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4138 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4139 = torch.aten._make_per_tensor_quantized_tensor %4136, %4137, %4138 : !torch.vtensor<[256,384,1,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,384,1,3],!torch.qint8> | |
| %4140 = torch.aten.dequantize.self %4139 : !torch.vtensor<[256,384,1,3],!torch.qint8> -> !torch.vtensor<[256,384,1,3],f32> | |
| %int12_1725 = torch.constant.int 12 | |
| %4141 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %4142 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4143 = torch.aten.quantize_per_tensor %263, %4141, %4142, %int12_1725 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4144 = torch.aten.int_repr %4143 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4145 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %4146 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4147 = torch.aten._make_per_tensor_quantized_tensor %4144, %4145, %4146 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4148 = torch.aten.dequantize.self %4147 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1726 = torch.constant.int 0 | |
| %int1_1727 = torch.constant.int 1 | |
| %int1_1728 = torch.constant.int 1 | |
| %int1_1729 = torch.constant.int 1 | |
| %int1_1730 = torch.constant.int 1 | |
| %int1_1731 = torch.constant.int 1 | |
| %int0_1732 = torch.constant.int 0 | |
| %4149 = torch.prim.ListConstruct %int0_1726, %int1_1727 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4150 = torch.prim.ListConstruct %int1_1728, %int1_1729 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4151 = torch.prim.ListConstruct %int1_1730, %int1_1731 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4152 = torch.prim.ListConstruct %int0_1732, %int0_1732 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1733 = torch.constant.bool false | |
| %int1_1734 = torch.constant.int 1 | |
| %4153 = torch.aten.convolution %4132, %4140, %4148, %4151, %4149, %4150, %false_1733, %4152, %int1_1734 : !torch.vtensor<[32,384,5,5],f32>, !torch.vtensor<[256,384,1,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4154 = torch.aten.relu %4153 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %int12_1735 = torch.constant.int 12 | |
| %4155 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4156 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4157 = torch.aten.quantize_per_tensor %264, %4155, %4156, %int12_1735 : !torch.vtensor<[256,384,3,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,384,3,1],!torch.qint8> | |
| %4158 = torch.aten.int_repr %4157 : !torch.vtensor<[256,384,3,1],!torch.qint8> -> !torch.vtensor<[256,384,3,1],si8> | |
| %4159 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4160 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4161 = torch.aten._make_per_tensor_quantized_tensor %4158, %4159, %4160 : !torch.vtensor<[256,384,3,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,384,3,1],!torch.qint8> | |
| %4162 = torch.aten.dequantize.self %4161 : !torch.vtensor<[256,384,3,1],!torch.qint8> -> !torch.vtensor<[256,384,3,1],f32> | |
| %int12_1736 = torch.constant.int 12 | |
| %4163 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4164 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4165 = torch.aten.quantize_per_tensor %265, %4163, %4164, %int12_1736 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4166 = torch.aten.int_repr %4165 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4167 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4168 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4169 = torch.aten._make_per_tensor_quantized_tensor %4166, %4167, %4168 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4170 = torch.aten.dequantize.self %4169 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int1_1737 = torch.constant.int 1 | |
| %int0_1738 = torch.constant.int 0 | |
| %int1_1739 = torch.constant.int 1 | |
| %int1_1740 = torch.constant.int 1 | |
| %int1_1741 = torch.constant.int 1 | |
| %int1_1742 = torch.constant.int 1 | |
| %int0_1743 = torch.constant.int 0 | |
| %4171 = torch.prim.ListConstruct %int1_1737, %int0_1738 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4172 = torch.prim.ListConstruct %int1_1739, %int1_1740 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4173 = torch.prim.ListConstruct %int1_1741, %int1_1742 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4174 = torch.prim.ListConstruct %int0_1743, %int0_1743 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1744 = torch.constant.bool false | |
| %int1_1745 = torch.constant.int 1 | |
| %4175 = torch.aten.convolution %4132, %4162, %4170, %4173, %4171, %4172, %false_1744, %4174, %int1_1745 : !torch.vtensor<[32,384,5,5],f32>, !torch.vtensor<[256,384,3,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4176 = torch.aten.relu %4175 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
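| // Branch 3: 1x1 convolution (1536 -> 384), then 3x1 (-> 448) and 1x3 (-> 512) convolutions; the 512-channel result is split into parallel 1x3 and 3x1 heads (each -> 256) that are concatenated further down. | |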
| %int12_1746 = torch.constant.int 12 | |
| %4177 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4178 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4179 = torch.aten.quantize_per_tensor %266, %4177, %4178, %int12_1746 : !torch.vtensor<[384,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %4180 = torch.aten.int_repr %4179 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],si8> | |
| %4181 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4182 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4183 = torch.aten._make_per_tensor_quantized_tensor %4180, %4181, %4182 : !torch.vtensor<[384,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %4184 = torch.aten.dequantize.self %4183 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],f32> | |
| %int12_1747 = torch.constant.int 12 | |
| %4185 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4186 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4187 = torch.aten.quantize_per_tensor %267, %4185, %4186, %int12_1747 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %4188 = torch.aten.int_repr %4187 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %4189 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4190 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4191 = torch.aten._make_per_tensor_quantized_tensor %4188, %4189, %4190 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %4192 = torch.aten.dequantize.self %4191 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_1748 = torch.constant.int 0 | |
| %int0_1749 = torch.constant.int 0 | |
| %int1_1750 = torch.constant.int 1 | |
| %int1_1751 = torch.constant.int 1 | |
| %int1_1752 = torch.constant.int 1 | |
| %int1_1753 = torch.constant.int 1 | |
| %int0_1754 = torch.constant.int 0 | |
| %4193 = torch.prim.ListConstruct %int0_1748, %int0_1749 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4194 = torch.prim.ListConstruct %int1_1750, %int1_1751 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4195 = torch.prim.ListConstruct %int1_1752, %int1_1753 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4196 = torch.prim.ListConstruct %int0_1754, %int0_1754 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1755 = torch.constant.bool false | |
| %int1_1756 = torch.constant.int 1 | |
| %4197 = torch.aten.convolution %4080, %4184, %4192, %4195, %4193, %4194, %false_1755, %4196, %int1_1756 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[384,1536,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,5,5],f32> | |
| %4198 = torch.aten.relu %4197 : !torch.vtensor<[32,384,5,5],f32> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1757 = torch.constant.int 12 | |
| %4199 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4200 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4201 = torch.aten.quantize_per_tensor %4198, %4199, %4200, %int12_1757 : !torch.vtensor<[32,384,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %4202 = torch.aten.int_repr %4201 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],si8> | |
| %4203 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4204 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4205 = torch.aten._make_per_tensor_quantized_tensor %4202, %4203, %4204 : !torch.vtensor<[32,384,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %4206 = torch.aten.dequantize.self %4205 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1758 = torch.constant.int 12 | |
| %4207 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4208 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4209 = torch.aten.quantize_per_tensor %268, %4207, %4208, %int12_1758 : !torch.vtensor<[448,384,3,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[448,384,3,1],!torch.qint8> | |
| %4210 = torch.aten.int_repr %4209 : !torch.vtensor<[448,384,3,1],!torch.qint8> -> !torch.vtensor<[448,384,3,1],si8> | |
| %4211 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4212 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4213 = torch.aten._make_per_tensor_quantized_tensor %4210, %4211, %4212 : !torch.vtensor<[448,384,3,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[448,384,3,1],!torch.qint8> | |
| %4214 = torch.aten.dequantize.self %4213 : !torch.vtensor<[448,384,3,1],!torch.qint8> -> !torch.vtensor<[448,384,3,1],f32> | |
| %int12_1759 = torch.constant.int 12 | |
| %4215 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %4216 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4217 = torch.aten.quantize_per_tensor %269, %4215, %4216, %int12_1759 : !torch.vtensor<[448],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[448],!torch.qint8> | |
| %4218 = torch.aten.int_repr %4217 : !torch.vtensor<[448],!torch.qint8> -> !torch.vtensor<[448],si8> | |
| %4219 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %4220 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4221 = torch.aten._make_per_tensor_quantized_tensor %4218, %4219, %4220 : !torch.vtensor<[448],si8>, !torch.float, !torch.int -> !torch.vtensor<[448],!torch.qint8> | |
| %4222 = torch.aten.dequantize.self %4221 : !torch.vtensor<[448],!torch.qint8> -> !torch.vtensor<[448],f32> | |
| %int1_1760 = torch.constant.int 1 | |
| %int0_1761 = torch.constant.int 0 | |
| %int1_1762 = torch.constant.int 1 | |
| %int1_1763 = torch.constant.int 1 | |
| %int1_1764 = torch.constant.int 1 | |
| %int1_1765 = torch.constant.int 1 | |
| %int0_1766 = torch.constant.int 0 | |
| %4223 = torch.prim.ListConstruct %int1_1760, %int0_1761 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4224 = torch.prim.ListConstruct %int1_1762, %int1_1763 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4225 = torch.prim.ListConstruct %int1_1764, %int1_1765 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4226 = torch.prim.ListConstruct %int0_1766, %int0_1766 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1767 = torch.constant.bool false | |
| %int1_1768 = torch.constant.int 1 | |
| %4227 = torch.aten.convolution %4206, %4214, %4222, %4225, %4223, %4224, %false_1767, %4226, %int1_1768 : !torch.vtensor<[32,384,5,5],f32>, !torch.vtensor<[448,384,3,1],f32>, !torch.vtensor<[448],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,448,5,5],f32> | |
| %4228 = torch.aten.relu %4227 : !torch.vtensor<[32,448,5,5],f32> -> !torch.vtensor<[32,448,5,5],f32> | |
| %int12_1769 = torch.constant.int 12 | |
| %4229 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4230 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4231 = torch.aten.quantize_per_tensor %4228, %4229, %4230, %int12_1769 : !torch.vtensor<[32,448,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,448,5,5],!torch.qint8> | |
| %4232 = torch.aten.int_repr %4231 : !torch.vtensor<[32,448,5,5],!torch.qint8> -> !torch.vtensor<[32,448,5,5],si8> | |
| %4233 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4234 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4235 = torch.aten._make_per_tensor_quantized_tensor %4232, %4233, %4234 : !torch.vtensor<[32,448,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,448,5,5],!torch.qint8> | |
| %4236 = torch.aten.dequantize.self %4235 : !torch.vtensor<[32,448,5,5],!torch.qint8> -> !torch.vtensor<[32,448,5,5],f32> | |
| %int12_1770 = torch.constant.int 12 | |
| %4237 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4238 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4239 = torch.aten.quantize_per_tensor %270, %4237, %4238, %int12_1770 : !torch.vtensor<[512,448,1,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,448,1,3],!torch.qint8> | |
| %4240 = torch.aten.int_repr %4239 : !torch.vtensor<[512,448,1,3],!torch.qint8> -> !torch.vtensor<[512,448,1,3],si8> | |
| %4241 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4242 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4243 = torch.aten._make_per_tensor_quantized_tensor %4240, %4241, %4242 : !torch.vtensor<[512,448,1,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,448,1,3],!torch.qint8> | |
| %4244 = torch.aten.dequantize.self %4243 : !torch.vtensor<[512,448,1,3],!torch.qint8> -> !torch.vtensor<[512,448,1,3],f32> | |
| %int12_1771 = torch.constant.int 12 | |
| %4245 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4246 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4247 = torch.aten.quantize_per_tensor %271, %4245, %4246, %int12_1771 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8> | |
| %4248 = torch.aten.int_repr %4247 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8> | |
| %4249 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4250 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4251 = torch.aten._make_per_tensor_quantized_tensor %4248, %4249, %4250 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8> | |
| %4252 = torch.aten.dequantize.self %4251 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32> | |
| %int0_1772 = torch.constant.int 0 | |
| %int1_1773 = torch.constant.int 1 | |
| %int1_1774 = torch.constant.int 1 | |
| %int1_1775 = torch.constant.int 1 | |
| %int1_1776 = torch.constant.int 1 | |
| %int1_1777 = torch.constant.int 1 | |
| %int0_1778 = torch.constant.int 0 | |
| %4253 = torch.prim.ListConstruct %int0_1772, %int1_1773 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4254 = torch.prim.ListConstruct %int1_1774, %int1_1775 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4255 = torch.prim.ListConstruct %int1_1776, %int1_1777 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4256 = torch.prim.ListConstruct %int0_1778, %int0_1778 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1779 = torch.constant.bool false | |
| %int1_1780 = torch.constant.int 1 | |
| %4257 = torch.aten.convolution %4236, %4244, %4252, %4255, %4253, %4254, %false_1779, %4256, %int1_1780 : !torch.vtensor<[32,448,5,5],f32>, !torch.vtensor<[512,448,1,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,512,5,5],f32> | |
| %4258 = torch.aten.relu %4257 : !torch.vtensor<[32,512,5,5],f32> -> !torch.vtensor<[32,512,5,5],f32> | |
| %int12_1781 = torch.constant.int 12 | |
| %4259 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4260 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4261 = torch.aten.quantize_per_tensor %4258, %4259, %4260, %int12_1781 : !torch.vtensor<[32,512,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4262 = torch.aten.int_repr %4261 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],si8> | |
| %4263 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4264 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4265 = torch.aten._make_per_tensor_quantized_tensor %4262, %4263, %4264 : !torch.vtensor<[32,512,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4266 = torch.aten.dequantize.self %4265 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],f32> | |
| %int12_1782 = torch.constant.int 12 | |
| %4267 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %4268 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4269 = torch.aten.quantize_per_tensor %272, %4267, %4268, %int12_1782 : !torch.vtensor<[256,512,1,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,1,3],!torch.qint8> | |
| %4270 = torch.aten.int_repr %4269 : !torch.vtensor<[256,512,1,3],!torch.qint8> -> !torch.vtensor<[256,512,1,3],si8> | |
| %4271 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %4272 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4273 = torch.aten._make_per_tensor_quantized_tensor %4270, %4271, %4272 : !torch.vtensor<[256,512,1,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,1,3],!torch.qint8> | |
| %4274 = torch.aten.dequantize.self %4273 : !torch.vtensor<[256,512,1,3],!torch.qint8> -> !torch.vtensor<[256,512,1,3],f32> | |
| %int12_1783 = torch.constant.int 12 | |
| %4275 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4276 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4277 = torch.aten.quantize_per_tensor %273, %4275, %4276, %int12_1783 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4278 = torch.aten.int_repr %4277 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4279 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4280 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4281 = torch.aten._make_per_tensor_quantized_tensor %4278, %4279, %4280 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4282 = torch.aten.dequantize.self %4281 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1784 = torch.constant.int 0 | |
| %int1_1785 = torch.constant.int 1 | |
| %int1_1786 = torch.constant.int 1 | |
| %int1_1787 = torch.constant.int 1 | |
| %int1_1788 = torch.constant.int 1 | |
| %int1_1789 = torch.constant.int 1 | |
| %int0_1790 = torch.constant.int 0 | |
| %4283 = torch.prim.ListConstruct %int0_1784, %int1_1785 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4284 = torch.prim.ListConstruct %int1_1786, %int1_1787 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4285 = torch.prim.ListConstruct %int1_1788, %int1_1789 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4286 = torch.prim.ListConstruct %int0_1790, %int0_1790 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1791 = torch.constant.bool false | |
| %int1_1792 = torch.constant.int 1 | |
| %4287 = torch.aten.convolution %4266, %4274, %4282, %4285, %4283, %4284, %false_1791, %4286, %int1_1792 : !torch.vtensor<[32,512,5,5],f32>, !torch.vtensor<[256,512,1,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4288 = torch.aten.relu %4287 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %int12_1793 = torch.constant.int 12 | |
| %4289 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %4290 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4291 = torch.aten.quantize_per_tensor %274, %4289, %4290, %int12_1793 : !torch.vtensor<[256,512,3,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,1],!torch.qint8> | |
| %4292 = torch.aten.int_repr %4291 : !torch.vtensor<[256,512,3,1],!torch.qint8> -> !torch.vtensor<[256,512,3,1],si8> | |
| %4293 = torch.aten.item %307 : !torch.vtensor<[],f32> -> !torch.float | |
| %4294 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4295 = torch.aten._make_per_tensor_quantized_tensor %4292, %4293, %4294 : !torch.vtensor<[256,512,3,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,1],!torch.qint8> | |
| %4296 = torch.aten.dequantize.self %4295 : !torch.vtensor<[256,512,3,1],!torch.qint8> -> !torch.vtensor<[256,512,3,1],f32> | |
| %int12_1794 = torch.constant.int 12 | |
| %4297 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4298 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4299 = torch.aten.quantize_per_tensor %275, %4297, %4298, %int12_1794 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4300 = torch.aten.int_repr %4299 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4301 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4302 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4303 = torch.aten._make_per_tensor_quantized_tensor %4300, %4301, %4302 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4304 = torch.aten.dequantize.self %4303 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int1_1795 = torch.constant.int 1 | |
| %int0_1796 = torch.constant.int 0 | |
| %int1_1797 = torch.constant.int 1 | |
| %int1_1798 = torch.constant.int 1 | |
| %int1_1799 = torch.constant.int 1 | |
| %int1_1800 = torch.constant.int 1 | |
| %int0_1801 = torch.constant.int 0 | |
| %4305 = torch.prim.ListConstruct %int1_1795, %int0_1796 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4306 = torch.prim.ListConstruct %int1_1797, %int1_1798 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4307 = torch.prim.ListConstruct %int1_1799, %int1_1800 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4308 = torch.prim.ListConstruct %int0_1801, %int0_1801 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1802 = torch.constant.bool false | |
| %int1_1803 = torch.constant.int 1 | |
| %4309 = torch.aten.convolution %4266, %4296, %4304, %4307, %4305, %4306, %false_1802, %4308, %int1_1803 : !torch.vtensor<[32,512,5,5],f32>, !torch.vtensor<[256,512,3,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4310 = torch.aten.relu %4309 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
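| // Concatenate the two 256-channel heads of branch 3 along dim 1 (-> 512 channels) and requantize the result. | |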
| %4311 = torch.prim.ListConstruct %4288, %4310 : (!torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>) -> !torch.list<vtensor> | |
| %int1_1804 = torch.constant.int 1 | |
| %4312 = torch.aten.cat %4311, %int1_1804 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,512,5,5],f32> | |
| %int12_1805 = torch.constant.int 12 | |
| %4313 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4314 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4315 = torch.aten.quantize_per_tensor %4312, %4313, %4314, %int12_1805 : !torch.vtensor<[32,512,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4316 = torch.aten.int_repr %4315 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],si8> | |
| %4317 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4318 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4319 = torch.aten._make_per_tensor_quantized_tensor %4316, %4317, %4318 : !torch.vtensor<[32,512,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4320 = torch.aten.dequantize.self %4319 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],f32> | |
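| // Branch 4 (pooling): 3x3 average pool with stride 1 and padding [1,1,1,1] (ceil_mode and count_include_pad both false) over %4080, an elementwise multiply by the scalar tensor %308 (presumably a divisor correction from the AveragePool lowering), QDQ, then a 1x1 convolution (1536 -> 256) + ReLU. | |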
| %int3_1806 = torch.constant.int 3 | |
| %int3_1807 = torch.constant.int 3 | |
| %int1_1808 = torch.constant.int 1 | |
| %int1_1809 = torch.constant.int 1 | |
| %int1_1810 = torch.constant.int 1 | |
| %int1_1811 = torch.constant.int 1 | |
| %int1_1812 = torch.constant.int 1 | |
| %int1_1813 = torch.constant.int 1 | |
| %4321 = torch.prim.ListConstruct %int3_1806, %int3_1807 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4322 = torch.prim.ListConstruct %int1_1808, %int1_1809, %int1_1810, %int1_1811 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %4323 = torch.prim.ListConstruct %int1_1812, %int1_1813 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1814 = torch.constant.bool false | |
| %false_1815 = torch.constant.bool false | |
| %none_1816 = torch.constant.none | |
| %4324 = torch.aten.avg_pool2d %4080, %4321, %4323, %4322, %false_1814, %false_1815, %none_1816 : !torch.vtensor<[32,1536,5,5],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1536,5,5],f32> | |
| %4325 = torch.aten.mul.Tensor %4324, %308 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1536,5,5],f32> | |
| %int12_1817 = torch.constant.int 12 | |
| %4326 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4327 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4328 = torch.aten.quantize_per_tensor %4325, %4326, %4327, %int12_1817 : !torch.vtensor<[32,1536,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4329 = torch.aten.int_repr %4328 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],si8> | |
| %4330 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4331 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4332 = torch.aten._make_per_tensor_quantized_tensor %4329, %4330, %4331 : !torch.vtensor<[32,1536,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4333 = torch.aten.dequantize.self %4332 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],f32> | |
| %int12_1818 = torch.constant.int 12 | |
| %4334 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4335 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4336 = torch.aten.quantize_per_tensor %276, %4334, %4335, %int12_1818 : !torch.vtensor<[256,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4337 = torch.aten.int_repr %4336 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],si8> | |
| %4338 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4339 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4340 = torch.aten._make_per_tensor_quantized_tensor %4337, %4338, %4339 : !torch.vtensor<[256,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4341 = torch.aten.dequantize.self %4340 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],f32> | |
| %int12_1819 = torch.constant.int 12 | |
| %4342 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4343 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4344 = torch.aten.quantize_per_tensor %277, %4342, %4343, %int12_1819 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4345 = torch.aten.int_repr %4344 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4346 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4347 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4348 = torch.aten._make_per_tensor_quantized_tensor %4345, %4346, %4347 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4349 = torch.aten.dequantize.self %4348 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1820 = torch.constant.int 0 | |
| %int0_1821 = torch.constant.int 0 | |
| %int1_1822 = torch.constant.int 1 | |
| %int1_1823 = torch.constant.int 1 | |
| %int1_1824 = torch.constant.int 1 | |
| %int1_1825 = torch.constant.int 1 | |
| %int0_1826 = torch.constant.int 0 | |
| %4350 = torch.prim.ListConstruct %int0_1820, %int0_1821 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4351 = torch.prim.ListConstruct %int1_1822, %int1_1823 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4352 = torch.prim.ListConstruct %int1_1824, %int1_1825 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4353 = torch.prim.ListConstruct %int0_1826, %int0_1826 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1827 = torch.constant.bool false | |
| %int1_1828 = torch.constant.int 1 | |
| %4354 = torch.aten.convolution %4333, %4341, %4349, %4352, %4350, %4351, %false_1827, %4353, %int1_1828 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[256,1536,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4355 = torch.aten.relu %4354 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
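| // Concatenate the branch outputs %4102, %4154, %4176, %4320, %4355 (256 + 256 + 256 + 512 + 256 = 1536 channels) along dim 1; the resulting 32x1536x5x5 tensor closes what resembles an Inception-C style block at 5x5 resolution. | |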
| %4356 = torch.prim.ListConstruct %4102, %4154, %4176, %4320, %4355 : (!torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,512,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>) -> !torch.list<vtensor> | |
| %int1_1829 = torch.constant.int 1 | |
| %4357 = torch.aten.cat %4356, %int1_1829 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1536,5,5],f32> | |
| %int12_1830 = torch.constant.int 12 | |
| %4358 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4359 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4360 = torch.aten.quantize_per_tensor %4357, %4358, %4359, %int12_1830 : !torch.vtensor<[32,1536,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4361 = torch.aten.int_repr %4360 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],si8> | |
| %4362 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4363 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4364 = torch.aten._make_per_tensor_quantized_tensor %4361, %4362, %4363 : !torch.vtensor<[32,1536,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4365 = torch.aten.dequantize.self %4364 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],f32> | |
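| // The requantized block output %4365 becomes the shared input of the next block, which repeats the same five-branch pattern with a fresh set of weights (%278 through %291 in this region). | |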
| %int12_1831 = torch.constant.int 12 | |
| %4366 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4367 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4368 = torch.aten.quantize_per_tensor %278, %4366, %4367, %int12_1831 : !torch.vtensor<[256,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4369 = torch.aten.int_repr %4368 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],si8> | |
| %4370 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4371 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4372 = torch.aten._make_per_tensor_quantized_tensor %4369, %4370, %4371 : !torch.vtensor<[256,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4373 = torch.aten.dequantize.self %4372 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],f32> | |
| %int12_1832 = torch.constant.int 12 | |
| %4374 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %4375 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4376 = torch.aten.quantize_per_tensor %279, %4374, %4375, %int12_1832 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4377 = torch.aten.int_repr %4376 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4378 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %4379 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4380 = torch.aten._make_per_tensor_quantized_tensor %4377, %4378, %4379 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4381 = torch.aten.dequantize.self %4380 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1833 = torch.constant.int 0 | |
| %int0_1834 = torch.constant.int 0 | |
| %int1_1835 = torch.constant.int 1 | |
| %int1_1836 = torch.constant.int 1 | |
| %int1_1837 = torch.constant.int 1 | |
| %int1_1838 = torch.constant.int 1 | |
| %int0_1839 = torch.constant.int 0 | |
| %4382 = torch.prim.ListConstruct %int0_1833, %int0_1834 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4383 = torch.prim.ListConstruct %int1_1835, %int1_1836 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4384 = torch.prim.ListConstruct %int1_1837, %int1_1838 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4385 = torch.prim.ListConstruct %int0_1839, %int0_1839 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1840 = torch.constant.bool false | |
| %int1_1841 = torch.constant.int 1 | |
| %4386 = torch.aten.convolution %4365, %4373, %4381, %4384, %4382, %4383, %false_1840, %4385, %int1_1841 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[256,1536,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4387 = torch.aten.relu %4386 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %int12_1842 = torch.constant.int 12 | |
| %4388 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %4389 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4390 = torch.aten.quantize_per_tensor %280, %4388, %4389, %int12_1842 : !torch.vtensor<[384,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %4391 = torch.aten.int_repr %4390 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],si8> | |
| %4392 = torch.aten.item %304 : !torch.vtensor<[],f32> -> !torch.float | |
| %4393 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4394 = torch.aten._make_per_tensor_quantized_tensor %4391, %4392, %4393 : !torch.vtensor<[384,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %4395 = torch.aten.dequantize.self %4394 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],f32> | |
| %int12_1843 = torch.constant.int 12 | |
| %4396 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4397 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4398 = torch.aten.quantize_per_tensor %281, %4396, %4397, %int12_1843 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %4399 = torch.aten.int_repr %4398 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %4400 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4401 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4402 = torch.aten._make_per_tensor_quantized_tensor %4399, %4400, %4401 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %4403 = torch.aten.dequantize.self %4402 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_1844 = torch.constant.int 0 | |
| %int0_1845 = torch.constant.int 0 | |
| %int1_1846 = torch.constant.int 1 | |
| %int1_1847 = torch.constant.int 1 | |
| %int1_1848 = torch.constant.int 1 | |
| %int1_1849 = torch.constant.int 1 | |
| %int0_1850 = torch.constant.int 0 | |
| %4404 = torch.prim.ListConstruct %int0_1844, %int0_1845 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4405 = torch.prim.ListConstruct %int1_1846, %int1_1847 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4406 = torch.prim.ListConstruct %int1_1848, %int1_1849 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4407 = torch.prim.ListConstruct %int0_1850, %int0_1850 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1851 = torch.constant.bool false | |
| %int1_1852 = torch.constant.int 1 | |
| %4408 = torch.aten.convolution %4365, %4395, %4403, %4406, %4404, %4405, %false_1851, %4407, %int1_1852 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[384,1536,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,5,5],f32> | |
| %4409 = torch.aten.relu %4408 : !torch.vtensor<[32,384,5,5],f32> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1853 = torch.constant.int 12 | |
| %4410 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4411 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4412 = torch.aten.quantize_per_tensor %4409, %4410, %4411, %int12_1853 : !torch.vtensor<[32,384,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %4413 = torch.aten.int_repr %4412 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],si8> | |
| %4414 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4415 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4416 = torch.aten._make_per_tensor_quantized_tensor %4413, %4414, %4415 : !torch.vtensor<[32,384,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %4417 = torch.aten.dequantize.self %4416 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1854 = torch.constant.int 12 | |
| %4418 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4419 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4420 = torch.aten.quantize_per_tensor %282, %4418, %4419, %int12_1854 : !torch.vtensor<[256,384,1,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,384,1,3],!torch.qint8> | |
| %4421 = torch.aten.int_repr %4420 : !torch.vtensor<[256,384,1,3],!torch.qint8> -> !torch.vtensor<[256,384,1,3],si8> | |
| %4422 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4423 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4424 = torch.aten._make_per_tensor_quantized_tensor %4421, %4422, %4423 : !torch.vtensor<[256,384,1,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,384,1,3],!torch.qint8> | |
| %4425 = torch.aten.dequantize.self %4424 : !torch.vtensor<[256,384,1,3],!torch.qint8> -> !torch.vtensor<[256,384,1,3],f32> | |
| %int12_1855 = torch.constant.int 12 | |
| %4426 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4427 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4428 = torch.aten.quantize_per_tensor %283, %4426, %4427, %int12_1855 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4429 = torch.aten.int_repr %4428 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4430 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4431 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4432 = torch.aten._make_per_tensor_quantized_tensor %4429, %4430, %4431 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4433 = torch.aten.dequantize.self %4432 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1856 = torch.constant.int 0 | |
| %int1_1857 = torch.constant.int 1 | |
| %int1_1858 = torch.constant.int 1 | |
| %int1_1859 = torch.constant.int 1 | |
| %int1_1860 = torch.constant.int 1 | |
| %int1_1861 = torch.constant.int 1 | |
| %int0_1862 = torch.constant.int 0 | |
| %4434 = torch.prim.ListConstruct %int0_1856, %int1_1857 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4435 = torch.prim.ListConstruct %int1_1858, %int1_1859 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4436 = torch.prim.ListConstruct %int1_1860, %int1_1861 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4437 = torch.prim.ListConstruct %int0_1862, %int0_1862 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1863 = torch.constant.bool false | |
| %int1_1864 = torch.constant.int 1 | |
| %4438 = torch.aten.convolution %4417, %4425, %4433, %4436, %4434, %4435, %false_1863, %4437, %int1_1864 : !torch.vtensor<[32,384,5,5],f32>, !torch.vtensor<[256,384,1,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4439 = torch.aten.relu %4438 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %int12_1865 = torch.constant.int 12 | |
| %4440 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4441 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4442 = torch.aten.quantize_per_tensor %284, %4440, %4441, %int12_1865 : !torch.vtensor<[256,384,3,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,384,3,1],!torch.qint8> | |
| %4443 = torch.aten.int_repr %4442 : !torch.vtensor<[256,384,3,1],!torch.qint8> -> !torch.vtensor<[256,384,3,1],si8> | |
| %4444 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4445 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4446 = torch.aten._make_per_tensor_quantized_tensor %4443, %4444, %4445 : !torch.vtensor<[256,384,3,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,384,3,1],!torch.qint8> | |
| %4447 = torch.aten.dequantize.self %4446 : !torch.vtensor<[256,384,3,1],!torch.qint8> -> !torch.vtensor<[256,384,3,1],f32> | |
| %int12_1866 = torch.constant.int 12 | |
| %4448 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4449 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4450 = torch.aten.quantize_per_tensor %285, %4448, %4449, %int12_1866 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4451 = torch.aten.int_repr %4450 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4452 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4453 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4454 = torch.aten._make_per_tensor_quantized_tensor %4451, %4452, %4453 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4455 = torch.aten.dequantize.self %4454 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int1_1867 = torch.constant.int 1 | |
| %int0_1868 = torch.constant.int 0 | |
| %int1_1869 = torch.constant.int 1 | |
| %int1_1870 = torch.constant.int 1 | |
| %int1_1871 = torch.constant.int 1 | |
| %int1_1872 = torch.constant.int 1 | |
| %int0_1873 = torch.constant.int 0 | |
| %4456 = torch.prim.ListConstruct %int1_1867, %int0_1868 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4457 = torch.prim.ListConstruct %int1_1869, %int1_1870 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4458 = torch.prim.ListConstruct %int1_1871, %int1_1872 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4459 = torch.prim.ListConstruct %int0_1873, %int0_1873 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1874 = torch.constant.bool false | |
| %int1_1875 = torch.constant.int 1 | |
| %4460 = torch.aten.convolution %4417, %4447, %4455, %4458, %4456, %4457, %false_1874, %4459, %int1_1875 : !torch.vtensor<[32,384,5,5],f32>, !torch.vtensor<[256,384,3,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4461 = torch.aten.relu %4460 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
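| // Third branch of the repeated block: 1x1 convolution (1536 -> 384) on %4365, then 3x1 (-> 448) and 1x3 (-> 512), mirroring the structure above. | |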
| %int12_1876 = torch.constant.int 12 | |
| %4462 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4463 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4464 = torch.aten.quantize_per_tensor %286, %4462, %4463, %int12_1876 : !torch.vtensor<[384,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %4465 = torch.aten.int_repr %4464 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],si8> | |
| %4466 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4467 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4468 = torch.aten._make_per_tensor_quantized_tensor %4465, %4466, %4467 : !torch.vtensor<[384,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[384,1536,1,1],!torch.qint8> | |
| %4469 = torch.aten.dequantize.self %4468 : !torch.vtensor<[384,1536,1,1],!torch.qint8> -> !torch.vtensor<[384,1536,1,1],f32> | |
| %int12_1877 = torch.constant.int 12 | |
| %4470 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4471 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4472 = torch.aten.quantize_per_tensor %287, %4470, %4471, %int12_1877 : !torch.vtensor<[384],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %4473 = torch.aten.int_repr %4472 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],si8> | |
| %4474 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4475 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4476 = torch.aten._make_per_tensor_quantized_tensor %4473, %4474, %4475 : !torch.vtensor<[384],si8>, !torch.float, !torch.int -> !torch.vtensor<[384],!torch.qint8> | |
| %4477 = torch.aten.dequantize.self %4476 : !torch.vtensor<[384],!torch.qint8> -> !torch.vtensor<[384],f32> | |
| %int0_1878 = torch.constant.int 0 | |
| %int0_1879 = torch.constant.int 0 | |
| %int1_1880 = torch.constant.int 1 | |
| %int1_1881 = torch.constant.int 1 | |
| %int1_1882 = torch.constant.int 1 | |
| %int1_1883 = torch.constant.int 1 | |
| %int0_1884 = torch.constant.int 0 | |
| %4478 = torch.prim.ListConstruct %int0_1878, %int0_1879 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4479 = torch.prim.ListConstruct %int1_1880, %int1_1881 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4480 = torch.prim.ListConstruct %int1_1882, %int1_1883 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4481 = torch.prim.ListConstruct %int0_1884, %int0_1884 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1885 = torch.constant.bool false | |
| %int1_1886 = torch.constant.int 1 | |
| %4482 = torch.aten.convolution %4365, %4469, %4477, %4480, %4478, %4479, %false_1885, %4481, %int1_1886 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[384,1536,1,1],f32>, !torch.vtensor<[384],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,384,5,5],f32> | |
| %4483 = torch.aten.relu %4482 : !torch.vtensor<[32,384,5,5],f32> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1887 = torch.constant.int 12 | |
| %4484 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4485 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4486 = torch.aten.quantize_per_tensor %4483, %4484, %4485, %int12_1887 : !torch.vtensor<[32,384,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %4487 = torch.aten.int_repr %4486 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],si8> | |
| %4488 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4489 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4490 = torch.aten._make_per_tensor_quantized_tensor %4487, %4488, %4489 : !torch.vtensor<[32,384,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,384,5,5],!torch.qint8> | |
| %4491 = torch.aten.dequantize.self %4490 : !torch.vtensor<[32,384,5,5],!torch.qint8> -> !torch.vtensor<[32,384,5,5],f32> | |
| %int12_1888 = torch.constant.int 12 | |
| %4492 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4493 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4494 = torch.aten.quantize_per_tensor %288, %4492, %4493, %int12_1888 : !torch.vtensor<[448,384,3,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[448,384,3,1],!torch.qint8> | |
| %4495 = torch.aten.int_repr %4494 : !torch.vtensor<[448,384,3,1],!torch.qint8> -> !torch.vtensor<[448,384,3,1],si8> | |
| %4496 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4497 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4498 = torch.aten._make_per_tensor_quantized_tensor %4495, %4496, %4497 : !torch.vtensor<[448,384,3,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[448,384,3,1],!torch.qint8> | |
| %4499 = torch.aten.dequantize.self %4498 : !torch.vtensor<[448,384,3,1],!torch.qint8> -> !torch.vtensor<[448,384,3,1],f32> | |
| %int12_1889 = torch.constant.int 12 | |
| %4500 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4501 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4502 = torch.aten.quantize_per_tensor %289, %4500, %4501, %int12_1889 : !torch.vtensor<[448],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[448],!torch.qint8> | |
| %4503 = torch.aten.int_repr %4502 : !torch.vtensor<[448],!torch.qint8> -> !torch.vtensor<[448],si8> | |
| %4504 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4505 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4506 = torch.aten._make_per_tensor_quantized_tensor %4503, %4504, %4505 : !torch.vtensor<[448],si8>, !torch.float, !torch.int -> !torch.vtensor<[448],!torch.qint8> | |
| %4507 = torch.aten.dequantize.self %4506 : !torch.vtensor<[448],!torch.qint8> -> !torch.vtensor<[448],f32> | |
| %int1_1890 = torch.constant.int 1 | |
| %int0_1891 = torch.constant.int 0 | |
| %int1_1892 = torch.constant.int 1 | |
| %int1_1893 = torch.constant.int 1 | |
| %int1_1894 = torch.constant.int 1 | |
| %int1_1895 = torch.constant.int 1 | |
| %int0_1896 = torch.constant.int 0 | |
| %4508 = torch.prim.ListConstruct %int1_1890, %int0_1891 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4509 = torch.prim.ListConstruct %int1_1892, %int1_1893 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4510 = torch.prim.ListConstruct %int1_1894, %int1_1895 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4511 = torch.prim.ListConstruct %int0_1896, %int0_1896 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1897 = torch.constant.bool false | |
| %int1_1898 = torch.constant.int 1 | |
| %4512 = torch.aten.convolution %4491, %4499, %4507, %4510, %4508, %4509, %false_1897, %4511, %int1_1898 : !torch.vtensor<[32,384,5,5],f32>, !torch.vtensor<[448,384,3,1],f32>, !torch.vtensor<[448],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,448,5,5],f32> | |
| %4513 = torch.aten.relu %4512 : !torch.vtensor<[32,448,5,5],f32> -> !torch.vtensor<[32,448,5,5],f32> | |
| %int12_1899 = torch.constant.int 12 | |
| %4514 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4515 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4516 = torch.aten.quantize_per_tensor %4513, %4514, %4515, %int12_1899 : !torch.vtensor<[32,448,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,448,5,5],!torch.qint8> | |
| %4517 = torch.aten.int_repr %4516 : !torch.vtensor<[32,448,5,5],!torch.qint8> -> !torch.vtensor<[32,448,5,5],si8> | |
| %4518 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4519 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4520 = torch.aten._make_per_tensor_quantized_tensor %4517, %4518, %4519 : !torch.vtensor<[32,448,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,448,5,5],!torch.qint8> | |
| %4521 = torch.aten.dequantize.self %4520 : !torch.vtensor<[32,448,5,5],!torch.qint8> -> !torch.vtensor<[32,448,5,5],f32> | |
| %int12_1900 = torch.constant.int 12 | |
| %4522 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4523 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4524 = torch.aten.quantize_per_tensor %290, %4522, %4523, %int12_1900 : !torch.vtensor<[512,448,1,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512,448,1,3],!torch.qint8> | |
| %4525 = torch.aten.int_repr %4524 : !torch.vtensor<[512,448,1,3],!torch.qint8> -> !torch.vtensor<[512,448,1,3],si8> | |
| %4526 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4527 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4528 = torch.aten._make_per_tensor_quantized_tensor %4525, %4526, %4527 : !torch.vtensor<[512,448,1,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[512,448,1,3],!torch.qint8> | |
| %4529 = torch.aten.dequantize.self %4528 : !torch.vtensor<[512,448,1,3],!torch.qint8> -> !torch.vtensor<[512,448,1,3],f32> | |
| %int12_1901 = torch.constant.int 12 | |
| %4530 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4531 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4532 = torch.aten.quantize_per_tensor %291, %4530, %4531, %int12_1901 : !torch.vtensor<[512],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[512],!torch.qint8> | |
| %4533 = torch.aten.int_repr %4532 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],si8> | |
| %4534 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4535 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4536 = torch.aten._make_per_tensor_quantized_tensor %4533, %4534, %4535 : !torch.vtensor<[512],si8>, !torch.float, !torch.int -> !torch.vtensor<[512],!torch.qint8> | |
| %4537 = torch.aten.dequantize.self %4536 : !torch.vtensor<[512],!torch.qint8> -> !torch.vtensor<[512],f32> | |
| %int0_1902 = torch.constant.int 0 | |
| %int1_1903 = torch.constant.int 1 | |
| %int1_1904 = torch.constant.int 1 | |
| %int1_1905 = torch.constant.int 1 | |
| %int1_1906 = torch.constant.int 1 | |
| %int1_1907 = torch.constant.int 1 | |
| %int0_1908 = torch.constant.int 0 | |
| %4538 = torch.prim.ListConstruct %int0_1902, %int1_1903 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4539 = torch.prim.ListConstruct %int1_1904, %int1_1905 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4540 = torch.prim.ListConstruct %int1_1906, %int1_1907 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4541 = torch.prim.ListConstruct %int0_1908, %int0_1908 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1909 = torch.constant.bool false | |
| %int1_1910 = torch.constant.int 1 | |
| %4542 = torch.aten.convolution %4521, %4529, %4537, %4540, %4538, %4539, %false_1909, %4541, %int1_1910 : !torch.vtensor<[32,448,5,5],f32>, !torch.vtensor<[512,448,1,3],f32>, !torch.vtensor<[512],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,512,5,5],f32> | |
| %4543 = torch.aten.relu %4542 : !torch.vtensor<[32,512,5,5],f32> -> !torch.vtensor<[32,512,5,5],f32> | |
| %int12_1911 = torch.constant.int 12 | |
| %4544 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4545 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4546 = torch.aten.quantize_per_tensor %4543, %4544, %4545, %int12_1911 : !torch.vtensor<[32,512,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4547 = torch.aten.int_repr %4546 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],si8> | |
| %4548 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4549 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4550 = torch.aten._make_per_tensor_quantized_tensor %4547, %4548, %4549 : !torch.vtensor<[32,512,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4551 = torch.aten.dequantize.self %4550 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],f32> | |
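| // Two parallel branches both read %4551: a 1x3 conv and a 3x1 conv, each producing 256 channels; their outputs are concatenated further down, resembling an Inception-style split. | |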
| %int12_1912 = torch.constant.int 12 | |
| %4552 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4553 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4554 = torch.aten.quantize_per_tensor %292, %4552, %4553, %int12_1912 : !torch.vtensor<[256,512,1,3],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,1,3],!torch.qint8> | |
| %4555 = torch.aten.int_repr %4554 : !torch.vtensor<[256,512,1,3],!torch.qint8> -> !torch.vtensor<[256,512,1,3],si8> | |
| %4556 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4557 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4558 = torch.aten._make_per_tensor_quantized_tensor %4555, %4556, %4557 : !torch.vtensor<[256,512,1,3],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,1,3],!torch.qint8> | |
| %4559 = torch.aten.dequantize.self %4558 : !torch.vtensor<[256,512,1,3],!torch.qint8> -> !torch.vtensor<[256,512,1,3],f32> | |
| %int12_1913 = torch.constant.int 12 | |
| %4560 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4561 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4562 = torch.aten.quantize_per_tensor %293, %4560, %4561, %int12_1913 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4563 = torch.aten.int_repr %4562 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4564 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4565 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4566 = torch.aten._make_per_tensor_quantized_tensor %4563, %4564, %4565 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4567 = torch.aten.dequantize.self %4566 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1914 = torch.constant.int 0 | |
| %int1_1915 = torch.constant.int 1 | |
| %int1_1916 = torch.constant.int 1 | |
| %int1_1917 = torch.constant.int 1 | |
| %int1_1918 = torch.constant.int 1 | |
| %int1_1919 = torch.constant.int 1 | |
| %int0_1920 = torch.constant.int 0 | |
| %4568 = torch.prim.ListConstruct %int0_1914, %int1_1915 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4569 = torch.prim.ListConstruct %int1_1916, %int1_1917 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4570 = torch.prim.ListConstruct %int1_1918, %int1_1919 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4571 = torch.prim.ListConstruct %int0_1920, %int0_1920 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1921 = torch.constant.bool false | |
| %int1_1922 = torch.constant.int 1 | |
| %4572 = torch.aten.convolution %4551, %4559, %4567, %4570, %4568, %4569, %false_1921, %4571, %int1_1922 : !torch.vtensor<[32,512,5,5],f32>, !torch.vtensor<[256,512,1,3],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4573 = torch.aten.relu %4572 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
| %int12_1923 = torch.constant.int 12 | |
| %4574 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4575 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4576 = torch.aten.quantize_per_tensor %294, %4574, %4575, %int12_1923 : !torch.vtensor<[256,512,3,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,512,3,1],!torch.qint8> | |
| %4577 = torch.aten.int_repr %4576 : !torch.vtensor<[256,512,3,1],!torch.qint8> -> !torch.vtensor<[256,512,3,1],si8> | |
| %4578 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4579 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4580 = torch.aten._make_per_tensor_quantized_tensor %4577, %4578, %4579 : !torch.vtensor<[256,512,3,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,512,3,1],!torch.qint8> | |
| %4581 = torch.aten.dequantize.self %4580 : !torch.vtensor<[256,512,3,1],!torch.qint8> -> !torch.vtensor<[256,512,3,1],f32> | |
| %int12_1924 = torch.constant.int 12 | |
| %4582 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4583 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4584 = torch.aten.quantize_per_tensor %295, %4582, %4583, %int12_1924 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4585 = torch.aten.int_repr %4584 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4586 = torch.aten.item %305 : !torch.vtensor<[],f32> -> !torch.float | |
| %4587 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4588 = torch.aten._make_per_tensor_quantized_tensor %4585, %4586, %4587 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4589 = torch.aten.dequantize.self %4588 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int1_1925 = torch.constant.int 1 | |
| %int0_1926 = torch.constant.int 0 | |
| %int1_1927 = torch.constant.int 1 | |
| %int1_1928 = torch.constant.int 1 | |
| %int1_1929 = torch.constant.int 1 | |
| %int1_1930 = torch.constant.int 1 | |
| %int0_1931 = torch.constant.int 0 | |
| %4590 = torch.prim.ListConstruct %int1_1925, %int0_1926 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4591 = torch.prim.ListConstruct %int1_1927, %int1_1928 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4592 = torch.prim.ListConstruct %int1_1929, %int1_1930 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4593 = torch.prim.ListConstruct %int0_1931, %int0_1931 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1932 = torch.constant.bool false | |
| %int1_1933 = torch.constant.int 1 | |
| %4594 = torch.aten.convolution %4551, %4581, %4589, %4592, %4590, %4591, %false_1932, %4593, %int1_1933 : !torch.vtensor<[32,512,5,5],f32>, !torch.vtensor<[256,512,3,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4595 = torch.aten.relu %4594 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
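| // Concatenate the two 256-channel branch outputs along dim 1 into [32,512,5,5], then requantize the result. | |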
| %4596 = torch.prim.ListConstruct %4573, %4595 : (!torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>) -> !torch.list<vtensor> | |
| %int1_1934 = torch.constant.int 1 | |
| %4597 = torch.aten.cat %4596, %int1_1934 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,512,5,5],f32> | |
| %int12_1935 = torch.constant.int 12 | |
| %4598 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4599 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4600 = torch.aten.quantize_per_tensor %4597, %4598, %4599, %int12_1935 : !torch.vtensor<[32,512,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4601 = torch.aten.int_repr %4600 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],si8> | |
| %4602 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4603 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4604 = torch.aten._make_per_tensor_quantized_tensor %4601, %4602, %4603 : !torch.vtensor<[32,512,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,512,5,5],!torch.qint8> | |
| %4605 = torch.aten.dequantize.self %4604 : !torch.vtensor<[32,512,5,5],!torch.qint8> -> !torch.vtensor<[32,512,5,5],f32> | |
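| // Pooling branch: 3x3 average pooling (stride 1, padding 1) over the 1536-channel block input %4365; the following mul.Tensor by a scalar constant appears to be a rescaling step introduced by the ONNX import. | |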
| %int3_1936 = torch.constant.int 3 | |
| %int3_1937 = torch.constant.int 3 | |
| %int1_1938 = torch.constant.int 1 | |
| %int1_1939 = torch.constant.int 1 | |
| %int1_1940 = torch.constant.int 1 | |
| %int1_1941 = torch.constant.int 1 | |
| %int1_1942 = torch.constant.int 1 | |
| %int1_1943 = torch.constant.int 1 | |
| %4606 = torch.prim.ListConstruct %int3_1936, %int3_1937 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4607 = torch.prim.ListConstruct %int1_1938, %int1_1939, %int1_1940, %int1_1941 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %4608 = torch.prim.ListConstruct %int1_1942, %int1_1943 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1944 = torch.constant.bool false | |
| %false_1945 = torch.constant.bool false | |
| %none_1946 = torch.constant.none | |
| %4609 = torch.aten.avg_pool2d %4365, %4606, %4608, %4607, %false_1944, %false_1945, %none_1946 : !torch.vtensor<[32,1536,5,5],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1536,5,5],f32> | |
| %4610 = torch.aten.mul.Tensor %4609, %308 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1536,5,5],f32> | |
| %int12_1947 = torch.constant.int 12 | |
| %4611 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4612 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4613 = torch.aten.quantize_per_tensor %4610, %4611, %4612, %int12_1947 : !torch.vtensor<[32,1536,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4614 = torch.aten.int_repr %4613 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],si8> | |
| %4615 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4616 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4617 = torch.aten._make_per_tensor_quantized_tensor %4614, %4615, %4616 : !torch.vtensor<[32,1536,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4618 = torch.aten.dequantize.self %4617 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],f32> | |
| %int12_1948 = torch.constant.int 12 | |
| %4619 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4620 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4621 = torch.aten.quantize_per_tensor %296, %4619, %4620, %int12_1948 : !torch.vtensor<[256,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4622 = torch.aten.int_repr %4621 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],si8> | |
| %4623 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4624 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4625 = torch.aten._make_per_tensor_quantized_tensor %4622, %4623, %4624 : !torch.vtensor<[256,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[256,1536,1,1],!torch.qint8> | |
| %4626 = torch.aten.dequantize.self %4625 : !torch.vtensor<[256,1536,1,1],!torch.qint8> -> !torch.vtensor<[256,1536,1,1],f32> | |
| %int12_1949 = torch.constant.int 12 | |
| %4627 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4628 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4629 = torch.aten.quantize_per_tensor %297, %4627, %4628, %int12_1949 : !torch.vtensor<[256],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4630 = torch.aten.int_repr %4629 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],si8> | |
| %4631 = torch.aten.item %302 : !torch.vtensor<[],f32> -> !torch.float | |
| %4632 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4633 = torch.aten._make_per_tensor_quantized_tensor %4630, %4631, %4632 : !torch.vtensor<[256],si8>, !torch.float, !torch.int -> !torch.vtensor<[256],!torch.qint8> | |
| %4634 = torch.aten.dequantize.self %4633 : !torch.vtensor<[256],!torch.qint8> -> !torch.vtensor<[256],f32> | |
| %int0_1950 = torch.constant.int 0 | |
| %int0_1951 = torch.constant.int 0 | |
| %int1_1952 = torch.constant.int 1 | |
| %int1_1953 = torch.constant.int 1 | |
| %int1_1954 = torch.constant.int 1 | |
| %int1_1955 = torch.constant.int 1 | |
| %int0_1956 = torch.constant.int 0 | |
| %4635 = torch.prim.ListConstruct %int0_1950, %int0_1951 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4636 = torch.prim.ListConstruct %int1_1952, %int1_1953 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4637 = torch.prim.ListConstruct %int1_1954, %int1_1955 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4638 = torch.prim.ListConstruct %int0_1956, %int0_1956 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1957 = torch.constant.bool false | |
| %int1_1958 = torch.constant.int 1 | |
| %4639 = torch.aten.convolution %4618, %4626, %4634, %4637, %4635, %4636, %false_1957, %4638, %int1_1958 : !torch.vtensor<[32,1536,5,5],f32>, !torch.vtensor<[256,1536,1,1],f32>, !torch.vtensor<[256],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[32,256,5,5],f32> | |
| %4640 = torch.aten.relu %4639 : !torch.vtensor<[32,256,5,5],f32> -> !torch.vtensor<[32,256,5,5],f32> | |
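| // Concatenate the five branch outputs (256 + 256 + 256 + 512 + 256 channels) into the block's [32,1536,5,5] result. | |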
| %4641 = torch.prim.ListConstruct %4387, %4439, %4461, %4605, %4640 : (!torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>, !torch.vtensor<[32,512,5,5],f32>, !torch.vtensor<[32,256,5,5],f32>) -> !torch.list<vtensor> | |
| %int1_1959 = torch.constant.int 1 | |
| %4642 = torch.aten.cat %4641, %int1_1959 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[32,1536,5,5],f32> | |
| %int12_1960 = torch.constant.int 12 | |
| %4643 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4644 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4645 = torch.aten.quantize_per_tensor %4642, %4643, %4644, %int12_1960 : !torch.vtensor<[32,1536,5,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4646 = torch.aten.int_repr %4645 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],si8> | |
| %4647 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4648 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4649 = torch.aten._make_per_tensor_quantized_tensor %4646, %4647, %4648 : !torch.vtensor<[32,1536,5,5],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1536,5,5],!torch.qint8> | |
| %4650 = torch.aten.dequantize.self %4649 : !torch.vtensor<[32,1536,5,5],!torch.qint8> -> !torch.vtensor<[32,1536,5,5],f32> | |
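| // Global average pooling: a 5x5 kernel over the 5x5 spatial map reduces the features to [32,1536,1,1]; the two prims.collapse ops below flatten this to [32,1536]. | |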
| %int0_1961 = torch.constant.int 0 | |
| %int1_1962 = torch.constant.int 1 | |
| %int5 = torch.constant.int 5 | |
| %int5_1963 = torch.constant.int 5 | |
| %4651 = torch.prim.ListConstruct %int5, %int5_1963 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4652 = torch.prim.ListConstruct %int0_1961, %int0_1961 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %4653 = torch.prim.ListConstruct %int1_1962, %int1_1962 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %false_1964 = torch.constant.bool false | |
| %none_1965 = torch.constant.none | |
| %4654 = torch.aten.avg_pool2d %4650, %4651, %4653, %4652, %false_1964, %false_1964, %none_1965 : !torch.vtensor<[32,1536,5,5],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[32,1536,1,1],f32> | |
| %4655 = torch.aten.mul.Tensor %4654, %309 : !torch.vtensor<[32,1536,1,1],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[32,1536,1,1],f32> | |
| %int12_1966 = torch.constant.int 12 | |
| %4656 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4657 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4658 = torch.aten.quantize_per_tensor %4655, %4656, %4657, %int12_1966 : !torch.vtensor<[32,1536,1,1],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1536,1,1],!torch.qint8> | |
| %4659 = torch.aten.int_repr %4658 : !torch.vtensor<[32,1536,1,1],!torch.qint8> -> !torch.vtensor<[32,1536,1,1],si8> | |
| %4660 = torch.aten.item %306 : !torch.vtensor<[],f32> -> !torch.float | |
| %4661 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4662 = torch.aten._make_per_tensor_quantized_tensor %4659, %4660, %4661 : !torch.vtensor<[32,1536,1,1],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1536,1,1],!torch.qint8> | |
| %4663 = torch.aten.dequantize.self %4662 : !torch.vtensor<[32,1536,1,1],!torch.qint8> -> !torch.vtensor<[32,1536,1,1],f32> | |
| %int1_1967 = torch.constant.int 1 | |
| %int3_1968 = torch.constant.int 3 | |
| %4664 = torch.prims.collapse %4663, %int1_1967, %int3_1968 : !torch.vtensor<[32,1536,1,1],f32>, !torch.int, !torch.int -> !torch.vtensor<[32,1536],f32> | |
| %int0_1969 = torch.constant.int 0 | |
| %int0_1970 = torch.constant.int 0 | |
| %4665 = torch.prims.collapse %4664, %int0_1969, %int0_1970 : !torch.vtensor<[32,1536],f32>, !torch.int, !torch.int -> !torch.vtensor<[32,1536],f32> | |
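| // Final fully-connected layer: the quantized [1000,1536] weight and [1000] bias are dequantized, then applied as transpose + mm + add over the flattened features. | |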
| %int12_1971 = torch.constant.int 12 | |
| %4666 = torch.aten.item %310 : !torch.vtensor<[],f32> -> !torch.float | |
| %4667 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4668 = torch.aten.quantize_per_tensor %298, %4666, %4667, %int12_1971 : !torch.vtensor<[1000,1536],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1000,1536],!torch.qint8> | |
| %4669 = torch.aten.int_repr %4668 : !torch.vtensor<[1000,1536],!torch.qint8> -> !torch.vtensor<[1000,1536],si8> | |
| %4670 = torch.aten.item %310 : !torch.vtensor<[],f32> -> !torch.float | |
| %4671 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4672 = torch.aten._make_per_tensor_quantized_tensor %4669, %4670, %4671 : !torch.vtensor<[1000,1536],si8>, !torch.float, !torch.int -> !torch.vtensor<[1000,1536],!torch.qint8> | |
| %4673 = torch.aten.dequantize.self %4672 : !torch.vtensor<[1000,1536],!torch.qint8> -> !torch.vtensor<[1000,1536],f32> | |
| %int12_1972 = torch.constant.int 12 | |
| %4674 = torch.aten.item %310 : !torch.vtensor<[],f32> -> !torch.float | |
| %4675 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4676 = torch.aten.quantize_per_tensor %299, %4674, %4675, %int12_1972 : !torch.vtensor<[1000],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[1000],!torch.qint8> | |
| %4677 = torch.aten.int_repr %4676 : !torch.vtensor<[1000],!torch.qint8> -> !torch.vtensor<[1000],si8> | |
| %4678 = torch.aten.item %310 : !torch.vtensor<[],f32> -> !torch.float | |
| %4679 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4680 = torch.aten._make_per_tensor_quantized_tensor %4677, %4678, %4679 : !torch.vtensor<[1000],si8>, !torch.float, !torch.int -> !torch.vtensor<[1000],!torch.qint8> | |
| %4681 = torch.aten.dequantize.self %4680 : !torch.vtensor<[1000],!torch.qint8> -> !torch.vtensor<[1000],f32> | |
| %int0_1973 = torch.constant.int 0 | |
| %int1_1974 = torch.constant.int 1 | |
| %4682 = torch.aten.transpose.int %4673, %int0_1973, %int1_1974 : !torch.vtensor<[1000,1536],f32>, !torch.int, !torch.int -> !torch.vtensor<[1536,1000],f32> | |
| %4683 = torch.aten.mm %4665, %4682 : !torch.vtensor<[32,1536],f32>, !torch.vtensor<[1536,1000],f32> -> !torch.vtensor<[32,1000],f32> | |
| %4684 = torch.aten.add.Tensor %4683, %4681, %int1_1974 : !torch.vtensor<[32,1000],f32>, !torch.vtensor<[1000],f32>, !torch.int -> !torch.vtensor<[32,1000],f32> | |
| %int12_1975 = torch.constant.int 12 | |
| %4685 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4686 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4687 = torch.aten.quantize_per_tensor %4684, %4685, %4686, %int12_1975 : !torch.vtensor<[32,1000],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[32,1000],!torch.qint8> | |
| %4688 = torch.aten.int_repr %4687 : !torch.vtensor<[32,1000],!torch.qint8> -> !torch.vtensor<[32,1000],si8> | |
| %4689 = torch.aten.item %300 : !torch.vtensor<[],f32> -> !torch.float | |
| %4690 = torch.aten.item %301 : !torch.vtensor<[],si8> -> !torch.int | |
| %4691 = torch.aten._make_per_tensor_quantized_tensor %4688, %4689, %4690 : !torch.vtensor<[32,1000],si8>, !torch.float, !torch.int -> !torch.vtensor<[32,1000],!torch.qint8> | |
| %4692 = torch.aten.dequantize.self %4691 : !torch.vtensor<[32,1000],!torch.qint8> -> !torch.vtensor<[32,1000],f32> | |
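| // The dequantized [32,1000] logits are returned as the module output. | |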
| return %4692 : !torch.vtensor<[32,1000],f32> | |
| } | |
| } | |