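Below is the printout of a ResNet18-style backbone (no avgpool/fc shown) converted to a reference quantized model with FX graph mode quantization: Conv+ReLU pairs are fused into ConvReLU2d modules, and the generated forward() brackets each fp32 op with quantize_per_tensor/dequantize pairs using observed scales and zero points. A minimal sketch of how a printout like this can be produced, assuming the circa-2021 torch.quantization FX APIs; the model, calibration batch, and the is_reference flag are assumptions, not taken from the gist:

import torch
import torchvision
from torch.quantization import get_default_qconfig
from torch.quantization.quantize_fx import prepare_fx, convert_fx

# Hypothetical reproduction: the printed model has no avgpool/fc, so the
# original was likely a trimmed backbone; a stock resnet18 stands in here.
model = torchvision.models.resnet18(pretrained=True).eval()
qconfig_dict = {"": get_default_qconfig("fbgemm")}

# prepare_fx fuses conv-bn-relu (in eval mode) and inserts observers; run
# calibration data through the prepared model before converting.
prepared = prepare_fx(model, qconfig_dict)
prepared(torch.randn(1, 3, 224, 224))  # placeholder calibration batch

# is_reference=True (assumed API, PyTorch ~1.9) keeps fp32 modules bracketed
# by quantize/dequantize pairs instead of lowering to quantized kernels.
reference_model = convert_fx(prepared, is_reference=True)
print(reference_model)       # module hierarchy, as below
print(reference_model.code)  # generated forward(), as below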
GraphModule(
  (conv1): ConvReLU2d(
    (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
    (1): ReLU()
  )
  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  (layer1): Module(
    (0): Module(
      (conv1): ConvReLU2d(
        (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU()
      )
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (relu): ReLU(inplace=True)
    )
    (1): Module(
      (conv1): ConvReLU2d(
        (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU()
      )
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (relu): ReLU(inplace=True)
    )
  )
  (layer2): Module(
    (0): Module(
      (conv1): ConvReLU2d(
        (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        (1): ReLU()
      )
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (downsample): Module(
        (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2))
      )
      (relu): ReLU(inplace=True)
    )
    (1): Module(
      (conv1): ConvReLU2d(
        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU()
      )
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (relu): ReLU(inplace=True)
    )
  )
  (layer3): Module(
    (0): Module(
      (conv1): ConvReLU2d(
        (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        (1): ReLU()
      )
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (downsample): Module(
        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2))
      )
      (relu): ReLU(inplace=True)
    )
    (1): Module(
      (conv1): ConvReLU2d(
        (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU()
      )
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (relu): ReLU(inplace=True)
    )
  )
  (layer4): Module(
    (0): Module(
      (conv1): ConvReLU2d(
        (0): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        (1): ReLU()
      )
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (downsample): Module(
        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2))
      )
      (relu): ReLU(inplace=True)
    )
    (1): Module(
      (conv1): ConvReLU2d(
        (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        (1): ReLU()
      )
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (relu): ReLU(inplace=True)
    )
  )
)
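In the generated forward() below, every fp32 module call is bracketed by a torch.quantize_per_tensor / .dequantize() pair, so the arithmetic stays in fp32 while its inputs and outputs are rounded to the int8 grid defined by the observed scale and zero point. A self-contained round-trip with made-up qparams standing in for the observed attributes (e.g. conv1_input_scale_0 / conv1_input_zero_point_0):

import torch

x = torch.randn(4)
# scale/zero_point here are arbitrary stand-ins for the observed buffers.
q = torch.quantize_per_tensor(x, scale=0.1, zero_point=64, dtype=torch.quint8)
x_sim = q.dequantize()          # fp32 again, but snapped to the int8 grid
print((x - x_sim).abs().max())  # at most ~scale / 2 for in-range values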
def forward(self, x : torch.Tensor):
    conv1_input_scale_0 = self.conv1_input_scale_0
    conv1_input_zero_point_0 = self.conv1_input_zero_point_0
    quantize_per_tensor = torch.quantize_per_tensor(x, conv1_input_scale_0, conv1_input_zero_point_0, torch.quint8); x = conv1_input_scale_0 = conv1_input_zero_point_0 = None
    dequantize_17 = quantize_per_tensor.dequantize(); quantize_per_tensor = None
    conv1_1 = self.conv1(dequantize_17); dequantize_17 = None
    conv1_scale_0 = self.conv1_scale_0
    conv1_zero_point_0 = self.conv1_zero_point_0
    quantize_per_tensor_9 = torch.quantize_per_tensor(conv1_1, conv1_scale_0, conv1_zero_point_0, torch.quint8); conv1_1 = conv1_scale_0 = conv1_zero_point_0 = None
    maxpool = self.maxpool(quantize_per_tensor_9); quantize_per_tensor_9 = None
    dequantize_18 = maxpool.dequantize()
    layer1_0_conv1_1 = getattr(self.layer1, "0").conv1(dequantize_18); dequantize_18 = None
    layer1_0_conv1_scale_0 = self.layer1_0_conv1_scale_0
    layer1_0_conv1_zero_point_0 = self.layer1_0_conv1_zero_point_0
    quantize_per_tensor_10 = torch.quantize_per_tensor(layer1_0_conv1_1, layer1_0_conv1_scale_0, layer1_0_conv1_zero_point_0, torch.quint8); layer1_0_conv1_1 = layer1_0_conv1_scale_0 = layer1_0_conv1_zero_point_0 = None
    dequantize_19 = quantize_per_tensor_10.dequantize(); quantize_per_tensor_10 = None
    layer1_0_conv2_1 = getattr(self.layer1, "0").conv2(dequantize_19); dequantize_19 = None
    layer1_0_conv2_scale_0 = self.layer1_0_conv2_scale_0
    layer1_0_conv2_zero_point_0 = self.layer1_0_conv2_zero_point_0
    quantize_per_tensor_11 = torch.quantize_per_tensor(layer1_0_conv2_1, layer1_0_conv2_scale_0, layer1_0_conv2_zero_point_0, torch.quint8); layer1_0_conv2_1 = layer1_0_conv2_scale_0 = layer1_0_conv2_zero_point_0 = None
    dequantize = quantize_per_tensor_11.dequantize(); quantize_per_tensor_11 = None
    dequantize_1 = maxpool.dequantize(); maxpool = None
    add = dequantize + dequantize_1; dequantize = dequantize_1 = None
    layer1_0_relu_1 = getattr(self.layer1, "0").relu(add); add = None
    layer1_0_relu_output_scale_0 = self.layer1_0_relu_output_scale_0
    layer1_0_relu_output_zero_point_0 = self.layer1_0_relu_output_zero_point_0
    quantize_per_tensor_1 = torch.quantize_per_tensor(layer1_0_relu_1, layer1_0_relu_output_scale_0, layer1_0_relu_output_zero_point_0, torch.quint8); layer1_0_relu_1 = layer1_0_relu_output_scale_0 = layer1_0_relu_output_zero_point_0 = None
    dequantize_20 = quantize_per_tensor_1.dequantize()
    layer1_1_conv1_1 = getattr(self.layer1, "1").conv1(dequantize_20); dequantize_20 = None
    layer1_1_conv1_scale_0 = self.layer1_1_conv1_scale_0
    layer1_1_conv1_zero_point_0 = self.layer1_1_conv1_zero_point_0
    quantize_per_tensor_12 = torch.quantize_per_tensor(layer1_1_conv1_1, layer1_1_conv1_scale_0, layer1_1_conv1_zero_point_0, torch.quint8); layer1_1_conv1_1 = layer1_1_conv1_scale_0 = layer1_1_conv1_zero_point_0 = None
    dequantize_21 = quantize_per_tensor_12.dequantize(); quantize_per_tensor_12 = None
    layer1_1_conv2_1 = getattr(self.layer1, "1").conv2(dequantize_21); dequantize_21 = None
    layer1_1_conv2_scale_0 = self.layer1_1_conv2_scale_0
    layer1_1_conv2_zero_point_0 = self.layer1_1_conv2_zero_point_0
    quantize_per_tensor_13 = torch.quantize_per_tensor(layer1_1_conv2_1, layer1_1_conv2_scale_0, layer1_1_conv2_zero_point_0, torch.quint8); layer1_1_conv2_1 = layer1_1_conv2_scale_0 = layer1_1_conv2_zero_point_0 = None
    dequantize_2 = quantize_per_tensor_13.dequantize(); quantize_per_tensor_13 = None
    dequantize_3 = quantize_per_tensor_1.dequantize(); quantize_per_tensor_1 = None
    add_1 = dequantize_2 + dequantize_3; dequantize_2 = dequantize_3 = None
    layer1_1_relu_1 = getattr(self.layer1, "1").relu(add_1); add_1 = None
    layer1_1_relu_output_scale_0 = self.layer1_1_relu_output_scale_0
    layer1_1_relu_output_zero_point_0 = self.layer1_1_relu_output_zero_point_0
    quantize_per_tensor_2 = torch.quantize_per_tensor(layer1_1_relu_1, layer1_1_relu_output_scale_0, layer1_1_relu_output_zero_point_0, torch.quint8); layer1_1_relu_1 = layer1_1_relu_output_scale_0 = layer1_1_relu_output_zero_point_0 = None
    dequantize_22 = quantize_per_tensor_2.dequantize()
    layer2_0_conv1_1 = getattr(self.layer2, "0").conv1(dequantize_22); dequantize_22 = None
    layer2_0_conv1_scale_0 = self.layer2_0_conv1_scale_0
    layer2_0_conv1_zero_point_0 = self.layer2_0_conv1_zero_point_0
    quantize_per_tensor_14 = torch.quantize_per_tensor(layer2_0_conv1_1, layer2_0_conv1_scale_0, layer2_0_conv1_zero_point_0, torch.quint8); layer2_0_conv1_1 = layer2_0_conv1_scale_0 = layer2_0_conv1_zero_point_0 = None
    dequantize_23 = quantize_per_tensor_14.dequantize(); quantize_per_tensor_14 = None
    layer2_0_conv2_1 = getattr(self.layer2, "0").conv2(dequantize_23); dequantize_23 = None
    layer2_0_conv2_scale_0 = self.layer2_0_conv2_scale_0
    layer2_0_conv2_zero_point_0 = self.layer2_0_conv2_zero_point_0
    quantize_per_tensor_15 = torch.quantize_per_tensor(layer2_0_conv2_1, layer2_0_conv2_scale_0, layer2_0_conv2_zero_point_0, torch.quint8); layer2_0_conv2_1 = layer2_0_conv2_scale_0 = layer2_0_conv2_zero_point_0 = None
    dequantize_24 = quantize_per_tensor_2.dequantize(); quantize_per_tensor_2 = None
    layer2_0_downsample_1 = getattr(getattr(self.layer2, "0").downsample, "0")(dequantize_24); dequantize_24 = None
    layer2_0_downsample_0_scale_0 = self.layer2_0_downsample_0_scale_0
    layer2_0_downsample_0_zero_point_0 = self.layer2_0_downsample_0_zero_point_0
    quantize_per_tensor_16 = torch.quantize_per_tensor(layer2_0_downsample_1, layer2_0_downsample_0_scale_0, layer2_0_downsample_0_zero_point_0, torch.quint8); layer2_0_downsample_1 = layer2_0_downsample_0_scale_0 = layer2_0_downsample_0_zero_point_0 = None
    dequantize_4 = quantize_per_tensor_15.dequantize(); quantize_per_tensor_15 = None
    dequantize_5 = quantize_per_tensor_16.dequantize(); quantize_per_tensor_16 = None
    add_2 = dequantize_4 + dequantize_5; dequantize_4 = dequantize_5 = None
    layer2_0_relu_1 = getattr(self.layer2, "0").relu(add_2); add_2 = None
    layer2_0_relu_output_scale_0 = self.layer2_0_relu_output_scale_0
    layer2_0_relu_output_zero_point_0 = self.layer2_0_relu_output_zero_point_0
    quantize_per_tensor_3 = torch.quantize_per_tensor(layer2_0_relu_1, layer2_0_relu_output_scale_0, layer2_0_relu_output_zero_point_0, torch.quint8); layer2_0_relu_1 = layer2_0_relu_output_scale_0 = layer2_0_relu_output_zero_point_0 = None
    dequantize_25 = quantize_per_tensor_3.dequantize()
    layer2_1_conv1_1 = getattr(self.layer2, "1").conv1(dequantize_25); dequantize_25 = None
    layer2_1_conv1_scale_0 = self.layer2_1_conv1_scale_0
    layer2_1_conv1_zero_point_0 = self.layer2_1_conv1_zero_point_0
    quantize_per_tensor_17 = torch.quantize_per_tensor(layer2_1_conv1_1, layer2_1_conv1_scale_0, layer2_1_conv1_zero_point_0, torch.quint8); layer2_1_conv1_1 = layer2_1_conv1_scale_0 = layer2_1_conv1_zero_point_0 = None
    dequantize_26 = quantize_per_tensor_17.dequantize(); quantize_per_tensor_17 = None
    layer2_1_conv2_1 = getattr(self.layer2, "1").conv2(dequantize_26); dequantize_26 = None
    layer2_1_conv2_scale_0 = self.layer2_1_conv2_scale_0
    layer2_1_conv2_zero_point_0 = self.layer2_1_conv2_zero_point_0
    quantize_per_tensor_18 = torch.quantize_per_tensor(layer2_1_conv2_1, layer2_1_conv2_scale_0, layer2_1_conv2_zero_point_0, torch.quint8); layer2_1_conv2_1 = layer2_1_conv2_scale_0 = layer2_1_conv2_zero_point_0 = None
    dequantize_6 = quantize_per_tensor_18.dequantize(); quantize_per_tensor_18 = None
    dequantize_7 = quantize_per_tensor_3.dequantize(); quantize_per_tensor_3 = None
    add_3 = dequantize_6 + dequantize_7; dequantize_6 = dequantize_7 = None
    layer2_1_relu_1 = getattr(self.layer2, "1").relu(add_3); add_3 = None
    layer2_1_relu_output_scale_0 = self.layer2_1_relu_output_scale_0
    layer2_1_relu_output_zero_point_0 = self.layer2_1_relu_output_zero_point_0
    quantize_per_tensor_4 = torch.quantize_per_tensor(layer2_1_relu_1, layer2_1_relu_output_scale_0, layer2_1_relu_output_zero_point_0, torch.quint8); layer2_1_relu_1 = layer2_1_relu_output_scale_0 = layer2_1_relu_output_zero_point_0 = None
    dequantize_27 = quantize_per_tensor_4.dequantize()
    layer3_0_conv1_1 = getattr(self.layer3, "0").conv1(dequantize_27); dequantize_27 = None
    layer3_0_conv1_scale_0 = self.layer3_0_conv1_scale_0
    layer3_0_conv1_zero_point_0 = self.layer3_0_conv1_zero_point_0
    quantize_per_tensor_19 = torch.quantize_per_tensor(layer3_0_conv1_1, layer3_0_conv1_scale_0, layer3_0_conv1_zero_point_0, torch.quint8); layer3_0_conv1_1 = layer3_0_conv1_scale_0 = layer3_0_conv1_zero_point_0 = None
    dequantize_28 = quantize_per_tensor_19.dequantize(); quantize_per_tensor_19 = None
    layer3_0_conv2_1 = getattr(self.layer3, "0").conv2(dequantize_28); dequantize_28 = None
    layer3_0_conv2_scale_0 = self.layer3_0_conv2_scale_0
    layer3_0_conv2_zero_point_0 = self.layer3_0_conv2_zero_point_0
    quantize_per_tensor_20 = torch.quantize_per_tensor(layer3_0_conv2_1, layer3_0_conv2_scale_0, layer3_0_conv2_zero_point_0, torch.quint8); layer3_0_conv2_1 = layer3_0_conv2_scale_0 = layer3_0_conv2_zero_point_0 = None
    dequantize_29 = quantize_per_tensor_4.dequantize(); quantize_per_tensor_4 = None
    layer3_0_downsample_1 = getattr(getattr(self.layer3, "0").downsample, "0")(dequantize_29); dequantize_29 = None
    layer3_0_downsample_0_scale_0 = self.layer3_0_downsample_0_scale_0
    layer3_0_downsample_0_zero_point_0 = self.layer3_0_downsample_0_zero_point_0
    quantize_per_tensor_21 = torch.quantize_per_tensor(layer3_0_downsample_1, layer3_0_downsample_0_scale_0, layer3_0_downsample_0_zero_point_0, torch.quint8); layer3_0_downsample_1 = layer3_0_downsample_0_scale_0 = layer3_0_downsample_0_zero_point_0 = None
    dequantize_8 = quantize_per_tensor_20.dequantize(); quantize_per_tensor_20 = None
    dequantize_9 = quantize_per_tensor_21.dequantize(); quantize_per_tensor_21 = None
    add_4 = dequantize_8 + dequantize_9; dequantize_8 = dequantize_9 = None
    layer3_0_relu_1 = getattr(self.layer3, "0").relu(add_4); add_4 = None
    layer3_0_relu_output_scale_0 = self.layer3_0_relu_output_scale_0
    layer3_0_relu_output_zero_point_0 = self.layer3_0_relu_output_zero_point_0
    quantize_per_tensor_5 = torch.quantize_per_tensor(layer3_0_relu_1, layer3_0_relu_output_scale_0, layer3_0_relu_output_zero_point_0, torch.quint8); layer3_0_relu_1 = layer3_0_relu_output_scale_0 = layer3_0_relu_output_zero_point_0 = None
    dequantize_30 = quantize_per_tensor_5.dequantize()
    layer3_1_conv1_1 = getattr(self.layer3, "1").conv1(dequantize_30); dequantize_30 = None
    layer3_1_conv1_scale_0 = self.layer3_1_conv1_scale_0
    layer3_1_conv1_zero_point_0 = self.layer3_1_conv1_zero_point_0
    quantize_per_tensor_22 = torch.quantize_per_tensor(layer3_1_conv1_1, layer3_1_conv1_scale_0, layer3_1_conv1_zero_point_0, torch.quint8); layer3_1_conv1_1 = layer3_1_conv1_scale_0 = layer3_1_conv1_zero_point_0 = None
    dequantize_31 = quantize_per_tensor_22.dequantize(); quantize_per_tensor_22 = None
    layer3_1_conv2_1 = getattr(self.layer3, "1").conv2(dequantize_31); dequantize_31 = None
    layer3_1_conv2_scale_0 = self.layer3_1_conv2_scale_0
    layer3_1_conv2_zero_point_0 = self.layer3_1_conv2_zero_point_0
    quantize_per_tensor_23 = torch.quantize_per_tensor(layer3_1_conv2_1, layer3_1_conv2_scale_0, layer3_1_conv2_zero_point_0, torch.quint8); layer3_1_conv2_1 = layer3_1_conv2_scale_0 = layer3_1_conv2_zero_point_0 = None
    dequantize_10 = quantize_per_tensor_23.dequantize(); quantize_per_tensor_23 = None
    dequantize_11 = quantize_per_tensor_5.dequantize(); quantize_per_tensor_5 = None
    add_5 = dequantize_10 + dequantize_11; dequantize_10 = dequantize_11 = None
    layer3_1_relu_1 = getattr(self.layer3, "1").relu(add_5); add_5 = None
    layer3_1_relu_output_scale_0 = self.layer3_1_relu_output_scale_0
    layer3_1_relu_output_zero_point_0 = self.layer3_1_relu_output_zero_point_0
    quantize_per_tensor_6 = torch.quantize_per_tensor(layer3_1_relu_1, layer3_1_relu_output_scale_0, layer3_1_relu_output_zero_point_0, torch.quint8); layer3_1_relu_1 = layer3_1_relu_output_scale_0 = layer3_1_relu_output_zero_point_0 = None
    dequantize_32 = quantize_per_tensor_6.dequantize()
    layer4_0_conv1_1 = getattr(self.layer4, "0").conv1(dequantize_32); dequantize_32 = None
    layer4_0_conv1_scale_0 = self.layer4_0_conv1_scale_0
    layer4_0_conv1_zero_point_0 = self.layer4_0_conv1_zero_point_0
    quantize_per_tensor_24 = torch.quantize_per_tensor(layer4_0_conv1_1, layer4_0_conv1_scale_0, layer4_0_conv1_zero_point_0, torch.quint8); layer4_0_conv1_1 = layer4_0_conv1_scale_0 = layer4_0_conv1_zero_point_0 = None
    dequantize_33 = quantize_per_tensor_24.dequantize(); quantize_per_tensor_24 = None
    layer4_0_conv2_1 = getattr(self.layer4, "0").conv2(dequantize_33); dequantize_33 = None
    layer4_0_conv2_scale_0 = self.layer4_0_conv2_scale_0
    layer4_0_conv2_zero_point_0 = self.layer4_0_conv2_zero_point_0
    quantize_per_tensor_25 = torch.quantize_per_tensor(layer4_0_conv2_1, layer4_0_conv2_scale_0, layer4_0_conv2_zero_point_0, torch.quint8); layer4_0_conv2_1 = layer4_0_conv2_scale_0 = layer4_0_conv2_zero_point_0 = None
    dequantize_34 = quantize_per_tensor_6.dequantize(); quantize_per_tensor_6 = None
    layer4_0_downsample_1 = getattr(getattr(self.layer4, "0").downsample, "0")(dequantize_34); dequantize_34 = None
    layer4_0_downsample_0_scale_0 = self.layer4_0_downsample_0_scale_0
    layer4_0_downsample_0_zero_point_0 = self.layer4_0_downsample_0_zero_point_0
    quantize_per_tensor_26 = torch.quantize_per_tensor(layer4_0_downsample_1, layer4_0_downsample_0_scale_0, layer4_0_downsample_0_zero_point_0, torch.quint8); layer4_0_downsample_1 = layer4_0_downsample_0_scale_0 = layer4_0_downsample_0_zero_point_0 = None
    dequantize_12 = quantize_per_tensor_25.dequantize(); quantize_per_tensor_25 = None
    dequantize_13 = quantize_per_tensor_26.dequantize(); quantize_per_tensor_26 = None
    add_6 = dequantize_12 + dequantize_13; dequantize_12 = dequantize_13 = None
    layer4_0_relu_1 = getattr(self.layer4, "0").relu(add_6); add_6 = None
    layer4_0_relu_output_scale_0 = self.layer4_0_relu_output_scale_0
    layer4_0_relu_output_zero_point_0 = self.layer4_0_relu_output_zero_point_0
    quantize_per_tensor_7 = torch.quantize_per_tensor(layer4_0_relu_1, layer4_0_relu_output_scale_0, layer4_0_relu_output_zero_point_0, torch.quint8); layer4_0_relu_1 = layer4_0_relu_output_scale_0 = layer4_0_relu_output_zero_point_0 = None
    dequantize_35 = quantize_per_tensor_7.dequantize()
    layer4_1_conv1_1 = getattr(self.layer4, "1").conv1(dequantize_35); dequantize_35 = None
    layer4_1_conv1_scale_0 = self.layer4_1_conv1_scale_0
    layer4_1_conv1_zero_point_0 = self.layer4_1_conv1_zero_point_0
    quantize_per_tensor_27 = torch.quantize_per_tensor(layer4_1_conv1_1, layer4_1_conv1_scale_0, layer4_1_conv1_zero_point_0, torch.quint8); layer4_1_conv1_1 = layer4_1_conv1_scale_0 = layer4_1_conv1_zero_point_0 = None
    dequantize_36 = quantize_per_tensor_27.dequantize(); quantize_per_tensor_27 = None
    layer4_1_conv2_1 = getattr(self.layer4, "1").conv2(dequantize_36); dequantize_36 = None
    layer4_1_conv2_scale_0 = self.layer4_1_conv2_scale_0
    layer4_1_conv2_zero_point_0 = self.layer4_1_conv2_zero_point_0
    quantize_per_tensor_28 = torch.quantize_per_tensor(layer4_1_conv2_1, layer4_1_conv2_scale_0, layer4_1_conv2_zero_point_0, torch.quint8); layer4_1_conv2_1 = layer4_1_conv2_scale_0 = layer4_1_conv2_zero_point_0 = None
    dequantize_14 = quantize_per_tensor_28.dequantize(); quantize_per_tensor_28 = None
    dequantize_15 = quantize_per_tensor_7.dequantize(); quantize_per_tensor_7 = None
    add_7 = dequantize_14 + dequantize_15; dequantize_14 = dequantize_15 = None
    layer4_1_relu_1 = getattr(self.layer4, "1").relu(add_7); add_7 = None
    layer4_1_relu_output_scale_0 = self.layer4_1_relu_output_scale_0
    layer4_1_relu_output_zero_point_0 = self.layer4_1_relu_output_zero_point_0
    quantize_per_tensor_8 = torch.quantize_per_tensor(layer4_1_relu_1, layer4_1_relu_output_scale_0, layer4_1_relu_output_zero_point_0, torch.quint8); layer4_1_relu_1 = layer4_1_relu_output_scale_0 = layer4_1_relu_output_zero_point_0 = None
    dequantize_16 = quantize_per_tensor_8.dequantize(); quantize_per_tensor_8 = None
    return dequantize_16
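The residual connections above all follow the same reference pattern: both addends are dequantized, the add and ReLU run in fp32, and the result is re-quantized with the block's observed output qparams (e.g. layer1_0_relu_output_scale_0). A self-contained sketch of that pattern, with hypothetical shapes and qparams standing in for the observed buffers:

import torch

# Hypothetical qparams/shapes standing in for the observed attributes above.
out_scale, out_zero_point = 0.05, 0
conv_out = torch.quantize_per_tensor(torch.randn(1, 64, 56, 56), 0.1, 64, torch.quint8)
identity = torch.quantize_per_tensor(torch.randn(1, 64, 56, 56), 0.1, 64, torch.quint8)

# Dequantize both inputs, add + ReLU in fp32, then re-quantize the result.
y = torch.relu(conv_out.dequantize() + identity.dequantize())
y_q = torch.quantize_per_tensor(y, out_scale, out_zero_point, torch.quint8)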