@jerryzh168
Created August 3, 2021 21:30
def forward(self, input):
    input_1 = input
    _0_conv1_input_scale_0 = getattr(self, "0_conv1_input_scale_0")
    _0_conv1_input_zero_point_0 = getattr(self, "0_conv1_input_zero_point_0")
    quantize_per_tensor = torch.quantize_per_tensor(input_1, _0_conv1_input_scale_0, _0_conv1_input_zero_point_0, torch.qint8); input_1 = _0_conv1_input_scale_0 = _0_conv1_input_zero_point_0 = None
    dequantize = quantize_per_tensor.dequantize(); quantize_per_tensor = None
    _0_conv1 = getattr(self, "0").conv1(dequantize)
    _0_conv1_output_scale_0 = getattr(self, "0_conv1_output_scale_0")
    _0_conv1_output_zero_point_0 = getattr(self, "0_conv1_output_zero_point_0")
    quantize_per_tensor_1 = torch.quantize_per_tensor(_0_conv1, _0_conv1_output_scale_0, _0_conv1_output_zero_point_0, torch.qint8); _0_conv1 = _0_conv1_output_scale_0 = _0_conv1_output_zero_point_0 = None
    dequantize_1 = quantize_per_tensor_1.dequantize(); quantize_per_tensor_1 = None
    _0_conv2 = getattr(self, "0").conv2(dequantize_1); dequantize_1 = None
    _0_conv2_output_scale_0 = getattr(self, "0_conv2_output_scale_0")
    _0_conv2_output_zero_point_0 = getattr(self, "0_conv2_output_zero_point_0")
    quantize_per_tensor_2 = torch.quantize_per_tensor(_0_conv2, _0_conv2_output_scale_0, _0_conv2_output_zero_point_0, torch.qint8); _0_conv2 = _0_conv2_output_scale_0 = _0_conv2_output_zero_point_0 = None
    dequantize_2 = quantize_per_tensor_2.dequantize(); quantize_per_tensor_2 = None
    add = dequantize_2 + dequantize; dequantize_2 = dequantize = None
    _0_relu_1 = getattr(self, "0").relu(add); add = None
    _0_relu_output_scale_0 = getattr(self, "0_relu_output_scale_0")
    _0_relu_output_zero_point_0 = getattr(self, "0_relu_output_zero_point_0")
    quantize_per_tensor_3 = torch.quantize_per_tensor(_0_relu_1, _0_relu_output_scale_0, _0_relu_output_zero_point_0, torch.qint8); _0_relu_1 = _0_relu_output_scale_0 = _0_relu_output_zero_point_0 = None
    dequantize_3 = quantize_per_tensor_3.dequantize(); quantize_per_tensor_3 = None
    _1_conv1 = getattr(self, "1").conv1(dequantize_3)
    _1_conv1_output_scale_0 = getattr(self, "1_conv1_output_scale_0")
    _1_conv1_output_zero_point_0 = getattr(self, "1_conv1_output_zero_point_0")
    quantize_per_tensor_4 = torch.quantize_per_tensor(_1_conv1, _1_conv1_output_scale_0, _1_conv1_output_zero_point_0, torch.qint8); _1_conv1 = _1_conv1_output_scale_0 = _1_conv1_output_zero_point_0 = None
    dequantize_4 = quantize_per_tensor_4.dequantize(); quantize_per_tensor_4 = None
    _1_conv2 = getattr(self, "1").conv2(dequantize_4); dequantize_4 = None
    _1_conv2_output_scale_0 = getattr(self, "1_conv2_output_scale_0")
    _1_conv2_output_zero_point_0 = getattr(self, "1_conv2_output_zero_point_0")
    quantize_per_tensor_5 = torch.quantize_per_tensor(_1_conv2, _1_conv2_output_scale_0, _1_conv2_output_zero_point_0, torch.qint8); _1_conv2 = _1_conv2_output_scale_0 = _1_conv2_output_zero_point_0 = None
    dequantize_5 = quantize_per_tensor_5.dequantize(); quantize_per_tensor_5 = None
    add_1 = dequantize_5 + dequantize_3; dequantize_5 = dequantize_3 = None
    _1_relu_1 = getattr(self, "1").relu(add_1); add_1 = None
    _1_relu_output_scale_0 = getattr(self, "1_relu_output_scale_0")
    _1_relu_output_zero_point_0 = getattr(self, "1_relu_output_zero_point_0")
    quantize_per_tensor_6 = torch.quantize_per_tensor(_1_relu_1, _1_relu_output_scale_0, _1_relu_output_zero_point_0, torch.qint8); _1_relu_1 = _1_relu_output_scale_0 = _1_relu_output_zero_point_0 = None
    dequantize_6 = quantize_per_tensor_6.dequantize(); quantize_per_tensor_6 = None
    return dequantize_6
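
Every floating-point op in the listing above is wrapped in a quantize_per_tensor / dequantize pair, i.e. the reference quantized pattern: the convolutions, the residual add, and the relu still run in fp32, while each q/dq pair snaps the activation onto the int8 grid using the observed scale and zero_point attributes. A minimal sketch of what a single such pair does numerically (the tensor values, scale, and zero_point below are made up for illustration):

import torch

x = torch.tensor([0.53, -0.31, 1.27])
scale, zero_point = 0.1, 0                        # hypothetical quantization parameters
qx = torch.quantize_per_tensor(x, scale, zero_point, torch.qint8)
print(qx.int_repr())    # tensor([ 5, -3, 13], dtype=torch.int8): round(x / scale) + zero_point
print(qx.dequantize())  # tensor([ 0.5000, -0.3000,  1.3000]): back to fp32, rounded to the int8 grid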
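
For context, a forward in this shape is what FX graph mode quantization emits in reference mode: quantizable ops stay as fp32 modules surrounded by q/dq pairs, so a backend lowering pass can fuse them into quantized kernels later. The sketch below is not the original script behind this gist. The two-block module structure is inferred from the generated names (getattr(self, "0").conv1, conv2, relu plus the skip add), the qconfig is a stand-in (get_default_qconfig("fbgemm") uses quint8 activations, while the config used for the gist evidently produced qint8), and the prepare_fx / convert_fx signatures vary across PyTorch releases (newer ones require example_inputs and expose convert_to_reference_fx instead of is_reference=True).

import torch
from torch.quantization import get_default_qconfig
from torch.quantization.quantize_fx import prepare_fx, convert_fx

class BasicBlock(torch.nn.Module):
    # Hypothetical block matching the generated code: relu(conv2(conv1(x)) + x)
    def __init__(self, channels=3):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(channels, channels, 3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels, 3, padding=1)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        y = self.conv2(self.conv1(x))
        return self.relu(y + x)  # residual add feeding relu, as in the trace above

model = torch.nn.Sequential(BasicBlock(), BasicBlock()).eval()

qconfig_dict = {"": get_default_qconfig("fbgemm")}   # stand-in qconfig (see note above)
prepared = prepare_fx(model, qconfig_dict)           # insert observers at each activation
prepared(torch.randn(1, 3, 8, 8))                    # calibrate so scales/zero_points get recorded
reference = convert_fx(prepared, is_reference=True)  # keep fp32 ops, add quantize/dequantize pairs
print(reference.code)                                # prints a forward in the style shown above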