```python
import torch
import torch._dynamo as torchdynamo
from torch.ao.quantization import (
    get_default_qconfig,
    QConfigMapping,
)
from torch.ao.quantization._quantize_pt2e import prepare_pt2e, convert_pt2e


class Mod(torch.nn.Module):
    def __init__(self) -> None:
```
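The gist preview above cuts off inside `Mod.__init__`. For context, here is a minimal end-to-end sketch of the flow these imports set up, assuming the early-2023 prototype signature `prepare_pt2e(model, qconfig_mapping, example_inputs, backend_config)` (this API has since changed); the module body and shapes below are illustrative, not the original gist's:

```python
import torch
import torch._dynamo as torchdynamo
from torch.ao.quantization import get_default_qconfig, QConfigMapping
from torch.ao.quantization._quantize_pt2e import prepare_pt2e, convert_pt2e
from torch.ao.quantization.backend_config import get_qnnpack_backend_config


class Mod(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(8, 8)  # illustrative layer, not from the gist

    def forward(self, x):
        return self.linear(x)


example_inputs = (torch.randn(1, 8),)
# Capture an ATen-level graph with the 2.0-era dynamo export API.
m, guards = torchdynamo.export(Mod().eval(), *example_inputs, aten_graph=True)
qconfig_mapping = QConfigMapping().set_global(get_default_qconfig("qnnpack"))
# Prototype signature (assumption): qconfig mapping + example inputs + backend config.
m = prepare_pt2e(m, qconfig_mapping, example_inputs, get_qnnpack_backend_config())
m(*example_inputs)   # calibration pass
m = convert_pt2e(m)  # rewrite observers into quantize/dequantize ops
```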
```python
import torch
from torch import _dynamo, _inductor
import logging
import numpy as np
import random
from torch._inductor import codecache, config, metrics, test_operators
import torch.ao.quantization.fx._decomposed  # registers the quantized_decomposed ops

torch._dynamo.config.verbose = True
```
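Beyond `verbose`, scripts like this usually flip a few more debug switches; the flag names below reflect the 2.0-era configs and should be treated as assumptions (newer releases route most of this through `torch._logging`):

```python
import torch
import torch._dynamo
import torch._inductor.config as inductor_config

torch._dynamo.config.verbose = True  # detailed graph-break and guard reporting
inductor_config.debug = True         # assumption: dumps the code Inductor generates
```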
```python
import torch
import torchvision
import torch._dynamo as torchdynamo
import copy
from torch.ao.quantization._pt2e.quantizer import (
    QNNPackQuantizer,
)
from torch.ao.quantization._quantize_pt2e import (
    convert_pt2e,
    prepare_qat_pt2e_quantizer,
)
```
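Again the preview is truncated; here is a hedged sketch of the QAT variant this snippet sets up, with signatures inferred from the prototype imports (in particular, `get_symmetric_quantization_config` lived in the `qnnpack_quantizer` module at the time, and its `is_qat` keyword is an assumption):

```python
import torch
import torchvision
import torch._dynamo as torchdynamo
from torch.ao.quantization._pt2e.quantizer import QNNPackQuantizer
from torch.ao.quantization._pt2e.quantizer.qnnpack_quantizer import (
    get_symmetric_quantization_config,
)
from torch.ao.quantization._quantize_pt2e import (
    convert_pt2e,
    prepare_qat_pt2e_quantizer,
)

model = torchvision.models.resnet18().train()
example_inputs = (torch.randn(1, 3, 224, 224),)
m, guards = torchdynamo.export(model, *example_inputs, aten_graph=True)

quantizer = QNNPackQuantizer()
quantizer.set_global(get_symmetric_quantization_config(is_qat=True))  # is_qat kwarg: assumption
m = prepare_qat_pt2e_quantizer(m, quantizer)  # inserts fake-quant observers for QAT
m(*example_inputs)                            # stand-in for the real training loop
m = convert_pt2e(m)
```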
```
Error Message:

Traceback (most recent call last)
/home/lesliefang/pytorch_1_7_1/torch_inductor/torch_script/inductor/int8/test_dynamo_export_none_full_graph/test_Llama_dynamo_export.py:43 in <module>

  40 │   return
  41
  42 if __name__ == "__main__":
❱ 43 │   test_Llama()
  44
```
```
graph(%self : __torch__.torch.fx.graph_module.___torch_mangle_1.GraphModule,
      %x : Tensor):
  %self._param_constant160 : Float(1000, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value=<Tensor>]()
  %self._param_constant156_bias : Float(2048, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value=<Tensor>]()
  %self._param_constant153_bias : Float(512, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value=<Tensor>]()
  %self._param_constant150_bias : Float(512, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value=<Tensor>]()
  %self._param_constant147_bias : Float(2048, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value=<Tensor>]()
  %self._param_constant144_bias : Float(512, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value=<Tensor>]()
  %self._param_constant141_bias : Float(512, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value=<Tensor>]()
  %self._param_constant138_bias : Float(2048, strides=[1], requires_grad=0,
```
```
graph(%self.1 : __torch__.intel_extension_for_pytorch.quantization._quantize_utils.___torch_mangle_194.QuantizationDispatchModule,
      %x : Tensor):
  %2 : float = prim::Constant[value=0.020705882459878922]() # /home/lesliefang/pytorch_1_7_1/quantization/frameworks.ai.pytorch.private-cpu/torch/_tensor.py:1298:0
  %3 : int = prim::Constant[value=0]() # /home/lesliefang/pytorch_1_7_1/quantization/frameworks.ai.pytorch.private-cpu/torch/_tensor.py:1298:0
  %4 : int = prim::Constant[value=12]() # /home/lesliefang/pytorch_1_7_1/quantization/frameworks.ai.pytorch.private-cpu/torch/_tensor.py:1298:0
  %5 : int = prim::Constant[value=1]() # /home/lesliefang/pytorch_1_7_1/quantization/frameworks.ai.pytorch.private-cpu/torch/_tensor.py:1298:0
  %6 : int[] = prim::Constant[value=[1, 1]]()
  %7 : int = prim::Constant[value=-1]() # /home/lesliefang/pytorch_1_7_1/quantization/frameworks.ai.pytorch.private-cpu/torch/_tensor.py:1298:0
  %ret.1 : Tensor = aten::quantize_per_tensor(%x, %2, %3, %4) # /home/lesliefang/pytorch_
```
```python
def forward(self, x):
    arg0, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
    _scale_0 = self._scale_0
    _zero_point_0 = self._zero_point_0
    quantize_per_tensor = torch.ops.quantized_decomposed.quantize_per_tensor(arg0, _scale_0, _zero_point_0, 0, 255, torch.uint8);  arg0 = None
    dequantize_per_tensor = torch.ops.quantized_decomposed.dequantize_per_tensor(quantize_per_tensor, _scale_0, _zero_point_0, 0, 255, torch.uint8);  quantize_per_tensor = _scale_0 = _zero_point_0 = None
    _param_constant0 = self._param_constant0
    conv_scale_0 = self.conv_scale_0
    conv_zero_point_0 = self.conv_zero_point_0
    quantize_per_channel = torch.ops.quantized_decomposed.quantize_per_channel(_param_constant0, conv_scale_0, conv_zero_point_0, 0, -128, 127, torch.int8);  _param_constant0 = None
```
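For reference, the two decomposed ops in the graph above implement the standard affine quantization formulas q = clamp(round(x / s) + z, qmin, qmax) and x̂ = (q − z) · s; a plain-tensor restatement of that math:

```python
import torch


def quantize_per_tensor_ref(x, scale, zero_point, qmin, qmax, dtype):
    # q = clamp(round(x / scale) + zero_point, qmin, qmax), stored in dtype
    q = torch.clamp(torch.round(x / scale) + zero_point, qmin, qmax)
    return q.to(dtype)


def dequantize_per_tensor_ref(q, scale, zero_point):
    # x_hat = (q - zero_point) * scale, back in float
    return (q.to(torch.float32) - zero_point) * scale


x = torch.randn(4)
q = quantize_per_tensor_ref(x, 0.02, 128, 0, 255, torch.uint8)
x_hat = dequantize_per_tensor_ref(q, 0.02, 128)  # x reconstructed up to rounding error
```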
```python
import torch
import torch.nn as nn
import torch._dynamo as torchdynamo
import copy
from torch.ao.quantization._quantize_pt2e import (
    convert_pt2e,
)
from torch._inductor.compile_fx import compile_fx
import torch.ao.quantization._pt2e.quantizer.qnnpack_quantizer as qq
```
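`compile_fx` is Inductor's internal entry point (the machinery behind `torch.compile`): it takes an FX `GraphModule` plus a list of example inputs and returns a compiled callable. A minimal self-contained sketch, independent of the quantization flow, showing just that calling convention (internal API, subject to change):

```python
import torch
import torch.fx
from torch._inductor.compile_fx import compile_fx


class Net(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) + 1.0


gm = torch.fx.symbolic_trace(Net())        # any FX GraphModule works here
example_inputs = [torch.randn(8)]          # compile_fx expects a list of tensors
compiled = compile_fx(gm, example_inputs)  # returns a compiled callable
print(compiled(torch.randn(8)))
```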
```python
import copy
import itertools
import operator
from typing import Callable, Dict, List, Optional, Set, Any

import torch
import torch._dynamo as torchdynamo
from torch.ao.quantization._pt2e.quantizer.utils import (
    _annotate_input_qspec_map,
    _annotate_output_qspec,
)
```