dequantize_per_tensor triton graph
===== __compiled_fn_11 =====
<eval_with_key>.128 class GraphModule(torch.nn.Module):
    def forward(self, L_int_repr_ : torch.Tensor):
        l_int_repr_ = L_int_repr_

        # File: /fsx/users/cdhernandez/protoquant/ao_experimental/quant_primitives.py:118, code: return (int_repr.to(out_dtype) - zero_point) * scale
        to = l_int_repr_.to(torch.float64);  l_int_repr_ = None
        sub = to - 9;  to = None
        mul = sub * 34.638118489583334;  sub = None
        return (mul,)
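For context, the traced line from quant_primitives.py implements a per-tensor affine dequantize. Below is a minimal sketch of what such a function might look like; the signature, parameter names, and default dtype are assumptions, and only the returned expression `(int_repr.to(out_dtype) - zero_point) * scale` actually appears in the trace above.

import torch

def dequantize_per_tensor(int_repr: torch.Tensor, scale: float, zero_point: int,
                          out_dtype: torch.dtype = torch.float64) -> torch.Tensor:
    # Per-tensor affine dequantize: cast, shift by the zero point, then rescale.
    # This mirrors the expression Dynamo traced at quant_primitives.py:118.
    return (int_repr.to(out_dtype) - zero_point) * scale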
[2023-05-10 01:11:47,473] torch._dynamo.output_graph.__graph: [DEBUG] TRACED GRAPH
 __compiled_fn_11 <eval_with_key>.128
opcode         name         target                   args                          kwargs
-------------  -----------  -----------------------  ----------------------------  --------
placeholder    l_int_repr_  L_int_repr_              ()                            {}
call_method    to           to                       (l_int_repr_, torch.float64)  {}
call_function  sub          <built-in function sub>  (to, 9)                       {}
call_function  mul          <built-in function mul>  (sub, 34.638118489583334)     {}
output         output       output                   ((mul,),)                     {}
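A hedged sketch of how a trace like this could be reproduced: the scale and zero point are plain Python scalars, so Dynamo constant-folds them into the graph and the only graph input is the integer tensor (L_int_repr_). The zero point 9 and scale 34.638118489583334 are taken from the trace; the tensor shape, function name, and logging setup below are assumptions.

import torch

# Constants burned into the graph above; the input shape here is arbitrary.
zero_point = 9
scale = 34.638118489583334

@torch.compile
def dequant(int_repr):
    # Matches the traced expression: cast to float64, subtract zero_point, multiply by scale.
    return (int_repr.to(torch.float64) - zero_point) * scale

x = torch.randint(-128, 127, (1024,), dtype=torch.int8)
out = dequant(x)  # compiling this should produce a graph equivalent to __compiled_fn_11

To surface the TRACED GRAPH debug output shown above, one option is the TORCH_LOGS="graph" environment variable on newer PyTorch builds, or torch._dynamo.config.log_level = logging.DEBUG on 2.0-era builds; the exact knob depends on the PyTorch version.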