# TORCHINDUCTOR_FREEZING=1 TORCH_LOGS="+output_code" numactl -C 56-111 -m 1 python test.py
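# TORCHINDUCTOR_FREEZING=1 enables weight freezing (constant folding) in Inductor,
# TORCH_LOGS="+output_code" dumps the generated kernels, and numactl pins the run
# to CPU cores 56-111 with memory allocated on NUMA node 1.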
import torch
import time
import random
import numpy as np

local_seed = 2024
torch.manual_seed(local_seed)    # Set PyTorch seed
np.random.seed(seed=local_seed)  # Set NumPy seed
random.seed(local_seed)          # Set the Python seed

dtype = torch.float32
autocast = dtype == torch.bfloat16
class M(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input):
        relu = torch.relu(input)
        output = torch.sum(relu, -1).atan()
        return output
if __name__ == "__main__":
    with torch.no_grad():
        m = M().eval().to("xpu")
        input = torch.randn(1, 1024).to(dtype).to("xpu")

        # Compiler path
        # with torch.autocast(device_type="cpu", dtype=dtype, enabled=autocast):
        ref_res = m(input)
        # c_m = torch.compile(m)
        # inductor_res = c_m(input)
        # print(torch.allclose(ref_res, inductor_res, rtol=1e-3, atol=1e-3), flush=True)

        res2 = torch.ops.aten.relu(input)
        input2 = torch.randn(1, 1024).to(dtype).to("xpu")
        # res = torch.ops.aten.mm(input, input2)
        ref_res = input + input2
        res = torch.ops.onednn.test_ll(input, input2)
        # res_cpu = res.to("cpu")
        print("ref_res is: {}".format(ref_res), flush=True)
        print("res is: {}".format(res), flush=True)
        torch.testing.assert_close(ref_res, res)
        print("---- Done ----", flush=True)