Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Select an option

  • Save leslie-fang-intel/1f6f5249bdb270a21f05f02e6238cfd0 to your computer and use it in GitHub Desktop.

Select an option

Save leslie-fang-intel/1f6f5249bdb270a21f05f02e6238cfd0 to your computer and use it in GitHub Desktop.
# --- Reproducer setup: measure memory behavior of torch.compile (Inductor) freezing ---
import requests  # NOTE(review): unused in the visible code — confirm it is needed at all
import torch
print(torch.__version__)
import torch.nn as nn
import os, pickle  # NOTE(review): os/pickle also appear unused here — verify against the full file
import numpy as np
import torch._inductor.config as config
# Enable Inductor "freezing" (constant-folds parameters into the compiled graph);
# the second flag presumably discards the original parameter storage afterwards —
# confirm against torch._inductor.config documentation.
config.freezing = True
config.freezing_discard_parameters = True
# Disable all compilation caches so every run compiles from scratch,
# keeping the memory measurement independent of prior runs.
config.fx_graph_cache = False
config.fx_graph_remote_cache = False
config.autotune_local_cache = False
dynamic = True  # forwarded to torch.compile(dynamic=...) in the __main__ block
output_channels = 4096 * 1024  # 4,194,304 output features -> deliberately huge Linear weight
class M(torch.nn.Module):
    """Single-linear-layer module used as a memory-footprint reproducer.

    By default this builds the original, deliberately enormous
    ``Linear(1024, output_channels)`` (``output_channels`` is the
    module-level constant).  The dimensions are now parameters with
    backward-compatible defaults so small instances can be constructed
    for testing without allocating gigabytes.
    """

    def __init__(self, in_features: int = 1024, out_features=None):
        super().__init__()
        # Fall back to the module-level constant to preserve the original
        # zero-argument behavior exactly.
        if out_features is None:
            out_features = output_channels
        self.linear = torch.nn.Linear(in_features, out_features)

    def forward(self, attn_weights):
        # Plain affine projection; no activation or reshaping.
        return self.linear(attn_weights)
import time
import psutil
if __name__ == "__main__":
    # Pause so an external observer can attach before the baseline reading.
    time.sleep(30)
    print("Init psutil.virtual_memory() is: {}".format(psutil.virtual_memory()), flush=True)

    model_dtype = torch.bfloat16
    sample = torch.randn(2, 1024).to(model_dtype)

    # Inference-only: no autograd state, CPU autocast active.
    with torch.no_grad():
        with torch.autocast(device_type="cpu"):
            model = M().eval().to(model_dtype)
            print("After model create psutil.virtual_memory() is: {}".format(psutil.virtual_memory()), flush=True)

            compiled = torch.compile(model, dynamic=dynamic)
            compiled(sample)

            # Collect garbage, then wait before the post-run memory reading
            # so any deferred frees have a chance to land.
            import gc
            gc.collect()
            time.sleep(30)
            print("After first run psutil.virtual_memory() is: {}".format(psutil.virtual_memory()), flush=True)

    print("Done", flush=True)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment