import torch
from torchvision.models import regnet_y_128gf


def run(model, iters: int = 20, bs: int = 64, device="cuda") -> float:
    # Warm up outside the timed region so one-time costs (cuDNN algorithm
    # selection, TorchScript profiling/optimization passes) do not skew the result.
    print("Warm up ...")
    with torch.no_grad():
        for i in range(5):
            model(torch.rand(bs, 3, 224, 224, device=device))
    print("Start benchmarking...")
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize()
    start.record()
    torch.cuda.synchronize()
    with torch.no_grad():
        for i in range(iters):
            model(torch.randn(bs, 3, 224, 224, device=device))
    torch.cuda.synchronize()
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / 1000  # elapsed_time is in milliseconds; return seconds


def run_eager_autocasting(with_cudnn_benchmark=True):
    # Eager mode: construct and benchmark the model directly under autocast.
    torch.backends.cudnn.benchmark = with_cudnn_benchmark
    with torch.cuda.amp.autocast(enabled=True, dtype=torch.half):
        torch_model = regnet_y_128gf()
        torch_model.eval()
        torch_model = torch_model.cuda()
        return run(torch_model)


def run_jit_autocasting(with_cudnn_benchmark=True):
    # TorchScript mode: enter autocast inside forward() so that it is
    # captured when the module is scripted.
    class Wrapper(torch.nn.Module):
        def __init__(self):
            super(Wrapper, self).__init__()
            self.model = regnet_y_128gf()

        def forward(self, x):
            with torch.cuda.amp.autocast(enabled=True, dtype=torch.half):
                return self.model(x)

    torch.backends.cudnn.benchmark = with_cudnn_benchmark
    torch_model = Wrapper().eval().cuda()
    scripted_mod = torch.jit.script(torch_model)
    return run(scripted_mod)


# Compare eager vs. scripted autocast with cuDNN benchmarking on and off.
for cudnn in [True, False]:
    print(" Test with cudnn =", cudnn)
    print("eager:", run_eager_autocasting(cudnn))
    print("jit:", run_jit_autocasting(cudnn))