Skip to content

Instantly share code, notes, and snippets.

@deeperlearner
Created February 20, 2021 13:13
Show Gist options
  • Save deeperlearner/792bbcdef204e8e110c03d559af8f5da to your computer and use it in GitHub Desktop.
torch_gpu_test.py
"""Diagnostic script: print Python / PyTorch / CUDA / cuDNN environment info."""
import sys
from subprocess import call  # used by the optional nvidia-smi query below

import torch


def main():
    """Print interpreter, PyTorch, and CUDA/cuDNN diagnostics to stdout."""
    print('__Python VERSION:', sys.version)
    print('__pyTorch VERSION:', torch.__version__)
    print("Whether CUDA is available:", torch.cuda.is_available())
    # torch.version.cuda is the CUDA toolkit version this PyTorch build was
    # compiled against (None on CPU-only builds). The original print emitted
    # no value at all here.
    print('__CUDA VERSION:', torch.version.cuda)
    # torch.backends.cudnn.version() returns None when cuDNN is unavailable,
    # so this is safe to call on any build.
    print('__CUDNN VERSION:', torch.backends.cudnn.version())
    # device_count() returns 0 (does not raise) on CPU-only machines.
    print('__Number CUDA Devices:', torch.cuda.device_count())
    print('__Devices')
    # Uncomment to list GPUs via nvidia-smi (requires NVIDIA driver tools):
    # call(["nvidia-smi", "--format=csv",
    #       "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
    print('CUDA device')
    if torch.cuda.is_available():
        # current_device() and tensor-to-GPU transfers raise on CPU-only
        # machines, so everything CUDA-touching stays inside this guard
        # (the original called current_device() before the check).
        print('Active CUDA Device: GPU', torch.cuda.current_device())
        device = torch.device('cuda')
        var = torch.Tensor([1.0])
        print(var.cuda())
        print(var.to(device))  # <- preferred over .cuda()


if __name__ == "__main__":
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment