import torch

cuda = torch.device('cuda')    # default CUDA device; in our case an NVIDIA GeForce RTX 3090
cuda0 = torch.device('cuda:0') # explicitly selects the NVIDIA GeForce RTX 3090
cuda1 = torch.device('cuda:1') # explicitly selects the NVIDIA GeForce RTX 3080
Example:
x = torch.tensor([1., 2.], device=cuda0)
# x.device is device(type='cuda', index=0)
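Tensors can also be copied between devices after creation with .to(). A minimal sketch, assuming both GPUs listed above are present:

# Move an existing tensor to another device (assumes a second GPU, cuda:1, exists).
y = x.to('cuda:1')   # copy of x on the RTX 3080
z = x.to('cpu')      # copy of x back in host memory

# Operations require all operands on the same device, so move first, then compute.
w = y + x.to('cuda:1')
# w.device is device(type='cuda', index=1)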
available_gpus = [
    torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())
]
print(available_gpus)  # e.g. ['NVIDIA GeForce RTX 3090', 'NVIDIA GeForce RTX 3080']
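When the number of GPUs is not known ahead of time, a common device-agnostic pattern (a sketch, not tied to the specific setup above) is to fall back to the CPU if CUDA is unavailable:

# Pick the default CUDA device if one is visible, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Tensors (and modules) created afterwards can be placed on that device uniformly.
t = torch.zeros(3, device=device)
print(t.device)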
# Reference: https://pytorch.org/docs/stable/notes/cuda.html