gpu tensor test
# Element-wise multiply: once on CPU as a reference, then repeatedly on GPU.
import torch
from tqdm import tqdm

print(torch.cuda.is_available())

t_cpu = torch.zeros(10000, 10000)
t_cpu2 = torch.zeros(10000, 10000)
tt_cpu = t_cpu * t_cpu2

t_gpu = t_cpu.cuda()
t_gpu2 = t_cpu2.cuda()
with torch.no_grad():
    for i in tqdm(range(10000)):
        tt_gpu = t_gpu * t_gpu2
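If the loop above is meant to time GPU throughput, note that CUDA kernels are launched asynchronously, so a plain Python loop mostly measures launch overhead. A minimal synchronized-timing sketch (the shapes and iteration count are illustrative, not taken from the script above):

import time
import torch

a = torch.zeros(10000, 10000, device="cuda")
b = torch.zeros(10000, 10000, device="cuda")

torch.cuda.synchronize()           # make sure setup work has finished
start = time.perf_counter()
for _ in range(100):
    c = a * b                      # kernels are queued asynchronously
torch.cuda.synchronize()           # wait for all queued kernels to complete
print(f"elapsed: {time.perf_counter() - start:.3f} s")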
# Faster R-CNN (torchvision): one training step on random data, then repeated inference on GPU.
import torch
import torchvision
from tqdm import tqdm

model = torchvision.models.detection.fasterrcnn_resnet50_fpn()

# For training: random images and boxes in [x1, y1, x2, y2] format (x2 > x1, y2 > y1).
images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4)
boxes[:, :, 2:4] = boxes[:, :, 0:2] + boxes[:, :, 2:4]
labels = torch.randint(1, 91, (4, 11))
images = list(image for image in images)
targets = []
for i in range(len(images)):
    d = {}
    d['boxes'] = boxes[i]
    d['labels'] = labels[i]
    targets.append(d)
output = model(images, targets)

# For inference
model.eval()
x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
predictions = model(x)

# Repeat the forward pass on GPU.
x = [_x.cuda() for _x in x]
model = model.cuda()
for i in tqdm(range(1000)):
    predictions = model(x)
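Since the GPU loop above only runs forward passes, one optional variant (an assumption about intent, not part of the original script) is to wrap it in torch.no_grad() so autograd bookkeeping is skipped; `model` and `x` below refer to the objects defined in the script above.

import torch
from tqdm import tqdm

with torch.no_grad():              # skip autograd bookkeeping during inference
    for _ in tqdm(range(1000)):
        predictions = model(x)     # model and x come from the script above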
# ResNet-50 classification: compare CPU and GPU outputs, then check GPU stability over repeated runs.
# Image: https://github.com/pytorch/vision/blob/main/test/assets/encode_jpeg/grace_hopper_517x606.jpg
from tqdm import tqdm
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights

img = read_image("grace_hopper_517x606.jpg")

# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights)
model.eval()

# Step 2: Initialize the inference transforms
preprocess = weights.transforms()

# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)

# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
print(f"{category_name}: {100 * score:.1f}%")

# Compare the raw CPU output against the GPU output.
output_cpu = model(batch)
model_cuda = model.cuda()
batch_cuda = batch.cuda()
output_cuda = model_cuda(batch_cuda)
# print(output_cpu)
# print(output_cuda)
print(output_cpu.allclose(output_cuda.cpu(), atol=1e-05))

# Repeat the GPU forward pass and flag any run that drifts from the CPU result.
for i in tqdm(range(1000)):
    output_cuda = model_cuda(batch_cuda)
    if not output_cpu.allclose(output_cuda.cpu(), atol=1e-05):
        print(output_cpu - output_cuda.cpu())
        print("ERROR")
        break
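A note on the comparison above: CPU and GPU use different kernels and reduction orders, so exact equality is not expected and a tolerance is needed. If a more detailed diagnostic is wanted, torch.testing.assert_close reports the largest absolute and relative differences when the check fails; a small sketch that reuses `output_cpu` and `output_cuda` from the script above (tolerances are illustrative):

import torch

# Raises an AssertionError describing the largest mismatch if the
# tensors differ beyond the given tolerances.
torch.testing.assert_close(output_cuda.cpu(), output_cpu, rtol=1e-4, atol=1e-5)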
# FCN ResNet-50 segmentation: compare CPU output with GPU output, then check GPU stability across runs.
# Image: https://github.com/pytorch/vision/blob/main/gallery/assets/dog1.jpg
import torch
import torchvision
from tqdm import tqdm

print(torch.__version__)
print(torchvision.__version__)

from torchvision.io.image import read_image
from torchvision.models.segmentation import fcn_resnet50, FCN_ResNet50_Weights
from torchvision.transforms.functional import to_pil_image

img = read_image("dog1.jpg")

# Step 1: Initialize model with the best available weights
weights = FCN_ResNet50_Weights.DEFAULT
model = fcn_resnet50(weights=weights)
model.eval()

# Step 2: Initialize the inference transforms
preprocess = weights.transforms()

# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)

# Step 4: Use the model and visualize the prediction
prediction = model(batch)["out"]
normalized_masks = prediction.softmax(dim=1)
class_to_idx = {cls: idx for (idx, cls) in enumerate(weights.meta["categories"])}
mask = normalized_masks[0, class_to_idx["dog"]]
# to_pil_image(mask).show()

with torch.no_grad():
    # Reference GPU output, compared against the CPU prediction.
    model_cuda = model.cuda()
    batch_cuda = batch.cuda()
    prediction_cuda_ref = model_cuda(batch_cuda)["out"]
    print((prediction - prediction_cuda_ref.cpu()).abs().max())

    # Repeat the GPU forward pass and flag any run that differs from the reference.
    for i in tqdm(range(1000)):
        prediction_cuda = model_cuda(batch_cuda)["out"]
        if not prediction_cuda_ref.allclose(prediction_cuda):
            print((prediction_cuda_ref - prediction_cuda).abs().max())
            print("ERROR")
            break
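If the repeated GPU runs above do turn up run-to-run differences, one optional way to narrow them down (not part of the original script) is to force deterministic algorithms before building the model; some operations will then raise an error if no deterministic implementation is available.

import torch

torch.backends.cudnn.benchmark = False       # disable autotuned, possibly varying kernels
torch.backends.cudnn.deterministic = True    # prefer deterministic cuDNN kernels
torch.use_deterministic_algorithms(True)     # error out on non-deterministic ops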