- torchvision.io.read_image() was consistently the fastest image reader among the libraries tested.
- OpenCV's cv2.imread() came in second, with competitive performance.
- PIL's Image.open() (pillow/pillow-simd) and pyspng's load() showed minimal variation in their reading speeds.
Profile memory usage with:

python -m memory_profiler test_perf_torchvision_PIL.py

Benchmark wall-clock time with hyperfine (installation: https://github.com/sharkdp/hyperfine?tab=readme-ov-file#installation):

hyperfine --runs 10 --warmup 3 --export-json results_PIL.json 'python test_perf.py' --show-output
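hyperfine can also benchmark several commands in one invocation and report them side by side; a sketch, assuming the per-library tests were split into separate scripts (the script names here are hypothetical):

hyperfine --runs 10 --warmup 3 --export-json results_all.json \
  'python test_perf_torchvision.py' \
  'python test_perf_pillow.py' \
  'python test_perf_cv2.py'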
I attempted to include pyspng in the evaluation but hit an installation error on my Mac (error: invalid argument '-std=c++17' not allowed with 'C'). Here's the code snippet I used for testing:
import time

import cv2
import pyspng  # required by test_pyspng_open(); see the installation note above
from torchvision import io
from PIL import Image
from memory_profiler import profile

@profile
def test_torchvision_read_image(AB_path):
    start_time = time.time()
    AB = io.read_image(AB_path, mode=io.ImageReadMode.RGB)  # tensor of shape (C, H, W)
    w = AB.shape[2]
    if w > AB.shape[1]:
        # Wide image: split into three equal vertical strips A | B1 | B2
        w2 = int(w / 3)
        A = AB[:, :, :w2]
        B1 = AB[:, :, w2:w2 * 2]
        B2 = AB[:, :, w2 * 2:]
    else:
        # Otherwise split in half; B2 duplicates B1
        w2 = int(w / 2)
        A = AB[:, :, :w2]
        B1 = AB[:, :, w2:]
        B2 = B1
    end_time = time.time()
    return end_time - start_time

@profile
def test_pillow_open(AB_path):
    start_time = time.time()
    AB = Image.open(AB_path).convert("RGB")
    w, h = AB.size
    if w > h:
        # Wide image: crop into three equal vertical strips A | B1 | B2
        w2 = int(w / 3)
        A = AB.crop((0, 0, w2, h))
        B1 = AB.crop((w2, 0, w2 * 2, h))
        B2 = AB.crop((w2 * 2, 0, w, h))
    else:
        # Otherwise crop in half; B2 duplicates B1
        w2 = int(w / 2)
        A = AB.crop((0, 0, w2, h))
        B1 = AB.crop((w2, 0, w, h))
        B2 = B1
    end_time = time.time()
    return end_time - start_time

@profile
def test_cv2_open(AB_path):
    start_time = time.time()
    # Read the image using cv2 (loads as BGR)
    AB = cv2.imread(AB_path)
    # Convert the image from BGR to RGB
    AB = cv2.cvtColor(AB, cv2.COLOR_BGR2RGB)
    # Get the dimensions of the image
    h, w, _ = AB.shape
    # Crop the image into A, B1, and B2
    if w > h:
        w2 = int(w / 3)
        A = AB[:, :w2, :]
        B1 = AB[:, w2:w2 * 2, :]
        B2 = AB[:, w2 * 2:, :]
    else:
        w2 = int(w / 2)
        A = AB[:, :w2, :]
        B1 = AB[:, w2:, :]
        B2 = B1  # In this case, B2 is the same as B1
    end_time = time.time()
    return end_time - start_time

@profile
def test_pyspng_open(AB_path):
    start_time = time.time()
    # Open and read the image using pyspng (PNG files only)
    with open(AB_path, 'rb') as f:
        image_data = f.read()
    AB = pyspng.load(image_data)
    # pyspng returns an RGB numpy array of shape (H, W, C) by default
    h, w, _ = AB.shape
    # Split the image into A, B1, and B2
    if w > h:
        w2 = int(w / 3)
        A = AB[:, :w2, :]
        B1 = AB[:, w2:w2 * 2, :]
        B2 = AB[:, w2 * 2:, :]
    else:
        w2 = int(w / 2)
        A = AB[:, :w2, :]
        B1 = AB[:, w2:, :]
        B2 = B1  # In this case, B2 is the same as B1
    end_time = time.time()
    return end_time - start_time

AB_path = '/Users/linh/Downloads/13.jpg'

# torchvision_time = test_torchvision_read_image(AB_path)
# pillow_time = test_pillow_open(AB_path)
# cv2_time = test_cv2_open(AB_path)
pyspng_time = test_pyspng_open(AB_path)  # NOTE: pyspng reads PNG only, so AB_path must point to a .png file

# print(f"Torchvision execution time: {torchvision_time:.6f} seconds")
# print(f"Pillow execution time: {pillow_time:.6f} seconds")
# print(f"CV2 execution time: {cv2_time:.6f} seconds")  # only valid when test_cv2_open() is uncommented above
print(f"Pyspng execution time: {pyspng_time:.6f} seconds")

# Run with:
# python -m memory_profiler test_perf_torchvision_PIL.py
# or benchmark with hyperfine:
# hyperfine --runs 10 --warmup 3 --export-json results_PIL.json 'python test_perf.py' --show-output
I'm working on an image-to-image translation task that uses aligned/paired images.
The dataset needs preprocessing: each original image is cropped into the relevant arrays (A, B1, B2 above).
I'm evaluating the image-reading performance of torchvision, PIL (pillow/pillow-simd), pyspng, and OpenCV.
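For context, here is a minimal sketch of how this read-and-crop step could slot into a paired-image training pipeline. The AlignedPairDataset class and the simple two-way A|B split are illustrative, not taken from the benchmark code above:

from torch.utils.data import Dataset
from torchvision import io

class AlignedPairDataset(Dataset):
    # Illustrative: each file stores input and target side by side (A|B)
    def __init__(self, paths):
        self.paths = paths

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        AB = io.read_image(self.paths[idx], mode=io.ImageReadMode.RGB)  # (C, H, W), uint8
        w2 = AB.shape[2] // 2
        A, B = AB[:, :, :w2], AB[:, :, w2:]  # left half = input, right half = target
        return A.float() / 255.0, B.float() / 255.0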
{ "results": [ { "command": "python test_perf.py", "mean": 1.77952651412, "stddev": 0.057127027268192264, "median": 1.79014369752, "user": 3.0064024, "system": 1.00602026, "min": 1.65052030102, "max": 1.84234846802, "times": [ 1.79769351002, 1.84234846802, 1.74460313502, 1.65052030102, 1.82460009302, 1.8066463850199999, 1.8362743020199999, 1.78259388502, 1.7589878020199998, 1.75099726002 ], "exit_codes": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] } ] }
{ "results": [ { "command": "python test_perf.py", "mean": 1.9844060049999999, "stddev": 0.12320930918698711, "median": 1.9422930507, "user": 3.43447846, "system": 0.52193058, "min": 1.8476288842000002, "max": 2.2078850512000003, "times": [ 1.8476288842000002, 1.8665154252000002, 1.9525924672000001, 2.1377185092, 2.2078850512000003, 1.9002486342, 1.8965385932000003, 1.9319936342, 2.0126511342, 2.0902877172000003 ], "exit_codes": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] } ] }
{ "results": [ { "command": "python test_perf.py", "mean": 1.98895220696, "stddev": 0.12288745169878267, "median": 1.94916674476, "user": 2.9436199200000006, "system": 1.0884397799999999, "min": 1.85365774426, "max": 2.2835599112600002, "times": [ 1.94305628626, 1.95982141126, 1.95527720326, 2.03957991126, 2.2835599112600002, 2.0901101192600002, 1.93371245326, 1.85365774426, 1.91040207726, 1.9203449522600002 ], "exit_codes": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] } ] }
{ "results": [ { "command": "python test_perf.py", "mean": 3.9306572294999995, "stddev": 0.06968444179557738, "median": 3.9307613428000003, "user": 2.69685648, "system": 5.907349099999999, "min": 3.8398317273, "max": 4.0507129723, "times": [ 3.9435819843, 3.9504188153, 3.8709359633, 3.8398317273, 4.0507129723, 4.0168790433, 3.8411159003, 3.9179407013, 3.9621448142999998, 3.9130103733 ], "exit_codes": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] } ] }
{ "results": [ { "command": "python test_perf.py", "mean": 4.469099234120001, "stddev": 0.178491116842775, "median": 4.398898084120001, "user": 2.8865086399999997, "system": 6.15982872, "min": 4.25842812312, "max": 4.774695454120001, "times": [ 4.35635358412, 4.39111814712, 4.375071896120001, 4.2585237971200005, 4.65614447412, 4.25842812312, 4.40667802112, 4.60087311512, 4.61310572912, 4.774695454120001 ], "exit_codes": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] } ] }
{ "results": [ { "command": "python test_perf.py", "mean": 4.13880534828, "stddev": 0.21410074987612554, "median": 4.16791923378, "user": 2.89865028, "system": 5.94443124, "min": 3.88241451678, "max": 4.50758704078, "times": [ 4.24572734178, 4.325509467780001, 4.25913931978, 4.50758704078, 4.09011112578, 4.27461837678, 3.88241451678, 3.90923503078, 3.98451698178, 3.90919428078 ], "exit_codes": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] } ] }
{ "results": [ { "command": "python test_perf.py", "mean": 4.43157526626, "stddev": 0.15422918778589978, "median": 4.39789203946, "user": 2.8499490400000003, "system": 6.0216226200000005, "min": 4.20492463946, "max": 4.70384628746, "times": [ 4.39946088746, 4.3963231914600005, 4.36751097946, 4.474174140460001, 4.70384628746, 4.20492463946, 4.48298366046, 4.28701032646, 4.3455761634600005, 4.65394238646 ], "exit_codes": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] } ] }