Example benchmarking code for librosa's pyin function, comparing the built-in Viterbi decoder against torbi backends (single-threaded CPU, multi-threaded CPU, and GPU).
import os

os.environ['NUMBA_WARNINGS'] = '1'
os.environ['NUMBA_DISABLE_JIT'] = '0'  # Keep JIT enabled
os.environ['NUMBA_CACHE_DIR'] = ''     # Disable caching to force recompilation

from numba import config
config.NUMBA_DUMP_ANNOTATION = True

# set this before importing librosa
os.environ['NUMBA_NUM_THREADS'] = '4'

import librosa
import timeit
from matplotlib import pyplot as plt
import torch
import torbi
import numpy as np
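
# Optional sanity check: report how many threads Numba will actually use,
# so the NUMBA_NUM_THREADS setting above can be verified before benchmarking
# (numba.get_num_threads is available in numba >= 0.49).
import numba
print(f"Numba threads in use: {numba.get_num_threads()}")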
def torbi_single_thread(observation_probs, transition, p_init):
    # torbi takes observations with the state axis last, so transpose
    # (batch, states, frames) -> (batch, frames, states)
    return torbi.from_probabilities(
        torch.from_numpy(observation_probs.transpose(0, 2, 1).astype(np.float32)),
        transition=torch.from_numpy(transition.astype(np.float32)),
        initial=torch.from_numpy(p_init.astype(np.float32)),
        num_threads=1,
        log_probs=False).numpy()


def torbi_multithreaded(observation_probs, transition, p_init):
    return torbi.from_probabilities(
        torch.from_numpy(observation_probs.transpose(0, 2, 1).astype(np.float32)),
        transition=torch.from_numpy(transition.astype(np.float32)),
        initial=torch.from_numpy(p_init.astype(np.float32)),
        num_threads=16,
        log_probs=False).numpy()


def torbi_gpu(observation_probs, transition, p_init):
    if not torch.cuda.is_available():
        raise ValueError("CUDA is not available")
    device = torch.device("cuda:0")
    return torbi.from_probabilities(
        torch.from_numpy(observation_probs.transpose(0, 2, 1).astype(np.float32)).to(device),
        transition=torch.from_numpy(transition.astype(np.float32)).to(device),
        initial=torch.from_numpy(p_init.astype(np.float32)).to(device),
        gpu=0,
        log_probs=False).cpu().numpy()
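
# Optional correctness check (a sketch, not part of the timing): decode a small
# random HMM with librosa's Viterbi and the single-threaded torbi wrapper and
# report how often the decoded states agree. This assumes torbi returns state
# indices shaped like librosa's output, i.e. (batch, n_steps), as the wrappers
# above imply.
def sanity_check_viterbi(n_states=8, n_steps=50, seed=0):
    rng = np.random.default_rng(seed)
    probs = rng.random((1, n_states, n_steps))  # (batch, states, frames)
    transition = librosa.sequence.transition_local(n_states, width=3)
    p_init = np.full(n_states, 1.0 / n_states)
    ref = librosa.sequence.viterbi(probs, transition, p_init=p_init)
    test = torbi_single_thread(probs, transition, p_init)
    print(f"librosa vs torbi state agreement: {np.mean(ref == test):.3f}")

sanity_check_viterbi()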
n_fft = 2048
hop_length = 512

# durations = [10, 30, 60, 120, 240]
durations = [10]

methods = [librosa.sequence.viterbi, torbi_single_thread, torbi_multithreaded, torbi_gpu]

# Warmup to trigger JIT compilation for viterbi
print("Warming up JIT compilation...")
y_warmup, sr_warmup = librosa.load('deantown.mp3', duration=5)
_ = librosa.pyin(y_warmup, fmin=librosa.note_to_hz('E0'), fmax=librosa.note_to_hz('C6'))
print("JIT compilation complete\n")
# Now print parallel diagnostics
from librosa.sequence import _viterbi
print("=== Numba Parallel Diagnostics ===")
_viterbi.parallel_diagnostics(level=4)
print("===================================\n")
for method in methods:
    timings = []
    for duration in durations:
        y, sr = librosa.load('deantown.mp3', duration=duration)
        pyin_time = min(timeit.repeat(
            lambda: librosa.pyin(
                y,
                fmin=librosa.note_to_hz('E0'),
                fmax=librosa.note_to_hz('C6'),
                frame_length=n_fft,
                hop_length=hop_length,
                viterbi_function=method
            ),
            repeat=1,  # increase to 3 to take the best of 3 runs
            number=1
        ))
        timings.append(pyin_time)
        print(f"{method.__name__} timeit result for {duration} seconds: {pyin_time:.4f} seconds")
    plt.plot(durations, timings, label=method.__name__)

plt.legend()
plt.xlabel('Input Duration (s)')
plt.ylabel('Execution Time (s)')
plt.title('pyin timing comparison')
plt.savefig('pyin_timing_comparison.png')
plt.show()
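
# Optional follow-up (a sketch): beyond timing, check that the alternative
# Viterbi backends give the same f0 track from pyin as the default decoder.
# f0 is NaN for unvoiced frames, hence equal_nan=True. This reuses the same
# viterbi_function keyword as the benchmark above.
y_check, _ = librosa.load('deantown.mp3', duration=10)
f0_ref, _, _ = librosa.pyin(y_check, fmin=librosa.note_to_hz('E0'),
                            fmax=librosa.note_to_hz('C6'),
                            frame_length=n_fft, hop_length=hop_length,
                            viterbi_function=librosa.sequence.viterbi)
for method in [torbi_single_thread, torbi_multithreaded]:
    f0_alt, _, _ = librosa.pyin(y_check, fmin=librosa.note_to_hz('E0'),
                                fmax=librosa.note_to_hz('C6'),
                                frame_length=n_fft, hop_length=hop_length,
                                viterbi_function=method)
    print(f"{method.__name__} matches default viterbi: "
          f"{np.allclose(f0_ref, f0_alt, equal_nan=True)}")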