Run `benchmark.py` to reproduce the results.

To add a new benchmark, add a `BenchmarkConfig` to `BENCHMARK_CONFIGS` in `configs.py`.
import numpy as np | |
def haversine_distance_matrix(lats, longs):
    """Compute the pairwise great-circle distance matrix on the unit sphere.

    Parameters
    ----------
    lats, longs : array-like of float
        Latitudes and longitudes in degrees, one entry per point.

    Returns
    -------
    numpy.ndarray of shape (n, n)
        Central angle in radians between every pair of points. Multiply by
        the sphere's radius (e.g. ~6371 km for Earth) to get distances.
    """
    # Column vs. row vectors so all pairwise differences/products broadcast
    # to an (n, n) matrix.
    lats = np.radians(np.asarray(lats).reshape(-1, 1))
    longs = np.radians(np.asarray(longs).reshape(1, -1))
    # Haversine form: hav(theta) = sin^2(theta / 2). Mathematically equal to
    # (1 - cos(theta)) / 2 used previously, but numerically stabler for the
    # small angular separations of nearby points (avoids cancellation in
    # 1 - cos(theta) when theta is tiny).
    hav_dlat = np.sin((lats - lats.T) / 2.0) ** 2
    hav_dlong = np.sin((longs - longs.T) / 2.0) ** 2
    a = hav_dlat + np.cos(lats) * np.cos(lats.T) * hav_dlong
    # Clip guards against floating-point overshoot slightly above 1.0,
    # which would make sqrt(...) exceed arcsin's domain.
    return 2.0 * np.arcsin(np.sqrt(np.clip(a, 0.0, 1.0)))
import unittest | |
from concurrent.futures import ThreadPoolExecutor | |
def multi_threaded(*, threads=2): | |
def decorator(test_cls): | |
for name, test_fn in test_cls.__dict__.copy().items(): | |
if not (name.startswith("test") and callable(test_fn)): | |
continue |
from typing import Any | |
from metadata_filter import MetadataFilter, MetadataFilterOperator | |
# https://docs.trychroma.com/usage-guide#using-where-filters | |
OPERATOR_MAP = { | |
MetadataFilterOperator.AND: "$and", | |
MetadataFilterOperator.OR: "$or", | |
MetadataFilterOperator.EQ: "$eq", | |
MetadataFilterOperator.NE: "$ne", |
import functools | |
import pathlib | |
import sys | |
import libcst as cst | |
def main(root): | |
root = pathlib.Path(root) | |
fn = functools.partial(remove_annotations, annotations_remover=AnnotationRemover()) |
import pathlib | |
from urllib.parse import urlparse | |
import requests | |
import tqdm | |
def download(url, root=".", *, name=None, chunk_size=32 * 1024): | |
root = pathlib.Path(root) | |
root.mkdir(exist_ok=True, parents=True) |
import os | |
import pathlib | |
import re | |
import shlex | |
import subprocess | |
import sys | |
def main(): | |
if git("status", "--porcelain"): |
import itertools | |
from typing import Collection | |
import networkx as nx | |
# overwrite this with the array API that you want to test | |
import numpy as array_api | |
def maybe_add_dtype( |
This is a short post about why I think it would be beneficial for PyTorch to not only use pytest
as test runner, but also rely on the other features it provides.
My experience with the PyTorch test suite is limited as of now. Thus, it might very well be that my view on things is too naive. In that case, I'm happy to hear about examples where an adoption of pytest
would make a use case significantly harder or outright impossible.