Skip to content

Instantly share code, notes, and snippets.

@tcapelle
Created February 11, 2025 22:08
Show Gist options
  • Save tcapelle/87105b8cbee0964b6ae1cf4d1a6913ec to your computer and use it in GitHub Desktop.
from dataclasses import dataclass
import simple_parsing as sp
from typing import Literal
import modal
from modal import Image
from evals import EVALUATIONS_CONFIGS
from evals.runner import run_eval
GPU_TYPE = "L4"
# Create Modal app
app = modal.App("latency-evaluation")
HF_CACHE_DIR = "/hf-cache"
WANDB_CACHE_DIR = "/wandb-cache"
# Create Modal image with required dependencies
image = (Image.debian_slim()
.apt_install("git")
.pip_install_from_requirements("modal_requirements.txt")
.env({"HF_HUB_CACHE": HF_CACHE_DIR,
"WANDB_CACHE_DIR": WANDB_CACHE_DIR})
)
cache_volume = modal.Volume.from_name("hf-hub-cache", create_if_missing=True)
wandb_volume = modal.Volume.from_name("wandb-cache", create_if_missing=True)
@app.function(
image=image,
gpu=GPU_TYPE,
secrets=[modal.Secret.from_name("wandb-api-key")],
timeout=1800,
volumes={HF_CACHE_DIR: cache_volume, WANDB_CACHE_DIR: wandb_volume},
)
def run_eval_modal(model_cls, dataset_url, device, weave_project, debug):
run_eval(
model_cls=model_cls,
dataset_url=dataset_url,
device=device,
weave_project=weave_project,
debug=debug,
)
@app.local_entrypoint()
def main():
iterator = [
(eval_config.model_cls, eval_config.dataset_url, eval_config.device, eval_config.weave_project, True)
for eval_config in EVALUATIONS_CONFIGS.values()
]
for result in run_eval_modal.starmap(iterator):
print(result)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment