litellm_to_aider.py
#!/usr/bin/env python3.12
"""
LiteLLM to Aider Configuration Generator

This tool generates configuration files that let Aider work seamlessly with
LiteLLM models. It can generate both model settings (YAML) and model metadata
(JSON) files.

The generated configurations handle model names in the format
"litellm/provider/model" by using Aider's extra_params feature to pass the
proper model name format to LiteLLM.

Usage with uv:
    uv run ./litellm_to_aider.py settings   # Generate model settings YAML
    uv run ./litellm_to_aider.py metadata   # Generate model metadata JSON
    uv run ./litellm_to_aider.py all        # Generate both files

Example (global options such as --providers must come before the subcommand):
    uv run ./litellm_to_aider.py --providers openai,anthropic,gemini all
"""

import argparse
import json
import logging
import sys
from pathlib import Path
from typing import Any, Dict, List, TypeAlias

import requests
import yaml

# Type aliases
ModelData: TypeAlias = Dict[str, Any]
ModelDict: TypeAlias = Dict[str, ModelData]

# Constants
DEFAULT_LITELLM_URL = "https://raw.githubusercontent.com/BerriAI/litellm/refs/heads/main/model_prices_and_context_window.json"
DEFAULT_SETTINGS_PATH = "~/.aider.model.settings.yml"
DEFAULT_METADATA_PATH = "~/.aider.model.metadata.json"
DEFAULT_LOG_PATH = "~/.cache/aider/script.log"
DEFAULT_PROVIDERS = ["openai", "anthropic", "gemini"]
DEFAULT_EDIT_FORMAT = "diff"
DEFAULT_TEMPERATURE = 0.7


def setup_logging(verbose: bool) -> None:
    """Set up logging to a file, and also to the console when verbose."""
    log_path = Path(DEFAULT_LOG_PATH).expanduser()
    log_path.parent.mkdir(parents=True, exist_ok=True)
    log_level = logging.DEBUG if verbose else logging.INFO

    logging.basicConfig(
        level=log_level,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[
            logging.FileHandler(log_path),
            logging.StreamHandler(sys.stdout) if verbose else logging.NullHandler(),
        ],
    )


def fetch_litellm_data(source: str) -> Dict[str, Any]:
    """Fetch model data from a URL or local file."""
    logging.info(f"Fetching model data from {source}")
    try:
        if source.startswith(("http://", "https://")):
            response = requests.get(source, timeout=30)
            response.raise_for_status()
            return response.json()
        with open(Path(source).expanduser(), "r", encoding="utf-8") as file:
            return json.load(file)
    except (requests.RequestException, json.JSONDecodeError, FileNotFoundError) as e:
        logging.error(f"Failed to fetch or parse model data: {e}")
        sys.exit(1)
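
# Rough sketch of the upstream JSON shape this script consumes. The field
# names match what the processing functions below read; the example values
# are illustrative assumptions, not pinned to the current upstream file:
# {
#     "gpt-4": {
#         "max_tokens": 4096,
#         "max_input_tokens": 8192,
#         "max_output_tokens": 4096,
#         "input_cost_per_token": 3e-05,
#         "output_cost_per_token": 6e-05,
#         "litellm_provider": "openai",
#         "mode": "chat"
#     },
#     ...
# }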


def create_model_names(model_id: str, provider: str) -> tuple[str, str, str]:
    """Create model names in different formats based on provider requirements.

    Returns:
        Tuple of (provider_name, display_name, litellm_compatible_name)
    """
    # Create provider/model format (for display and metadata lookup)
    if "/" not in model_id:
        provider_name = f"{provider}/{model_id}"
    else:
        # Model already has a provider prefix
        provider_name = model_id

    # Create litellm-prefixed name (for the Aider UI)
    display_name = f"litellm/{provider_name}"

    # Create litellm-compatible name (the actual format LiteLLM expects).
    # When using LiteLLM in direct mode (not proxy), it needs provider/model format.
    litellm_name = f"{provider}/{model_id}" if "/" not in model_id else model_id

    # For proxy mode, strip the provider prefix. This would apply if the user
    # has configured Aider to use litellm_proxy. (Kept as a reference; the
    # leading underscore marks it as intentionally unused in this code.)
    _litellm_proxy_name = model_id if "/" not in model_id else model_id.split("/", 1)[1]

    return provider_name, display_name, litellm_name
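
# Example, following the logic above: create_model_names("gpt-4", "openai")
# returns ("openai/gpt-4", "litellm/openai/gpt-4", "openai/gpt-4"), while an
# already-prefixed id, create_model_names("gemini/gemini-pro", "gemini"),
# returns ("gemini/gemini-pro", "litellm/gemini/gemini-pro", "gemini/gemini-pro").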


def process_models_for_settings(
    data: Dict[str, Any],
    providers: List[str],
    edit_format: str,
    temperature: float,
) -> List[Dict[str, Any]]:
    """Process and filter model data into Aider-compatible settings format."""
    logging.info(f"Processing settings for providers: {providers}")
    aider_models = []

    for model_id, model_data in data.items():
        provider = model_data.get("litellm_provider")
        # Skip models without a provider or not in the requested providers list
        if not provider or provider not in providers:
            continue

        # Create model names in different formats
        provider_name, display_name, litellm_name = create_model_names(model_id, provider)

        # Create regular model entry (no litellm prefix)
        regular_model = {
            "name": provider_name,
            "edit_format": edit_format,
            "use_temperature": temperature,
        }

        # Create litellm-prefixed model entry
        litellm_model = {
            "name": display_name,
            "edit_format": edit_format,
            "use_temperature": temperature,
            # Store the actual model info to use with LiteLLM
            "extra_params": {
                "model": litellm_name  # Overrides the model name passed to LiteLLM
            },
        }

        # Add both entries
        aider_models.append(regular_model)
        aider_models.append(litellm_model)

    logging.info(f"Processed {len(aider_models)} model settings")
    return aider_models
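
# A minimal sketch of one litellm-prefixed settings entry as generate_yaml()
# would emit it, assuming the defaults above and a hypothetical "gpt-4" model:
#
#   - name: litellm/openai/gpt-4
#     edit_format: diff
#     use_temperature: 0.7
#     extra_params:
#       model: openai/gpt-4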


def process_models_for_metadata(
    data: Dict[str, Any],
    providers: List[str],
) -> Dict[str, Dict[str, Any]]:
    """Process and filter model data into Aider-compatible metadata format."""
    logging.info(f"Processing metadata for providers: {providers}")
    aider_metadata = {}

    for model_id, model_data in data.items():
        provider = model_data.get("litellm_provider")
        # Skip models without a provider or not in the requested providers list
        if not provider or provider not in providers:
            continue

        # Create model names in different formats
        provider_name, display_name, litellm_name = create_model_names(model_id, provider)

        # Extract metadata, including all fields that Aider checks
        metadata = {}

        # Essential token limit fields
        if "max_input_tokens" in model_data:
            metadata["context_window"] = model_data["max_input_tokens"]
            metadata["max_input_tokens"] = model_data["max_input_tokens"]
        if "max_tokens" in model_data:
            metadata["max_tokens"] = model_data["max_tokens"]
        if "max_output_tokens" in model_data:
            metadata["max_output_tokens"] = model_data["max_output_tokens"]

        # Cost fields
        if "input_cost_per_token" in model_data:
            metadata["input_cost_per_token"] = model_data["input_cost_per_token"]
        if "output_cost_per_token" in model_data:
            metadata["output_cost_per_token"] = model_data["output_cost_per_token"]

        # Include cache cost fields if available
        for field in [
            "cache_read_input_token_cost",
            "cache_creation_input_token_cost",
        ]:
            if field in model_data:
                metadata[field] = model_data[field]

        # Include capability flags (only when truthy)
        for field in [
            "supports_function_calling",
            "supports_vision",
            "supports_system_messages",
            "supports_prompt_caching",
            "supports_audio_input",
            "supports_video_input",
            "supports_response_schema",
            "supports_tool_choice",
        ]:
            if field in model_data and model_data[field]:
                metadata[field] = model_data[field]

        # Include mode information
        if "mode" in model_data:
            metadata["mode"] = model_data["mode"]

        # Include provider information
        metadata["litellm_provider"] = provider

        # Add model information for proper LiteLLM handling
        metadata["model"] = litellm_name  # The actual model LiteLLM expects

        # Include extra_params mapping to ensure consistency with settings
        if "extra_params" not in metadata:
            metadata["extra_params"] = {}
        metadata["extra_params"]["model"] = litellm_name

        # Only add models with essential token info; both name formats share
        # the same metadata entry
        if "context_window" in metadata:
            aider_metadata[provider_name] = metadata
            aider_metadata[display_name] = metadata

    logging.info(f"Processed metadata for {len(aider_metadata)} models")
    return aider_metadata
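
# A sketch of one metadata entry as generate_json() would emit it (abridged;
# the numbers are illustrative assumptions). The same object is stored under
# both "openai/gpt-4" and "litellm/openai/gpt-4":
#
#   "litellm/openai/gpt-4": {
#     "context_window": 8192,
#     "max_input_tokens": 8192,
#     "input_cost_per_token": 3e-05,
#     "output_cost_per_token": 6e-05,
#     "litellm_provider": "openai",
#     "model": "openai/gpt-4",
#     "extra_params": {"model": "openai/gpt-4"}
#   }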


def generate_yaml(models: List[Dict[str, Any]], output_path: str) -> None:
    """Generate and save YAML file with model settings."""
    logging.info(f"Generating YAML file at {output_path}")

    # Ensure output directory exists
    output_file = Path(output_path).expanduser()
    output_file.parent.mkdir(parents=True, exist_ok=True)

    # Write the YAML file with proper formatting
    with open(output_file, "w", encoding="utf-8") as file:
        yaml.dump(models, file, sort_keys=False, default_flow_style=False, indent=2)

    logging.info(f"YAML file generated successfully at {output_file}")


def generate_json(metadata: Dict[str, Dict[str, Any]], output_path: str) -> None:
    """Generate and save JSON file with model metadata."""
    logging.info(f"Generating JSON file at {output_path}")

    # Ensure output directory exists
    output_file = Path(output_path).expanduser()
    output_file.parent.mkdir(parents=True, exist_ok=True)

    # Write the JSON file
    with open(output_file, "w", encoding="utf-8") as file:
        json.dump(metadata, file, indent=2)

    logging.info(f"JSON file generated successfully at {output_file}")


def get_example_model(data: Dict[str, Any]) -> str:
    """Get an example model name to show in usage examples."""
    if not data:
        return "litellm/openai/gpt-4"
    # Prefer litellm-prefixed models
    for name in data:
        if name.startswith("litellm/"):
            return name
    # Fall back to the first model
    return next(iter(data))


def generate_settings(args: argparse.Namespace) -> None:
    """Generate Aider model settings YAML file."""
    logging.info("Starting LiteLLM to Aider model settings generator")

    # Fetch model data
    data = fetch_litellm_data(args.input)

    # Process models into Aider format
    aider_models = process_models_for_settings(
        data, args.providers, args.edit_format, args.temperature
    )

    # Generate YAML file
    generate_yaml(aider_models, args.settings_output)

    # Pick a litellm-prefixed model for the usage example, if any
    example_model = "litellm/gemini/gemini-pro"
    for model in aider_models:
        if model["name"].startswith("litellm/"):
            example_model = model["name"]
            break

    print(f"Aider model settings generated at: {Path(args.settings_output).expanduser()}")
    print(f"\nUsage: aider --model={example_model}")


def generate_metadata(args: argparse.Namespace) -> None:
    """Generate Aider model metadata JSON file."""
    logging.info("Starting LiteLLM to Aider model metadata generator")

    # Fetch model data
    data = fetch_litellm_data(args.input)

    # Process models into Aider metadata format
    aider_metadata = process_models_for_metadata(data, args.providers)

    # Generate JSON file
    generate_json(aider_metadata, args.metadata_output)

    # Get example model
    example_model = get_example_model(aider_metadata)

    print(f"Aider model metadata generated at: {Path(args.metadata_output).expanduser()}")
    print(f"\nUsage: aider --model={example_model} --model-metadata-file={Path(args.metadata_output).expanduser()}")


def generate_all(args: argparse.Namespace) -> None:
    """Generate both settings and metadata files."""
    # First generate settings
    generate_settings(args)

    # Then generate metadata
    generate_metadata(args)

    # Final usage instructions (static example model; the calls above print
    # model-specific examples)
    example_model = "litellm/gemini/gemini-pro"
    metadata_path = Path(args.metadata_output).expanduser()

    print("\nComplete Configuration Generated!")
    print("=================================")
    print("\nFor the best experience, create this shell alias:")
    print(f"  alias aider-litellm='aider --model-metadata-file={metadata_path}'")
    print("\nThen use Aider with any LiteLLM model:")
    print(f"  aider-litellm --model={example_model}")


def parse_arguments() -> argparse.Namespace:
    """Parse command line arguments with subcommands."""
    parser = argparse.ArgumentParser(
        description="Generate Aider configuration files for using LiteLLM models"
    )

    # Common arguments (these must appear before the subcommand)
    parser.add_argument(
        "--input",
        help=f"JSON source (URL or file path, default: {DEFAULT_LITELLM_URL})",
        default=DEFAULT_LITELLM_URL,
    )
    parser.add_argument(
        "--providers",
        help=f"Comma-separated list of providers (default: {','.join(DEFAULT_PROVIDERS)})",
        default=",".join(DEFAULT_PROVIDERS),
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Enable verbose logging",
    )

    # Create subparsers for different commands
    subparsers = parser.add_subparsers(dest="command", help="Command to run")

    # Settings subcommand
    settings_parser = subparsers.add_parser("settings", help="Generate model settings YAML")
    settings_parser.add_argument(
        "--edit-format",
        help=f"Edit format for models (default: {DEFAULT_EDIT_FORMAT})",
        default=DEFAULT_EDIT_FORMAT,
    )
    settings_parser.add_argument(
        "--temperature",
        help=f"Temperature value for models (default: {DEFAULT_TEMPERATURE})",
        type=float,
        default=DEFAULT_TEMPERATURE,
    )
    settings_parser.add_argument(
        "--output",
        dest="settings_output",
        help=f"Output file path (default: {DEFAULT_SETTINGS_PATH})",
        default=DEFAULT_SETTINGS_PATH,
    )

    # Metadata subcommand
    metadata_parser = subparsers.add_parser("metadata", help="Generate model metadata JSON")
    metadata_parser.add_argument(
        "--output",
        dest="metadata_output",
        help=f"Output file path (default: {DEFAULT_METADATA_PATH})",
        default=DEFAULT_METADATA_PATH,
    )

    # All subcommand
    all_parser = subparsers.add_parser("all", help="Generate both settings and metadata")
    all_parser.add_argument(
        "--settings-output",
        help=f"Settings output file path (default: {DEFAULT_SETTINGS_PATH})",
        default=DEFAULT_SETTINGS_PATH,
    )
    all_parser.add_argument(
        "--metadata-output",
        help=f"Metadata output file path (default: {DEFAULT_METADATA_PATH})",
        default=DEFAULT_METADATA_PATH,
    )
    all_parser.add_argument(
        "--edit-format",
        help=f"Edit format for models (default: {DEFAULT_EDIT_FORMAT})",
        default=DEFAULT_EDIT_FORMAT,
    )
    all_parser.add_argument(
        "--temperature",
        help=f"Temperature value for models (default: {DEFAULT_TEMPERATURE})",
        type=float,
        default=DEFAULT_TEMPERATURE,
    )

    args = parser.parse_args()

    # Process providers string into list
    args.providers = [p.strip() for p in args.providers.split(",")]

    return args


def main() -> None:
    """Main entry point for the script."""
    args = parse_arguments()
    setup_logging(args.verbose)

    if args.command == "settings":
        generate_settings(args)
    elif args.command == "metadata":
        generate_metadata(args)
    elif args.command == "all":
        generate_all(args)
    else:
        print("Please specify a command: settings, metadata, or all")
        print("Example: uv run ./litellm_to_aider.py all")
        sys.exit(1)


if __name__ == "__main__":
    main()