Query multiple online LLMs with a single prompt
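The gist is a small terminal tool: give it a prompt inline, from a file, or by drafting one in your $EDITOR, pick one or more services and a model from a menu, and it prints and logs each response. Typical invocations, assuming the main script below is saved as llm.py (the actual filename is not shown on this page; all flags come from the script's argparse setup):

    python llm.py --prompt "Explain attention in two sentences."
    python llm.py --file notes.txt --nolog
    python llm.py --image sushi.png --prompt "What dish is this?"
    python llm.py --cmd "find files larger than 1GB"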
.gitignore
__pycache__/
*.txt
*.jpg
*.jpeg
*.png
llm_out/
helper.py
from PIL import Image
import base64

def determine_image_type(image_path):
    try:
        with Image.open(image_path) as img:
            # Map the Pillow format name to a MIME type
            format_to_mime = {
                'JPEG': 'image/jpeg',
                'PNG': 'image/png',
                'GIF': 'image/gif',
                'WEBP': 'image/webp',
            }
            return format_to_mime.get(img.format, 'unknown')
    except (IOError, OSError) as e:
        return f'Error: {e}'

# Example usage:
# image_path = 'sushi.png'
# mime_type = determine_image_type(image_path)
# print(f'The MIME type of the image is: {mime_type}')

# Encode an image file as a base64 string
def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')
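The two helpers compose into the base64 data URL that the OpenAI vision call in the main script builds; a minimal sketch using the functions above ('sushi.png' is just a placeholder path):

    # Build a data URL of the form the vision-capable chat APIs accept,
    # e.g. data:image/png;base64,iVBORw0...
    image_path = 'sushi.png'
    mime_type = determine_image_type(image_path)   # e.g. 'image/png'
    data_url = f"data:{mime_type};base64,{encode_image(image_path)}"
    print(data_url[:60] + '...')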
A companion script lists the models each service currently offers:
import argparse
from os import getenv

def list_gemini_models():
    import google.generativeai as genai
    api_key = getenv("GEMINI_API_KEY")
    genai.configure(api_key=api_key)
    for m in genai.list_models():
        if 'generateContent' in m.supported_generation_methods:
            print(m.name)
def list_groq_models():
    import requests
    api_key = getenv("GROQ_API_KEY")
    url = "https://api.groq.com/openai/v1/models"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    # The endpoint mirrors the OpenAI schema: model entries live under "data"
    for m in response.json()["data"]:
        print(m["id"])
def list_openai_models():
    from openai import OpenAI
    api_key = getenv("OPENAI_API_KEY")
    proj_key = getenv("OPENAI_PROJECT")
    client = OpenAI(api_key=api_key, project=proj_key)
    for model in client.models.list():
        print(model.id)
def list_claude_models():
    import anthropic
    api_key = getenv("ANTHROPIC_API_KEY")
    client = anthropic.Anthropic(api_key=api_key)
    # The Anthropic SDK exposes this as client.models.list(), not
    # client.list_models(), and entries carry .id rather than .name
    for model in client.models.list():
        print(model.id)
def main():
    parser = argparse.ArgumentParser(description="List models for a specific service.")
    parser.add_argument('--service', type=str, required=True,
                        choices=['gemini', 'groq', 'openai', 'claude'],
                        help="The service for which to list models.")
    args = parser.parse_args()
    if args.service == 'gemini':
        list_gemini_models()
    elif args.service == 'groq':
        list_groq_models()
    elif args.service == 'openai':
        list_openai_models()
    elif args.service == 'claude':
        list_claude_models()

if __name__ == "__main__":
    main()
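The if/elif chain maps one-to-one onto the listing functions, so a dictionary dispatch is a drop-in alternative; a sketch of main() rewritten that way (same behavior, and the mapping doubles as the argparse choices):

    # Sketch: table-driven dispatch, equivalent to the chain above.
    LISTERS = {
        "gemini": list_gemini_models,
        "groq": list_groq_models,
        "openai": list_openai_models,
        "claude": list_claude_models,
    }

    def main():
        parser = argparse.ArgumentParser(description="List models for a specific service.")
        parser.add_argument('--service', required=True, choices=LISTERS,
                            help="The service for which to list models.")
        args = parser.parse_args()
        LISTERS[args.service]()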
# Query multiple online LLMs with a single prompt
import argparse
from os import getenv, makedirs
import pyperclip
from simple_term_menu import TerminalMenu
import click
import tempfile
import datetime
from helper import determine_image_type, encode_image
import platform

MAX_TOKENS = 4096
GROQ_MODELS = ["gemma-7b-it",
               "mixtral-8x7b-32768",
               "llama3-8b-8192",
               "llama3-70b-8192",
               "llama-3.2-90b-text-preview",
               "llama-3.2-11b-text-preview",
               "llama-3.1-70b-versatile"]

GEMINI_MODELS = ["gemini-pro",
                 "gemini-1.5-flash-latest",
                 "gemini-1.5-pro-latest",
                 "gemini-1.5-pro-exp-0801",
                 "gemini-1.0-pro-latest"]

CLAUDE_MODELS = ["claude-3-5-sonnet-20240620",
                 "claude-3-opus-20240229",
                 "claude-3-sonnet-20240229",
                 "claude-3-haiku-20240307"]
OPENAI_MODELS = ["gpt-3.5-turbo",
                 "gpt-4-turbo",
                 "chatgpt-4o-latest",  # "gpt-4o-large-2024-08-13" is not a published model ID
                 "gpt-4",
                 "gpt-4o",
                 "gpt-4o-mini",
                 "o1-mini",
                 "o1-preview"]
SAMBA_NOVA_MODELS = ["Meta-Llama-3.1-405B-Instruct",
                     "Meta-Llama-3.1-70B-Instruct",  # Llama 3.1 ships in 405B/70B/8B; "80B" was a typo
                     "Meta-Llama-3.1-8B-Instruct"]
REPLICATE_MODELS = ["meta-llama-3.1-405b-instruct"]

SERVICES = {"groq":      GROQ_MODELS,
            "gemini":    GEMINI_MODELS,
            "claude":    CLAUDE_MODELS,
            "openai":    OPENAI_MODELS,
            "replicate": REPLICATE_MODELS,
            "sambaNova": SAMBA_NOVA_MODELS}

# Vision-capable subsets, offered when --image is passed
SERVICES_VIS = {"gemini":    ["gemini-1.5-flash-latest", "gemini-1.5-pro-latest", "gemini-pro-vision"],
                "claude":    CLAUDE_MODELS,
                "openai":    ["gpt-4-turbo", "gpt-4o"],
                "replicate": REPLICATE_MODELS}
def call_groq_api(prompt, model):
    """ Groq API
        https://console.groq.com/docs/models
    """
    from groq import Groq
    api_key = getenv("GROQ_API_KEY")
    client = Groq(api_key=api_key)
    chat_completion = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model=model
    )
    return chat_completion.choices[0].message.content
def call_gemini_api(prompt, model, image=None):
    """ Gemini API:
        https://ai.google.dev/gemini-api/docs/get-started/python
    """
    import google.generativeai as genai
    import PIL.Image
    api_key = getenv("GEMINI_API_KEY")
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(model)
    if image:
        img = PIL.Image.open(image)
        return model.generate_content([prompt, img]).text
    else:
        return model.generate_content(prompt).text
def call_claude_api(prompt, model, image=None):
    """ Claude API
        https://docs.anthropic.com/claude/reference/client-sdks
    """
    import anthropic
    api_key = getenv("ANTHROPIC_API_KEY")
    client = anthropic.Anthropic(api_key=api_key)
    messages = [{"role": "user", "content": prompt}]
    if image:
        image_data = encode_image(image)
        image_media_type = determine_image_type(image)
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "source": {
                            "type": "base64",
                            "media_type": image_media_type,
                            "data": image_data,
                        },
                    },
                    {
                        "type": "text",
                        "text": prompt
                    }
                ],
            }
        ]
    message = client.messages.create(
        model=model,
        max_tokens=MAX_TOKENS,
        messages=messages
    )
    return message.content[0].text
def call_openai_api(prompt, model, image=None):
    """ OpenAI API
        https://platform.openai.com/docs/quickstart
    """
    from openai import OpenAI
    api_key = getenv("OPENAI_API_KEY")
    proj_key = getenv("OPENAI_PROJECT")
    client = OpenAI(api_key=api_key, project=proj_key)
    if image:
        base64_image = encode_image(image)
        image_type = determine_image_type(image)
        messages = [{"role": "user",
                     "content": [{"type": "text", "text": prompt},
                                 {"type": "image_url",
                                  "image_url": {"url": f"data:{image_type};base64,{base64_image}"}}]
                     }]
    else:
        messages = [{"role": "user", "content": prompt}]
    chat_completion = client.chat.completions.create(
        messages=messages,
        model=model,
    )
    return chat_completion.choices[0].message.content
def call_replicate_api(prompt, model, image=None):
    """ Replicate API (Meta models are namespaced under 'meta/') """
    import replicate
    model = 'meta/' + model
    if image:
        # Pass an open file handle; replicate.run handles the upload
        with open(image, "rb") as img:
            response = replicate.run(model, input={"prompt": prompt, "image": img, "max_tokens": MAX_TOKENS})
    else:
        response = replicate.run(model, input={"prompt": prompt, "max_tokens": MAX_TOKENS})
    return ''.join(response)
def call_samba_nova_api(prompt, model, image=None):
    # OpenAI-compatible endpoint; the image argument is accepted but unused
    from openai import OpenAI
    client = OpenAI(
        api_key=getenv("SAMBA_NOVA_API_TOKEN"),
        base_url="https://api.sambanova.ai/v1",
    )
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.1,
        top_p=0.1
    )
    return response.choices[0].message.content
def log_response(model, prompt, response):
    # Timestamp the log file name
    current_time = datetime.datetime.now()
    directory_path = "llm_out"
    makedirs(directory_path, exist_ok=True)
    filename = f"{directory_path}/llm_{model}_{current_time.strftime('%Y-%m-%d-%H-%M')}.md"
    save_str = f"""
{current_time.strftime('%Y-%m-%d %I:%M %p')}
### Model
{model}
### Prompt
{prompt}
### Response
{response}
"""
    # Append, so repeated runs in the same minute share one file
    with open(filename, "a") as f:
        f.write(save_str)
    print(f"Response logged to {filename}")
    # Best-effort copy to the clipboard; skip quietly on headless systems
    try:
        pyperclip.copy(save_str)
    except pyperclip.PyperclipException:
        pass
def prompt_from_file():
    # Draft the prompt in the user's editor via a temporary text file
    f = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
    f.close()  # release our handle so the editor can write to it
    click.edit(require_save=True, filename=f.name)
    print("Prompt saved to:", f.name)
    with open(f.name, 'r') as file:
        return file.read()
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cmd', help='Quickly look up a bash command.')
    parser.add_argument('--prompt', help='Specify a prompt')
    parser.add_argument('--image', help='Path to image')
    parser.add_argument('--file', help='Specify a prompt file')
    parser.add_argument('--nolog', action='store_true', default=False, help='Do not log to file.')
    args = parser.parse_args()
    if args.cmd:
        cmd = args.cmd
        prompt = (f"Return the bash command to {cmd} using platform {platform.platform()}. "
                  "Do not return any markdown or explanation. Do not wrap with backticks ``.")
        response = call_groq_api(prompt, "llama3-70b-8192")
        print(response)
        quit()
    elif args.prompt:
        prompt = args.prompt
    elif args.file:
        with open(args.file) as f:
            prompt = f.read()
    else:
        prompt = prompt_from_file()
    if args.image:
        service_options = list(SERVICES_VIS.keys())
    else:
        service_options = list(SERVICES.keys())
    service_menu = TerminalMenu(service_options, multi_select=True)
    for service_menu_entry_index in service_menu.show():
        service_selection = service_options[service_menu_entry_index]
        if args.image:
            model_options = SERVICES_VIS[service_selection]
        else:
            model_options = SERVICES[service_selection]
        model_menu = TerminalMenu(model_options)
        model_menu_entry_index = model_menu.show()
        model_selection = model_options[model_menu_entry_index]
        print("Using model:", model_selection)
        match service_selection:
            case "gemini":
                response = call_gemini_api(prompt, model_selection, args.image)
            case "groq":
                response = call_groq_api(prompt, model_selection)
            case "claude":
                response = call_claude_api(prompt, model_selection, args.image)
            case "openai":
                response = call_openai_api(prompt, model_selection, args.image)
            case "replicate":
                response = call_replicate_api(prompt, model_selection, args.image)
            case "sambaNova":
                response = call_samba_nova_api(prompt, model_selection, args.image)
            case _:
                print("Invalid service.")
                continue  # response would otherwise be unbound below
        print(response)
        if not args.nolog:
            log_response(model_selection, prompt, response)

if __name__ == "__main__":
    main()
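The script assumes the relevant keys are already exported. A small preflight sketch, using only the variable names that appear in the calls above (the replicate client reads REPLICATE_API_TOKEN from the environment on its own; OPENAI_PROJECT is optional):

    # Warn about unset API keys before launching the menu; trim the list
    # to the services you actually intend to use.
    from os import getenv

    KEYS = ["GROQ_API_KEY", "GEMINI_API_KEY", "ANTHROPIC_API_KEY",
            "OPENAI_API_KEY", "SAMBA_NOVA_API_TOKEN", "REPLICATE_API_TOKEN"]
    missing = [k for k in KEYS if not getenv(k)]
    if missing:
        print("Unset environment variables:", ", ".join(missing))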
requirements.txt
pyperclip
simple-term-menu
click
groq
openai
anthropic
Pillow
google-generativeai
vertexai
replicate
# needed by the model-listing script
requests