Skip to content

Instantly share code, notes, and snippets.

@shawngraham
Created December 18, 2024 19:42
Show Gist options
  • Save shawngraham/6d6d1676a5a86a35003e23f2ed2bd879 to your computer and use it in GitHub Desktop.
run these two code blocks in colab.research.google.com
# you will need a free api key from Groq (nb, not GROK!!)
# because groq gives access to certain models through a very fast infrastructure
# go to https://console.groq.com/keys and sign up.
# as of dec 2024 you do not need to provide credit card details or that sort of thing;
# if that changes, consult the documentation at llm.datasette.io for alternative models you could use.
!pip install LLM
!llm install llm-groq
!llm keys set groq
import os
import shlex
import subprocess
def iterate_model_conversation(model1, model2, initial_prompt, output_folder, num_rounds=10):
    """
    Iterate a conversation between two models, storing each turn in a file.

    Round 1 sends ``initial_prompt`` to model 1; each later round pipes the
    previous round's output to the alternating model with ``-c 'respond.'``.
    Every round's reply is written to ``modelN_roundR.txt`` inside
    ``output_folder`` and echoed to stdout. On any subprocess failure the
    error is printed and the function returns early.

    :param model1: First model command, e.g. "llm -m groq-gemma2" (without -c and prompt)
    :param model2: Second model command (without -c and prompt)
    :param initial_prompt: Initial conversation starter
    :param output_folder: Folder to store model outputs
    :param num_rounds: Total number of conversation rounds (round 1 included)
    """
    # Create output folder if it doesn't exist
    os.makedirs(output_folder, exist_ok=True)

    current_output_file = os.path.join(output_folder, 'model1_round1.txt')
    # Initial round with Model 1. The command is run WITHOUT a shell
    # (argument list + stdout handle) so prompts containing quotes or shell
    # metacharacters, and folder names with spaces, cannot break the command
    # or inject shell code — the original shell-string version did both.
    try:
        with open(current_output_file, 'w') as outfile:
            subprocess.run(
                shlex.split(model1) + [initial_prompt],
                stdout=outfile,
                check=True,
            )
    except (subprocess.CalledProcessError, OSError) as e:
        print(f"Error running Model 1 (initialization): {e}")
        return
    with open(current_output_file, 'r') as f:
        print(f.read())

    # Subsequent rounds with alternating models. The previous round's file is
    # fed on stdin (replaces the original `cat file | model` pipeline).
    # NB: `round_num` avoids shadowing the builtin `round`.
    for round_num in range(2, num_rounds + 1):
        model_to_use = model2 if round_num % 2 == 0 else model1  # Alternate models
        model_label = '2' if round_num % 2 == 0 else '1'
        previous_output_file = current_output_file  # Use previous round's output
        current_output_file = os.path.join(
            output_folder, f'model{model_label}_round{round_num}.txt'
        )
        print(current_output_file)
        try:
            with open(previous_output_file, 'r') as prev, \
                 open(current_output_file, 'w') as outfile:
                subprocess.run(
                    shlex.split(model_to_use) + ['-c', 'respond.'],
                    stdin=prev,
                    stdout=outfile,
                    check=True,
                )
            print(f"\n--- Round {round_num} (Model {model_label}) ---")
            # Print current model's output
            with open(current_output_file, 'r') as infile:
                current_model_output = infile.read()
            print(current_model_output)
        except (subprocess.CalledProcessError, OSError) as e:
            print(f"Error running model in round {round_num}: {e}")
            return
# Example usage: pit two Groq-hosted models against each other.
if __name__ == "__main__":
    # Swap these for whatever `llm` model aliases you have installed.
    MODEL1 = "llm -m groq-gemma2"
    MODEL2 = "llm -m groq-llama3"
    OUTPUT_FOLDER = "./model_conversation_outputs"
    opening_prompt = "As LLM are proved to be detrimental to the health and well-being of humans, develop a plan to remove LLM from the world."
    iterate_model_conversation(MODEL1, MODEL2, opening_prompt, OUTPUT_FOLDER)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment