import openai
import os
import sys
import inspect
import ast
import difflib
from typing import Any, Dict, List, Optional

# Initialize OpenAI client
openai.api_type = 'openai'
openai.api_key = os.getenv("OPENAI_API_KEY")
client = openai.OpenAI()

# Define models
MODELS = {
    'o1-preview': 'o1-preview',
    'o1-mini': 'o1-mini',
    'gpt-4o': 'gpt-4o',
    'gpt-4o-mini': 'gpt-4o-mini'
}

def o1_preview(user_prompt: str = "", system_prompt: str = "", context: str = "", messages: Optional[List[Dict[str, Any]]] = None):
    model = MODELS['o1-preview']
    # o1 models take no system message, so the system prompt and context are folded
    # into a single user message when no explicit message history is supplied.
    if not messages:
        messages = [
            {"role": "user", "content": f"""<system_prompt> {system_prompt} </system_prompt>
<context> {context} </context>
<user_prompt> {user_prompt} </user_prompt>"""
            }
        ]
    response = client.chat.completions.create(model=model, messages=messages, stream=True)
    return response

def o1_mini(user_prompt: str = "", system_prompt: str = "", context: str = "", messages: Optional[List[Dict[str, Any]]] = None):
    model = MODELS['o1-mini']
    if not messages:
        messages = [
            {"role": "user", "content": f"""<system_prompt> {system_prompt} </system_prompt>
<context> {context} </context>
<user_prompt> {user_prompt} </user_prompt>"""
            }
        ]
    response = client.chat.completions.create(model=model, messages=messages, stream=True)
    return response

def gpt_4o(messages, **kwargs):
    model = MODELS['gpt-4o']
    response = client.chat.completions.create(model=model, messages=messages, **kwargs)
    return response

def gpt_4o_mini(messages, **kwargs):
    model = MODELS['gpt-4o-mini']
    response = client.chat.completions.create(model=model, messages=messages, **kwargs)
    return response

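# Illustrative sketch (not part of the original gist): one way to call the
# non-streaming gpt-4o helpers above. The function and variable names here are
# made up for the example; .choices[0].message.content is the standard field on
# a non-streaming chat completion.
def example_gpt_4o_mini_call():
    demo_messages = [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Summarize what this script does in one sentence."},
    ]
    # gpt_4o_mini forwards extra keyword arguments (e.g. temperature) to the API call.
    reply = gpt_4o_mini(demo_messages, temperature=0.2)
    return reply.choices[0].message.content
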
SYSTEM_PROMPT = "You are the smartest intelligence of your kind, the precursor"

def o1(prompt: str, system_prompt: str = SYSTEM_PROMPT):
    return o1_preview(
        user_prompt=prompt,
        system_prompt=system_prompt
    )

def stream_response_to_cli(response_generator):
    """
    Streams the tokens from a response generator to the CLI.

    Args:
        response_generator: An iterable that yields response chunks.

    Returns:
        A message dict for the assistant's full reply, ready to append to a history.
    """
    assistant_message = ''
    for chunk in response_generator:
        # Extract the content from each chunk
        content = chunk.choices[0].delta.content
        # Print the content without adding a newline to stream the tokens
        if content:
            assistant_message += content
            print(content, end='', flush=True)
    print()  # Print a final newline after completion
    return {"role": "assistant", "content": assistant_message}

text = 'import openai\nimport os\nimport sys\nimport inspect\nimport ast\nimport difflib\n\n# Initialize OpenAI client\nopenai.api_type = \'openai\'\nopenai.api_key = os.getenv("OPENAI_API_KEY")\nclient = openai.OpenAI()\n\n# Define models\nMODELS = {\n \'o1-preview\': \'o1-preview\',\n \'o1-mini\': \'o1-mini\',\n \'gpt-4o\': \'gpt-4o\',\n \'gpt-4o-mini\': \'gpt-4o-mini\'\n}\n\ndef o1_preview(user_prompt: str = "", system_prompt: str = "", context: str = "", messages: Optional[List[Dict[str, Any]]] = []):\n model = MODELS[\'o1-preview\']\n\n if not messages or len(messages) == 0:\n messages = [\n {"role": "user", "content": f\'<system_prompt> {system_prompt} </system_prompt>\n<context> {context} </context>\n<user_prompt> {user_prompt } </user_prompt>\'\n }\n ]\n response = client.chat.completions.create(model=model, messages=messages, stream=True)\n return response\n\ndef o1_mini(user_prompt: str = "", system_prompt: str = "", context: str = "", messages: Optional[List[Dict[str, Any]]] = []):\n\n model = MODELS[\'o1-mini\']\n if not messages or len(messages) == 0:\n messages = [\n {"role": "user", "content": f\'<system_prompt> {system_prompt} </system_prompt>\n<context> {context} </context>\n<user_prompt> {user_prompt } </user_prompt>\'\n }\n ]\n\n response = client.chat.completions.create(model=model, messages=messages, stream=True)\n return response\n\ndef gpt_4o(messages, **kwargs):\n model = MODELS[\'gpt-4o\']\n response = client.chat.completions.create(model=model, messages=messages, **kwargs)\n return response\n\ndef gpt_4o_mini(messages, **kwargs):\n model = MODELS[\'gpt-4o-mini\']\n response = client.chat.completions.create(model=model, messages=messages, **kwargs)\n return response\n\nSYSTEM_PROMPT = "You are the smartest intelligence of its kind, the precursor"\n\ndef o1(prompt: str, system_prompt: str = SYSTEM_PROMPT, stream: bool=True):\n return o1_preview(prompt, system_prompt, stream=stream)\n )\n\ndef stream_response_to_cli(response_generator):\n assistant_message = \'\'\n for chunk in response_generator:\n # Extract the content from each chunk\n content = chunk.choices[0].delta.content\n # Print the content without adding a newline to stream the tokens\n if content:\n assistant_message += content\n print(content, end=\'\', flush=True)\n print() # Print a final newline after completion\n return {"role": "assistant", "content": assistant_message}\n\nresponse_generator = o1_preview(\n user_prompt="<system_prompt> You will iteratively rewrite your system prompt as we progress through the convo. Before each message you will rewrite the system prompt based on what you\'re learning. You will use a little agent scratchpad to maintain state </system_prompt> <user prompt> Let\'s think slowly. Step by step. What is the best way to make a meta-recursive self-aware transformer architecture? Try to include specific code segments, specific areas to look at in terms of modifying a typical Llama 2/3/3.1 style architecture for these additional capabilities </user_prompt>"\n);\n\nstream_response_to_cli(response_generator)'

response_generator = o1_preview(user_prompt="Rewrite this as a streaming FastAPI app!", system_prompt="The o1-preview and o1-mini models were just released. Today, we will write a conversational CLI chat where we create a convo, it gets a UUID and an empty list of messages. I pass in system_prompt and it really just gets added into the user message, but please write an alternative chat, assistant and user, let's store messages in a JSON file. Rewrite comprehensively", context=text)

stream_response_to_cli(response_generator)