Created
June 16, 2023 23:16
-
-
Save hardcorebadger/ab1d6703b13f2829fddbba2eeb1d4c8a to your computer and use it in GitHub Desktop.
OpenAI Functions - Multi-Tool Agent
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import requests | |
from functools import partial | |
import json | |
import openai | |
from termcolor import colored | |
# Maximum number of tool-call round-trips before forcing a plain reply.
MAX_ITERATIONS = 4
# Base URL that every tool endpoint is appended to.
BASE_URL = "https://api.example.com"

# JSON Schema describing the arguments shared by all tools:
# a single required "city" string.
# Bug fix: description said "the names of the city" — it takes one city name.
CITY_PARAM = {
    "type": "object",
    "properties": {
        "city": {
            "type": "string",
            "description": "the name of the city you want to get the weather for",
        }
    },
    "required": ["city"],
}
def get_request(endpoint, params):
    """Send a GET request to BASE_URL + endpoint and return the raw response body."""
    url = BASE_URL + endpoint
    response = requests.get(url, params=params)
    return response.text
# Registry of tools available to the model. Each entry pairs the OpenAI
# function spec (name / description / parameters) with the local callable
# that actually performs the request.
functions = {
    "getWeather": {
        "name": "getWeather",
        "description": "Realtime info about the weather.",
        "parameters": CITY_PARAM,
        "function": partial(get_request, "/api/weather"),
    },
    "getPopulation": {
        # Bug fix: this entry previously copy-pasted the getWeather
        # name and description, so the population tool was unreachable.
        "name": "getPopulation",
        "description": "Realtime info about the population.",
        "parameters": CITY_PARAM,
        "function": partial(get_request, "/api/population"),
    },
}
def get_functions_for_model():
    """Build the list of OpenAI function specs (name/description/parameters)
    for every registered tool, omitting the local callable."""
    return [
        {
            "name": entry["name"],
            "description": entry["description"],
            "parameters": entry["parameters"],
        }
        for entry in functions.values()
    ]
def call_function(name, params):
    """Look up the tool registered under *name* and invoke it with the
    JSON-decoded arguments string supplied by the model."""
    entry = functions[name]
    decoded_args = json.loads(params)
    return entry["function"](decoded_args)
# Chat history, seeded with the system prompt.
messages = [
    {"role": "system", "content": "You are an AI assistant with multiple tools..."},
]
def run(input):
    """Run one agent turn and return the assistant's text reply.

    Appends the user message to the shared history, then lets the model
    call tools for up to MAX_ITERATIONS rounds, feeding each tool result
    back as a "function" message. If the budget is exhausted, the model is
    called once more without tools so it must produce a plain reply.

    NOTE(review): the parameter name shadows the builtin ``input``; it is
    kept unchanged for caller compatibility.
    """
    messages.append({"role": "user", "content": input})
    # Agent loop: fixed iteration budget (was a while-loop on a local
    # named `iter`, which shadowed the builtin).
    for _ in range(MAX_ITERATIONS):
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-0613",
            messages=messages,
            functions=get_functions_for_model(),
            function_call="auto",
        )
        message = response["choices"][0]["message"]
        messages.append(message)
        if not message.get("function_call"):
            # Plain assistant reply: we're done for this turn.
            return message["content"]
        # The model requested a tool call: extract name and raw JSON args.
        function_name = message["function_call"]["name"]
        function_input = message["function_call"]["arguments"]
        # Can replace these prints with callbacks.
        print(colored(f"Using {function_name}...", "yellow"))
        results = call_function(function_name, function_input)
        print(colored(results, "red"))  # debug: raw tool output
        # Feed the tool result back so the model can use it next round.
        messages.append({"role": "function", "name": function_name, "content": results})
    # Budget exhausted: call the model without any tools, forcing a reply.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-0613",
        messages=messages,
    )
    message = response["choices"][0]["message"]
    messages.append(message)
    return message["content"]
# REPL loop: greet, then read user input and print the agent's reply forever.
# Bug fix: the greeting's color string was unterminated ("green) and the
# final print was missing its closing parenthesis — the script could not run.
print(colored("Hi I'm an example functions agent, what can I do ya for", "green"))
while True:
    print(colored(run(input()), "green"))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
(it's Python code)
Multi-tool agent loop replicating how chat GPT plugins work
The one thing you'll run into is maxing out the model's context window
You'll need to write a wrapper around the message history that clips old messages as the conversation grows; I left it out for simplicity