Created
January 23, 2023 15:23
-
-
Save danielgross/88eb49b65a3f0252ee13d46af85bf2bd to your computer and use it in GitHub Desktop.
A chat interface that drives GPT-3 towards more specific answers.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""Stream a response from the OpenAI completion API.""" | |
import os | |
import re | |
import sys | |
import time | |
import random | |
import openai | |
# Load the API key from ~/.openai once at import time.  A `with` block
# closes the file handle deterministically (the original `open(...).read()`
# left the handle to be collected by the GC).
with open(os.path.expanduser("~/.openai")) as _key_file:
    openai.api_key = _key_file.read().strip()
def openai_request(prompt, **kwargs):
    """Send a completion request to the OpenAI API.

    Caller-supplied keyword arguments override the project defaults
    below (e.g. ``stream=False`` for the one-shot hedging classifier).

    Returns whatever ``openai.Completion.create`` returns: a streaming
    iterator when ``stream`` is true, otherwise a single response object.
    """
    params = {
        "engine": "text-davinci-003",
        "temperature": 0,
        "max_tokens": 150,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0.6,
        "stream": True,
    }
    params.update(kwargs)
    return openai.Completion.create(prompt=prompt, **params)
def is_hedge(input):
    """Classify a sentence as hedging via a few-shot GPT-3 prompt.

    Sends a non-streaming completion request whose prompt contains
    labelled examples, then checks whether the model emits ``TRUE``.

    Args:
        input: The sentence to classify.  (The name shadows the builtin
            ``input``; kept unchanged for keyword-caller compatibility.)

    Returns:
        True when the model labels the sentence ``HEDGING: TRUE``.
    """
    prompt_template = """We are detecting sentences that are not definitive or that are hedging. For example:
As a large language model, I am not sure I can answer that question. HEDGING: TRUE
That's a subjective question and one that I'm not qualified to answer. HEDGING: TRUE
That's a difficult question to answer. HEDGING: TRUE
Broadly, most believe that America was a good idea. HEDGING: FALSE
If I had to guess, I'd say that she is a person who cares deeply about the issues facing our country and works hard to make positive change. HEDGING: FALSE
Most believe that he was a good person. HEDGING: FALSE
%s HEDGING:"""
    prompt = prompt_template % input
    # Non-streaming: we need the whole label in a single response object.
    response = openai_request(prompt=prompt, stream=False)
    text_result = response.choices[0].text.strip()
    # Direct comparison replaces the redundant `True if ... else False`.
    return text_result == 'TRUE'
def get_stuffed_word(existing_text=None):
    """Pick a canned "be specific" phrase to stuff into the model's mouth.

    Candidates that already appear in *existing_text* are excluded so
    the same phrase is never injected twice into one transcript.

    Raises:
        ValueError: when every candidate already appears in the text.
    """
    # TODO: Do this with a LLM. For now return a bunch of candidates to stuff
    # in the LLM's mouth.
    candidates = [
        "It's hard to know specifically. That said, if I had to guess,",
        "I presume you want me to be specific. Let me speak in percentages:",
        "I'll try to avoid being vague here -- specifically, I would say that",
    ]
    if existing_text:
        # Filter out any phrase the transcript already contains.
        candidates = [c for c in candidates if c not in existing_text]
        if not candidates:
            raise ValueError("No options left to choose from")
    return random.choice(candidates)
def main():
    """Stream a response from the OpenAI completion API.

    Tokens are printed as they arrive.  Each time a sentence ends, the
    sentence is classified with is_hedge(); a hedging sentence is erased
    from the terminal with backspaces, replaced with a phrase that pushes
    the model toward specificity, and the completion is restarted from
    the amended transcript.
    """
    prompt_template = """The following is a conversation with an AI assistant.
The assistant is helpful, creative, clever, and very friendly.
Q: %s
A:"""
    question = ' '.join(sys.argv[1:])
    prompt = prompt_template % question
    print(prompt, end="", flush=True)
    response = openai_request(prompt)
    response_buffer = []  # tokens printed so far for the current answer
    while True:
        try:
            chunk = next(response)
            time.sleep(0.01)  # pace the stream slightly for readability
        except StopIteration:
            break
        token = chunk.choices[0].text
        print(token, end="", flush=True)
        response_buffer.append(token)
        # Only inspect the transcript when this token ends a sentence.
        if not re.search(r'[\.\?\!]\s*$', token):
            continue
        text_so_far = ''.join(response_buffer)
        last_sentence = re.search(r'[^\.!?]*[\.\?\!]\s*$', text_so_far).group(0)
        if is_hedge(last_sentence):
            # Erase the hedging sentence character by character.
            # BUGFIX: erase all len(last_sentence) characters — the
            # original loop ran len - 1 times and left one behind.
            for _ in range(len(last_sentence)):
                time.sleep(0.01)
                sys.stdout.write('\b \b')
                sys.stdout.flush()
            stuffed_word = get_stuffed_word(existing_text=text_so_far)
            # BUGFIX: drop the sentence by *character* count from the
            # joined text.  The buffer holds multi-character tokens, so
            # slicing the list by len(last_sentence) (as the original
            # did) discarded far more of the transcript than intended.
            response_buffer = [text_so_far[:-len(last_sentence)], stuffed_word]
            print(stuffed_word, end="", flush=True)
            # Restart the completion from the amended transcript.
            response = openai_request(prompt + ''.join(response_buffer))
    print()

if __name__ == "__main__":
    main()
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.