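"""
Repeatedly ask gpt-3.5-turbo to continue the seeded sentence "It is complex and "
about a randomly chosen concept, then tally the first word of each continuation
in word_frequencies.csv.
"""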
import requests
import threading
import time
import csv
import os
import random
from collections import defaultdict

api_key = os.environ.get('OPENAI_API_KEY')

word_freq = defaultdict(int)
lock = threading.Lock()

# List of high-level concepts or nouns
concepts = ["universe", "philosophy", "technology", "humanity", "culture",
            "evolution", "consciousness", "art", "science",
            "society", "knowledge", "history", "future", "ethics",
            "education", "economy", "ecology", "emotion"]

# Function to read existing word frequencies from a CSV file
def read_csv():
    try:
        with open('word_frequencies.csv', mode='r', newline='') as file:
            reader = csv.reader(file)
            next(reader)  # Skip header
            for row in reader:
                if row[0] != 'Total':
                    word_freq[row[0]] = int(row[1])
    except FileNotFoundError:
        print("No existing CSV file found. Starting fresh.")

# Function to update the CSV file
def update_csv():
    with lock:
        with open('word_frequencies.csv', mode='w', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(['Word', 'Frequency'])
            total_words = sum(word_freq.values())
            writer.writerow(['Total', total_words])
            for word, freq in sorted(word_freq.items(), key=lambda x: x[1], reverse=True):
                writer.writerow([word, freq])

# Function to send a single request and update word frequency
def query_openai():
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    # Randomly select a concept
    selected_concept = random.choice(concepts)
    data = {
        "model": "gpt-3.5-turbo",
        "temperature": 1.0,
        "messages": [
            {"role": "user", "content": f"describe {selected_concept} to me"},
            # Seed the assistant turn so the model simply continues the sentence
            {"role": "assistant", "content": "It is complex and "}
        ],
        "max_tokens": 5
    }
    try:
        response = requests.post("https://api.openai.com/v1/chat/completions", json=data, headers=headers)
        response.raise_for_status()
        result = response.json()
        # Keep only the first word of the continuation
        next_word = result["choices"][0]["message"]["content"].split()[0]
        # Normalize the word by removing dashes
        normalized_word = next_word.replace('-', '')
        with lock:
            word_freq[normalized_word] += 1
        update_csv()  # update_csv() acquires the lock itself, so call it outside the `with` block
        print(f"Next word: {normalized_word}")
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")

# Function to run threads concurrently, repeated by the main block
def run_queries():
    threads = []
    for _ in range(500):  # 500 threads per batch
        thread = threading.Thread(target=query_openai)
        thread.start()
        threads.append(thread)
        time.sleep(0.1)  # Slight delay to prevent rate limit issues
    for thread in threads:
        thread.join()

if __name__ == "__main__":
    read_csv()
    for _ in range(2):  # Run two batches of queries
        run_queries()
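
A minimal sketch (not part of the gist itself) for inspecting the results after a run. It assumes the word_frequencies.csv layout that update_csv() writes: a ['Word', 'Frequency'] header, a 'Total' row, then one row per word already sorted by descending frequency.

import csv

with open('word_frequencies.csv', newline='') as f:
    reader = csv.reader(f)
    next(reader)  # skip the ['Word', 'Frequency'] header
    rows = [(word, int(freq)) for word, freq in reader if word != 'Total']

# Rows are written in descending frequency order, so the first ten are the top ten.
for word, freq in rows[:10]:
    print(f"{word}: {freq}")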