# Gist by @Maxime66410, created September 12, 2024 10:21
import os

# Run Bark on the CPU with the smaller model variants to reduce memory usage.
os.environ["SUNO_OFFLOAD_CPU"] = "True"
os.environ["SUNO_USE_SMALL_MODELS"] = "True"

# Install missing dependencies on the fly, then import them.
try:
    import openai
except ImportError:
    os.system("pip install openai")
    import openai

try:
    from bark import SAMPLE_RATE, generate_audio, preload_models
except ImportError:
    os.system("pip install git+https://github.com/suno-ai/bark.git")
    from bark import SAMPLE_RATE, generate_audio, preload_models

try:
    import torch
except ImportError:
    os.system("pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117")
    import torch

try:
    from scipy.io.wavfile import write as write_wav
except ImportError:
    os.system("pip install scipy")
    from scipy.io.wavfile import write as write_wav

try:
    from IPython.display import Audio
except ImportError:
    os.system("pip install ipython")
    from IPython.display import Audio

try:
    import numpy
except ImportError:
    os.system("pip install numpy==1.24.1")
    import numpy

try:
    import soundfile
except ImportError:
    os.system("pip install soundfile")
    import soundfile
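
# Optional check (assumption, not in the original gist): confirm whether a CUDA
# GPU is visible to torch. Bark runs on the CPU here anyway because
# SUNO_OFFLOAD_CPU is set above.
# print("CUDA available:", torch.cuda.is_available())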

# Set up the OpenAI API key.
api_key = ""
openai.api_key = api_key
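
# Optional fallback (assumption, not in the original gist): read the key from
# the OPENAI_API_KEY environment variable when none is hard-coded above.
if not api_key:
    openai.api_key = os.environ.get("OPENAI_API_KEY", "")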

# Function to send a message to the OpenAI chatbot model and return its response
def send_message(message_log):
    # Use OpenAI's ChatCompletion API to get the chatbot's response
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",  # The name of the OpenAI chatbot model to use
        messages=message_log,   # The conversation history up to this point, as a list of dictionaries
        max_tokens=3800,        # The maximum number of tokens (words or subwords) in the generated response
        stop=None,              # The stopping sequence for the generated response, if any (not used here)
        temperature=0.7,        # The "creativity" of the generated response (higher temperature = more creative)
    )

    # Chat completions carry their text in message.content (not a top-level
    # "text" field), so return the content of the first choice (which may be empty).
    return response.choices[0].message.content
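
# For reference (assumption about the response shape): a chat completion looks
# roughly like {"choices": [{"message": {"role": "assistant", "content": "..."}}], ...},
# which is why the reply text is read from choices[0].message.content above.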

# Main function that runs the chatbot
def main():
    # Initialize the conversation history with a system message
    message_log = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]

    # Set a flag to keep track of whether this is the first request in the conversation
    first_request = True

    # Start a loop that runs until the user types "quit"
    while True:
        if first_request:
            # If this is the first request, get the user's input and add it to the conversation history
            user_input = input("You: ")
            message_log.append({"role": "user", "content": user_input})

            # Send the conversation history to the chatbot and get its response
            response = send_message(message_log)

            # Add the chatbot's response to the conversation history and print it to the console
            message_log.append({"role": "assistant", "content": response})
            print(f"AI assistant: {response}")

            # Speak the response with Bark and save it as a WAV file
            text_to_prompt = f"""{response}"""
            audio_array = generate_audio(text_to_prompt)
            write_wav("AS_generation.wav", SAMPLE_RATE, audio_array)
            Audio(audio_array, rate=SAMPLE_RATE)

            # Set the flag to False so that this branch is not executed again
            first_request = False
        else:
            # If this is not the first request, get the user's input and add it to the conversation history
            user_input = input("You: ")

            # If the user types "quit", end the loop and print a goodbye message
            if user_input.lower() == "quit":
                print("Goodbye!")
                break

            message_log.append({"role": "user", "content": user_input})

            # Send the conversation history to the chatbot and get its response
            response = send_message(message_log)

            # Add the chatbot's response to the conversation history and print it to the console
            message_log.append({"role": "assistant", "content": response})
            print(f"AI assistant: {response}")

            # Speak the response with Bark and save it as a WAV file
            text_to_prompt = f"""{response}"""
            audio_array = generate_audio(text_to_prompt)
            write_wav("AS_generation.wav", SAMPLE_RATE, audio_array)
            Audio(audio_array, rate=SAMPLE_RATE)


# Call the main function if this file is executed directly (not imported as a module)
if __name__ == "__main__":
    # Download the Bark models once before entering the chat loop
    preload_models()
    main()
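
# Optional variation (assumption, not in the original gist): Bark can condition
# the generated voice on a speaker preset via the history_prompt argument, e.g.
#     audio_array = generate_audio(text_to_prompt, history_prompt="v2/en_speaker_6")
# The resulting array can also be written with the already-imported soundfile:
#     soundfile.write("AS_generation.wav", audio_array, SAMPLE_RATE)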