Godot 4 OpenAI APIs addon (place the .tscn and script in a folder called godot-openai-simple inside your project's addons folder)
extends Node
## This is a script to query OpenAI for AI-generated NPC dialogue.
## You need an OpenAI API key for this to work; put it as a string in the API Key export variable.
## See https://platform.openai.com/
## OpenAI endpoints for speech to text, text generation, text to speech, image generation and image interpretation (vision) are supported.
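## Minimal usage sketch from another script (the node path and handler name here are
## illustrative assumptions, not part of this addon):
##   @onready var openai_node = $"godot-openai-simple"
##   func _ready():
##       openai_node.AI_text_response_generated.connect(_on_ai_reply)
##       openai_node.call_AI_text_response_url("Hello there!")
##   func _on_ai_reply(response):
##       print(response)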
# Signal used to alert speech recording started
signal STT_speech_recording
# Signal used to alert speech has been recorded
signal STT_speech_recorded(recording_path, recording_name)
# Signal used to alert that Speech to Text (STT) has been requested
signal STT_requested(recording_path, recording_name)
# Signal used to alert of STT response
signal STT_response_generated(response)
# Signal used to alert that GPT response has been requested
signal AI_text_response_requested(prompt)
# Signal used to alert of the final text GPT response
signal AI_text_response_generated(response)
# Signal used to alert that summary of message has been requested
signal AI_messages_summary_requested(messages)
# Signal used to alert that messages have been summarized if message cache used
signal AI_messages_summarized(summary)
# Signal used to alert that Text to Speech (TTS) has been requested
signal TTS_requested(prompt)
# Signal used to alert of final TTS response
signal TTS_response_generated(sound)
# Signal used to alert TTS response finished playing
signal TTS_speech_finished_playing
# Signal used to alert image response requested
signal image_generation_requested(prompt)
# Signal used to alert image response received
signal image_response_generated(texture)
# Signal used to alert vision response requested
signal vision_response_requested(base64_image_data)
# Signal used to alert vision response received
signal vision_response_generated(response)
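# Note: in Godot 4 any of these signals can also be awaited directly instead of connected,
# e.g. (illustrative): var reply = await AI_text_response_generated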
#Global OpenAI variables
@export var api_key: String = "insert your openai api key here": set = set_api_key
@export var base_openai_url: String = "https://api.openai.com/v1": set = set_base_openai_url
#STT variables
# See https://platform.openai.com/docs/guides/speech-to-text
@export var STT_model: String = "whisper-1":set = set_STT_model
@export var STT_language: String = "en":set = set_STT_language
@export var STT_temperature: float = 0.0:set = set_STT_temperature
var STT_url: String = "/audio/transcriptions"
var STT_headers: Array
var STT_boundary = "boundary"
var STT_audio_file
var STT_http_request
var STT_response_format = "text"
var interface_enabled : bool = false
var save_path : String
var record_effect = null
var micrecordplayer
# GPT Text response variables
# See https://platform.openai.com/docs/guides/text-generation/chat-completions-api
@export var AI_text_response_model : String = "gpt-3.5-turbo-1106":set = set_AI_text_response_model
@export var AI_text_response_temperature : float = 1.0:set = set_AI_text_response_temperature
@export var AI_text_response_top_p : float = 1.0:set= set_AI_text_response_top_p
@export var AI_text_response_frequency_penalty : float = 0.0:set = set_AI_text_response_frequency_penalty
@export var AI_text_response_presence_penalty : float = 0.0:set = set_AI_text_response_presence_penalty
@export var AI_text_response_stop_tokens : Array = []:set = set_AI_text_response_stop_tokens
@export var AI_text_response_max_tokens : int = 250:set = set_AI_text_response_max_tokens
@export var npc_background_directions: String = "You are a non-playable character in a video game. You are a robot. Your name is Bob. Your job is taping boxes of supplies. You love organization. You hate mess. Your boss is Robbie the Robot. Robbie is a difficult boss who makes a lot of demands. You respond to the user's questions as if you are in the video game world with the player.":set=set_npc_background_directions # Used to give GPT some instructions as to the character's background.
@export var sample_npc_question_prompt: String = "Hi, what do you do here?":set=set_sample_npc_question_prompt # Create a sample question that reinforces the NPC's character traits
@export var sample_npc_prompt_response: String = "Greetings fellow worker! My name is Bob and I am a robot. My job is to tape up the boxes in this factory before they are shipped out to our customers!":set=set_sample_npc_prompt_response # Create a sample response to the prompt above that reinforces the NPC's character traits
# Number of messages to cache; if 0 no caching of responses or summarizing is done. The more you cache the longer prompts will be, so be careful with your usage.
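# For example, with the default num_cache_messages = 4, summarizing is triggered once
# past_messages_array holds 8 entries (see the size check in call_AI_text_response_url).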
@export var num_cache_messages: int = 4:set=set_num_cache_messages
var AI_text_response_url = "/chat/completions"
var AI_text_response_headers: Array
var AI_text_response_http_request : HTTPRequest
var summarize_http_request : HTTPRequest
var past_messages_array : Array = []
# TTS response variables
# See https://platform.openai.com/docs/guides/text-to-speech
@export var TTS_model: String = "tts-1":set=set_TTS_model
@export var TTS_voice: String = "alloy":set=set_TTS_voice
@export var TTS_response_format: String = "mp3":set=set_TTS_response_format
@export var TTS_speed: float = 1.0:set=set_TTS_speed
var TTS_url: String = "/audio/speech"
var TTS_headers: Array
var TTS_http_request: HTTPRequest
var TTS_speech_player : AudioStreamPlayer
var TTS_stream : AudioStreamMP3
# Image generation variables
# See https://platform.openai.com/docs/api-reference/images/create
@export var AI_image_generation_model: String = "dall-e-3":set=set_AI_image_generation_model
@export var AI_image_generation_quality: String = "standard":set=set_AI_image_generation_quality
@export var AI_image_generation_size: String = "1024x1024":set=set_AI_image_generation_size
@export var AI_image_generation_style: String = "vivid":set=set_AI_image_generation_style
var AI_image_generation_response_format : String = "b64_json"
var AI_image_generation_http_request : HTTPRequest
var AI_image_url: String = "/images/generations"
var AI_image_headers: Array
# Vision response variables
# See https://platform.openai.com/docs/guides/vision
@export var AI_vision_response_model: String = "gpt-4-vision-preview":set=set_AI_vision_response_model
@export var AI_vision_response_detail_level: String = "low":set=set_AI_vision_response_detail_level #can be low or high
@export var AI_vision_response_prompt: String = "Describe what you see in this image.":set=set_AI_vision_response_prompt
var AI_vision_response_url = "/chat/completions"
var AI_vision_response_headers: Array
var AI_vision_response_http_request : HTTPRequest
func _ready():
# Set all headers for http requests
# STT header
STT_headers = ["Content-Type: multipart/form-data; boundary=" + STT_boundary, "Authorization: Bearer " + api_key]
# GPT header
AI_text_response_headers = ["Content-Type: application/json", "Authorization: Bearer " + api_key]
# TTS header
TTS_headers = ["Authorization: Bearer " + api_key, "Content-Type: application/json"]
# Image generation header
AI_image_headers = ["Authorization: Bearer " + api_key, "Content-Type: application/json"]
# Vision response header
AI_vision_response_headers = ["Authorization: Bearer " + api_key, "Content-Type: application/json"]
# set up all http request nodes and response functions
# set up http request for STT
STT_http_request = HTTPRequest.new()
add_child(STT_http_request)
STT_http_request.timeout = 10.0
STT_http_request.use_threads = true
STT_http_request.connect("request_completed", Callable(self, "_on_STT_request_completed"))
# set up http request for AI text responses
AI_text_response_http_request = HTTPRequest.new()
add_child(AI_text_response_http_request)
AI_text_response_http_request.connect("request_completed", Callable(self, "_on_AI_text_request_completed"))
# set up second http request node for calls to summarize_GPT function
summarize_http_request = HTTPRequest.new()
add_child(summarize_http_request)
summarize_http_request.connect("request_completed", Callable(self, "_on_summarize_request_completed"))
# set up http request for TTS
TTS_http_request = HTTPRequest.new()
add_child(TTS_http_request)
TTS_http_request.connect("request_completed", Callable(self, "_on_TTS_request_completed"))
# set up audio nodes for speech recording for STT
# Make sure audio input is enabled even if the project is not otherwise configured for it, to prevent inadvertent errors in use
ProjectSettings.set_setting("audio/driver/enable_input", true)
var current_number = 0
while AudioServer.get_bus_index("STTMicRecorder" + str(current_number)) != -1:
current_number += 1
var bus_name = "STTMicRecorder" + str(current_number)
var record_bus_idx = AudioServer.bus_count
AudioServer.add_bus(record_bus_idx)
AudioServer.set_bus_name(record_bus_idx, bus_name)
record_effect = AudioEffectRecord.new()
AudioServer.add_bus_effect(record_bus_idx, record_effect)
AudioServer.set_bus_mute(record_bus_idx, true)
micrecordplayer = AudioStreamPlayer.new()
add_child(micrecordplayer)
micrecordplayer.bus = bus_name
# Activate voice commands; comment this out if you prefer to control activation from another script
activate_voice_commands(true)
# Now set up nodes to play text to speech from API when received
# Create audio player node for speech playback
TTS_speech_player = AudioStreamPlayer.new()
TTS_speech_player.connect("finished", Callable(self, "_on_TTS_speech_player_finished"))
add_child(TTS_speech_player)
# Set up http request for image generation API (Dall-e)
AI_image_generation_http_request = HTTPRequest.new()
add_child(AI_image_generation_http_request)
AI_image_generation_http_request.connect("request_completed", Callable(self, "_on_AI_image_request_completed"))
# Set up http request for vision response API (GPT4-vision)
AI_vision_response_http_request = HTTPRequest.new()
add_child(AI_vision_response_http_request)
AI_vision_response_http_request.connect("request_completed", Callable(self, "_on_AI_vision_request_completed"))
#Sample commands to test functioning in the ready function (use independently, one at a time)
#start_voice_command()
#call_AI_text_response_url("Hey there, how are you doing today?")
#call_TTS_url("I hope you're enjoying A.I. Godot!")
#call_AI_image_url("A portrait of a blue robot coding on a computer")
#for gpt4-vision, you need a file in your user data directory called visiontestfile.jpeg, or change that name to match your test image
#var file_path="user://visiontestfile.jpeg"
#var file = FileAccess.open(file_path, FileAccess.READ)
#var content = file.get_buffer(file.get_length())
#file.close()
#var b64_image = Marshalls.raw_to_base64(content)
#call_AI_vision_response_url(b64_image, "jpeg")
# This is needed to activate the voice commands in the node for STT.
# Sample usage in another script: godot-openai-simple.activate_voice_commands(true)
func activate_voice_commands(value:bool):
print("WhisperAPI voice commands activated")
interface_enabled = value
if value:
if micrecordplayer.stream == null:
micrecordplayer.stream = AudioStreamMicrophone.new()
if !micrecordplayer.playing:
micrecordplayer.play()
else:
if micrecordplayer.playing:
micrecordplayer.stop()
micrecordplayer.stream = null
# Start voice capture for STT, e.g., on a button press or other event
# Sample usage from another script: godot-openai-simple.start_voice_command()
func start_voice_command():
print("Reading sound")
if !micrecordplayer.playing:
micrecordplayer.play()
record_effect.set_recording_active(true)
emit_signal("STT_speech_recording")
#If you wanted to automatically stop after a certain time after starting, the following would work
#await get_tree().create_timer(20.0).timeout
#end_voice_command()
# End voice capture for STT, e.g., on a button press or other event
# Sample usage from another script: godot-openai-simple.end_voice_command()
func end_voice_command():
print("stop reading sound")
var recording = record_effect.get_recording()
await get_tree().create_timer(1.0).timeout
record_effect.set_recording_active(false)
micrecordplayer.stop()
if OS.has_feature("editor"):
save_path = OS.get_user_data_dir().path_join("whisper_audio.wav")
elif OS.has_feature("android"):
save_path = OS.get_system_dir(OS.SYSTEM_DIR_DOCUMENTS, false).path_join("whisper_audio.wav")
else:
save_path = OS.get_executable_path().get_base_dir().path_join("whisper_audio.wav")
var check_ok = recording.save_to_wav(save_path)
#print(check_ok)
emit_signal("STT_speech_recorded", save_path, "whisper_audio.wav")
# This script currently assumes you want to go straight from recording to transcription; if you want control over that call instead, comment out the next line
call_STT_url(save_path, "whisper_audio.wav")
# Call OpenAI STT (Whisper) API with recorded audio
# Sample usage from another script: godot-openai-simple.call_STT_url(OS.get_user_data_dir().path_join("my_recorded_audio_file.wav"), "my_recorded_audio_file.wav")
func call_STT_url(file_path:String, file_name:String):
# Godot doesn't ship with built-in multipart form data support, so we build the multipart body by hand
# open the file that contains the recorded speech and get its binary content to use in the request
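# For reference, the assembled request body looks roughly like this on the wire
# (CRLF line breaks, with the raw WAV bytes spliced between the text parts):
#   --boundary
#   Content-Disposition: form-data; name="model"
#
#   whisper-1
#   --boundary
#   Content-Disposition: form-data; name="file"; filename="whisper_audio.wav"
#   Content-Type: audio/wav
#
#   <binary wav data>
#   --boundary--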
var file = FileAccess.open(file_path, FileAccess.READ)
var content = file.get_buffer(file.get_length())
file.close()
var post_fields = {
"model": STT_model,
"language": STT_language,
"temperature": str(STT_temperature), # send the exported STT temperature; the form field expects a string
"response_format": STT_response_format
}
var body1 = "\r\n\r\n"
for key in post_fields:
print("key found:" + str(key))
body1 += "--" + STT_boundary + "\r\n"
body1 += "Content-Disposition: form-data; name=\"" + key + "\"\r\n\r\n" + post_fields[key] + "\r\n"
body1 += "--" + STT_boundary + "\r\n"
body1 += "Content-Disposition: form-data; name=\"" + "file" + "\"; filename=\"" + file_name + "\"\r\nContent-Type: "
body1 += "audio/wav"
body1 += "\r\n\r\n"
var body2 = "\r\n" + "--" + STT_boundary + "--"
var post_content = body1.to_utf8_buffer() + content + body2.to_utf8_buffer()
print("making file http request")
#print("url is: " + base_openai_url + STT_url)
#print("headers are: " + str(STT_headers))
#print(body1 + "binary data, for example audio file data, inserted here" + body2)
#print(post_content)
emit_signal("STT_requested", file_path, file_name)
var err = STT_http_request.request_raw(base_openai_url+STT_url, STT_headers, HTTPClient.METHOD_POST, post_content)
print("err from request raw from STT http file post request is: " + str(err))
# Function to receive response to STT API call with audio file prompt
func _on_STT_request_completed(result, responseCode, headers, body):
# Should receive 200 if all is fine; if not, print the code
if responseCode != 200:
print("There was an error with Whisper API's response, response code:" + str(responseCode))
print(result)
print(headers)
print(body.get_string_from_utf8())
return
var STT_text_response = body.get_string_from_utf8()
#print("STT text generated: " + STT_text_response)
emit_signal("STT_response_generated", STT_text_response)
#You can uncomment the following line to proceed automatically to the next step, which would be calling the openai GPT model for a response to what the user said
#call_AI_text_response_url(STT_text_response)
# Function to call OpenAI ("ChatGPT") for a text response to the user prompt
# Sample usage from another script: godot-openai-simple.call_AI_text_response_url("Hi there, how are you doing today?")
# When using speech to text, typically you would pass the text result from the STT_response_generated signal to this method to get the AI response to what the user said.
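# For example, to chain STT straight into a text response via the signal instead
# (the "openai_node" reference is an illustrative assumption):
#   openai_node.STT_response_generated.connect(openai_node.call_AI_text_response_url)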
func call_AI_text_response_url(prompt:String):
# if using the past messages array and it has stored the max number of messages, call the summarize function, which summarizes the messages so far and clears the cache
if past_messages_array.size() >= 2 * num_cache_messages and past_messages_array.size() != 0:
summarize_AI_text_responses(past_messages_array)
await self.AI_messages_summarized
print("requesting AI text response")
var body = JSON.stringify({
"model": AI_text_response_model,
"messages": past_messages_array + [{"role": "system", "content": npc_background_directions}, {"role": "user", "content": sample_npc_question_prompt}, {"role": "assistant", "content": sample_npc_prompt_response}, {"role": "user", "content": prompt}],
"temperature": AI_text_response_temperature,
"top_p": AI_text_response_top_p,
"stop": AI_text_response_stop_tokens,
"max_tokens": AI_text_response_max_tokens,
"frequency_penalty": AI_text_response_frequency_penalty,
"presence_penalty": AI_text_response_presence_penalty,
})
# Now call GPT
emit_signal("AI_text_response_requested", prompt)
var error = AI_text_response_http_request.request(base_openai_url+AI_text_response_url, AI_text_response_headers, HTTPClient.METHOD_POST, body)
if error != OK:
push_error("Something Went Wrong!")
# If using past messages array, add prompt message to the cache
if num_cache_messages > 0:
past_messages_array.append({"role": "user", "content": prompt})
# This code is used to handle the response from the request to get the AI response (from "ChatGPT").
func _on_AI_text_request_completed(result, responseCode, headers, body):
# Should receive 200 if all is fine; if not, print the code
if responseCode != 200:
print("There was an error with AI text response, response code:" + str(responseCode))
print(result)
print(headers)
print(body.get_string_from_utf8())
return
var data = body.get_string_from_utf8()
#print ("Data received: %s"%data)
var json_conv = JSON.new()
json_conv.parse(data)
var response = json_conv.get_data()
var choices = response.choices[0]
var message = choices["message"]
var AI_generated_dialogue = message["content"]
# Store most recent response if using messages cache
if num_cache_messages > 0:
past_messages_array.append(message)
#print(past_messages_array)
# Let other nodes know that AI generated dialogue is ready from GPT
emit_signal("AI_text_response_generated", AI_generated_dialogue)
#You can uncomment the following line to proceed automatically to the next step, which would be Text to Speech generation
#call_TTS_url(AI_generated_dialogue)
# This summarizes an array of previous messages; this is needed because ChatGPT by itself doesn't keep track of the conversation, so without caching and summarizing messages, the AI would have no idea what came before.
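# For example, with num_cache_messages = 4 the cache collapses from eight entries like
#   [{"role": "user", ...}, {"role": "assistant", ...}, ...]
# down to a single [{"role": "assistant", "content": "<summary>"}] entry
# (see _on_summarize_request_completed below).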
func summarize_AI_text_responses(messages : Array):
print("having GPT summarize message cache")
#print(messages)
var body = JSON.stringify({
"model": AI_text_response_model,
"messages": messages + [{"role": "user", "content": "Summarize the most important points of our conversation so far without being too wordy."}],
"temperature": 0.5
})
emit_signal("AI_messages_summary_requested", messages)
var error = summarize_http_request.request(base_openai_url+AI_text_response_url, AI_text_response_headers, HTTPClient.METHOD_POST, body)
if error != OK:
push_error("Something Went Wrong!")
# Receiver function for summarize http request
func _on_summarize_request_completed(result, responseCode, headers, body):
# Should receive 200 if all is fine; if not, print the code
if responseCode != 200:
print("There was an error with GPT's summarize response, response code:" + str(responseCode))
print(result)
print(headers)
print(body.get_string_from_utf8())
return
var data = body.get_string_from_utf8()
#print ("Data received: %s"%data)
var test_json_conv = JSON.new()
test_json_conv.parse(data)
var response = test_json_conv.get_data()
var choices = response.choices[0]
var message = choices["message"]
var summary = message["content"]
#print("Summary was:" + summary)
# If using messages cache, clear messages array now that summary is prepared
if num_cache_messages > 0:
past_messages_array.clear()
# Now add summary to messages cache so it starts the new cache with the summary
past_messages_array.append(message)
#print(past_messages_array)
# Let other nodes know that summary was prepared
emit_signal("AI_messages_summarized", summary)
# Function to call OpenAI's text-to-speech API
# Sample usage from another script: godot-openai-simple.call_TTS_url("I'm doing well thank you, how about yourself?")
func call_TTS_url(text: String):
TTS_http_request.set_download_file("user://TTS_audio.mp3")
if TTS_stream == null:
TTS_stream = AudioStreamMP3.new()
var body = JSON.stringify({
"model": TTS_model,
"voice": TTS_voice,
"response_format": TTS_response_format,
"speed": TTS_speed,
"input": text
})
# Now call OpenAI TTS
emit_signal("TTS_requested", text)
var error = TTS_http_request.request(base_openai_url+TTS_url, TTS_headers, HTTPClient.METHOD_POST, body)
if error != OK:
push_error("Something Went Wrong!")
# Receiver function for the OpenAI TTS request
func _on_TTS_request_completed(result, responseCode, headers, body):
# Should receive 200 if all is fine; if not, print the code
if responseCode != 200:
print("There was an error with TTS response, response code:" + str(responseCode))
print(result)
print(headers)
print(body.get_string_from_utf8())
return
var file = FileAccess.open("user://TTS_audio.mp3", FileAccess.READ)
var bytes = file.get_buffer(file.get_length())
TTS_stream.data = bytes
TTS_speech_player.set_stream(TTS_stream)
TTS_speech_player.play()
emit_signal("TTS_response_generated", "user://TTS_audio.mp3")
# Receiver function for when speech player finishes
func _on_TTS_speech_player_finished():
emit_signal("TTS_speech_finished_playing")
# Function to call OpenAI's image generation (DALL-E) API
# Sample usage from another script: godot-openai-simple.call_AI_image_url("A cartoon picture of a blue robot coding on the computer")
func call_AI_image_url(prompt:String):
print("requesting AI image response")
var body = JSON.stringify({
"model": AI_image_generation_model,
"prompt": prompt,
"quality": AI_image_generation_quality,
"response_format": AI_image_generation_response_format,
"size": AI_image_generation_size,
"style": AI_image_generation_style,
})
# Now call DALL-E
emit_signal("image_generation_requested", prompt)
var error = AI_image_generation_http_request.request(base_openai_url+AI_image_url, AI_image_headers, HTTPClient.METHOD_POST, body)
if error != OK:
push_error("Something Went Wrong!")
# Receiver function for AI image generation
func _on_AI_image_request_completed(result, responseCode, headers, body):
# Should receive 200 if all is fine; if not, print the code
if responseCode != 200:
print("There was an error with AI image request response, response code:" + str(responseCode))
print(result)
print(headers)
print(body.get_string_from_utf8())
return
var data = body.get_string_from_utf8()
#print ("Data received: %s"%data)
var json_conv = JSON.new()
json_conv.parse(data)
var response = json_conv.get_data()
var image_data_list = response.data[0]
var image_data = image_data_list['b64_json']
var image = Image.new()
var error = image.load_png_from_buffer(Marshalls.base64_to_raw(image_data)) # the images API returns PNG data in b64_json, so decode it as PNG
if error != OK:
print(error)
var _texture = ImageTexture.create_from_image(image) # in Godot 4, create_from_image is a static constructor that returns the texture
#usage example: $Sprite3D.texture = _texture
# Let other nodes know that the AI-generated image texture is ready
emit_signal("image_response_generated", _texture)
# Function to call OpenAI's vision model (GPT-4 Vision) API
# Sample usage from another script: godot-openai-simple.call_AI_vision_response_url(my_image_data_in_b64_encoded_format_string, "jpeg", "Pretending to be the Godot robot, describe in character what you see in this image")
# The Godot Marshalls raw_to_base64 function is useful in transforming PackedByteArray data into a base64 encoded string
# Technically the vision API supports multiple images but for simplicity this method only anticipates one image
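# A minimal sketch of producing that base64 string from the current viewport
# (Image.save_jpg_to_buffer() and Marshalls.raw_to_base64() are standard Godot 4 calls;
# capturing the viewport is just one illustrative source):
#   var img = get_viewport().get_texture().get_image()
#   call_AI_vision_response_url(Marshalls.raw_to_base64(img.save_jpg_to_buffer()), "jpeg")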
func call_AI_vision_response_url(b64_image_data, image_type:String="jpeg", prompt:String=AI_vision_response_prompt):
print("requesting AI vision response")
var body = JSON.stringify({
"model": AI_vision_response_model,
"messages": [{"role": "user", "content": [{"type":"text", "text":prompt}, {"type":"image_url","image_url":{"url":"data:image/"+image_type+";base64,"+b64_image_data, "detail": AI_vision_response_detail_level}}]}],
})
# Now call GPT
emit_signal("vision_response_requested", b64_image_data)
var error = AI_vision_response_http_request.request(base_openai_url+AI_vision_response_url, AI_vision_response_headers, HTTPClient.METHOD_POST, body)
if error != OK:
push_error("Something Went Wrong!")
# This code handles the response from the request to the vision model.
func _on_AI_vision_request_completed(result, responseCode, headers, body):
# Should receive 200 if all is fine; if not, print the code
if responseCode != 200:
print("There was an error with AI vision response, response code:" + str(responseCode))
print(result)
print(headers)
print(body.get_string_from_utf8())
return
var data = body.get_string_from_utf8()
#print ("Data received: %s"%data)
var json_conv = JSON.new()
json_conv.parse(data)
var response = json_conv.get_data()
var choices = response.choices[0]
var message = choices["message"]
var AI_vision_response = message["content"]
#print(AI_vision_response)
# Let other nodes know that vision response is ready from GPT
emit_signal("vision_response_generated", AI_vision_response)
# All setter functions for godot-openai-simple
#Global OpenAI variables
func set_api_key(new_api_key : String):
api_key = new_api_key
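# Note: the Authorization headers are built once in _ready(), so an API key changed at
# runtime will not take effect until the *_headers arrays are rebuilt with the new key.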
func set_base_openai_url(new_base_url: String):
base_openai_url = new_base_url
#STT variables
func set_STT_model(new_STT_model: String):
STT_model = new_STT_model
func set_STT_language(new_STT_language: String):
STT_language = new_STT_language
func set_STT_temperature(new_STT_temperature: float):
STT_temperature = new_STT_temperature
# GPT Text response variables
func set_AI_text_response_model(new_AI_text_response_model: String):
AI_text_response_model = new_AI_text_response_model
func set_AI_text_response_temperature(new_AI_text_response_temperature: float):
AI_text_response_temperature = new_AI_text_response_temperature
func set_AI_text_response_top_p(new_AI_text_response_top_p: float):
AI_text_response_top_p = new_AI_text_response_top_p
func set_AI_text_response_frequency_penalty(new_AI_text_response_frequency_penalty: float):
AI_text_response_frequency_penalty = new_AI_text_response_frequency_penalty
func set_AI_text_response_presence_penalty(new_AI_text_response_presence_penalty: float):
AI_text_response_presence_penalty = new_AI_text_response_presence_penalty
func set_AI_text_response_stop_tokens(new_AI_text_response_stop_tokens:Array):
AI_text_response_stop_tokens = new_AI_text_response_stop_tokens
func set_AI_text_response_max_tokens(new_AI_text_response_max_tokens: int):
AI_text_response_max_tokens = new_AI_text_response_max_tokens
func set_npc_background_directions(new_npc_background_directions: String):
npc_background_directions = new_npc_background_directions
func set_sample_npc_question_prompt(new_sample_npc_question_prompt: String):
sample_npc_question_prompt = new_sample_npc_question_prompt
func set_sample_npc_prompt_response(new_sample_npc_prompt_response: String):
sample_npc_prompt_response = new_sample_npc_prompt_response
func set_num_cache_messages(new_num_cache_messages:int):
num_cache_messages = new_num_cache_messages
# TTS response variables
func set_TTS_model(new_TTS_model: String):
TTS_model = new_TTS_model
func set_TTS_voice(new_TTS_voice: String):
TTS_voice = new_TTS_voice
func set_TTS_response_format(new_TTS_response_format: String):
TTS_response_format = new_TTS_response_format
func set_TTS_speed(new_TTS_speed: float):
TTS_speed = new_TTS_speed
# Image Generation response variables
func set_AI_image_generation_model(new_AI_image_generation_model:String):
AI_image_generation_model = new_AI_image_generation_model
func set_AI_image_generation_quality(new_AI_image_generation_quality:String):
AI_image_generation_quality = new_AI_image_generation_quality
func set_AI_image_generation_size(new_AI_image_generation_size:String):
AI_image_generation_size = new_AI_image_generation_size
func set_AI_image_generation_style(new_AI_image_generation_style:String):
AI_image_generation_style = new_AI_image_generation_style
# AI Vision response variables
func set_AI_vision_response_model(new_AI_vision_response_model:String):
AI_vision_response_model = new_AI_vision_response_model
func set_AI_vision_response_detail_level(new_AI_vision_response_detail_level:String):
AI_vision_response_detail_level = new_AI_vision_response_detail_level
func set_AI_vision_response_prompt(new_AI_vision_response_prompt: String):
AI_vision_response_prompt = new_AI_vision_response_prompt
# Below is the companion scene file, godot-openai-simple.tscn (a separate file in the same folder):
[gd_scene load_steps=2 format=3 uid="uid://bxb4dwa0c6xq4"]
[ext_resource type="Script" path="res://addons/godot-openai-simple/godot-openai-simple.gd" id="1_oognv"]
[node name="godot-openai-simple" type="Node"]
script = ExtResource("1_oognv")
MIT License