Last active
February 29, 2024 10:26
-
-
Save Lissy93/54b3d34ce376e63efb36cbf61620298b to your computer and use it in GitHub Desktop.
A REST API for fetching the response from a custom GPT assistant via the OpenAI API. Runs as a Cloudflare Worker.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/**
 * A REST API for fetching the response from
 * a custom GPT assistant via the OpenAI API
 * Deployed as a simple Cloudflare Worker
 * Licensed under MIT (C) Alicia Sykes 2024
 */
(() => {
  addEventListener("fetch", (event) => {
    event.respondWith(handleRequest(event.request));
  });

  /**
   * Thin wrapper around fetch that rejects on non-2xx responses
   * and parses the JSON body.
   * @param {string} url - Fully-qualified OpenAI endpoint URL.
   * @param {RequestInit} options - Fetch options (method, headers, body).
   * @returns {Promise<object>} The parsed JSON response body.
   * @throws {Error} When the HTTP response status is not ok.
   */
  async function fetchOpenAI(url, options) {
    const response = await fetch(url, options);
    if (!response.ok) {
      throw new Error(`OpenAI API request failed: ${response.statusText}`);
    }
    return response.json();
  }

  /**
   * Answers an incoming request by taking the final URL path segment as
   * the user's question, running it through the configured OpenAI
   * assistant (create thread -> add message -> run -> poll -> read reply),
   * and returning the assistant's answer as a JSON string.
   * Required worker env vars: OPENAI_API_KEY, ASSISTANT_ID.
   * Optional: ALLOWED_ORIGINS (JSON array of origins; empty = allow any).
   * @param {Request} request - The incoming worker request.
   * @returns {Promise<Response>} JSON response with CORS headers applied.
   */
  async function handleRequest(request) {
    const allowedOrigins = JSON.parse(ALLOWED_ORIGINS || "[]");
    const origin = request.headers.get("Origin");
    const responseHeaders = {
      "Content-Type": "application/json",
      "Access-Control-Allow-Origin": "null",
      "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
      "Access-Control-Allow-Headers": "Content-Type, Authorization"
    };
    // An empty allow-list means "allow any origin".
    if (allowedOrigins.length < 1 || allowedOrigins.includes(origin)) {
      responseHeaders["Access-Control-Allow-Origin"] = origin;
    }

    // Answer CORS preflight requests without touching the OpenAI API.
    if (request.method === "OPTIONS") {
      return new Response(null, { status: 204, headers: responseHeaders });
    }

    if (!OPENAI_API_KEY) {
      throw new Error("OPENAI_API_KEY is required");
    }
    if (!ASSISTANT_ID) {
      throw new Error("ASSISTANT_ID is required");
    }

    // Validate the prompt BEFORE creating a thread, so invalid requests
    // never cost an OpenAI API call (the original created a thread first).
    const url = new URL(request.url);
    const pathSegments = url.pathname.split("/");
    let userPrompt;
    try {
      // Path segments arrive percent-encoded (e.g. spaces as %20).
      userPrompt = decodeURIComponent(pathSegments[pathSegments.length - 1]);
    } catch {
      // Malformed percent-encoding in the path.
      return new Response("[]", { status: 400, headers: responseHeaders });
    }
    const isValidInput =
      typeof userPrompt === "string" && userPrompt.length > 0 && userPrompt.length <= 200;
    if (!isValidInput) {
      return new Response("[]", { status: 400, headers: responseHeaders });
    }

    const headers = {
      "Authorization": `Bearer ${OPENAI_API_KEY}`,
      "Content-Type": "application/json",
      "OpenAI-Beta": "assistants=v1"
    };

    // 1. Create a fresh conversation thread.
    const thread = await fetchOpenAI("https://api.openai.com/v1/threads", {
      method: "POST",
      headers
    });

    // 2. Add the user's message to the thread.
    await fetchOpenAI(`https://api.openai.com/v1/threads/${thread.id}/messages`, {
      method: "POST",
      headers,
      body: JSON.stringify({ role: "user", content: userPrompt })
    });

    // 3. Run the assistant against the thread.
    const run = await fetchOpenAI(`https://api.openai.com/v1/threads/${thread.id}/runs`, {
      method: "POST",
      headers,
      body: JSON.stringify({ assistant_id: ASSISTANT_ID })
    });

    // 4. Poll until the run reaches a terminal state. The original only
    // checked "completed"/"failed", which loops forever if the run ends
    // "cancelled", "expired" or "requires_action"; it also polled every
    // 10 ms, which needlessly hammers the API.
    const terminalStates = ["completed", "failed", "cancelled", "expired", "requires_action"];
    let runStatus = run.status;
    while (!terminalStates.includes(runStatus)) {
      await new Promise((resolve) => setTimeout(resolve, 500));
      const updatedRun = await fetchOpenAI(
        `https://api.openai.com/v1/threads/${thread.id}/runs/${run.id}`,
        { method: "GET", headers }
      );
      runStatus = updatedRun.status;
    }

    // 5. Read the thread's messages and extract the assistant's reply.
    const messages = await fetchOpenAI(`https://api.openai.com/v1/threads/${thread.id}/messages`, {
      method: "GET",
      headers
    });
    const assistantMessages = messages.data
      .filter((message) => message.role === "assistant")
      .map((message) => message.content);
    // First content part of the newest assistant message, or "[]" if none.
    const answer = assistantMessages?.[0]?.[0]?.text?.value ?? "[]";
    return new Response(JSON.stringify(answer), { headers: responseHeaders });
  }
})();
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
About
You can use a custom GPT programmatically by creating an Assistant through the OpenAI dashboard (platform.openai.com/assistants).
Passing a message, and getting a response from such an assistant requires several steps, and as such it makes sense to create a little micro-service to handle these interactions. This is especially true if you plan on using this through a frontend application, as it wouldn't be secure to directly make requests to OpenAI, as your API key would be out in the clear.
The above script is just an example, to get you started. You'll likely need to modify it to fit your requirements.
Deployment
npm i -g wrangler
wrangler init sponsors-api
cd sponsors-api
and npm install
Edit wrangler.toml and add your API key to the OPENAI_API_KEY env var
Run wrangler dev to preview locally
wrangler deploy
to deploy your app to the world! Alternatively, just log in to Cloudflare, go to "Workers" then "New" / "Quick Edit".
And then paste the above script in, and then hit "Save & Deploy". Done :)
The following environmental variables will be required:
OPENAI_API_KEY
- Your OpenAI API key
ASSISTANT_ID
- The ID of the assistant you've created
You can now make requests to
https://[project-name].workers.dev/[users-question]
🚀License
Licensed under MIT, © Alicia Sykes 2024