Last active
November 26, 2023 02:22
-
-
Save ChenZhongPu/e826346e42494b6840a7f9361b6060a3 to your computer and use it in GitHub Desktop.
gpt stream
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import "./App.css"; | |
import React, { useState, useEffect } from "react"; | |
import Markdown from "react-markdown"; | |
import remarkGfm from "remark-gfm"; | |
function App() { | |
const [typingText, setTypingText] = useState(""); | |
useEffect(() => { | |
const apiEndpoint = | |
"http://127.0.0.1:8000"; // Replace with your actual streaming API endpoint | |
const fetchData = async () => { | |
try { | |
const response = await fetch(apiEndpoint); | |
if (!response.ok) { | |
throw new Error("Network response was not ok"); | |
} | |
const reader = response.body.getReader(); | |
let result = await reader.read(); | |
let text = ""; | |
const updateText = () => { | |
setTypingText((prevText) => prevText + text); | |
}; | |
const processChunk = () => { | |
text = new TextDecoder().decode(result.value); | |
updateText(); | |
if (!result.done) { | |
reader.read().then((nextResult) => { | |
result = nextResult; | |
processChunk(); | |
}); | |
} | |
}; | |
processChunk(); | |
} catch (error) { | |
console.error( | |
"There was a problem fetching the streaming text:", | |
error, | |
); | |
} | |
}; | |
fetchData(); | |
}, []); | |
return ( | |
<div> | |
<Markdown remarkPlugins={[remarkGfm]}>{typingText}</Markdown> | |
</div> | |
); | |
} | |
export default App; |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# OpenAI-compatible client used by stream_gpt() below.
# NOTE(review): "xxx" are placeholders — never commit real credentials;
# load api_key/base_url from environment variables or a secrets manager.
client = OpenAI(api_key="xxx",
                base_url="xxx")
def stream_gpt():
    """Yield assistant text fragments from a streaming chat completion.

    Returns a generator of str chunks (Markdown) as they arrive, suitable
    for use as the body of a StreamingResponse.
    """
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            # Fixed typo in the prompt: "jork" -> "joke".
            {"role": "user", "content": "Tell me a joke. At least 200 words in Markdown format using head, bold, italic, list and link."},
        ],
        stream=True,
    )
    for chunk in response:
        content = chunk.choices[0].delta.content
        # Role-only and terminal deltas carry content=None; skip them so the
        # consumer only ever sees real text.
        if content is not None:
            yield content
@app.get("/")
async def web():
    """Root endpoint: relay the model's token stream straight to the client."""
    from fastapi.responses import StreamingResponse

    token_stream = stream_gpt()
    return StreamingResponse(token_stream, media_type="text/html")
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.