Skip to content

Instantly share code, notes, and snippets.

@johnlindquist
Created November 22, 2024 02:16
Show Gist options
  • Save johnlindquist/fd5fe47b379626101faaabee70da5452 to your computer and use it in GitHub Desktop.
/*
# Chat with ChatGPT
## <span class="text-primary">👉 Note: LangChain is still in development. This script will keep updating to use the latest APIs</span>
Use `Kit` -> `Manage npm Packages` -> `Update a Package` -> `langchain` to install the latest version.
- Opens the `chat` component
- Type a message and press `enter` to send
- The message is sent to the OpenAI API
- The response from OpenAI is displayed in the chat
- Repeat!
*/
// Name: ChatGPT
// Description: Have a Conversation with an AI
// Author: John Lindquist
// Twitter: @johnlindquist
import "@johnlindquist/kit";
import { ChatOpenAI } from "@langchain/openai";
import {
ChatPromptTemplate,
MessagesPlaceholder,
} from "@langchain/core/prompts";
import {
START,
END,
MessagesAnnotation,
StateGraph,
MemorySaver,
} from "@langchain/langgraph";
import { trimMessages } from "@langchain/core/messages";
// --- Session state and transcript file setup ---
// Handle for the "slow response" timer; -1 means no timer is pending.
let id: NodeJS.Timeout | number = -1;
// Accumulates the streamed tokens of the message currently being generated.
let currentMessage = ``;

// Each chat session is logged to its own timestamped markdown file
// under <kenv>/chats/.
const chatsDir = kenvPath("chats");
await ensureDir(chatsDir);
const chatFile = kenvPath(
  `chats`,
  `${formatDate(new Date(), "yyyy-MM-dd-T-HH-mm-ss")}.md`
);
// Streaming chat model. The callbacks drive the Script Kit chat UI:
// each token is appended to the last chat message as it arrives, and
// the transcript file is updated incrementally.
const llm = new ChatOpenAI({
  modelName: "gpt-4o-mini",
  temperature: 0.7,
  streaming: true,
  callbacks: [
    {
      handleLLMStart: async () => {
        // If no token arrives within 3s, show a "taking a long time" notice.
        id = setTimeout(() => {
          chat.setMessage(
            -1,
            md(`### Sorry, the AI is taking a long time to respond.`)
          );
          setLoading(true);
        }, 3000);
        currentMessage = ``;
        chat.addMessage("");
      },
      handleLLMNewToken: async (token: string) => {
        clearTimeout(id);
        setLoading(false);
        if (!token) return;
        currentMessage += token;
        // Re-render the in-progress message as markdown on every token.
        let htmlMessage = md(currentMessage);
        chat.setMessage(-1, htmlMessage);
        appendFile(chatFile, token);
      },
      handleLLMError: async (err: Error) => {
        clearTimeout(id);
        // Clear the spinner in case the 3s "slow response" timeout fired.
        setLoading(false);
        // FIX: JSON.stringify(err) on an Error produces "{}" because
        // `message`/`stack` are non-enumerable. Serialize the useful
        // fields explicitly so the warning is actually informative.
        warn(
          `error`,
          JSON.stringify({ name: err.name, message: err.message, stack: err.stack })
        );
        running = false;
      },
      handleLLMEnd: async () => {
        clearTimeout(id);
        // Clear the spinner in case the 3s "slow response" timeout fired.
        setLoading(false);
        running = false;
      },
    },
  ],
});
// Prompt template: a fixed system instruction followed by the running
// conversation history injected via the "messages" placeholder.
const SYSTEM_INSTRUCTION =
  "You are a helpful and friendly AI assistant. Answer all questions to the best of your ability while being concise.";

const prompt = ChatPromptTemplate.fromMessages([
  ["system", SYSTEM_INSTRUCTION],
  new MessagesPlaceholder("messages"),
]);

// Keep the conversation history within a rough token budget by dropping
// the oldest turns first ("last" strategy keeps the most recent ones).
const trimmer = trimMessages({
  maxTokens: 4000, // Adjust based on your needs
  strategy: "last",
  tokenCounter: (msgs) => msgs.length * 4, // Rough estimate (~4 tokens/message)
  includeSystem: true, // always retain the system message
  allowPartial: false,
  startOn: "human",
});
// Graph node: trim the accumulated history, run it through the prompt +
// model chain, and return the assistant's reply to append to graph state.
const callModel = async (state: typeof MessagesAnnotation.State) => {
  const history = await trimmer.invoke(state.messages);
  const response = await prompt
    .pipe(llm)
    .invoke(
      { messages: history },
      // Passing the abort signal lets Escape cancel mid-generation.
      { signal: controller?.signal }
    );
  return { messages: [response] };
};
// Single-node graph: START -> model -> END.
const workflow = new StateGraph(MessagesAnnotation)
  .addNode("model", callModel)
  .addEdge(START, "model")
  .addEdge("model", END);

// Compile with an in-memory checkpointer so conversation history is
// persisted per thread for the lifetime of this process.
const app = workflow.compile({ checkpointer: new MemorySaver() });

// A fresh thread id per script run keeps this session's history isolated.
const config = { configurable: { thread_id: uuid() } };

// Abort controller for the in-flight request (null while idle).
let controller: AbortController | null = null;
// True while a model invocation is in progress.
let running = false;
// Main chat UI loop. Each submitted message is logged to the transcript
// file and sent through the LangGraph app; streaming output is handled
// by the model callbacks above.
await chat({
  placeholder: "Chat with AI",
  shortcuts: [
    {
      name: "Open chat log",
      key: `${cmd}+l`,
      onPress: async () => {
        await edit(chatFile);
      },
      bar: "right",
    },
    {
      name: "Close",
      key: `${cmd}+w`,
      onPress: () => {
        process.exit();
      },
      bar: "right",
    },
  ],
  // Escape aborts the in-flight generation, if any.
  onEscape: async () => {
    if (running) {
      controller?.abort();
      running = false;
      clearTimeout(id);
    }
  },
  onSubmit: async (input = "") => {
    if (!input) return;
    running = true;
    controller = new AbortController();
    try {
      // Create input message
      const messages = [
        {
          role: "user",
          content: input,
        },
      ];
      // Log the user's turn to the transcript before invoking the model.
      appendFile(chatFile, `\n\n${input}\n\n`);
      await app.invoke({ messages }, config);
    } catch (error) {
      // FIX: under strict TS (`useUnknownInCatchVariables`) `error` is
      // `unknown`; narrow before reading `.message` instead of assuming
      // it is an Error (aborts may reject with non-Error values).
      const message = error instanceof Error ? error.message : String(error);
      console.log(`Error: ${message}`);
    } finally {
      running = false;
    }
  },
});
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment