Chat Route
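// Chat route handler (file path not shown in the gist; the handler reads a `threadId` route param).
// Streams a chat completion with the Vercel AI SDK's `streamText` (GPT-4o), layering a
// workflow-specific prompt and a workspaceId onto the base system prompt, allowing
// multi-step tool calls for document create/update, persisting the conversation via
// `saveChat` in onFinish, and flagging truncated context to the client through an
// `x-note-truncated` response header.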
import { NextResponse } from 'next/server'
import { streamText, createIdGenerator, appendResponseMessages, appendClientMessage, Message } from 'ai'
import { ToolCallRepairOptions } from '@/types/tools'
import { emitDocumentEvent } from '@/lib/events/documentEvents'
import { openai } from '@ai-sdk/openai'
import { getSupabaseServer } from '@/lib/supabase-server'
import { toolSet } from '@/lib/ai/toolDefs'
import { createOrUpdateNote } from '@/lib/notesClient'
import { saveChat } from '@/lib/threads/messageStore'
import { getPromptComponents } from '@/lib/ai/contextManager'
import { type AIMessage } from '@/types/thread'
import { loadWorkflowConfig } from '@/utils/workflowLoader'

// Create ID generators with recommended format
const generateClientId = createIdGenerator({ prefix: 'msgc', size: 16 })
const generateServerId = createIdGenerator({ prefix: 'msgs', size: 16 })

// Helper to extract doc content from message
function extractDocContent(content: string) {
  const startMarker = '[DOC_CONTENT_START]'
  const endMarker = '[DOC_CONTENT_END]'
  const startIndex = content.indexOf(startMarker)
  const endIndex = content.indexOf(endMarker)
  if (startIndex === -1 || endIndex === -1) {
    return null
  }
  return content.slice(startIndex + startMarker.length, endIndex).trim()
}

// Helper to get output doc name from workflow state
function getOutputDoc(workflowState: any, workflowConfig: any) {
  try {
    const { currentStageIndex, currentSectionIndex } = workflowState
    const stage = workflowConfig.workflow[currentStageIndex]
    const section = stage?.sections[currentSectionIndex]
    return section?.output
  } catch (error) {
    console.error('Failed to get output doc:', error)
    return null
  }
}

export async function POST(req: Request, context: { params: { threadId: string } }) {
  try {
    // Get authenticated user
    const supabase = await getSupabaseServer()
    const { data: { user }, error: authError } = await supabase.auth.getUser()
    if (authError || !user) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    // Get threadId from route params
    const { threadId } = await context.params
    if (!threadId) {
      return NextResponse.json({ error: 'Missing thread ID' }, { status: 400 })
    }

    // Parse and validate the request body
    let body
    try {
      body = await req.json()
    } catch (error) {
      console.error('Failed to parse request body:', error)
      return NextResponse.json(
        { error: 'Invalid request body' },
        { status: 400 }
      )
    }

    // Verify messages array exists in body
    if (!body || !Array.isArray(body.messages)) {
      console.error('Missing messages array in request body:', body)
      return NextResponse.json(
        { error: 'Missing messages array in request body' },
        { status: 400 }
      )
    }

    // Get the latest user message
    const userMessage = body.messages[body.messages.length - 1] as Message
    console.log('userMessage', userMessage)

    // Validate message format
    if (!userMessage || !['user', 'assistant'].includes(userMessage.role) ||
        typeof userMessage.content !== 'string') {
      console.error('Invalid message format:', userMessage)
      return NextResponse.json(
        { error: 'Invalid message format. Expected { role: "user"|"assistant", content: string }' },
        { status: 400 }
      )
    }

    // Build the prompt & get truncated conversation using our context manager
    const { systemPrompt: baseSystemPrompt, messages: contextMessages, truncated } = await getPromptComponents(threadId)

    // Get workflow configuration if provided in the request
    let workflowPrompt = ''
    if (body.workflowState) {
      try {
        const { currentStageIndex, currentSectionIndex } = body.workflowState
        const workflowConfig = loadWorkflowConfig()
        if (currentStageIndex !== undefined && currentSectionIndex !== undefined) {
          const stage = workflowConfig.workflow[currentStageIndex]
          if (stage && stage.sections[currentSectionIndex]) {
            const section = stage.sections[currentSectionIndex]
            workflowPrompt = `
Current Stage: ${stage.stage}
Section: ${section.type}
User Responsibility: ${section.user_responsibility}
AI Task: ${section.ai_responsibility.description}
${section.ai_responsibility.prompt}
`
          }
        }
      } catch (error) {
        console.error('Failed to load workflow prompt:', error)
        // Continue without workflow prompt rather than failing
      }
    }
    console.log('workflowPrompt: ', workflowPrompt)

    // Add workspaceId to system prompt for tool context
    const workspaceContext = `workspaceId: ${threadId}`

    // Combine base system prompt with workflow prompt and workspace context
    const systemPrompt = [
      baseSystemPrompt,
      workflowPrompt,
      workspaceContext
    ].filter(Boolean).join('\n\n')

    // Convert AIMessage[] to Message[] and ensure IDs
    const messages = (contextMessages as AIMessage[]).map(msg => ({
      ...msg,
      id: msg.role === 'user' ? generateClientId() : generateServerId()
    })) as Message[]

    // Add the user message to the conversation
    const messagesWithUser = appendClientMessage({
      messages,
      message: {
        ...userMessage,
        id: generateClientId()
      }
    })

    // Determine if tools should be required based on section type
    const isToolRequired = body.workflowState && (() => {
      try {
        const { currentStageIndex, currentSectionIndex } = body.workflowState
        const workflowConfig = loadWorkflowConfig()
        const section = workflowConfig.workflow[currentStageIndex]?.sections[currentSectionIndex]
        return section?.type === 'brainstorming' || section?.type === 'review'
      } catch {
        return false
      }
    })()

    // Stream GPT response with tool support
    const result = streamText({
      model: openai('gpt-4o'),
      system: systemPrompt,
      messages: messagesWithUser,
      tools: toolSet,
      maxSteps: 5, // Allow multi-step tool calls
      experimental_generateMessageId: generateServerId,
      toolChoice: isToolRequired ? 'required' : 'auto', // Force tool usage in certain stages
      experimental_repairToolCall: async (options) => {
        // Cast to our known type structure
        const toolCall = {
          name: (options.toolCall as any).function?.name,
          arguments: (options.toolCall as any).function?.arguments
        }
        try {
          // Handle missing workspaceId
          if (toolCall.name === 'createDocument' || toolCall.name === 'updateDocument') {
            const parsedArgs = JSON.parse(toolCall.arguments)
            if (!parsedArgs.workspaceId) {
              return JSON.stringify({
                ...parsedArgs,
                workspaceId: threadId
              })
            }
          }
          return toolCall.arguments
        } catch (error) {
          console.error('Error repairing tool call:', error)
          return toolCall.arguments // Return original args if repair fails
        }
      },
      onStepFinish: ({ text, toolCalls, toolResults }) => {
        // Log each step for debugging
        console.log('Step completed:', {
          text: text?.slice(0, 100) + '...',
          toolCalls: toolCalls?.length,
          toolResults: toolResults?.length
        })
      },
      onFinish: async ({ response, steps }) => {
        try {
          // Log the complete interaction
          console.log('Chat interaction completed:', {
            messageCount: response.messages.length,
            stepCount: steps?.length || 0
          })

          // Handle final message content if no tool was called
          if (!steps?.length && response.messages.length > 0) {
            const lastMessage = response.messages[response.messages.length - 1]
            if (lastMessage.role === 'assistant' && typeof lastMessage.content === 'string') {
              const docContent = extractDocContent(lastMessage.content)
              if (docContent) {
                // Get output doc name from workflow state
                const outputDoc = getOutputDoc(body.workflowState, loadWorkflowConfig())
                if (outputDoc) {
                  console.log('Found doc content in message, updating:', outputDoc)
                  const note = await createOrUpdateNote(threadId, outputDoc, docContent)
                  // Emit document updated event
                  emitDocumentEvent({
                    documentId: note.id,
                    type: 'updated',
                    workspaceId: threadId
                  })
                }
              }
            }
          }

          // Process all steps in sequence
          if (steps?.length) {
            for (const step of steps) {
              const stepResult = step as any
              // Handle document operations
              if (stepResult.tool === 'createDocument' || stepResult.tool === 'updateDocument') {
                const result = stepResult.output as { success: boolean, message: string }
                if (result.success) {
                  // Extract doc name from success message
                  const match = result.message.match(/Document "([^"]+)" was (?:created|updated) successfully/)
                  if (match) {
                    // Emit appropriate document event
                    emitDocumentEvent({
                      documentId: match[1],
                      type: stepResult.tool === 'createDocument' ? 'created' : 'updated',
                      workspaceId: threadId
                    })
                  }
                }
              }
              // Log step completion for monitoring
              console.log('Step processed:', {
                tool: stepResult.tool,
                success: stepResult.output?.success,
                timestamp: new Date().toISOString()
              })
            }
          }

          // Persist the entire conversation in DB with normalized messages
          await saveChat({
            id: threadId,
            messages: appendResponseMessages({
              messages: messagesWithUser,
              responseMessages: response.messages,
            }),
            userId: user.id,
          })
        } catch (error) {
          console.error('Failed to process chat response:', error)
        }
      },
    })

    // Create response with truncation header if needed
    const streamResponse = result.toDataStreamResponse()
    const headers = new Headers(streamResponse.headers)

    // Set truncation header if needed
    if (truncated) {
      console.log('Setting truncation header to true')
      headers.set('x-note-truncated', 'true')
      // Also set CORS headers to ensure header is accessible
      headers.set('Access-Control-Expose-Headers', 'x-note-truncated')
    }

    // Create and return the response with headers
    const response = new Response(streamResponse.body, {
      status: 200,
      headers
    })

    // Log the headers for debugging
    console.log('Response headers:', Object.fromEntries(headers.entries()))
    return response
  } catch (error) {
    console.error('Chat error:', error)
    if (error instanceof Error && error.message.includes('Could not load note content')) {
      return NextResponse.json(
        { error: 'Could not load note content. Please refresh the page.' },
        { status: 500 }
      )
    }
    return NextResponse.json(
      { error: 'Internal Server Error' },
      { status: 500 }
    )
  }
}
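
A minimal client-side sketch of how this route might be consumed, assuming it is mounted at /api/threads/[threadId]/chat and the client uses the AI SDK's useChat hook; the endpoint path, the ThreadChat component name, and the WorkflowState shape are illustrative assumptions, while `workflowState` mirrors the optional field this handler reads from the request body and `x-note-truncated` is the header it sets on truncation.

'use client'

import { useChat } from 'ai/react'

// Assumed shape; the route only destructures these two indices from workflowState.
type WorkflowState = { currentStageIndex: number; currentSectionIndex: number }

// Illustrative component; the api path is an assumption based on the threadId route param.
export function ThreadChat({ threadId, workflowState }: { threadId: string; workflowState?: WorkflowState }) {
  const { messages, input, handleInputChange, handleSubmit } = useChat({
    api: `/api/threads/${threadId}/chat`,
    // Extra fields are merged into the request body alongside `messages`,
    // matching the `body.workflowState` this route reads.
    body: { workflowState },
    onResponse: (response) => {
      // The route sets this header when the context manager truncated the conversation.
      if (response.headers.get('x-note-truncated') === 'true') {
        console.warn('Conversation context was truncated on the server')
      }
    },
  })

  return (
    <form onSubmit={handleSubmit}>
      {messages.map(m => (
        <p key={m.id}>{m.role}: {m.content}</p>
      ))}
      <input value={input} onChange={handleInputChange} placeholder="Say something..." />
    </form>
  )
}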