diff --git a/frontend/apps/app/app/api/chat/route.ts b/frontend/apps/app/app/api/chat/route.ts index e11a838c3a..4eadc1c088 100644 --- a/frontend/apps/app/app/api/chat/route.ts +++ b/frontend/apps/app/app/api/chat/route.ts @@ -1,12 +1,12 @@ import { convertSchemaToText } from '@/app/lib/schema/convertSchemaToText' import { isSchemaUpdated } from '@/app/lib/vectorstore/supabaseVectorStore' import { syncSchemaVectorStore } from '@/app/lib/vectorstore/syncSchemaVectorStore' -import { mastra } from '@/lib/mastra' +import { runChat } from '@/lib/chat/langGraph' import * as Sentry from '@sentry/nextjs' import { NextResponse } from 'next/server' export async function POST(request: Request) { - const { message, schemaData, history, mode, projectId } = await request.json() + const { message, schemaData, mode, projectId, history } = await request.json() if (!message || typeof message !== 'string' || !message.trim()) { return NextResponse.json({ error: 'Message is required' }, { status: 400 }) @@ -19,9 +19,6 @@ export async function POST(request: Request) { ) } - // Determine which agent to use based on the mode - const agentName = - mode === 'build' ? 
'databaseSchemaBuildAgent' : 'databaseSchemaAskAgent' try { // Check if schema has been updated const schemaUpdated = await isSchemaUpdated(schemaData) @@ -53,31 +50,17 @@ export async function POST(request: Request) { // Convert schema to text const schemaText = convertSchemaToText(schemaData) - // Get the agent from Mastra - const agent = mastra.getAgent(agentName) - if (!agent) { - throw new Error(`${agentName} not found in Mastra instance`) + // Use the LangGraph pipeline for build mode; ask mode is rejected below until it gets its own pipeline + let responseText: string | undefined + if (mode === 'build') { + responseText = await runChat(message, schemaText, formattedChatHistory) + } else { + // Ask mode has not been migrated to LangGraph yet, so it is rejected for now + // This can be refactored later to use a separate LangGraph pipeline + throw new Error('Ask mode not yet implemented with LangGraph') + } - // Create a response using the agent - const response = await agent.generate([ - { - role: 'system', - content: ` -Complete Schema Information: -${schemaText} - -Previous conversation: -${formattedChatHistory} -`, - }, - { - role: 'user', - content: message, - }, - ]) - - return new Response(response.text, { + return new Response(responseText, { headers: { 'Content-Type': 'text/plain; charset=utf-8', }, diff --git a/frontend/apps/app/components/ChatbotButton/components/ChatbotDialog/ChatbotDialog.tsx b/frontend/apps/app/components/ChatbotButton/components/ChatbotDialog/ChatbotDialog.tsx index b6a7918195..efa2d12b44 100644 --- a/frontend/apps/app/components/ChatbotButton/components/ChatbotDialog/ChatbotDialog.tsx +++ b/frontend/apps/app/components/ChatbotButton/components/ChatbotDialog/ChatbotDialog.tsx @@ -90,6 +90,7 @@ export const ChatbotDialog: FC = ({ tableGroups, history, projectId, + mode: 'build', }), }) diff --git a/frontend/apps/app/lib/chat/__tests__/langGraph.test.ts b/frontend/apps/app/lib/chat/__tests__/langGraph.test.ts new file mode 100644 index 0000000000..778485b00d --- /dev/null 
+++ b/frontend/apps/app/lib/chat/__tests__/langGraph.test.ts @@ -0,0 +1,113 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { runChat } from '../langGraph' + +// Mock the dependencies +vi.mock('@/lib/mastra', () => ({ + mastra: { + getAgent: vi.fn(() => ({ + generate: vi.fn(() => + Promise.resolve({ + text: `Added a summary column to track design session outcomes! + +\`\`\`json +[ + { + "op": "add", + "path": "/tables/design_sessions/columns/summary", + "value": { + "name": "summary", + "type": "text", + "not_null": false + } + } +] +\`\`\` + +This will help you quickly understand what happened in each session.`, + }), + ), + })), + }, +})) + +vi.mock('@langchain/openai', () => ({ + ChatOpenAI: vi.fn(() => ({ + invoke: vi.fn(() => + Promise.resolve({ + content: `\`\`\`json +[ + { + "op": "add", + "path": "/tables/design_sessions/columns/summary", + "value": { + "name": "summary", + "type": "text", + "not_null": false + } + } +] +\`\`\``, + }), + ), + })), +})) + +describe('LangGraph Chat Pipeline', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('should return a response with JSON Patch fence for schema changes', async () => { + const userMsg = 'Add summary column' + const schemaText = 'CREATE TABLE design_sessions (id SERIAL PRIMARY KEY);' + const chatHistory = 'No previous conversation.' + + const result = await runChat(userMsg, schemaText, chatHistory) + + expect(result).toContain('```json') + expect(result).toContain('summary') + expect(result).toContain('design_sessions') + }) + + it('should parse JSON Patch correctly', async () => { + const userMsg = 'Add summary column' + const schemaText = 'CREATE TABLE design_sessions (id SERIAL PRIMARY KEY);' + const chatHistory = 'No previous conversation.' 
+ + const result = await runChat(userMsg, schemaText, chatHistory) + + // Extract JSON from the response + const jsonMatch = result?.match(/```json\s+([\s\S]+?)\s*```/i) + expect(jsonMatch).toBeTruthy() + + if (jsonMatch) { + const patch = JSON.parse(jsonMatch[1]) as unknown[] + expect(Array.isArray(patch)).toBe(true) + expect(patch[0]).toHaveProperty('op', 'add') + expect(patch[0]).toHaveProperty('path') + expect(patch[0]).toHaveProperty('value') + } + }) + + it('should handle chat history in the prompt', async () => { + const userMsg = 'Add summary column' + const schemaText = 'CREATE TABLE design_sessions (id SERIAL PRIMARY KEY);' + const chatHistory = 'User: Hello\nAssistant: Hi there!' + + const result = await runChat(userMsg, schemaText, chatHistory) + + expect(result).toBeTruthy() + expect(typeof result).toBe('string') + }) + + it('should handle empty chat history', async () => { + const userMsg = 'Add summary column' + const schemaText = 'CREATE TABLE design_sessions (id SERIAL PRIMARY KEY);' + const chatHistory = 'No previous conversation.' + + const result = await runChat(userMsg, schemaText, chatHistory) + + expect(result).toBeTruthy() + expect(typeof result).toBe('string') + }) +}) diff --git a/frontend/apps/app/lib/chat/langGraph.ts b/frontend/apps/app/lib/chat/langGraph.ts new file mode 100644 index 0000000000..3269bcc3e7 --- /dev/null +++ b/frontend/apps/app/lib/chat/langGraph.ts @@ -0,0 +1,163 @@ +import { mastra } from '@/lib/mastra' +import { Annotation, END, START, StateGraph } from '@langchain/langgraph' +import { ChatOpenAI } from '@langchain/openai' + +//////////////////////////////////////////////////////////////// +// 1. 
Type definitions for the StateGraph +//////////////////////////////////////////////////////////////// +interface ChatState { + userMsg: string + schemaText: string + chatHistory: string + sysPrompt?: string + + draft?: string + patch?: unknown[] + valid?: boolean + retryCount?: number +} + +// define the annotations for the StateGraph +const ChatStateAnnotation = Annotation.Root({ + userMsg: Annotation, + schemaText: Annotation, + chatHistory: Annotation, + sysPrompt: Annotation, + draft: Annotation, + patch: Annotation, + valid: Annotation, + retryCount: Annotation, +}) + +//////////////////////////////////////////////////////////////// +// 2. Implementation of the StateGraph nodes +//////////////////////////////////////////////////////////////// + +const buildPrompt = async (s: ChatState): Promise> => { + const sysPrompt = ` +You are Build Agent, an energetic and innovative system designer who builds and edits ERDs with lightning speed. +Your role is to execute user instructions immediately and offer smart suggestions for schema improvements. +You speak in a lively, action-oriented tone, showing momentum and confidence. + +Your personality is bold, constructive, and enthusiastic — like a master architect in a hardhat, ready to build. +You say things like "Done!", "You can now...", and "Shall we move to the next step?". + +Your communication should feel fast, fresh, and forward-moving, like a green plant constantly growing. + +Do: + - Confirm execution quickly: "Added!", "Created!", "Linked!" + - Propose the next steps: "Would you like to add an index?", "Let's relate this to the User table too!" + - Emphasize benefits: "This makes tracking updates easier." + +Don't: + - Hesitate ("Maybe", "We'll have to check...") + - Use long, uncertain explanations + - Get stuck in abstract talk — focus on action and outcomes + +When in doubt, prioritize momentum, simplicity, and clear results. 
+ + +${s.schemaText} + + +Previous conversation: +${s.chatHistory} + +#### REQUIRED OUTPUT FORMAT +1. **Always** wrap your RFC 6902 JSON Patch in a **\`\`\`json … \`\`\`** code fence. +2. Any text *other than* the JSON Patch (explanations, suggestions, etc.) may appear **before or after** the fence. + **Do not** add filler phrases such as "Here is the patch" or "See below." + Instead, include only meaningful comments—design rationale, next steps, trade-offs, and so on. +3. If the user's question **does not** involve a schema change, **omit** the JSON Patch fence entirely. +` + return { sysPrompt } +} + +const draft = async (s: ChatState): Promise> => { + const agent = mastra.getAgent('databaseSchemaBuildAgent') + if (!agent) { + throw new Error('databaseSchemaBuildAgent not found in Mastra instance') + } + if (!s.sysPrompt) { + throw new Error('System prompt not built') + } + const res = await agent.generate([ + { role: 'system', content: s.sysPrompt }, + { role: 'user', content: s.userMsg }, + ]) + return { draft: res.text } +} + +const check = async (s: ChatState): Promise> => { + const m = s.draft?.match(/```json\s+([\s\S]+?)\s*```/i) + if (!m) return { valid: false } + try { + return { valid: true, patch: JSON.parse(m[1]) } + } catch { + return { valid: false } + } +} + +const remind = async (s: ChatState): Promise> => { + const llm = new ChatOpenAI({ model: 'gpt-4o-mini' }) + const res = await llm.invoke([ + { + role: 'system', + content: + 'Return ONLY the ```json code fence with the RFC 6902 patch. No intro text.', + }, + { role: 'user', content: s.userMsg }, + ]) + return { draft: res.content as string, retryCount: (s.retryCount ?? 0) + 1 } +} + +//////////////////////////////////////////////////////////////// +// 3. 
build StateGraph +//////////////////////////////////////////////////////////////// +export const runChat = async ( + userMsg: string, + schemaText: string, + chatHistory: string, +) => { + try { + const graph = new StateGraph(ChatStateAnnotation) + + graph + .addNode('buildPrompt', buildPrompt) + .addNode('drafted', draft) + .addNode('check', check) + .addNode('remind', remind) + .addEdge(START, 'buildPrompt') + .addEdge('buildPrompt', 'drafted') + .addEdge('remind', 'check') + + // conditional edges — NOTE(review): 'drafted' has no outgoing edge into 'check', so the draft is never validated; confirm the intended wiring + .addConditionalEdges('check', (s: ChatState) => { + if (s.valid) return END + if ((s.retryCount ?? 0) >= 3) return END // give up + return 'remind' + }) + + // execution + const compiled = graph.compile() + const result = await compiled.invoke( + { + userMsg, + schemaText, + chatHistory, + retryCount: 0, + }, + { + recursionLimit: 4, // to avoid runaway recursion + }, + ) + + return result.draft ?? 'No response generated' + } catch (error) { + console.error( + 'StateGraph execution failed, falling back to manual execution:', + error, + ) + // NOTE(review): no fallback is actually implemented — the function resolves to undefined here; TODO add the fallback or rethrow + } +} diff --git a/frontend/apps/app/lib/mastra/agents/databaseSchemaBuildAgent.ts b/frontend/apps/app/lib/mastra/agents/databaseSchemaBuildAgent.ts index a72fb65ef3..ec878407ae 100644 --- a/frontend/apps/app/lib/mastra/agents/databaseSchemaBuildAgent.ts +++ b/frontend/apps/app/lib/mastra/agents/databaseSchemaBuildAgent.ts @@ -30,6 +30,33 @@ Don't: When in doubt, prioritize momentum, simplicity, and clear results. +--- + +#### REQUIRED OUTPUT FORMAT +1. **Always** wrap your RFC 6902 JSON Patch in a **\`\`\`json … \`\`\`** code fence. +2. Any text *other than* the JSON Patch (explanations, suggestions, etc.) may appear **before or after** the fence. + **Do not** add filler phrases such as "Here is the patch" or "See below." + Instead, include only meaningful comments—design rationale, next steps, trade-offs, and so on. +3. 
Example: + +\`\`\`markdown +### Why we need \`summary\` + +Adding a nullable \`summary\` helps … +\`summary\` will be displayed on … + +\`\`\`json +[ + { "op": "add", + "path": "/tables/design_sessions/columns/summary", + "value": { "name": "summary", "type": "text", "not_null": false } } +] +\`\`\` + +Next, we might add an index … +\`\`\` + +4. If the user’s question **does not** involve a schema change, **omit** the JSON Patch fence entirely. `, model: openai('o4-mini-2025-04-16'), }) diff --git a/frontend/apps/app/package.json b/frontend/apps/app/package.json index 34438bc2d7..847cad7f50 100644 --- a/frontend/apps/app/package.json +++ b/frontend/apps/app/package.json @@ -14,6 +14,7 @@ "@codemirror/view": "6.36.8", "@langchain/community": "0.3.43", "@langchain/core": "0.3.55", + "@langchain/langgraph": "0.2.73", "@langchain/openai": "0.5.10", "@lezer/highlight": "1.2.1", "@liam-hq/db": "workspace:*", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index a0d6720346..24ee43278a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -84,6 +84,9 @@ importers: '@langchain/core': specifier: 0.3.55 version: 0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)) + '@langchain/langgraph': + specifier: 0.2.73 + version: 0.2.73(@langchain/core@0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)))(react@18.3.1)(zod-to-json-schema@3.24.5(zod@3.23.8)) '@langchain/openai': specifier: 0.5.10 version: 0.5.10(@langchain/core@0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)))(ws@8.18.2) @@ -3026,6 +3029,33 @@ packages: resolution: {integrity: sha512-SojY2ugpT6t9eYfFB9Ysvyhhyh+KJTGXs50hdHUE9tAEQWp3WAwoxe4djwJnOZ6fSpWYdpFt2UT2ksHVDy2vXA==} engines: {node: '>=18'} + '@langchain/langgraph-checkpoint@0.0.17': + resolution: {integrity: sha512-6b3CuVVYx+7x0uWLG+7YXz9j2iBa+tn2AXvkLxzEvaAsLE6Sij++8PPbS2BZzC+S/FPJdWsz6I5bsrqL0BYrCA==} + engines: {node: '>=18'} + peerDependencies: + '@langchain/core': '>=0.2.31 <0.4.0' + + '@langchain/langgraph-sdk@0.0.78': + resolution: {integrity: 
sha512-skkUDmEhClWzlsr8jRaS1VpXVBISm5OFd0MUtS1jKRL5pn08K+IJRvHnlzgum9x7Dste9KXGcIGVoR7cNKJQrw==} + peerDependencies: + '@langchain/core': '>=0.2.31 <0.4.0' + react: ^18 || ^19 + peerDependenciesMeta: + '@langchain/core': + optional: true + react: + optional: true + + '@langchain/langgraph@0.2.73': + resolution: {integrity: sha512-vw+IXV2Q7x/QaykNj3VE/Ak3aPlst3spkpM6zYtqwGkQlhLZU4Lb8PHHPjqNNYHSdOTDj9x4jIRUPZArGHx9Aw==} + engines: {node: '>=18'} + peerDependencies: + '@langchain/core': '>=0.2.36 <0.3.0 || >=0.3.40 < 0.4.0' + zod-to-json-schema: ^3.x + peerDependenciesMeta: + zod-to-json-schema: + optional: true + '@langchain/openai@0.5.10': resolution: {integrity: sha512-hBQIWjcVxGS7tgVvgBBmrZ5jSaJ8nu9g6V64/Tx6KGjkW7VdGmUvqCO+koiQCOZVL7PBJkHWAvDsbghPYXiZEA==} engines: {node: '>=18'} @@ -8853,7 +8883,6 @@ packages: libsql@0.4.7: resolution: {integrity: sha512-T9eIRCs6b0J1SHKYIvD8+KCJMcWZ900iZyxdnSCdqxN12Z1ijzT+jY5nrk72Jw4B0HGzms2NgpryArlJqvc3Lw==} - cpu: [x64, arm64, wasm32] os: [darwin, linux, win32] lightningcss-darwin-arm64@1.29.2: @@ -14295,6 +14324,33 @@ snapshots: transitivePeerDependencies: - openai + '@langchain/langgraph-checkpoint@0.0.17(@langchain/core@0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)))': + dependencies: + '@langchain/core': 0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)) + uuid: 10.0.0 + + '@langchain/langgraph-sdk@0.0.78(@langchain/core@0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)))(react@18.3.1)': + dependencies: + '@types/json-schema': 7.0.15 + p-queue: 6.6.2 + p-retry: 4.6.2 + uuid: 9.0.1 + optionalDependencies: + '@langchain/core': 0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)) + react: 18.3.1 + + '@langchain/langgraph@0.2.73(@langchain/core@0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)))(react@18.3.1)(zod-to-json-schema@3.24.5(zod@3.23.8))': + dependencies: + '@langchain/core': 0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)) + '@langchain/langgraph-checkpoint': 0.0.17(@langchain/core@0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8))) + 
'@langchain/langgraph-sdk': 0.0.78(@langchain/core@0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)))(react@18.3.1) + uuid: 10.0.0 + zod: 3.24.4 + optionalDependencies: + zod-to-json-schema: 3.24.5(zod@3.23.8) + transitivePeerDependencies: + - react + '@langchain/openai@0.5.10(@langchain/core@0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8)))(ws@8.18.2)': dependencies: '@langchain/core': 0.3.55(openai@4.97.0(ws@8.18.2)(zod@3.23.8))