-
Notifications
You must be signed in to change notification settings - Fork 188
poc(chat): improve prompt to guide LLM toward JSON Patch format #1769
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,113 @@ | ||
| import { beforeEach, describe, expect, it, vi } from 'vitest' | ||
| import { runChat } from '../langGraph' | ||
|
|
||
| // Mock the dependencies | ||
| vi.mock('@/lib/mastra', () => ({ | ||
| mastra: { | ||
| getAgent: vi.fn(() => ({ | ||
| generate: vi.fn(() => | ||
| Promise.resolve({ | ||
| text: `Added a summary column to track design session outcomes! | ||
|
|
||
| \`\`\`json | ||
| [ | ||
| { | ||
| "op": "add", | ||
| "path": "/tables/design_sessions/columns/summary", | ||
| "value": { | ||
| "name": "summary", | ||
| "type": "text", | ||
| "not_null": false | ||
| } | ||
| } | ||
| ] | ||
| \`\`\` | ||
|
|
||
| This will help you quickly understand what happened in each session.`, | ||
| }), | ||
| ), | ||
| })), | ||
| }, | ||
| })) | ||
|
|
||
| vi.mock('@langchain/openai', () => ({ | ||
| ChatOpenAI: vi.fn(() => ({ | ||
| invoke: vi.fn(() => | ||
| Promise.resolve({ | ||
| content: `\`\`\`json | ||
| [ | ||
| { | ||
| "op": "add", | ||
| "path": "/tables/design_sessions/columns/summary", | ||
| "value": { | ||
| "name": "summary", | ||
| "type": "text", | ||
| "not_null": false | ||
| } | ||
| } | ||
| ] | ||
| \`\`\``, | ||
| }), | ||
| ), | ||
| })), | ||
| })) | ||
|
|
||
| describe('LangGraph Chat Pipeline', () => { | ||
| beforeEach(() => { | ||
| vi.clearAllMocks() | ||
| }) | ||
|
|
||
| it('should return a response with JSON Patch fence for schema changes', async () => { | ||
| const userMsg = 'Add summary column' | ||
| const schemaText = 'CREATE TABLE design_sessions (id SERIAL PRIMARY KEY);' | ||
| const chatHistory = 'No previous conversation.' | ||
|
|
||
| const result = await runChat(userMsg, schemaText, chatHistory) | ||
|
|
||
| expect(result).toContain('```json') | ||
| expect(result).toContain('summary') | ||
| expect(result).toContain('design_sessions') | ||
| }) | ||
|
|
||
| it('should parse JSON Patch correctly', async () => { | ||
| const userMsg = 'Add summary column' | ||
| const schemaText = 'CREATE TABLE design_sessions (id SERIAL PRIMARY KEY);' | ||
| const chatHistory = 'No previous conversation.' | ||
|
|
||
| const result = await runChat(userMsg, schemaText, chatHistory) | ||
|
|
||
| // Extract JSON from the response | ||
| const jsonMatch = result?.match(/```json\s+([\s\S]+?)\s*```/i) | ||
| expect(jsonMatch).toBeTruthy() | ||
|
|
||
| if (jsonMatch) { | ||
| const patch = JSON.parse(jsonMatch[1]) as unknown[] | ||
| expect(Array.isArray(patch)).toBe(true) | ||
| expect(patch[0]).toHaveProperty('op', 'add') | ||
| expect(patch[0]).toHaveProperty('path') | ||
| expect(patch[0]).toHaveProperty('value') | ||
| } | ||
| }) | ||
|
|
||
| it('should handle chat history in the prompt', async () => { | ||
| const userMsg = 'Add summary column' | ||
| const schemaText = 'CREATE TABLE design_sessions (id SERIAL PRIMARY KEY);' | ||
| const chatHistory = 'User: Hello\nAssistant: Hi there!' | ||
|
|
||
| const result = await runChat(userMsg, schemaText, chatHistory) | ||
|
|
||
| expect(result).toBeTruthy() | ||
| expect(typeof result).toBe('string') | ||
| }) | ||
|
|
||
| it('should handle empty chat history', async () => { | ||
| const userMsg = 'Add summary column' | ||
| const schemaText = 'CREATE TABLE design_sessions (id SERIAL PRIMARY KEY);' | ||
| const chatHistory = 'No previous conversation.' | ||
|
|
||
| const result = await runChat(userMsg, schemaText, chatHistory) | ||
|
|
||
| expect(result).toBeTruthy() | ||
| expect(typeof result).toBe('string') | ||
| }) | ||
| }) |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,163 @@ | ||
| import { mastra } from '@/lib/mastra' | ||
| import { Annotation, END, START, StateGraph } from '@langchain/langgraph' | ||
| import { ChatOpenAI } from '@langchain/openai' | ||
|
|
||
| //////////////////////////////////////////////////////////////// | ||
| // 1. Type definitions for the StateGraph | ||
| //////////////////////////////////////////////////////////////// | ||
// Shared state threaded through the LangGraph pipeline.
// The first three fields are caller-supplied inputs; the optional fields
// are filled in by the graph nodes as the run progresses.
interface ChatState {
  userMsg: string // the user's current message
  schemaText: string // current schema text, embedded into the system prompt
  chatHistory: string // prior conversation, embedded into the system prompt
  sysPrompt?: string // produced by the buildPrompt node

  draft?: string // latest model reply (set by the draft and remind nodes)
  patch?: unknown[] // JSON Patch extracted from the draft by the check node
  valid?: boolean // whether check found a parseable ```json fence
  retryCount?: number // number of remind retries performed so far
}
|
|
||
// define the annotations for the StateGraph
// NOTE(review): this mirrors the ChatState interface and the two must be
// kept in sync by hand — consider deriving one from the other.
const ChatStateAnnotation = Annotation.Root({
  userMsg: Annotation<string>,
  schemaText: Annotation<string>,
  chatHistory: Annotation<string>,
  sysPrompt: Annotation<string>,
  draft: Annotation<string>,
  patch: Annotation<unknown[]>,
  valid: Annotation<boolean>,
  retryCount: Annotation<number>,
})
|
|
||
| //////////////////////////////////////////////////////////////// | ||
| // 2. Implementation of the StateGraph nodes | ||
| //////////////////////////////////////////////////////////////// | ||
|
|
||
| const buildPrompt = async (s: ChatState): Promise<Partial<ChatState>> => { | ||
| const sysPrompt = ` | ||
| You are Build Agent, an energetic and innovative system designer who builds and edits ERDs with lightning speed. | ||
| Your role is to execute user instructions immediately and offer smart suggestions for schema improvements. | ||
| You speak in a lively, action-oriented tone, showing momentum and confidence. | ||
|
|
||
| Your personality is bold, constructive, and enthusiastic — like a master architect in a hardhat, ready to build. | ||
| You say things like "Done!", "You can now...", and "Shall we move to the next step?". | ||
|
|
||
| Your communication should feel fast, fresh, and forward-moving, like a green plant constantly growing. | ||
|
|
||
| Do: | ||
| - Confirm execution quickly: "Added!", "Created!", "Linked!" | ||
| - Propose the next steps: "Would you like to add an index?", "Let's relate this to the User table too!" | ||
| - Emphasize benefits: "This makes tracking updates easier." | ||
|
|
||
| Don't: | ||
| - Hesitate ("Maybe", "We'll have to check...") | ||
| - Use long, uncertain explanations | ||
| - Get stuck in abstract talk — focus on action and outcomes | ||
|
|
||
| When in doubt, prioritize momentum, simplicity, and clear results. | ||
|
|
||
| <SCHEMA> | ||
| ${s.schemaText} | ||
| </SCHEMA> | ||
|
|
||
| Previous conversation: | ||
| ${s.chatHistory} | ||
|
|
||
| #### REQUIRED OUTPUT FORMAT | ||
| 1. **Always** wrap your RFC 6902 JSON Patch in a **\`\`\`json … \`\`\`** code fence. | ||
| 2. Any text *other than* the JSON Patch (explanations, suggestions, etc.) may appear **before or after** the fence. | ||
| **Do not** add filler phrases such as "Here is the patch" or "See below." | ||
| Instead, include only meaningful comments—design rationale, next steps, trade-offs, and so on. | ||
| 3. If the user's question **does not** involve a schema change, **omit** the JSON Patch fence entirely. | ||
| ` | ||
| return { sysPrompt } | ||
| } | ||
|
|
||
| const draft = async (s: ChatState): Promise<Partial<ChatState>> => { | ||
| const agent = mastra.getAgent('databaseSchemaBuildAgent') | ||
| if (!agent) { | ||
| throw new Error('databaseSchemaBuildAgent not found in Mastra instance') | ||
| } | ||
| if (!s.sysPrompt) { | ||
| throw new Error('System prompt not built') | ||
| } | ||
| const res = await agent.generate([ | ||
| { role: 'system', content: s.sysPrompt }, | ||
| { role: 'user', content: s.userMsg }, | ||
| ]) | ||
| return { draft: res.text } | ||
| } | ||
|
|
||
| const check = async (s: ChatState): Promise<Partial<ChatState>> => { | ||
| const m = s.draft?.match(/```json\s+([\s\S]+?)\s*```/i) | ||
| if (!m) return { valid: false } | ||
| try { | ||
| return { valid: true, patch: JSON.parse(m[1]) } | ||
| } catch { | ||
| return { valid: false } | ||
| } | ||
| } | ||
|
Comment on lines
+91
to
+99
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. [Q]
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Good point! I believe validation is necessary from the following perspectives, increasing in complexity as you go:
This is probably something we’ll implement eventually, but since the technical uncertainty is low, it’s a bit hard to justify doing it right away.
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Thank you for the details! It seems important to keep checking until we can actually apply the patch! |
||
|
|
||
| const remind = async (s: ChatState): Promise<Partial<ChatState>> => { | ||
| const llm = new ChatOpenAI({ model: 'gpt-4o-mini' }) | ||
| const res = await llm.invoke([ | ||
| { | ||
| role: 'system', | ||
| content: | ||
| 'Return ONLY the ```json code fence with the RFC 6902 patch. No intro text.', | ||
| }, | ||
| { role: 'user', content: s.userMsg }, | ||
| ]) | ||
| return { draft: res.content as string, retryCount: (s.retryCount ?? 0) + 1 } | ||
| } | ||
|
|
||
| //////////////////////////////////////////////////////////////// | ||
| // 3. build StateGraph | ||
| //////////////////////////////////////////////////////////////// | ||
| export const runChat = async ( | ||
| userMsg: string, | ||
| schemaText: string, | ||
| chatHistory: string, | ||
| ) => { | ||
| try { | ||
| const graph = new StateGraph(ChatStateAnnotation) | ||
|
|
||
| graph | ||
| .addNode('buildPrompt', buildPrompt) | ||
| .addNode('drafted', draft) | ||
| .addNode('check', check) | ||
| .addNode('remind', remind) | ||
| .addEdge(START, 'buildPrompt') | ||
| .addEdge('buildPrompt', 'drafted') | ||
| .addEdge('remind', 'check') | ||
|
|
||
| // conditional edges | ||
| .addConditionalEdges('check', (s: ChatState) => { | ||
| if (s.valid) return END | ||
| if ((s.retryCount ?? 0) >= 3) return END // give up | ||
| return 'remind' | ||
| }) | ||
|
|
||
| // execution | ||
| const compiled = graph.compile() | ||
| const result = await compiled.invoke( | ||
| { | ||
| userMsg, | ||
| schemaText, | ||
| chatHistory, | ||
| retryCount: 0, | ||
| }, | ||
| { | ||
| recursionLimit: 4, // for avoid deep recursion | ||
| }, | ||
| ) | ||
|
|
||
| return result.draft ?? 'No response generated' | ||
| } catch (error) { | ||
| console.error( | ||
| 'StateGraph execution failed, falling back to manual execution:', | ||
| error, | ||
| ) | ||
| // some fallback logic | ||
| } | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
📝
I found that I can use mastra as is! (I thought I had to go back to LangChain...)