Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions packages/backend/src/helpers/pair/get-prompt.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
import { langfuseClient } from '@/helpers/langfuse'

/**
 * Fetch a prompt definition from Langfuse by name.
 *
 * @param promptName - Fully-qualified prompt name in Langfuse
 *                     (e.g. 'ai-builder/chat-to-form-check').
 * @param version - Optional selector forwarded to Langfuse.
 *                  NOTE(review): this value is passed as a Langfuse *label*,
 *                  not a numeric prompt version — confirm the naming is
 *                  intentional.
 * @returns The resolved Langfuse prompt object.
 */
export const getPrompt = async (promptName: string, version?: string) => {
  // Only build an options object when a selector was supplied; otherwise
  // Langfuse falls back to its default resolution for the prompt.
  const options = version ? { label: version } : undefined
  return langfuseClient.prompt.get(promptName, options)
}
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,14 @@ import type { Request, Response } from 'express'
import { Router } from 'express'

import appConfig from '@/config/app'
import { langfuseClient } from '@/helpers/langfuse'
import { getLdFlagValue } from '@/helpers/launch-darkly'
import logger from '@/helpers/logger'
import { model, MODEL_TYPE } from '@/helpers/pair'
import { getPrompt } from '@/helpers/pair/get-prompt'

import { getAuthenticatedContext } from './middleware/authentication'
import { getAuthenticatedContext } from '../middleware/authentication'

import chatReadinessRouter from './readiness'

interface ChatRequest {
messages: Array<{
Expand Down Expand Up @@ -56,9 +58,7 @@ async function handleChatStream(req: Request, res: Response) {
.pop()

// Get the prompt from Langfuse
const prompt = await langfuseClient.prompt.get(chatPrompt, {
label: version,
})
const prompt = await getPrompt(chatPrompt, version)

let traceId = ''

Expand Down Expand Up @@ -108,7 +108,6 @@ async function handleChatStream(req: Request, res: Response) {

trace.update({
output: { result: event.text },
level: 'DEFAULT',
})

generation
Expand Down Expand Up @@ -175,5 +174,6 @@ async function handleChatStream(req: Request, res: Response) {
const router = Router()

router.post('/', handleChatStream)
router.use('/readiness', chatReadinessRouter)

export default router
93 changes: 93 additions & 0 deletions packages/backend/src/routes/api/chat/readiness.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
import { startActiveObservation } from '@langfuse/tracing'
import { generateObject } from 'ai'
import type { Request, Response } from 'express'
import { Router } from 'express'
import z from 'zod/v3'

import appConfig from '@/config/app'
import logger from '@/helpers/logger'
import { model, MODEL_TYPE } from '@/helpers/pair'
import { getPrompt } from '@/helpers/pair/get-prompt'

import { getAuthenticatedContext } from '../middleware/authentication'

// Shape of the POST /api/chat/readiness request body.
interface ChatReadinessRequest {
  // Raw user chat message to be classified for readiness.
  message: string
  // NOTE(review): declared but never read by the handler below — confirm
  // whether sessionId is still needed or can be dropped from the contract.
  sessionId: string
}

/**
 * POST handler that asks the model whether the user's chat message contains
 * enough detail to proceed ("readiness" check).
 *
 * Responds with:
 *  - 200 `{ isReady: boolean }` on success
 *  - 400 when the message is missing or empty
 *  - 500 on any downstream failure (logged, details not leaked to client)
 *
 * The whole model call is wrapped in a Langfuse trace plus a nested
 * 'is-chat-ready' generation observation.
 */
const handleChatReadiness = async (
  req: Request,
  res: Response,
): Promise<void> => {
  const context = getAuthenticatedContext(req)

  try {
    const { message: rawMessage } = req.body as ChatReadinessRequest

    // Robustness fix: reject missing/empty input up front instead of
    // forwarding `undefined` to the model and burning an LLM call.
    if (typeof rawMessage !== 'string' || rawMessage.trim().length === 0) {
      res.status(400).json({ error: 'message is required' })
      return
    }

    // Get the prompt from Langfuse
    const prompt = await getPrompt('ai-builder/chat-to-form-check')
    const { prompt: systemPrompt } = prompt

    const result = await startActiveObservation(
      'chat-readiness',
      async (trace) => {
        // Tag the trace so it can be filtered in the Langfuse UI.
        trace.updateTrace({
          userId: context.currentUser.email,
          environment: appConfig.appEnv,
          tags: ['ai-builder', 'is-chat-ready'],
        })

        trace.update({
          input: {
            message: rawMessage,
          },
        })

        const generation = trace.startObservation(
          'is-chat-ready',
          {
            model: MODEL_TYPE,
            input: [
              { role: 'system', content: systemPrompt },
              { role: 'user', content: rawMessage },
            ],
          },
          { asType: 'generation' },
        )

        // Link the generation back to the Langfuse prompt definition.
        generation.update({ prompt })

        try {
          // Structured output: the model must return { isReady: boolean }.
          const { object } = await generateObject({
            model,
            schema: z.object({
              isReady: z.boolean(),
            }),
            system: systemPrompt,
            prompt: rawMessage,
            experimental_telemetry: {
              isEnabled: true,
              functionId: 'is-chat-ready',
            },
          })

          trace.update({ output: object })
          generation.update({ output: object })

          return object
        } finally {
          // Bug fix: the generation observation was previously only ended on
          // the success path, so a generateObject failure left it open.
          generation.end()
        }
      },
    )

    res.json({ isReady: result.isReady })
  } catch (error) {
    logger.error('Error in chat readiness', { error })
    res.status(500).json({ error: 'Internal server error' })
  }
}

const router = Router()

router.post('/', handleChatReadiness)

export default router
96 changes: 64 additions & 32 deletions packages/frontend/src/hooks/useChatStream.ts
Original file line number Diff line number Diff line change
@@ -1,23 +1,37 @@
import { useCallback, useMemo } from 'react'
import { useLocation, useNavigate } from 'react-router-dom'
import type { UIMessage } from '@ai-sdk/react'
import { useChat } from '@ai-sdk/react'
import { useToast } from '@opengovsg/design-system-react'
import { DefaultChatTransport } from 'ai'

import * as URLS from '@/config/urls'
import {
deduplicateMessages,
extractTextContent,
transformMessages,
} from '@/pages/AiBuilder/helpers'

export interface Message {
id: string // this is auto-generated by the AI SDK
text: string
traceId?: string
generationId?: string
traceId?: string // only assistant messages have this
isUser: boolean
}

// Custom message type with metadata
type CustomUIMessage = UIMessage<{
export type CustomUIMessage = UIMessage<{
traceId?: string
}>

export function useChatStream() {
export interface UseChatStreamOptions {
initialMessages?: Message[]
}

export function useChatStream(options?: UseChatStreamOptions) {
const toast = useToast()
const navigate = useNavigate()
const location = useLocation()

const {
messages: aiMessages,
Expand All @@ -30,9 +44,19 @@ export function useChatStream() {
api: '/api/chat',
credentials: 'include',
prepareSendMessagesRequest: ({ messages }) => {
// Send all messages to maintain conversation context
// Convert initialMessages to the format expected by the API
const initialMsgs = (options?.initialMessages || []).map((msg) => ({
id: msg.id,
role: msg.isUser ? 'user' : 'assistant',
parts: [{ type: 'text', text: msg.text }],
...(msg.traceId && { metadata: { traceId: msg.traceId } }),
}))

// Prepend initial messages to maintain full conversation context
const allMessages = [...initialMsgs, ...messages]

const body = {
messages: messages,
messages: allMessages,
sessionId: '',
}
return { body }
Expand All @@ -47,21 +71,37 @@ export function useChatStream() {
position: 'top',
})
},
onFinish: ({ messages }) => {
// transform the messages and save to location state
// so that user can still access it if they refresh the page
const transformedMessages = transformMessages(messages)

// Combine initial messages with new messages to preserve full history
const allMessages = deduplicateMessages([
...(options?.initialMessages || []),
...transformedMessages,
])

navigate(`${URLS.EDITOR}/ai`, {
state: {
...location.state,
isFormMode: false,
chatInput: allMessages[allMessages.length - 1].text,
chatMessages: allMessages,
},
replace: true,
})
},
})

// Helper function to extract text content from UIMessage
const extractTextContent = useCallback((msg: CustomUIMessage): string => {
return msg.parts
.filter((part) => part.type === 'text')
.map((part) => (part as any).text)
.join('')
}, [])

// Transform AI SDK messages to our Message format
const messages = useMemo<Message[]>(() => {
const isActivelyStreaming = status === 'streaming' || status === 'submitted'

// Filter user and assistant messages
// Start with initial messages if provided
const initialMsgs = options?.initialMessages || []

// Filter user and assistant messages from AI SDK
let messagesToTransform = aiMessages.filter(
(msg) => msg.role === 'user' || msg.role === 'assistant',
)
Expand All @@ -74,17 +114,13 @@ export function useChatStream() {
}
}

return messagesToTransform.map((msg) => {
// Extract traceId from message metadata
const traceId = msg.metadata?.traceId

return {
text: extractTextContent(msg),
isUser: msg.role === 'user',
traceId: traceId,
}
})
}, [aiMessages, extractTextContent, status])
const transformedMessages = transformMessages(messagesToTransform)
const allMessages = deduplicateMessages([
...initialMsgs,
...transformedMessages,
])
return allMessages
}, [aiMessages, options?.initialMessages, status])

// Get the current streaming response (last assistant message that's still being streamed)
const currentResponse = useMemo(() => {
Expand All @@ -99,7 +135,7 @@ export function useChatStream() {
return extractTextContent(lastMessage)
}
return ''
}, [aiMessages, status, extractTextContent])
}, [aiMessages, status])

// Wrapper for sendMessage that matches the expected signature
const sendMessageWrapper = useCallback(
Expand All @@ -112,16 +148,12 @@ export function useChatStream() {
[sendMessage],
)

const cancelStream = useCallback(() => {
stop()
}, [stop])

return {
messages,
currentResponse,
isStreaming: status === 'submitted' || status === 'streaming',
error: aiError?.message || null,
sendMessage: sendMessageWrapper,
cancelStream,
cancelStream: stop,
}
}
15 changes: 15 additions & 0 deletions packages/frontend/src/pages/AiBuilder/AiBuilderContext.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,16 @@ import { useIsMobile } from '@opengovsg/design-system-react'
import PrimarySpinner from '@/components/PrimarySpinner'
import { GET_APPS } from '@/graphql/queries/get-apps'
import { getStepGroupTypeAndCaption, getStepStructure } from '@/helpers/toolbox'
import { Message } from '@/hooks/useChatStream'

interface AIBuilderSharedProps {
flowName: string
formInput: {
trigger: string
actions: string
}
chatInput: string
chatMessages: Message[]
isFormMode: boolean
output: {
trigger: IStep
Expand Down Expand Up @@ -51,11 +54,21 @@ export const useAiBuilderContext = () => {

interface AiBuilderContextProviderProps extends AIBuilderSharedProps {
children: React.ReactNode
flowName: string
formInput: {
trigger: string
actions: string
}
chatInput: string
chatMessages: Message[]
isFormMode: boolean
}

export const AiBuilderContextProvider = ({
children,
flowName = 'Name your Pipe', // default to Name your Pipe if no flow name is provided
chatInput,
chatMessages,
formInput,
isFormMode,
output,
Expand Down Expand Up @@ -104,6 +117,8 @@ export const AiBuilderContextProvider = ({
allApps,
flowName,
formInput,
chatInput,
chatMessages,
isFormMode,
output,
isMobile,
Expand Down
29 changes: 29 additions & 0 deletions packages/frontend/src/pages/AiBuilder/assets/AiBuilderError.svg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,9 @@ import { Box, Flex, Icon, Text, Textarea } from '@chakra-ui/react'

import pairLogo from '@/assets/pair-logo.svg'
import { ImageBox } from '@/components/FlowStepConfigurationModal/ChooseAndAddConnection/ConfigureExcelConnection'
import IdeaButtons from '@/pages/AiBuilder/components/IdeaButtons'
import { AI_CHAT_IDEAS, AiChatIdea, AiFormIdea } from '@/pages/Flows/constants'

import IdeaButtons from '../IdeaButtons'

interface PromptInputProps {
isStreaming: boolean
showIdeas?: boolean
Expand Down
Loading
Loading