@@ -7,7 +7,7 @@ import appConfig from '@/config/app'
 import { BadUserInputError, ForbiddenError } from '@/errors/graphql-errors'
 import { langfuseClient } from '@/helpers/langfuse'
 import { getLdFlagValue } from '@/helpers/launch-darkly'
-import { model } from '@/helpers/pair'
+import { model, MODEL_TYPE } from '@/helpers/pair'
 import JSONObject from '@/types/interfaces/json-object'
 
 import { MutationResolvers } from '../../__generated__/types.generated'
@@ -36,7 +36,10 @@ const generateAiSteps: MutationResolvers['generateAiSteps'] = async (
     throw new ForbiddenError('Not authorised!')
   }
 
-  const { prompt: systemPrompt } = await langfuseClient.prompt.get(promptName)
+  // NOTE: we get the entire prompt object so that we can pass it to generation.update
+  // to link the generation to the prompt in Rome (Langfuse)
+  const prompt = await langfuseClient.prompt.get(promptName)
+  const { prompt: systemPrompt } = prompt
 
   const result = await startActiveObservation(
     'generate-ai-steps',
@@ -53,6 +56,20 @@ const generateAiSteps: MutationResolvers['generateAiSteps'] = async (
       },
     })
 
+    const generation = trace.startObservation(
+      'ai-stream-generation',
+      {
+        model: MODEL_TYPE,
+        input: [
+          { role: 'system', content: systemPrompt },
+          { role: 'user', content: userPrompt },
+        ],
+      },
+      { asType: 'generation' },
+    )
+
+    generation.update({ prompt })
+
     const { object } = await generateObject({
       model,
       schema: z.object({
@@ -74,6 +91,8 @@ const generateAiSteps: MutationResolvers['generateAiSteps'] = async (
       },
     })
 
+    generation.update({ output: object }).end()
+
     return object
   },
 )
0 commit comments