
const { geminiApiKey } = require('../../../lib/symbols')
const {
-  LlmChatCompletionMessage,
-  LlmChatCompletionSummary,
-  LlmEmbedding,
-  LlmErrorMessage
+  LlmChatCompletionMessage,
+  LlmChatCompletionSummary,
+  LlmEmbedding,
+  LlmErrorMessage
} = require('../../../lib/llm-events/google-genai')
const { RecorderSpec } = require('../../../lib/shim/specs')
const { extractLlmContext } = require('../../util/llm-utils')
@@ -28,12 +28,12 @@ let TRACKING_METRIC = GEMINI.TRACKING_PREFIX
 * @param {object} params.msg LLM event
 */
function recordEvent({ agent, type, msg }) {
-  const llmContext = extractLlmContext(agent)
+  const llmContext = extractLlmContext(agent)

-  agent.customEventAggregator.add([
-    { type, timestamp: Date.now() },
-    Object.assign({}, msg, llmContext)
-  ])
+  agent.customEventAggregator.add([
+    { type, timestamp: Date.now() },
+    Object.assign({}, msg, llmContext)
+  ])
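+  // The aggregator payload is a two-element array: event metadata first,
+  // then the event attributes, i.e. [{ type, timestamp }, { ...msg, ...llmContext }]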
}

/**
@@ -44,8 +44,8 @@ function recordEvent({ agent, type, msg }) {
 * @param {Transaction} params.transaction active transaction
 */
function addLlmMeta({ agent, transaction }) {
-  agent.metrics.getOrCreateMetric(TRACKING_METRIC).incrementCallCount()
-  transaction.trace.attributes.addAttribute(DESTINATIONS.TRANS_EVENT, 'llm', true)
+  agent.metrics.getOrCreateMetric(TRACKING_METRIC).incrementCallCount()
+  transaction.trace.attributes.addAttribute(DESTINATIONS.TRANS_EVENT, 'llm', true)
}

/**
@@ -65,103 +65,223 @@ function addLlmMeta({ agent, transaction }) {
 * @param {Transaction} params.transaction active transaction
 */
function recordChatCompletionMessages({
+  agent,
+  shim,
+  segment,
+  request,
+  response,
+  err,
+  transaction
+}) {
+  if (!response) {
+    // If we get an error, it is possible that `response = null`.
+    // In that case, we define it to be an empty object.
+    response = {}
+  }
+
+  // response.headers = segment[]
+  // explicitly end the segment to get a consistent duration
+  // for both the LLM events and the segment
+  segment.end()
+  const completionSummary = new LlmChatCompletionSummary({
    agent,
-  shim,
    segment,
+    transaction,
    request,
    response,
-  err,
-  transaction
-}) {
-  if (!response) {
-    // If we get an error, it is possible that `response = null`.
-    // In that case, we define it to be an empty object.
-    response = {}
-  }
+    withError: err != null
+  })

-  // response.headers = segment[]
-  // explicitly end segment to consistent duration
-  // for both LLM events and the segment
-  segment.end()
-  const completionSummary = new LlmChatCompletionSummary({
-    agent,
-    segment,
-    transaction,
-    request,
-    response,
-    withError: err != null
+  // Only take the first response message and append to input messages
+  const messages = [request.contents, response?.candidates?.[0]?.content?.parts?.[0]]
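+  // e.g. a GenerateContentResponse looks roughly like
+  //   { candidates: [{ content: { role: 'model', parts: [{ text: '...' }] } }] }
+  // so this appends the first part of the first candidate to the prompt contents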
+  messages.forEach((message, index) => {
+    const completionMsg = new LlmChatCompletionMessage({
+      agent,
+      segment,
+      transaction,
+      request,
+      response,
+      index,
+      completionId: completionSummary.id,
+      message
    })

-  // Only take the first response message and append to input messages
-  const messages = [...request.messages, response?.choices?.[0]?.message]
-  messages.forEach((message, index) => {
-    const completionMsg = new LlmChatCompletionMessage({
+    recordEvent({ agent, type: 'LlmChatCompletionMessage', msg: completionMsg })
+  })
+
+  recordEvent({ agent, type: 'LlmChatCompletionSummary', msg: completionSummary })
+
+  if (err) {
+    const llmError = new LlmErrorMessage({ cause: err, summary: completionSummary, response })
+    agent.errors.add(transaction, err, llmError)
+  }
+
+  delete response.headers
+}
+
+module.exports = function initialize(agent, googleGenAi, moduleName, shim) {
+  if (agent?.config?.ai_monitoring?.enabled !== true) {
+    shim.logger.debug('config.ai_monitoring.enabled is set to false. Skipping instrumentation.')
+    return
+  }
+  // Update the tracking metric name with the version of the library
+  // being instrumented. We do not have access to the version when
+  // initially declaring the variable.
+  TRACKING_METRIC = `${TRACKING_METRIC}/${shim.pkgVersion}`
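+  // e.g. this might yield something like 'Supportability/Nodejs/ML/Gemini/1.0.0',
+  // depending on GEMINI.TRACKING_PREFIX and the installed package version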
+
+  const models = googleGenAi.Models
+  // TODO: why are generateContentInternal and generateContentStreamInternal
+  // exposed but not generateContent or generateContentStream?
+
+  /**
+   * Instrumentation is only done to get the response headers and attach
+   * them to the active segment, as @google/genai hides the headers from the
+   * functions we are trying to instrument.
+   * see: https://github.com/googleapis/js-genai/blob/cd0454862b4a0251d2606eeca8500b3b76004944/src/models.ts#L200
+   *
+   * TODO: Do we even need the headers?
+   */
+  shim.wrap(models.prototype, 'processParamsForMcpUsage', function wrapProcessParamsForMcpUsage(shim, original) {
+    return async function wrappedProcessParamsForMcpUsage(...args) {
+      // Call the original function and capture the result
+      const newParams = await original.apply(this, args)
+
+      // Inspect the headers in newParams
+      const headers = newParams?.config?.httpOptions?.headers
+      shim.logger.debug('Headers in newParams:', headers)
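+      // e.g. headers here may include entries like { 'x-goog-api-key': '...' },
+      // though the exact contents depend on the client configuration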
+
+      // Return the params unchanged
+      return newParams
+    }
+  })
+
+  /**
+   * Instruments chat completion creation
+   * and creates the LLM events
+   *
+   * **Note**: Currently only for promises. Streams are handled separately below.
+   */
+  shim.record(models.prototype, 'generateContentInternal',
+    function wrapGenerateContent(shim, func, name, args) {
+      const [request] = args
+
+      return new RecorderSpec({
+        name: GEMINI.COMPLETION,
+        promise: true,
+        after({ error: err, result: response, segment, transaction }) {
+          recordChatCompletionMessages({
            agent,
+            shim,
            segment,
            transaction,
            request,
            response,
-      index,
-      completionId: completionSummary.id,
-      message
-    })
+            err
+          })
+
+          addLlmMeta({ agent, transaction })
+        }
+      })
+    }
+  )
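+  // e.g. a call like
+  //   await client.models.generateContent({ model: 'gemini-2.0-flash', contents: 'Hello' })
+  // (where `client` is a GoogleGenAI instance) ends up recorded as one
+  // LlmChatCompletionSummary plus one LlmChatCompletionMessage per message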
+
+  shim.record(models.prototype, 'generateContentStreamInternal', function wrapGenerateContentStream(shim, func, name, args) {
+    if (!agent.config.ai_monitoring.streaming.enabled) {
+      shim.logger.warn(
+        '`ai_monitoring.streaming.enabled` is set to `false`, stream will not be instrumented.'
+      )
+      agent.metrics.getOrCreateMetric(AI.STREAMING_DISABLED).incrementCallCount()
+      return
+    }

-    recordEvent({ agent, type: 'LlmChatCompletionMessage', msg: completionMsg })
+    const [request] = args
+
+    return new RecorderSpec({
+      name: GEMINI.COMPLETION,
+      promise: true,
+      after({ error: err, result: response, segment, transaction }) {
+        if (err || !response) {
+          // The request failed before a stream was returned
+          recordChatCompletionMessages({ agent, shim, segment, transaction, request, response, err })
+          addLlmMeta({ agent, transaction })
+          return
+        }
+
+        // The promise resolves to an async generator of response chunks.
+        // Swap in a wrapped iterator so we can accumulate the streamed
+        // content and record the LLM events once the stream is consumed.
+        const origNext = response.next.bind(response)
+        response[Symbol.asyncIterator] = async function* wrappedIterator() {
+          let content = ''
+          let role = ''
+          let chunk
+          let streamErr
+          try {
+            let result = await origNext()
+            while (result.done !== true) {
+              chunk = result.value
+              role = chunk?.candidates?.[0]?.content?.role ?? role
+              content += chunk?.candidates?.[0]?.content?.parts?.[0]?.text ?? ''
+              yield chunk
+              result = await origNext()
+            }
+          } catch (iterErr) {
+            streamErr = iterErr
+            throw iterErr
+          } finally {
+            if (chunk?.candidates?.[0]?.content) {
+              // Replace the last chunk's content with the accumulated message
+              chunk.candidates[0].content = { role, parts: [{ text: content }] }
+            }
+            // update segment duration since we want to extend the time it took to
+            // handle the stream
+            segment.touch()
+
+            recordChatCompletionMessages({
+              agent: shim.agent,
+              shim,
+              segment,
+              transaction,
+              request,
+              response: chunk,
+              err: streamErr
+            })
+            addLlmMeta({ agent, transaction })
+          }
+        }
+      }
+    })
+  })

-  recordEvent({ agent, type: 'LlmChatCompletionSummary', msg: completionSummary })
+  /**
+   * Instruments embedding creation
+   * and creates LlmEmbedding event
+   */
+  shim.record(
+    models.prototype,
+    'embedContent',
+    function wrapEmbedContent(shim, func, name, args) {
+      const [request] = args

-  if (err) {
-    const llmError = new LlmErrorMessage({ cause: err, summary: completionSummary, response })
-    agent.errors.add(transaction, err, llmError)
-  }
+      return new RecorderSpec({
+        name: GEMINI.EMBEDDING,
+        promise: true,
+        after({ error: err, result: response, segment, transaction }) {
+          addLlmMeta({ agent, transaction })

-  delete response.headers
-}
+          if (!response) {
+            // If we get an error, it is possible that `response = null`.
+            // In that case, we define it to be an empty object.
+            response = {}
+          }

-module.exports = function initialize(agent, googleGenAi, moduleName, shim) {
-  if (agent?.config?.ai_monitoring?.enabled !== true) {
-    shim.logger.debug('config.ai_monitoring.enabled is set to false. Skipping instrumentation.')
-    return
-  }
+          // explicitly end segment to get consistent duration
+          // for both LLM events and the segment
+          segment.end()

-  // Update the tracking metric name with the version of the library
-  // being instrumented. We do not have access to the version when
-  // initially declaring the variable.
-  TRACKING_METRIC = `${TRACKING_METRIC}/${shim.pkgVersion}`
-
-  const models = googleGenAi.Models
-  /**
-   * Instruments chat completion creation
-   * and creates the LLM events
-   *
-   * **Note**: Currently only for promises. streams will come later
-   */
-  shim.record(
-    models.prototype,
-    'generateContentInternal',
-    function wrapGenerateContent(shim, func, name, args) {
-      const [request] = args
-      const model = request?.model
-      const contents = request?.contents // the prompt
-
-      return new RecorderSpec({
-        name: GEMINI.COMPLETION,
-        promise: true,
-        after({ error: err, result: response, segment, transaction }) {
-          recordChatCompletionMessages({
-            agent,
-            shim,
-            segment,
-            transaction,
-            request,
-            response,
-            err
-          })
-
-          addLlmMeta({ agent, transaction })
-        }
-      })
+          const embedding = new LlmEmbedding({
+            agent,
+            segment,
+            transaction,
+            request,
+            response,
+            withError: err != null
+          })
+
+          recordEvent({ agent, type: 'LlmEmbedding', msg: embedding })
+
+          if (err) {
+            const llmError = new LlmErrorMessage({ cause: err, embedding, response })
+            shim.agent.errors.add(transaction, err, llmError)
+          }
+
+          // cleanup keys on response before returning to user code
+          // delete response.headers
        }
-  )
-}
+      })
+    }
+  )
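+  // e.g. await client.models.embedContent({ model: 'text-embedding-004', contents: 'Hello' })
+  // would be recorded as a single LlmEmbedding event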
+}