Skip to content

Commit 7b5bc64

Browse files
committed
wip: tests
1 parent 733982f commit 7b5bc64

File tree

9 files changed

+282
-73
lines changed

9 files changed

+282
-73
lines changed

lib/instrumentation/@google/genai.js

Lines changed: 55 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55

66
'use strict'
77

8+
const { geminiApiKey, googleGenAiHeaders } = require('../../../lib/symbols')
89
const {
910
LlmChatCompletionMessage,
1011
LlmChatCompletionSummary,
@@ -119,42 +120,67 @@ function recordChatCompletionMessages({
119120
delete response.headers
120121
}
121122

123+
/**
124+
* Adds apiKey and response headers to the active segment
125+
 * under symbol keys
126+
*
127+
* @param {object} params input params
128+
* @param {Shim} params.shim instance of shim
129+
 * @param {object} params.result result from the @google/genai request
130+
 * @param {string} params.apiKey api key from the @google/genai client
131+
*/
132+
function decorateSegment({ shim, result, apiKey }) {
133+
const segment = shim.getActiveSegment()
134+
135+
if (segment) {
136+
segment[geminiApiKey] = apiKey
137+
138+
const headers = result?.response?.headers
139+
? Object.fromEntries(result.response.headers)
140+
: { ...result?.headers }
141+
segment[googleGenAiHeaders] = headers
142+
}
143+
}
144+
122145
module.exports = function initialize(agent, googleGenAi, moduleName, shim) {
123146
if (agent?.config?.ai_monitoring?.enabled !== true) {
124-
shim.logger.debug('config.ai_monitoring.enabled is set to false. Skipping instrumentation.')
147+
shim.logger.debug('config.ai_monitoring.enabled is set to false.')
125148
return
126149
}
127150
// Update the tracking metric name with the version of the library
128151
// being instrumented. We do not have access to the version when
129152
// initially declaring the variable.
130153
TRACKING_METRIC = `${TRACKING_METRIC}/${shim.pkgVersion}`
131154

132-
const models = googleGenAi.Models
133-
// TODO: why is generateContentInternal and generateContentStreamInternal
134-
// exposed but not generateContent or generateContentStream?
135-
136155
/**
137156
* Instrumentation is only done to get the response headers and attach
138157
* to the active segment as @google/genai hides the headers from the functions
139158
* we are trying to instrument.
140159
* see: https://github.com/googleapis/js-genai/blob/cd0454862b4a0251d2606eeca8500b3b76004944/src/models.ts#L200
141-
*
142-
* TODO: Do we even need the headers?
143160
*/
144-
shim.wrap(models.prototype, 'processParamsForMcpUsage', function wrapProcessParamsForMcpUsage(shim, original) {
145-
return async function wrappedProcessParamsForMcpUsage(...args) {
146-
// Call the original function and capture the result
147-
const newParams = await original.apply(this, arguments)
148-
149-
// Inspect the headers in newParams
150-
const headers = newParams?.config?.httpOptions?.headers
151-
shim.logger.debug('Headers in newParams:', headers)
152-
153-
// Return the modified newParams
154-
return newParams
161+
const httpResponse = googleGenAi.HttpResponse
162+
shim.wrap(httpResponse.prototype, 'json', function wrapJson(shim, func) {
163+
return async function wrappedJson() {
164+
const response = func.apply(this, arguments)
165+
if (await response) {
166+
// TODO: this does get some headers but not 'x-goog*'
167+
const headers = this.headers
168+
if (headers) {
169+
decorateSegment({
170+
shim,
171+
result: response,
172+
apiKey: this[geminiApiKey]
173+
})
174+
}
175+
}
176+
return response
155177
}
156178
})
157179

180+
const models = googleGenAi.Models
181+
// TODO: why is generateContentInternal and generateContentStreamInternal
182+
// exposed but not generateContent or generateContentStream?
183+
158184
/**
159185
* Instruments chat completion creation
160186
* and creates the LLM events
@@ -209,47 +235,18 @@ module.exports = function initialize(agent, googleGenAi, moduleName, shim) {
209235
name: GEMINI.COMPLETION,
210236
promise: true,
211237
after({ error: err, result: response, segment, transaction }) {
212-
// Symbol.asyncIterator
213-
// FIXME: it's causing recursion
214-
shim.wrap(response, Symbol.asyncIterator, function wrapIterator(shim, orig) {
215-
const originalAsyncIterator = orig
216-
return async function * wrappedIterator() {
217-
let content = ''
218-
let role = ''
219-
let chunk
220-
let err
221-
try {
222-
const iterator = originalAsyncIterator.apply(this, arguments)
223-
for await (chunk of iterator) {
224-
if (chunk.choices?.[0]?.delta?.role) {
225-
role = chunk.choices[0].delta.role
226-
}
227-
228-
content += chunk.choices?.[0]?.delta?.content ?? ''
229-
yield chunk
230-
}
231-
} catch (streamErr) {
232-
err = streamErr
233-
} finally {
234-
chunk.choices[0].message = { role, content }
235-
// update segment duration since we want to extend the time it took to
236-
// handle the stream
237-
segment.touch()
238-
239-
recordChatCompletionMessages({
240-
agent: shim.agent,
241-
shim,
242-
segment,
243-
transaction,
244-
request,
245-
response: chunk,
246-
err
247-
})
248-
249-
addLlmMeta({ agent, transaction })
250-
}
251-
}
238+
// TODO: actually need to handle the stream
239+
recordChatCompletionMessages({
240+
agent,
241+
shim,
242+
segment,
243+
transaction,
244+
request,
245+
response,
246+
err
252247
})
248+
249+
addLlmMeta({ agent, transaction })
253250
}
254251
})
255252
})

lib/llm-events/google-genai/chat-completion-message.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ module.exports = class LlmChatCompletionMessage extends LlmEvent {
1818
transaction
1919
}) {
2020
super({ agent, segment, request, response, transaction })
21-
this.id = `${response.id}-${index}`
21+
this.id = `${response.responseId}-${index}`
2222
this.role = message?.role
2323
this.sequence = index
2424
this.completion_id = completionId

lib/llm-events/google-genai/chat-completion-summary.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ module.exports = class LlmChatCompletionSummary extends LlmEvent {
1818
}
1919
this['response.number_of_messages'] = requestMessagesLength + (response?.candidates?.length || 0)
2020
this['response.choices.finish_reason'] = response?.candidates?.[0]?.finishReason
21-
this['request.max_tokens'] = request.max_tokens
22-
this['request.temperature'] = request.temperature
21+
this['request.max_tokens'] = request.config?.maxOutputTokens
22+
this['request.temperature'] = request.config?.temperature
2323
}
2424
}

lib/llm-events/google-genai/embedding.js

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -20,19 +20,18 @@ const defaultParams = {}
2020
class LlmEmbedding extends LlmEvent {
2121
constructor(params = defaultParams) {
2222
super(params)
23-
const { agent, input } = params
24-
const tokenCb = agent?.llm?.tokenCountCallback
23+
const { agent, request } = params
2524

25+
// TODO: idk if this is correct for input
2626
this.input = agent.config?.ai_monitoring?.record_content?.enabled
27-
? input
27+
? request?.contents
2828
: undefined
2929
this.error = params.isError
3030
this.duration = params.segment.getDurationInMillis()
31-
32-
// Even if not recording content, we should use the local token counting callback to record token usage
33-
if (typeof tokenCb === 'function') {
34-
this.token_count = tokenCb(this.bedrockCommand.modelId, input)
35-
}
31+
// TODO: idk if this is correct for token count
32+
this.token_count = Array.isArray(params.response?.embeddings)
33+
? params.response.embeddings.reduce((sum, e) => sum + (e?.values?.length || 0), 0)
34+
: undefined
3635
}
3736
}
3837

lib/llm-events/google-genai/event.js

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,11 +14,11 @@ module.exports = class LlmEvent extends BaseEvent {
1414

1515
this.id = makeId(36)
1616
this.appName = agent.config.applications()[0]
17-
this.request_id = response?.headers?.['x-request-id']
17+
this.request_id = response?.headers?.['x-request-id'] // TODO: what is this in Gemini?
1818
this.trace_id = transaction?.traceId
1919
this.span_id = segment?.id
20-
this['response.model'] = response.modelVersion
21-
this['request.model'] = request.model
20+
this['response.model'] = response?.modelVersion
21+
this['request.model'] = request?.model
2222
this.vendor = 'gemini'
2323
this.ingest_source = 'Node'
2424
this.metadata = agent
@@ -37,6 +37,7 @@ module.exports = class LlmEvent extends BaseEvent {
3737

3838
responseAttrs(response) {
3939
// TODO: no response.headers?
40+
// relevant headers will be prefixed with 'x-goog'?
4041
this['response.organization'] = response?.headers?.['google-organization']
4142
this['response.headers.llmVersion'] = response?.headers?.['gemini-version']
4243
this['response.headers.ratelimitLimitRequests'] =

lib/symbols.js

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ module.exports = {
2525
openAiHeaders: Symbol('openAiHeaders'),
2626
openAiApiKey: Symbol('openAiApiKey'),
2727
geminiApiKey: Symbol('geminiApiKey'),
28+
googleGenAiHeaders: Symbol('googleGenAiHeaders'),
2829
parentSegment: Symbol('parentSegment'),
2930
langchainRunId: Symbol('runId'),
3031
otelSynthesis: Symbol('otelSynthesis'),
Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,90 @@
1+
/*
2+
* Copyright 2025 New Relic Corporation. All rights reserved.
3+
* SPDX-License-Identifier: Apache-2.0
4+
*/
5+
6+
'use strict'
7+
const test = require('node:test')
8+
const assert = require('node:assert')
9+
const helper = require('../../../lib/agent_helper')
10+
const GenericShim = require('../../../../lib/shim/shim')
11+
const sinon = require('sinon')
12+
13+
test('@google/genai unit.tests', async (t) => {
14+
t.beforeEach(function (ctx) {
15+
ctx.nr = {}
16+
const sandbox = sinon.createSandbox()
17+
const agent = helper.loadMockedAgent()
18+
agent.config.ai_monitoring = { enabled: true, streaming: { enabled: true } }
19+
const shim = new GenericShim(agent, '@google/genai')
20+
sandbox.stub(shim.logger, 'debug')
21+
sandbox.stub(shim.logger, 'warn')
22+
23+
ctx.nr.agent = agent
24+
ctx.nr.shim = shim
25+
ctx.nr.sandbox = sandbox
26+
ctx.nr.initialize = require('../../../../lib/instrumentation/@google/genai.js')
27+
})
28+
29+
t.afterEach(function (ctx) {
30+
helper.unloadAgent(ctx.nr.agent)
31+
ctx.nr.sandbox.restore()
32+
})
33+
34+
function getMockModule() {
35+
function GoogleGenAi() {}
36+
GoogleGenAi.HttpResponse = function () {}
37+
GoogleGenAi.HttpResponse.prototype.json = async function () {}
38+
GoogleGenAi.Models = function () {}
39+
GoogleGenAi.Models.prototype.generateContentInternal = async function () {}
40+
GoogleGenAi.Models.prototype.generateContentStreamInternal = async function () {}
41+
GoogleGenAi.Models.prototype.embedContent = async function () {}
42+
return GoogleGenAi
43+
}
44+
45+
await t.test('should instrument @google/genai', (t, end) => {
46+
const { shim, agent, initialize } = t.nr
47+
const MockGoogleGenAi = getMockModule()
48+
initialize(agent, MockGoogleGenAi, '@google/genai', shim)
49+
assert.equal(shim.logger.debug.callCount, 0, 'should not log debug messages')
50+
const isWrapped = shim.isWrapped(MockGoogleGenAi.Models.prototype.generateContentInternal)
51+
assert.equal(isWrapped, true, 'should wrap models generateContentInternal')
52+
end()
53+
})
54+
55+
await t.test(
56+
'should not instrument generate content streams if ai_monitoring.streaming.enabled is false',
57+
(t, end) => {
58+
const { shim, agent, initialize } = t.nr
59+
agent.config.ai_monitoring.streaming.enabled = false
60+
const MockGoogleGenAi = getMockModule()
61+
initialize(agent, MockGoogleGenAi, '@google/genai', shim)
62+
const models = new MockGoogleGenAi.Models()
63+
64+
helper.runInTransaction(agent, async () => {
65+
await models.generateContentStreamInternal()
66+
assert.equal(
67+
shim.logger.warn.args[0][0],
68+
'`ai_monitoring.streaming.enabled` is set to `false`, stream will not be instrumented.'
69+
)
70+
end()
71+
})
72+
}
73+
)
74+
75+
await t.test(
76+
'should not register instrumentation if ai_monitoring.enabled is false',
77+
(t, end) => {
78+
const { shim, agent, initialize } = t.nr
79+
const MockGoogleGenAi = getMockModule()
80+
agent.config.ai_monitoring = { enabled: false }
81+
82+
initialize(agent, MockGoogleGenAi, '@google/genai', shim)
83+
assert.equal(shim.logger.debug.callCount, 1, 'should log 1 debug message')
84+
assert.equal(shim.logger.debug.args[0][0], 'config.ai_monitoring.enabled is set to false.')
85+
const isWrapped = shim.isWrapped(MockGoogleGenAi.Models.prototype.generateContentInternal)
86+
assert.equal(isWrapped, false, 'should not wrap models generate content internal')
87+
end()
88+
}
89+
)
90+
})

test/unit/llm-events/google-genai/chat-completion-message.test.js

Lines changed: 39 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,42 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6-
// TODO
6+
'use strict'
7+
8+
const test = require('node:test')
9+
const assert = require('node:assert')
10+
const LlmChatCompletionMessage = require('../../../../lib/llm-events/google-genai/chat-completion-message')
11+
const helper = require('../../../lib/agent_helper')
12+
const { req, res, getExpectedResult } = require('./common')
13+
14+
test.beforeEach((ctx) => {
15+
ctx.nr = {}
16+
ctx.nr.agent = helper.loadMockedAgent()
17+
})
18+
19+
test.afterEach((ctx) => {
20+
helper.unloadAgent(ctx.nr.agent)
21+
})
22+
23+
test('should create a LlmChatCompletionMessage event', (t, end) => {
24+
const { agent } = t.nr
25+
const api = helper.getAgentApi()
26+
helper.runInTransaction(agent, (tx) => {
27+
api.startSegment('fakeSegment', false, () => {
28+
const segment = api.shim.getActiveSegment()
29+
const chatMessageEvent = new LlmChatCompletionMessage({
30+
transaction: tx,
31+
agent,
32+
segment,
33+
request: req,
34+
response: res,
35+
message: req.contents,
36+
index: 0
37+
})
38+
// TODO: add the expected result
39+
const expected = getExpectedResult(tx, {}, 'message')
40+
assert.deepEqual(chatMessageEvent, expected)
41+
end()
42+
})
43+
})
44+
})

0 commit comments

Comments
 (0)