Skip to content

Commit a908467

Browse files
committed
wip: tests
1 parent 7b5bc64 commit a908467

File tree

4 files changed

+211
-34
lines changed

4 files changed

+211
-34
lines changed

lib/instrumentation/@google/genai.js

Lines changed: 12 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
'use strict'
77

8-
const { geminiApiKey, googleGenAiHeaders } = require('../../../lib/symbols')
8+
const { googleGenAiHeaders } = require('../../../lib/symbols')
99
const {
1010
LlmChatCompletionMessage,
1111
LlmChatCompletionSummary,
@@ -80,7 +80,7 @@ function recordChatCompletionMessages({
8080
response = {}
8181
}
8282

83-
// response.headers = segment[]
83+
response.headers = segment[googleGenAiHeaders]
8484
// explicitly end segment to get a consistent duration
8585
// for both LLM events and the segment
8686
segment.end()
@@ -120,28 +120,6 @@ function recordChatCompletionMessages({
120120
delete response.headers
121121
}
122122

123-
/**
124-
* Adds apiKey and response headers to the active segment
125-
* on symbols
126-
*
127-
* @param {object} params input params
128-
* @param {Shim} params.shim instance of shim
129-
* @param {object} params.result from openai request
130-
* @param {string} params.apiKey api key from openai client
131-
*/
132-
function decorateSegment({ shim, result, apiKey }) {
133-
const segment = shim.getActiveSegment()
134-
135-
if (segment) {
136-
segment[geminiApiKey] = apiKey
137-
138-
const headers = result?.response?.headers
139-
? Object.fromEntries(result.response.headers)
140-
: { ...result?.headers }
141-
segment[googleGenAiHeaders] = headers
142-
}
143-
}
144-
145123
module.exports = function initialize(agent, googleGenAi, moduleName, shim) {
146124
if (agent?.config?.ai_monitoring?.enabled !== true) {
147125
shim.logger.debug('config.ai_monitoring.enabled is set to false.')
@@ -161,16 +139,17 @@ module.exports = function initialize(agent, googleGenAi, moduleName, shim) {
161139
const httpResponse = googleGenAi.HttpResponse
162140
shim.wrap(httpResponse.prototype, 'json', function wrapJson(shim, func) {
163141
return async function wrappedJson() {
164-
const response = func.apply(this, arguments)
165-
if (await response) {
142+
const response = await func.apply(this, arguments)
143+
if (response) {
166144
// TODO: this does get some headers but not 'x-goog*'
167145
const headers = this.headers
168146
if (headers) {
169-
decorateSegment({
170-
shim,
171-
result: response,
172-
apiKey: this[geminiApiKey]
173-
})
147+
// decorate the segment with the headers
148+
const segment = shim.getActiveSegment()
149+
150+
if (segment) {
151+
segment[googleGenAiHeaders] = headers
152+
}
174153
}
175154
}
176155
return response
@@ -181,6 +160,8 @@ module.exports = function initialize(agent, googleGenAi, moduleName, shim) {
181160
// TODO: why is generateContentInternal and generateContentStreamInternal
182161
// exposed but not generateContent or generateContentStream?
183162

163+
// TODO: look at computeTokens and countTokens?
164+
184165
/**
185166
* Instruments chat completion creation
186167
* and creates the LLM events

lib/llm-events/google-genai/embedding.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,8 @@ class LlmEmbedding extends LlmEvent {
2121
constructor(params = defaultParams) {
2222
super(params)
2323
const { agent, request } = params
24+
// see: https://ai.google.dev/api/embeddings#v1beta.ContentEmbedding
2425

25-
// TODO: idk if this is correct for input
2626
this.input = agent.config?.ai_monitoring?.record_content?.enabled
2727
? request?.contents
2828
: undefined

test/unit/llm-events/google-genai/chat-completion-summary.test.js

Lines changed: 77 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,80 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6-
// TODO
6+
'use strict'
7+
8+
const test = require('node:test')
9+
const assert = require('node:assert')
10+
const LlmChatCompletionSummary = require('../../../../lib/llm-events/google-genai/chat-completion-summary')
11+
const helper = require('../../../lib/agent_helper')
12+
const { req, res, getExpectedResult } = require('./common')
13+
14+
test.beforeEach((ctx) => {
15+
ctx.nr = {}
16+
ctx.nr.agent = helper.loadMockedAgent()
17+
})
18+
19+
test.afterEach((ctx) => {
20+
helper.unloadAgent(ctx.nr.agent)
21+
})
22+
23+
test('should properly create a LlmChatCompletionSummary event', (t, end) => {
24+
const { agent } = t.nr
25+
const api = helper.getAgentApi()
26+
helper.runInTransaction(agent, (tx) => {
27+
api.startSegment('fakeSegment', false, () => {
28+
const segment = api.shim.getActiveSegment()
29+
segment.end()
30+
const chatSummaryEvent = new LlmChatCompletionSummary({
31+
agent,
32+
segment,
33+
transaction: tx,
34+
request: req,
35+
response: res // TODO: res fixture does not match the Gemini GenerateContentResponse shape yet — update common.js
36+
})
37+
const expected = getExpectedResult(tx, chatSummaryEvent, 'summary')
38+
assert.deepEqual(chatSummaryEvent, expected)
39+
end()
40+
})
41+
})
42+
})
43+
44+
test('should set error to true', (ctx, end) => {
45+
const { agent } = ctx.nr
46+
helper.runInTransaction(agent, () => {
47+
const chatSummaryEvent = new LlmChatCompletionSummary({
48+
agent,
49+
transaction: null,
50+
segment: null,
51+
request: {},
52+
response: {},
53+
withError: true
54+
})
55+
assert.equal(true, chatSummaryEvent.error)
56+
end()
57+
})
58+
})
59+
60+
test('should set `llm.` attributes from custom attributes', (t, end) => {
61+
const { agent } = t.nr
62+
const api = helper.getAgentApi()
63+
const conversationId = 'convo-id'
64+
helper.runInTransaction(agent, () => {
65+
api.addCustomAttribute('llm.conversation_id', conversationId)
66+
api.addCustomAttribute('llm.foo', 'bar')
67+
api.addCustomAttribute('llm.bar', 'baz')
68+
api.addCustomAttribute('rando-key', 'rando-value')
69+
const chatSummaryEvent = new LlmChatCompletionSummary({
70+
agent,
71+
segment: null,
72+
transaction: null,
73+
request: {},
74+
response: {}
75+
})
76+
assert.equal(chatSummaryEvent['llm.conversation_id'], conversationId)
77+
assert.equal(chatSummaryEvent['llm.foo'], 'bar')
78+
assert.equal(chatSummaryEvent['llm.bar'], 'baz')
79+
assert.ok(!chatSummaryEvent['rando-key'])
80+
end()
81+
})
82+
})

test/unit/llm-events/google-genai/embedding.test.js

Lines changed: 121 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,124 @@
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

6-
// TODO
6+
'use strict'
7+
8+
const test = require('node:test')
9+
const assert = require('node:assert')
10+
const LlmEmbedding = require('../../../../lib/llm-events/google-genai/embedding')
11+
const helper = require('../../../lib/agent_helper')
12+
const { res, getExpectedResult } = require('./common')
13+
14+
test.beforeEach((ctx) => {
15+
ctx.nr = {}
16+
ctx.nr.agent = helper.loadMockedAgent()
17+
})
18+
19+
test.afterEach((ctx) => {
20+
helper.unloadAgent(ctx.nr.agent)
21+
})
22+
23+
test('should properly create a LlmEmbedding event', (t, end) => {
24+
const { agent } = t.nr
25+
const req = {
26+
contents: 'This is my test contents',
27+
model: 'gemini-2.0-flash'
28+
}
29+
30+
const api = helper.getAgentApi()
31+
helper.runInTransaction(agent, (tx) => {
32+
api.startSegment('fakeSegment', false, () => {
33+
const segment = api.shim.getActiveSegment()
34+
segment.end()
35+
const embeddingEvent = new LlmEmbedding({
36+
agent,
37+
segment,
38+
transaction: tx,
39+
request: req,
40+
response: res
41+
})
42+
const expected = getExpectedResult(tx, embeddingEvent, 'embedding')
43+
assert.deepEqual(embeddingEvent, expected)
44+
end()
45+
})
46+
})
47+
})
48+
;[
49+
{ type: 'string', value: 'test contents', expected: 'test contents' },
50+
{
51+
type: 'array of strings',
52+
value: ['test contents', 'test input2'],
53+
expected: 'test contents,test input2'
54+
},
55+
{ type: 'array of numbers', value: [1, 2, 3, 4], expected: '1,2,3,4' },
56+
{
57+
type: 'array of array of numbers',
58+
value: [
59+
[1, 2],
60+
[3, 4],
61+
[5, 6]
62+
],
63+
expected: '1,2,3,4,5,6'
64+
}
65+
].forEach(({ type, value, expected }) => {
66+
test(`should properly serialize contents when it is a ${type}`, (t, end) => {
67+
const { agent } = t.nr
68+
const embeddingEvent = new LlmEmbedding({
69+
agent,
70+
segment: null,
71+
transaction: null,
72+
request: { contents: value },
73+
response: {}
74+
})
75+
assert.equal(embeddingEvent.input, expected)
76+
end()
77+
})
78+
})
79+
80+
test('should set error to true', (t, end) => {
81+
const { agent } = t.nr
82+
const req = {
83+
contents: 'This is my test contents',
84+
model: 'gemini-2.0-flash'
85+
}
86+
87+
const api = helper.getAgentApi()
88+
helper.runInTransaction(agent, () => {
89+
api.startSegment('fakeSegment', false, () => {
90+
const segment = api.shim.getActiveSegment()
91+
const embeddingEvent = new LlmEmbedding({
92+
agent,
93+
segment,
94+
request: req,
95+
response: res,
96+
withError: true
97+
})
98+
assert.equal(true, embeddingEvent.error)
99+
end()
100+
})
101+
})
102+
})
103+
104+
test('respects record_content', (t, end) => {
105+
const { agent } = t.nr
106+
const req = {
107+
contents: 'This is my test contents',
108+
model: 'gemini-2.0-flash'
109+
}
110+
agent.config.ai_monitoring.record_content.enabled = false
111+
112+
const api = helper.getAgentApi()
113+
helper.runInTransaction(agent, () => {
114+
const segment = api.shim.getActiveSegment()
115+
const embeddingEvent = new LlmEmbedding({
116+
agent,
117+
segment,
118+
request: req,
119+
response: res
120+
})
121+
assert.equal(embeddingEvent.input, undefined)
122+
end()
123+
})
124+
})
125+
126+
// TODO: tokens tests?

0 commit comments

Comments
 (0)