Skip to content

Commit 5259671

Browse files
committed
all versioned tests ok except for streaming
1 parent 01ec03a commit 5259671

File tree

5 files changed

+40
-119
lines changed

5 files changed

+40
-119
lines changed

lib/instrumentation/@google/genai.js

Lines changed: 17 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -154,28 +154,31 @@ function instrumentStream ({ agent, shim, request, response, segment, transactio
154154
// time it took to handle the stream
155155
segment.touch()
156156

157+
// result will be {value: undefined, done: true}
158+
// when the stream is done, so we need to create
159+
// a mock GenerateContentResponse object with
160+
// the entire message
161+
//
162+
// also need to enter this block if there was an
163+
// error, so we can record it
157164
if (result?.done || err) {
158-
// result will be {value: undefined, done: true}
159-
// when the stream is done, so we need to create
160-
// a mock GenerateContentResponse object with
161-
// the entire message
162-
//
163-
// also need to enter this block if there was an
164-
// error, so we can record it
165-
content.parts[0].text = entireMessage
166-
result.value = {
167-
candidates: [
168-
{ content, finishReason }
169-
],
170-
modelVersion
165+
if (content) {
166+
content.parts[0].text = entireMessage
167+
result.value = {
168+
candidates: [
169+
{ content, finishReason }
170+
],
171+
modelVersion
172+
}
171173
}
174+
172175
recordChatCompletionMessages({
173176
agent: shim.agent,
174177
shim,
175178
segment,
176179
transaction,
177180
request,
178-
response: result.value,
181+
response: result?.value,
179182
err
180183
})
181184
}

test/versioned/google-genai/chat-completions.test.js

Lines changed: 13 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ test.afterEach((ctx) => {
5959
})
6060

6161
test('should create span on successful models generateContent', (t, end) => {
62-
const { client, agent, host, port } = t.nr
62+
const { client, agent } = t.nr
6363
helper.runInTransaction(agent, async (tx) => {
6464
const result = await client.models.generateContent({
6565
model: 'gemini-2.0-flash',
@@ -69,20 +69,18 @@ test('should create span on successful models generateContent', (t, end) => {
6969
assert.equal(result.headers, undefined, 'should remove response headers from user result')
7070
assert.equal(result.candidates[0].content.parts[0].text, '1 plus 2 is 3.')
7171

72-
const name = `External/${host}:${port}/chat/completions`
7372
assertSegments(
7473
tx.trace,
7574
tx.trace.root,
76-
[GEMINI.COMPLETION, [name]],
75+
[GEMINI.COMPLETION],
7776
{ exact: false }
7877
)
7978

8079
tx.end()
8180
assertSpanKind({
8281
agent,
8382
segments: [
84-
{ name: GEMINI.COMPLETION, kind: 'internal' },
85-
{ name, kind: 'client' }
83+
{ name: GEMINI.COMPLETION, kind: 'internal' }
8684
]
8785
})
8886
end()
@@ -151,12 +149,14 @@ test('should create span on successful models generateContentStream', (t, end) =
151149
let chunk = {}
152150
let res = ''
153151
for await (chunk of stream) {
154-
res += chunk?.text
152+
assert.ok(chunk.text, 'should have text in chunk')
153+
res += chunk.text
155154
}
155+
156156
assert.equal(chunk.headers, undefined, 'should remove response headers from user result')
157157
assert.equal(chunk.candidates[0].content.role, 'model')
158158
const expectedRes = responses.get(content)
159-
assert.equal(chunk.candidates[0].content.parts[0].text, expectedRes.streamData)
159+
assert.equal(chunk.candidates[0].content.parts[0].text, expectedRes.candidates[0].content.parts[0].text)
160160
assert.equal(chunk.candidates[0].content.parts[0].text, res)
161161

162162
assertSegments(
@@ -186,16 +186,9 @@ test('should create chat completion message and summary for every message sent i
186186
})
187187

188188
let res = ''
189-
190-
let i = 0
191189
for await (const chunk of stream) {
192-
res += chunk?.text
193-
194-
// break stream
195-
if (i === 10) {
196-
break
197-
}
198-
i++
190+
assert.ok(chunk.text, 'should have text in chunk')
191+
res += chunk.text
199192
}
200193

201194
const events = agent.customEventAggregator.events.toArray()
@@ -246,7 +239,8 @@ test('should call the tokenCountCallback in streaming', (t, end) => {
246239
})
247240

248241
for await (const chunk of stream) {
249-
res += chunk.choices[0]?.delta?.content
242+
assert.ok(chunk.text, 'should have text in chunk')
243+
res += chunk.text
250244
}
251245

252246
const events = agent.customEventAggregator.events.toArray()
@@ -272,20 +266,15 @@ test('handles error in stream', (t, end) => {
272266
const content = 'bad stream'
273267
const model = 'gemini-2.0-flash'
274268
const stream = await client.models.generateContentStream({
275-
config: {
276-
maxOutputTokens: 100,
277-
temperature: 0.5
278-
},
279269
model,
280-
contents: [content, 'What does 1 plus 1 equal?'],
281-
stream: true
270+
contents: [content, 'What does 1 plus 1 equal?']
282271
})
283272

284273
let res = ''
285274

286275
try {
287276
for await (const chunk of stream) {
288-
res += chunk.choices[0]?.delta?.content
277+
if (chunk.text) res += chunk?.text
289278
}
290279
} catch (err) {
291280
assert.ok(res)

test/versioned/google-genai/embeddings.test.js

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ test.afterEach((ctx) => {
5656
})
5757

5858
test('should create span on successful embedding create', (t, end) => {
59-
const { client, agent, host, port } = t.nr
59+
const { client, agent } = t.nr
6060
helper.runInTransaction(agent, async (tx) => {
6161
const results = await client.models.embedContent({
6262
contents: 'This is an embedding test.',
@@ -65,11 +65,10 @@ test('should create span on successful embedding create', (t, end) => {
6565

6666
assert.equal(results.headers, undefined, 'should remove response headers from user result')
6767

68-
const name = `External/${host}:${port}/embeddings`
6968
assertSegments(
7069
tx.trace,
7170
tx.trace.root,
72-
[GEMINI.EMBEDDING, [name]],
71+
[GEMINI.EMBEDDING],
7372
{
7473
exact: false
7574
}
@@ -78,8 +77,7 @@ test('should create span on successful embedding create', (t, end) => {
7877
assertSpanKind({
7978
agent,
8079
segments: [
81-
{ name: GEMINI.EMBEDDING, kind: 'internal' },
82-
{ name, kind: 'client' }
80+
{ name: GEMINI.EMBEDDING, kind: 'internal' }
8381
]
8482
})
8583
end()

test/versioned/google-genai/mock-server.js

Lines changed: 4 additions & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright 2023 New Relic Corporation. All rights reserved.
2+
* Copyright 2025 New Relic Corporation. All rights reserved.
33
* SPDX-License-Identifier: Apache-2.0
44
*/
55

@@ -8,9 +8,7 @@
88
module.exports = GoogleGenAIMockServer
99

1010
const http = require('node:http')
11-
const { Readable } = require('node:stream')
1211
const RESPONSES = require('./mock-responses')
13-
const crypto = require('crypto')
1412

1513
/**
1614
* Build a mock server that listens on a 127.0.0.1 and a random port that
@@ -68,80 +66,13 @@ function handler(req, res) {
6866

6967
const { code, body } = RESPONSES.get(prompt)
7068
res.statusCode = code
69+
res.write(JSON.stringify(body))
70+
res.end()
7171

72-
if (prompt.toLowerCase().includes('stream')) {
73-
const streamData = body.candidates[0].content.parts[0].text
74-
let asyncGen
75-
if (streamData !== 'do random') {
76-
asyncGen = finiteAsyncGen(streamData, { ...body })
77-
} else {
78-
asyncGen = randomAsyncGen({ ...body })
79-
}
80-
// return asyncGen
81-
82-
// Write each chunk from the async iterator to the response
83-
(async () => {
84-
try {
85-
for await (const chunk of asyncGen) {
86-
if (chunk?.text) res.write(chunk.text)
87-
}
88-
res.end()
89-
} catch (err) {
90-
res.destroy(err)
91-
}
92-
})()
93-
} else {
94-
res.write(JSON.stringify(body))
95-
res.end()
96-
}
72+
// TODO: Mock streaming responses
9773
})
9874
}
9975

100-
/**
101-
* Mocks the Google GenAI streaming API by returning a Promise that resolves
102-
* to an async generator, which yields response chunks as the real API would.
103-
*
104-
* @param {string} dataToStream The string to split and stream.
105-
* @param {object} chunkTemplate The template for each chunk.
106-
* @returns {object} An async generator that yields chunks of data.
107-
*/
108-
function finiteAsyncGen(dataToStream, chunkTemplate) {
109-
const parts = dataToStream.split(' ')
110-
const asyncGen = (async function * () {
111-
for (let i = 0; i < parts.length; i++) {
112-
const content = i === parts.length - 1 ? parts[i] : `${parts[i]} `
113-
const chunk = chunkTemplate
114-
chunk.candidates[0].content.parts[0].text = content
115-
chunk.text = content
116-
yield chunk
117-
}
118-
// End the stream
119-
yield undefined
120-
})()
121-
return asyncGen
122-
}
123-
124-
/**
125-
* Creates a stream that will stream an infinite number of GoogleGenAI stream data
126-
* chunks using an async generator.
127-
*
128-
* @param {object} chunkTemplate An object that is shaped like a GoogleGenAI stream
129-
* data object.
130-
* @returns {object} An async generator that yields chunks of data.
131-
*/
132-
function randomAsyncGen(chunkTemplate) {
133-
const asyncGen = (async function * () {
134-
while (true) {
135-
const data = crypto.randomBytes(16)
136-
// Deep clone to avoid mutating the original template
137-
const chunk = JSON.parse(JSON.stringify(chunkTemplate))
138-
chunk.value.candidates[0].content.parts[0].text = data.toString('base64')
139-
yield chunk
140-
}
141-
})()
142-
return asyncGen
143-
}
144-
14576
function getShortenedPrompt(reqBody) {
14677
try {
14778
const prompt = reqBody.contents?.[0]?.parts?.[0]?.text ||

test/versioned/google-genai/package.json

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,15 +9,15 @@
99
"version": "0.0.0",
1010
"private": true,
1111
"engines": {
12-
"node": ">=18"
12+
"node": ">=20"
1313
},
1414
"tests": [
1515
{
1616
"engines": {
17-
"node": ">=18"
17+
"node": ">=20"
1818
},
1919
"dependencies": {
20-
"@google/genai": "^1.1.0"
20+
"@google/genai": "^1.2.0"
2121
},
2222
"files": [
2323
"chat-completions.test.js",

0 commit comments

Comments (0)