Skip to content

Commit 89d5f7b

Browse files
committed
wip: handles error in stream test
1 parent f7a95d9 commit 89d5f7b

File tree

3 files changed

+60
-10
lines changed

3 files changed

+60
-10
lines changed

test/versioned/openai/chat-completions-v5.test.js

Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -408,6 +408,48 @@ test('responses.create', async (t) => {
408408
})
409409
})
410410

411+
await t.test('handles error in stream', (t, end) => {
412+
const { client, agent } = t.nr
413+
helper.runInTransaction(agent, async (tx) => {
414+
const content = 'bad stream'
415+
const model = 'gpt-4'
416+
417+
try {
418+
const stream = await client.responses.create({
419+
max_tokens: 100,
420+
temperature: 0.5,
421+
model,
422+
input: [
423+
{ role: 'user', content },
424+
{ role: 'user', content: 'What does 1 plus 1 equal?' }
425+
],
426+
stream: true
427+
})
428+
for await (const chunk of stream) {
429+
continue
430+
}
431+
} catch (err) {
432+
assert.ok(err.message, 'exceeded count')
433+
const events = agent.customEventAggregator.events.toArray()
434+
assert.equal(events.length, 4)
435+
const chatSummary = events.filter(([{ type }]) => type === 'LlmChatCompletionSummary')[0]
436+
assertChatCompletionSummary({ tx, model, chatSummary, error: true })
437+
assert.equal(tx.exceptions.length, 1)
438+
// only asserting message and completion_id as the rest of the attrs
439+
// are asserted in other tests
440+
match(tx.exceptions[0], {
441+
customAttributes: {
442+
'error.message': /terminated|Premature close/,
443+
completion_id: /\w{32}/
444+
}
445+
})
446+
447+
tx.end()
448+
end()
449+
}
450+
})
451+
})
452+
411453
await t.test('should not create llm events when ai_monitoring.streaming.enabled is false', (t, end) => {
412454
const { client, agent } = t.nr
413455
agent.config.ai_monitoring.streaming.enabled = false

test/versioned/openai/mock-server-v5.js

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ module.exports = openaiMockServer
99

1010
const http = require('node:http')
1111
const RESPONSES = require('./mock-responses-api-responses')
12-
const STREAM_CHUNKS = require('./stream-chunks-v5')
12+
const { chunks, errorChunk } = require('./stream-chunks-v5')
1313
const { Readable } = require('node:stream')
1414

1515
/**
@@ -79,11 +79,7 @@ function handler(req, res) {
7979
} else {
8080
outStream = new Readable({
8181
read() {
82-
for (let i = 0; i < STREAM_CHUNKS.length; i++) {
83-
const chunkString = JSON.stringify(STREAM_CHUNKS[i])
84-
this.push(`data: ${chunkString}\n\n`)
85-
// TODO: send an error chunk instead
86-
}
82+
this.push(`data: ${JSON.stringify(errorChunk)}\n\n`)
8783
this.push('data: [DONE]\n\n')
8884
this.push(null)
8985
}
@@ -109,8 +105,8 @@ function finiteStream() {
109105
return new Readable({
110106
read() {
111107
// This is how the data is streamed from openai
112-
for (let i = 0; i < STREAM_CHUNKS.length; i++) {
113-
const chunkString = JSON.stringify(STREAM_CHUNKS[i])
108+
for (let i = 0; i < chunks.length; i++) {
109+
const chunkString = JSON.stringify(chunks[i])
114110
this.push(`data: ${chunkString}\n\n`)
115111
}
116112
this.push('data: [DONE]\n\n')

test/versioned/openai/stream-chunks-v5.js

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,19 @@
55

66
'use strict'
77

8-
const chunks = []
9-
module.exports = chunks
8+
const errorChunk = {
9+
response: {
10+
message: 'fetch failed',
11+
stack: 'TypeError: fetch failed',
12+
cause: {
13+
code: 'BAD_STREAM',
14+
reason: 'internal error',
15+
library: 'openai',
16+
}
17+
}
18+
}
1019

20+
const chunks = []
1121
// Setup chunks
1222
chunks.push({
1323
response: {
@@ -139,3 +149,5 @@ chunks.push({
139149
sequence_number: 9,
140150
type: 'response_completed',
141151
})
152+
153+
module.exports = { chunks, errorChunk }

0 commit comments

Comments (0)