Skip to content

Commit b1ffd6d

Browse files
committed
fix(autocorrelation): Handle streaming errors in AI chat handler
Errors from streamText (e.g., OpenAI quota exceeded, invalid prompt) were not caught in handleStreamChat, causing unhandled rejections. The renderer never received StreamChatEnd, leaving the UI in a broken state. Now errors are caught, forwarded as error chunks, and StreamChatEnd is always sent. Also prevents NoOutputGeneratedError from propagating when response.usage is rejected after a stream error. Fixes K6-STUDIO-VP
1 parent 7e122fe commit b1ffd6d

File tree

3 files changed

+153
-4
lines changed

3 files changed

+153
-4
lines changed

src/handlers/ai/index.test.ts

Lines changed: 139 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
1+
/* eslint-disable @typescript-eslint/unbound-method */
2+
import { UIMessage } from 'ai'
3+
import { afterEach, describe, expect, it, vi } from 'vitest'
4+
5+
import { AiHandler, StreamChatChunk, StreamChatEnd } from './types'
6+
7+
// Stub Electron's ipcMain so importing ./index can register its IPC
// listeners without a real main process; tests later read the registered
// handlers back out of the `on` spy's recorded calls.
vi.mock('electron', () => ({
  ipcMain: {
    on: vi.fn(),
  },
}))

// Model factories are plain spies; individual tests override their
// resolved values (e.g. a model whose doStream throws).
vi.mock('./model', () => ({
  getOpenAiModel: vi.fn(),
  getGrafanaAssistantModel: vi.fn(),
}))

// Stub the assistant auth initializer so it is a no-op in tests.
vi.mock('./a2a/assistantAuth', () => ({
  initialize: vi.fn(),
}))
21+
22+
function createMockWebContents() {
23+
return { send: vi.fn() } as unknown as Electron.WebContents
24+
}
25+
26+
function createUserMessage(text: string): UIMessage {
27+
return {
28+
id: 'msg-1',
29+
role: 'user',
30+
parts: [{ type: 'text', text }],
31+
}
32+
}
33+
34+
async function getStreamChatHandler() {
35+
const { ipcMain } = await import('electron')
36+
const { initialize } = await import('./index')
37+
initialize()
38+
39+
const calls = vi.mocked(ipcMain.on).mock.calls
40+
const entry = calls.find(
41+
([channel]) => channel === (AiHandler.StreamChat as string)
42+
)
43+
return entry![1] as (...args: unknown[]) => Promise<void>
44+
}
45+
46+
function getSentChunks(webContents: Electron.WebContents) {
47+
return vi
48+
.mocked(webContents.send)
49+
.mock.calls.filter(
50+
(call): call is [string, StreamChatChunk] =>
51+
call[0] === (AiHandler.StreamChatChunk as string)
52+
)
53+
.map(([, data]) => data)
54+
}
55+
56+
function getSentEnds(webContents: Electron.WebContents) {
57+
return vi
58+
.mocked(webContents.send)
59+
.mock.calls.filter(
60+
(call): call is [string, StreamChatEnd] =>
61+
call[0] === (AiHandler.StreamChatEnd as string)
62+
)
63+
.map(([, data]) => data)
64+
}
65+
66+
describe('handleStreamChat error handling', () => {
  afterEach(() => {
    // Restore spies and drop the module cache so each test re-registers
    // its IPC handlers from a fresh import of ./index.
    vi.restoreAllMocks()
    vi.resetModules()
  })

  it('forwards the error chunk and sends StreamChatEnd when the model throws', async () => {
    // Model whose stream rejects immediately, simulating a provider
    // failure (e.g. an OpenAI quota error) surfacing from streamText.
    const { getOpenAiModel } = await import('./model')
    vi.mocked(getOpenAiModel).mockResolvedValue({
      specificationVersion: 'v2',
      provider: 'test',
      modelId: 'test',
      supportedUrls: {},
      // eslint-disable-next-line @typescript-eslint/require-await
      doStream: async () => {
        throw new Error('insufficient_quota: You exceeded your current quota')
      },
      // eslint-disable-next-line @typescript-eslint/require-await
      doGenerate: async () => {
        throw new Error('not implemented')
      },
    })

    const handler = await getStreamChatHandler()
    const webContents = createMockWebContents()

    // Invoke the IPC listener directly with a fake event and request.
    await handler(
      { sender: webContents },
      {
        id: 'req-1',
        trigger: 'submit-message',
        messages: [createUserMessage('Hello')],
      }
    )

    // The AI SDK's toUIMessageStream emits an error chunk with the real error
    const chunks = getSentChunks(webContents)
    const errorChunk = chunks.find((data) => data.chunk?.type === 'error')
    expect(errorChunk).toBeDefined()
    expect(errorChunk?.chunk).toHaveProperty('errorText')

    // StreamChatEnd must always be sent so the renderer doesn't hang
    const ends = getSentEnds(webContents)
    expect(ends).toHaveLength(1)
    expect(ends[0]?.id).toBe('req-1')
  })

  it('sends an error chunk and StreamChatEnd when message conversion fails', async () => {
    const handler = await getStreamChatHandler()
    const webContents = createMockWebContents()

    await handler(
      { sender: webContents },
      {
        id: 'req-2',
        trigger: 'submit-message',
        // Messages with null parts cause convertToModelMessages to throw
        messages: [
          { role: 'user', id: 'x', parts: null } as unknown as UIMessage,
        ],
      }
    )

    // Should send an error chunk from the catch block
    const chunks = getSentChunks(webContents)
    const errorChunk = chunks.find((data) => data.chunk?.type === 'error')
    expect(errorChunk).toBeDefined()

    // StreamChatEnd must always be sent
    const ends = getSentEnds(webContents)
    expect(ends).toHaveLength(1)
    expect(ends[0]?.id).toBe('req-2')
  })
})

src/handlers/ai/index.ts

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
2+
import { getErrorMessage } from '@ai-sdk/provider-utils'
23
import { convertToModelMessages, streamText } from 'ai'
34
import { ipcMain, IpcMainEvent } from 'electron'
45

@@ -22,12 +23,12 @@ async function handleStreamChat(
2223
request: StreamChatRequest
2324
) {
2425
const provider = request.provider ?? 'openai'
25-
const messages = convertToModelMessages(request.messages)
26-
2726
const abortController = new AbortController()
2827
activeAbortControllers.set(request.id, abortController)
2928

3029
try {
30+
const messages = convertToModelMessages(request.messages)
31+
3132
if (provider === 'grafana-assistant') {
3233
const aiModel = getGrafanaAssistantModel()
3334

@@ -62,8 +63,15 @@ async function handleStreamChat(
6263

6364
await streamMessages(event.sender, response, request.id, true)
6465
}
66+
} catch (error) {
67+
event.sender.send(AiHandler.StreamChatChunk, {
68+
id: request.id,
69+
chunk: { type: 'error', errorText: getErrorMessage(error) },
70+
})
71+
event.sender.send(AiHandler.StreamChatEnd, {
72+
id: request.id,
73+
})
6574
} finally {
66-
// Clean up the AbortController after streaming completes or fails
6775
activeAbortControllers.delete(request.id)
6876
}
6977
}

src/handlers/ai/streamMessages.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,9 @@ export async function streamMessages<Tools extends ToolSet, PARTIAL_OUTPUT>(
1717
})
1818
}
1919

20-
const usageData = includeUsage ? await response.usage : undefined
20+
const usageData = includeUsage
21+
? await response.usage.catch(() => undefined)
22+
: undefined
2123

2224
webContents.send(AiHandler.StreamChatEnd, {
2325
id: requestId,

0 commit comments

Comments
 (0)