@@ -2,7 +2,7 @@ import { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
 import { convertToModelMessages, streamText } from 'ai'
 import { ipcMain, IpcMainEvent } from 'electron'
 
-import { setupAiModel } from './model'
+import { getGrafanaAssistantModel, getOpenAiModel } from './model'
 import { streamMessages } from './streamMessages'
 import { tools } from './tools'
 import { AiHandler, StreamChatRequest, AbortStreamChatRequest } from './types'
@@ -19,31 +19,47 @@ async function handleStreamChat(
   event: IpcMainEvent,
   request: StreamChatRequest
 ) {
-  const aiModel = await setupAiModel()
+  const provider = request.provider ?? 'openai'
   const messages = convertToModelMessages(request.messages)
 
   const abortController = new AbortController()
   activeAbortControllers.set(request.id, abortController)
 
   try {
-    const response = streamText({
-      model: aiModel,
-      toolChoice: 'required',
-      messages,
-      tools,
-      abortSignal: abortController.signal,
-      providerOptions: {
-        openai: {
-          parallelToolCalls: false,
-          reasoningEffort: 'low',
-          textVerbosity: 'low',
-          // Disable storing of conversations, required for orgs with zero data retention
-          store: false,
-        } satisfies OpenAIResponsesProviderOptions,
-      },
-    })
+    if (provider === 'grafana-assistant') {
+      const aiModel = getGrafanaAssistantModel()
 
-    await streamMessages(event.sender, response, request.id)
+      const response = streamText({
+        model: aiModel,
+        toolChoice: 'required',
+        messages,
+        tools,
+        abortSignal: abortController.signal,
+      })
+
+      await streamMessages(event.sender, response, request.id, false)
+    } else {
+      const aiModel = await getOpenAiModel()
+
+      const response = streamText({
+        model: aiModel,
+        toolChoice: 'required',
+        messages,
+        tools,
+        abortSignal: abortController.signal,
+        providerOptions: {
+          openai: {
+            parallelToolCalls: false,
+            reasoningEffort: 'low',
+            textVerbosity: 'low',
+            // Disable storing of conversations, required for orgs with zero data retention
+            store: false,
+          } satisfies OpenAIResponsesProviderOptions,
+        },
+      })
+
+      await streamMessages(event.sender, response, request.id, true)
+    }
   } finally {
     // Clean up the AbortController after streaming completes or fails
     activeAbortControllers.delete(request.id)