Commit fcab45a

Merge pull request #290 from Center-for-AI-Innovation/fix-tools-api-key
Fix fetch logic of OpenAI key for tool calling
2 parents fd22a01 + bb06bac commit fcab45a
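
In effect, this merge changes where the OpenAI API key for tool calling comes from: getOpenAIKey now reads the key from the project's configured LLM providers (fetched from the API at chat time) and falls back to the user-supplied key, instead of reading courseMetadata.openai_api_key. A minimal sketch of the new resolution order, using a simplified stand-in for the real AllLLMProviders type from ~/utils/modelProviders/LLMProvider:

    // Sketch only: 'OpenAI' stands in for ProviderNames.OpenAI, and this type
    // is a simplified stand-in for the repo's AllLLMProviders.
    type AllLLMProvidersSketch = { OpenAI?: { apiKey?: string } }

    function resolveOpenAIKey(
      llmProviders: AllLLMProvidersSketch,
      userApiKey: string,
    ): string {
      // Prefer the project's configured OpenAI provider key when it is set
      // and non-empty; otherwise fall back to the key the user entered.
      const projectKey = llmProviders.OpenAI?.apiKey
      return projectKey && projectKey !== '' ? projectKey : userApiKey
    }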

2 files changed: +75, -52 lines


src/components/Chat/Chat.tsx

Lines changed: 63 additions & 45 deletions
@@ -312,25 +312,26 @@ export const Chat = memo(
           body: JSON.stringify({
             projectName: courseName,
           }),
-        });
+        })
 
         if (!response.ok) {
-          throw new Error('Failed to fetch LLM providers');
+          throw new Error('Failed to fetch LLM providers')
         }
 
-        const data = await response.json();
-        llmProviders = data;
+        const data = await response.json()
+        llmProviders = data
 
         if (!llmProviders) {
-          throw new Error('No LLM providers returned from API');
+          throw new Error('No LLM providers returned from API')
         }
       } catch (error) {
-        console.error('Error fetching LLM providers:', error);
+        console.error('Error fetching LLM providers:', error)
         errorToast({
           title: 'Website Error - Please refresh the page',
-          message: 'Failed to fetch LLM providers. Please refresh the page and try again.',
-        });
-        return;
+          message:
+            'Failed to fetch LLM providers. Please refresh the page and try again.',
+        })
+        return
       }
     }
 
@@ -367,8 +368,8 @@ export const Chat = memo(
           message.contexts = []
           message.content = Array.isArray(message.content)
             ? message.content.filter(
-                (content) => content.type !== 'tool_image_url',
-              )
+              (content) => content.type !== 'tool_image_url',
+            )
             : message.content
 
           const updatedMessages = [...(selectedConversation.messages || [])]
@@ -571,12 +572,12 @@ export const Chat = memo(
             .map((msg) => {
               const contentText = Array.isArray(msg.content)
                 ? msg.content
-                    .filter(
-                      (content) =>
-                        content.type === 'text' && content.text,
-                    )
-                    .map((content) => content.text!)
-                    .join(' ')
+                  .filter(
+                    (content) =>
+                      content.type === 'text' && content.text,
+                  )
+                  .map((content) => content.text!)
+                  .join(' ')
                 : typeof msg.content === 'string'
                   ? msg.content
                   : ''
@@ -591,12 +592,12 @@ export const Chat = memo(
             .map((msg) => {
               const contentText = Array.isArray(msg.content)
                 ? msg.content
-                    .filter(
-                      (content) =>
-                        content.type === 'text' && content.text,
-                    )
-                    .map((content) => content.text!)
-                    .join(' ')
+                  .filter(
+                    (content) =>
+                      content.type === 'text' && content.text,
+                  )
+                  .map((content) => content.text!)
+                  .join(' ')
                 : typeof msg.content === 'string'
                   ? msg.content
                   : ''
@@ -628,13 +629,13 @@ export const Chat = memo(
                 ? msg.content.trim()
                 : Array.isArray(msg.content)
                   ? msg.content
-                      .map((c) => c.text)
-                      .join(' ')
-                      .trim()
+                    .map((c) => c.text)
+                    .join(' ')
+                    .trim()
                   : '',
           })),
         },
-        key: getOpenAIKey(courseMetadata, apiKey),
+        key: getOpenAIKey(llmProviders, courseMetadata, apiKey),
         course_name: courseName,
         stream: false,
         courseMetadata: courseMetadata,
@@ -752,7 +753,7 @@ export const Chat = memo(
       // Check if the response is NO_REWRITE_REQUIRED or if we couldn't extract a valid query
       if (
         rewrittenQuery.trim().toUpperCase() ===
-          'NO_REWRITE_REQUIRED' ||
+        'NO_REWRITE_REQUIRED' ||
         !extractedQuery
       ) {
         console.log(
@@ -812,7 +813,7 @@ export const Chat = memo(
         imageUrls,
         imgDesc,
         updatedConversation,
-        getOpenAIKey(courseMetadata, apiKey),
+        getOpenAIKey(llmProviders, courseMetadata, apiKey),
       )
       homeDispatch({ field: 'isRouting', value: false })
       if (uiucToolsToRun.length > 0) {
@@ -838,7 +839,7 @@ export const Chat = memo(
 
       const finalChatBody: ChatBody = {
         conversation: updatedConversation,
-        key: getOpenAIKey(courseMetadata, apiKey),
+        key: getOpenAIKey(llmProviders, courseMetadata, apiKey),
         course_name: courseName,
         stream: true,
         courseMetadata: courseMetadata,
@@ -913,10 +914,18 @@ export const Chat = memo(
         // Check if response is ok before proceeding
         if (!response.ok) {
           const errorData = await response.json()
-          console.log('Chat.txs --- errorData from /api/allNewRoutingChat', errorData)
+          console.log(
+            'Chat.txs --- errorData from /api/allNewRoutingChat',
+            errorData,
+          )
           // Read our custom error object. But normal errors are captured too via errorData.error.
-          const customError = new Error(errorData.message || errorData.error || 'The LLM might be overloaded or misconfigured. Please check your API key, or use a different LLM.')
-          ; (customError as any).title = errorData.title || 'LLM Didn\'t Respond'
+          const customError = new Error(
+            errorData.message ||
+              errorData.error ||
+              'The LLM might be overloaded or misconfigured. Please check your API key, or use a different LLM.',
+          )
+          ;(customError as any).title =
+            errorData.title || "LLM Didn't Respond"
           throw customError
         }
       } catch (error) {
@@ -926,7 +935,10 @@ export const Chat = memo(
 
         errorToast({
           title: (error as any).title || 'Error',
-          message: error instanceof Error ? error.message : 'An unexpected error occurred',
+          message:
+            error instanceof Error
+              ? error.message
+              : 'An unexpected error occurred',
         })
         return
       }
@@ -937,7 +949,10 @@ export const Chat = memo(
 
         errorToast({
           title: (error as any).title || 'Error',
-          message: error instanceof Error ? error.message : 'An unexpected error occurred',
+          message:
+            error instanceof Error
+              ? error.message
+              : 'An unexpected error occurred',
         })
         return
       }
@@ -951,12 +966,15 @@ export const Chat = memo(
         const errorData = await response.json()
         errorToast({
           title: errorData.title || 'Error',
-          message: errorData.message || 'There was an unexpected error calling the LLM. Try using a different model.',
+          message:
+            errorData.message ||
+            'There was an unexpected error calling the LLM. Try using a different model.',
         })
       } catch (error) {
         errorToast({
           title: 'Error',
-          message: 'There was an unexpected error calling the LLM. Try using a different model.',
+          message:
+            'There was an unexpected error calling the LLM. Try using a different model.',
         })
       }
       return
@@ -1574,13 +1592,13 @@ export const Chat = memo(
 
       const statements =
         courseMetadata?.example_questions &&
-          courseMetadata.example_questions.length > 0
+        courseMetadata.example_questions.length > 0
           ? courseMetadata.example_questions
           : [
-            'Make a bullet point list of key takeaways from this project.',
-            'What are the best practices for [Activity or Process] in [Context or Field]?',
-            'Can you explain the concept of [Specific Concept] in simple terms?',
-          ]
+              'Make a bullet point list of key takeaways from this project.',
+              'What are the best practices for [Activity or Process] in [Context or Field]?',
+              'Can you explain the concept of [Specific Concept] in simple terms?',
+            ]
 
       // Add this function to create dividers with statements
       const renderIntroductoryStatements = () => {
@@ -1895,8 +1913,8 @@ export const Chat = memo(
               transition={{ duration: 0.1 }}
             >
               {selectedConversation &&
-                selectedConversation.messages &&
-                selectedConversation.messages?.length === 0 ? (
+              selectedConversation.messages &&
+              selectedConversation.messages?.length === 0 ? (
                 <>
                   <div className="mt-16">
                     {renderIntroductoryStatements()}
@@ -1914,7 +1932,7 @@ export const Chat = memo(
                       handleSend(
                         editedMessage,
                         selectedConversation?.messages?.length -
-                          index,
+                        index,
                         null,
                         tools,
                         enabledDocumentGroups,

src/utils/streamProcessing.ts

Lines changed: 12 additions & 7 deletions
@@ -23,6 +23,7 @@ import {
   type BedrockProvider,
   type GeminiProvider,
   type SambaNovaProvider,
+  ProviderNames,
 } from '~/utils/modelProviders/LLMProvider'
 import fetchMQRContexts from '~/pages/api/getContextsMQR'
 import fetchContexts from '~/pages/api/getContexts'
@@ -46,7 +47,6 @@ import { runGeminiChat } from '~/app/api/chat/gemini/route'
 import { runBedrockChat } from '~/app/api/chat/bedrock/route'
 import { runSambaNovaChat } from '~/app/api/chat/sambanova/route'
 
-
 /**
  * Enum representing the possible states of the state machine used in processing text chunks.
  */
@@ -700,12 +700,12 @@ export async function handleImageContent(
   )
 
   if (imgDescIndex !== -1) {
-    ; (message.content as Content[])[imgDescIndex] = {
+    ;(message.content as Content[])[imgDescIndex] = {
       type: 'text',
       text: `Image description: ${imgDesc}`,
     }
   } else {
-    ; (message.content as Content[]).push({
+    ;(message.content as Content[]).push({
       type: 'text',
       text: `Image description: ${imgDesc}`,
     })
@@ -719,13 +719,17 @@ export async function handleImageContent(
 }
 
 export const getOpenAIKey = (
+  llmProviders: AllLLMProviders,
   courseMetadata: CourseMetadata,
   userApiKey: string,
 ) => {
   const key =
-    courseMetadata?.openai_api_key && courseMetadata?.openai_api_key != ''
-      ? courseMetadata.openai_api_key
+    llmProviders[ProviderNames.OpenAI]?.apiKey &&
+    llmProviders[ProviderNames.OpenAI]?.apiKey != ''
+      ? llmProviders[ProviderNames.OpenAI]?.apiKey
       : userApiKey
+  // console.log('OpenAI key found for getOpenAIKey:', key)
+  // console.log('llmProviders:', llmProviders)
   return key
 }
 
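For reference, a call site under the new signature, as it now appears at the three updated spots in Chat.tsx above (the llmProviders value is the one fetched and validated at the top of that file's diff):

    // New call shape: llmProviders comes first. courseMetadata remains in the
    // signature but is no longer consulted when resolving the OpenAI key.
    const key = getOpenAIKey(llmProviders, courseMetadata, apiKey)
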
@@ -779,10 +783,11 @@ export const routeModelRequest = async (
   NOTE: WebLLM is handled separately, because it MUST be called from the Client browser itself.
   */
 
-  console.debug('In routeModelRequest: ', chatBody, baseUrl)
+  // console.debug('In routeModelRequest: ', chatBody, baseUrl)
+  // console.debug('In routeModelRequest: ', baseUrl)
 
   const selectedConversation = chatBody.conversation!
-  console.debug('Selected conversation:', selectedConversation)
+  // console.debug('Selected conversation:', selectedConversation)
   if (!selectedConversation.model || !selectedConversation.model.id) {
     console.debug('Invalid conversation:', selectedConversation)
     throw new Error('Conversation model is undefined or missing "id" property.')
