Commit d799964
Merge pull request #293 from Center-for-AI-Innovation/fix-fetch-models
Fetch only enabled models
2 parents 6f9560a + feaa736 commit d799964

File tree

11 files changed: +90 -45 lines changed
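The change is the same across every provider fetcher touched here: when a provider is disabled in settings, short-circuit before any network call and return an empty model list. A minimal sketch of the guard, using a simplified stand-in for the repo's provider types (the real ones live in `~/utils/modelProviders/LLMProvider`) and an invented function name:

```ts
// Minimal sketch of the guard this PR adds to each fetcher. ProviderLike is
// a simplified stand-in for the repo's provider types, and the function name
// is illustrative only.
interface ProviderLike {
  enabled: boolean
  models?: { id: string; enabled: boolean }[]
  error?: string
}

export const getModelsIfEnabled = async <P extends ProviderLike>(
  provider: P,
): Promise<P> => {
  delete provider.error // clear any previous errors

  // New behavior: a disabled provider returns immediately with no models,
  // skipping the fetch entirely.
  if (!provider.enabled) {
    provider.models = []
    return provider
  }

  // ...otherwise fetch and merge models as each provider already does.
  return provider
}
```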

src/app/api/chat/openaiFunctionCall/route.ts (+48 -34)

```diff
@@ -46,16 +46,20 @@ export async function POST(req: Request) {
     openaiKey: string
   } = await req.json()
 
-  let decryptedKey = openaiKey ?
-    await decryptKeyIfNeeded(openaiKey) :
-    process.env.VLADS_OPENAI_KEY
+  // console.log('Received request with openaiKey:', openaiKey)
+  let decryptedKey = openaiKey
+    ? await decryptKeyIfNeeded(openaiKey)
+    : process.env.VLADS_OPENAI_KEY
 
   if (!decryptedKey?.startsWith('sk-')) {
     decryptedKey = process.env.VLADS_OPENAI_KEY as string
   }
 
+  // console.log('Using key for function calling:', decryptedKey)
+
   // Format messages
-  const message_to_send: ChatCompletionMessageParam[] = conversationToMessages(conversation)
+  const message_to_send: ChatCompletionMessageParam[] =
+    conversationToMessages(conversation)
 
   // Add system message
   const globalToolsSytemPromptPrefix =
@@ -97,70 +101,80 @@ export async function POST(req: Request) {
     })
 
     if (!response.ok) {
+      console.log('OpenAI API error:', response.status, response.statusText)
       return new Response(
-        JSON.stringify({ error: `OpenAI API error: ${response.status}` }),
-        {
+        JSON.stringify({ error: `OpenAI API error: ${response.status}` }),
+        {
           status: response.status,
-          headers: { 'Content-Type': 'application/json' }
-        }
+          headers: { 'Content-Type': 'application/json' },
+        },
       )
     }
 
     const data = await response.json()
 
     if (!data.choices) {
+      console.log('No response from OpenAI')
       return new Response(
-        JSON.stringify({ error: 'No response from OpenAI' }),
-        {
+        JSON.stringify({ error: 'No response from OpenAI' }),
+        {
           status: 500,
-          headers: { 'Content-Type': 'application/json' }
-        }
+          headers: { 'Content-Type': 'application/json' },
+        },
      )
    }
 
     if (!data.choices[0]?.message.tool_calls) {
+      console.log('No tool calls from OpenAI')
       return new Response(
         JSON.stringify({
-          choices: [{
-            message: {
-              content: 'No tools invoked by OpenAI',
-              role: 'assistant'
-            }
-          }]
+          choices: [
+            {
+              message: {
+                content: 'No tools invoked by OpenAI',
+                role: 'assistant',
+              },
+            },
+          ],
         }),
         {
           status: 200,
-          headers: { 'Content-Type': 'application/json' }
-        }
+          headers: { 'Content-Type': 'application/json' },
+        },
       )
     }
 
     const toolCalls = data.choices[0].message.tool_calls
 
     return new Response(
       JSON.stringify({
-        choices: [{
-          message: {
-            content: JSON.stringify(toolCalls),
-            role: 'assistant',
-            tool_calls: toolCalls
-          }
-        }]
+        choices: [
+          {
+            message: {
+              content: JSON.stringify(toolCalls),
+              role: 'assistant',
+              tool_calls: toolCalls,
+            },
+          },
+        ],
       }),
       {
         status: 200,
-        headers: { 'Content-Type': 'application/json' }
-      }
+        headers: { 'Content-Type': 'application/json' },
+      },
     )
   } catch (error) {
     return new Response(
-      JSON.stringify({
-        error: error instanceof Error ? error.message : 'An unexpected error occurred'
+      JSON.stringify({
+        error:
+          error instanceof Error
+            ? error.message
+            : 'An unexpected error occurred',
       }),
-      {
+      {
         status: 500,
-        headers: { 'Content-Type': 'application/json' }
-      }
+        headers: { 'Content-Type': 'application/json' },
+      },
     )
   }
 }
```
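Aside from the prettier reformatting and added logging, the interesting logic in this route is key selection: decrypt a caller-supplied key when present, otherwise use the server's key, and fall back to the server's key whenever the result doesn't look like an OpenAI key. The same logic as a standalone sketch (only the call shape of `decryptKeyIfNeeded` is assumed from its call site):

```ts
// Sketch of the key-fallback logic above. decryptKeyIfNeeded is the repo's
// helper; only its call shape is assumed here.
async function resolveOpenAIKey(
  openaiKey: string | undefined,
  decryptKeyIfNeeded: (key: string) => Promise<string>,
): Promise<string> {
  let decryptedKey = openaiKey
    ? await decryptKeyIfNeeded(openaiKey)
    : process.env.VLADS_OPENAI_KEY

  // Anything that is not an OpenAI-style key falls back to the server key.
  if (!decryptedKey?.startsWith('sk-')) {
    decryptedKey = process.env.VLADS_OPENAI_KEY as string
  }
  return decryptedKey
}
```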

src/utils/modelProviders/NCSAHosted.ts (+5 -1)

```diff
@@ -3,14 +3,18 @@ import {
   ProviderNames,
 } from '~/utils/modelProviders/LLMProvider'
 import { OllamaModels, OllamaModelIDs } from './ollama'
-import { ChatBody } from '~/types/chat'
 
 export const getNCSAHostedModels = async (
   ncsaHostedProvider: NCSAHostedProvider,
 ): Promise<NCSAHostedProvider> => {
   delete ncsaHostedProvider.error // Remove the error property if it exists
   ncsaHostedProvider.provider = ProviderNames.NCSAHosted
 
+  if (!ncsaHostedProvider.enabled) {
+    ncsaHostedProvider.models = []
+    return ncsaHostedProvider
+  }
+
   // Store existing model states
   const existingModelStates = new Map<
     string,
```

src/utils/modelProviders/WebLLM.ts (+5 -1)

```diff
@@ -299,7 +299,11 @@ export const getWebLLMModels = async (
   webLLMProvider: WebLLMProvider,
 ): Promise<WebLLMProvider> => {
   webLLMProvider.provider = ProviderNames.WebLLM
-  if (!webLLMProvider.models || webLLMProvider.models.length === 0) {
+  if (
+    !webLLMProvider.models ||
+    webLLMProvider.models.length === 0 ||
+    !webLLMProvider.enabled
+  ) {
     // If no models, add all models but only enable Llama 3 8b, Phi 3 mini, and Gemma 2b by default
     webLLMProvider.models = webLLMModels.map((model) => ({
       ...model,
```
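WebLLM differs from the other providers: its models run in-browser, so there is nothing to fetch, and disabling the provider re-initializes the model list to defaults rather than clearing it. The diff truncates the map body; a hypothetical reconstruction of the default-enabling it describes (the ID strings are placeholders, not the repo's actual IDs):

```ts
// Hypothetical reconstruction of the elided map body: list every WebLLM
// model but enable only the small default set named in the comment above.
// The ID strings below are placeholders, not the repo's actual model IDs.
const DEFAULT_ENABLED = new Set(['llama-3-8b', 'phi-3-mini', 'gemma-2b'])

webLLMProvider.models = webLLMModels.map((model) => ({
  ...model,
  enabled: DEFAULT_ENABLED.has(model.id),
}))
```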

src/utils/modelProviders/azure.ts (+7 -1)

```diff
@@ -127,7 +127,11 @@ export const getAzureModels = async (
   delete azureProvider.error // Clear previous errors if any.
   azureProvider.provider = ProviderNames.Azure
   try {
-    if (!azureProvider.AzureEndpoint || !azureProvider.apiKey) {
+    if (
+      !azureProvider.AzureEndpoint ||
+      !azureProvider.apiKey ||
+      !azureProvider.enabled
+    ) {
       // azureProvider.error = `Azure OpenAI Endpoint or Deployment is not set. Endpoint: ${azureProvider.AzureEndpoint}, Deployment: ${azureProvider.AzureDeployment}`
       azureProvider.models = [] // clear any previous models.
       return azureProvider
@@ -138,6 +142,8 @@
       : azureProvider.AzureEndpoint
     const url = `${baseUrl}/openai/deployments?api-version=${OPENAI_API_VERSION}`
 
+    // console.log('Fetching Azure models from:', url)
+
     const response = await fetch(url, {
       method: 'GET',
       headers: {
```
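Azure exposes its model list through the deployments endpoint, so the fetch above builds a URL from the configured endpoint plus the API version. A self-contained sketch of that request, assuming Azure OpenAI's standard `api-key` header and an illustrative version value:

```ts
// Sketch: list Azure OpenAI deployments. The trailing-slash handling and
// URL shape mirror the diff; OPENAI_API_VERSION's value is an assumption.
const OPENAI_API_VERSION = '2024-02-01'

async function listAzureDeployments(endpoint: string, apiKey: string) {
  const baseUrl = endpoint.endsWith('/') ? endpoint.slice(0, -1) : endpoint
  const url = `${baseUrl}/openai/deployments?api-version=${OPENAI_API_VERSION}`

  const response = await fetch(url, {
    method: 'GET',
    headers: { 'api-key': apiKey }, // Azure OpenAI uses api-key, not Bearer
  })
  if (!response.ok) {
    throw new Error(`Azure deployments request failed: ${response.status}`)
  }
  return response.json()
}
```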

src/utils/modelProviders/ollama.ts (+1 -1)

```diff
@@ -128,7 +128,7 @@ export const getOllamaModels = async (
   delete ollamaProvider.error // Remove the error property if it exists
   ollamaProvider.provider = ProviderNames.Ollama
   try {
-    if (!ollamaProvider.baseUrl) {
+    if (!ollamaProvider.baseUrl || !ollamaProvider.enabled) {
       // Don't error here, too confusing for users.
       // ollamaProvider.error = `Ollama Base Url is not defined, please set it to the URL that points to your Ollama instance.`
       ollamaProvider.models = [] // clear any previous models.
```

src/utils/modelProviders/routes/anthropic.ts (+5 -1)

```diff
@@ -11,7 +11,11 @@ export const getAnthropicModels = async (
   anthropicProvider.provider = ProviderNames.Anthropic
   delete anthropicProvider.error // Clear any previous errors
 
-  if (!anthropicProvider.apiKey || anthropicProvider.apiKey === '') {
+  if (
+    !anthropicProvider.apiKey ||
+    anthropicProvider.apiKey === '' ||
+    !anthropicProvider.enabled
+  ) {
     // Don't show any error here... too confusing for users.
     anthropicProvider.models = []
     return anthropicProvider
```

src/utils/modelProviders/routes/bedrock.ts (+2 -1)

```diff
@@ -17,7 +17,8 @@ export const getBedrockModels = async (
     !bedrockProvider.region ||
     bedrockProvider.accessKeyId === '' ||
     bedrockProvider.secretAccessKey === '' ||
-    bedrockProvider.region === ''
+    bedrockProvider.region === '' ||
+    !bedrockProvider.enabled
   ) {
     // Don't show any error here... too confusing for users.
     bedrockProvider.models = []
```

src/utils/modelProviders/routes/gemini.ts (+6 -3)

```diff
@@ -1,7 +1,6 @@
 import { type GeminiProvider, ProviderNames } from '../LLMProvider'
 import {
-  type GeminiModel,
-  GeminiModelID,
+  type GeminiModelID,
   GeminiModels,
   preferredGeminiModelIds,
 } from '../types/gemini'
@@ -17,7 +16,11 @@ export const getGeminiModels = async (
   geminiProvider.provider = ProviderNames.Gemini
   delete geminiProvider.error // Clear any previous errors
 
-  if (!geminiProvider.apiKey || geminiProvider.apiKey === '') {
+  if (
+    !geminiProvider.apiKey ||
+    geminiProvider.apiKey === '' ||
+    !geminiProvider.enabled
+  ) {
     // Don't show any error here... too confusing for users.
     geminiProvider.models = []
     return geminiProvider
```

src/utils/modelProviders/routes/openai.ts (+5 -1)

```diff
@@ -14,7 +14,11 @@ export const getOpenAIModels = async (
   // 1. Use all passed-in models that are ALSO available from the endpoint...
   // If no passed-in models, then use all. Enabled by default, following our types spec.
 
-  if (!openAIProvider.apiKey || openAIProvider.apiKey === undefined) {
+  if (
+    !openAIProvider.apiKey ||
+    openAIProvider.apiKey === undefined ||
+    !openAIProvider.enabled
+  ) {
     // No error here, too confusing for users.
     // openAIProvider.error = 'OpenAI API Key is not set.'
     openAIProvider.models = [] // clear any previous models.
```

src/utils/modelProviders/routes/sambanova.ts (+1 -1)

```diff
@@ -2,7 +2,7 @@ import { type SambaNovaProvider, ProviderNames } from '../LLMProvider'
 import {
   SambaNovaModels,
   type SambaNovaModel,
-  SambaNovaModelID,
+  type SambaNovaModelID,
   SambanovaPreferredModelIDs,
 } from '../types/SambaNova'
```
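The gemini.ts and sambanova.ts import changes are type hygiene: marking an import with the inline `type` modifier tells the compiler it can be erased from the emitted JavaScript, which keeps single-file transpilation modes (`isolatedModules`, `verbatimModuleSyntax`) happy and avoids pulling in a module at runtime for a type alone. For example (assuming `GeminiModels` is keyed by `GeminiModelID`, as the names suggest):

```ts
// A value import and a type-only import mixed in one statement. The `type`
// entry disappears entirely from the compiled output.
import { GeminiModels, type GeminiModelID } from '../types/gemini'

// GeminiModelID exists only at compile time; GeminiModels is a runtime value.
const lookup = (id: GeminiModelID) => GeminiModels[id]
```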

src/utils/modelProviders/types/NCSAHostedVLM.ts (+5)

```diff
@@ -52,6 +52,11 @@ export const getNCSAHostedVLMModels = async (
   delete vlmProvider.error // Clear any previous errors
   vlmProvider.provider = ProviderNames.NCSAHostedVLM
 
+  if (!vlmProvider.enabled) {
+    vlmProvider.models = []
+    return vlmProvider
+  }
+
   // Store existing model states
   const existingModelStates = new Map<
     string,
```
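Taken together, every fetcher now resolves with `models: []` for a disabled provider without making a request, so an aggregator can fan out over all configured providers safely. A hypothetical caller, reusing the `ProviderLike` sketch from the top of this page (the dispatcher name is invented for illustration):

```ts
// Hypothetical aggregation: fetch models for all configured providers in
// parallel. Disabled providers now resolve immediately with models = [].
async function fetchEnabledModels(providers: ProviderLike[]) {
  const resolved = await Promise.all(
    providers.map((p) => getModelsIfEnabled(p)), // invented dispatcher name
  )
  return resolved.filter((p) => (p.models?.length ?? 0) > 0)
}
```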
