Commit 766b283

fix: add deepseek to all Record<SupportedProvider> types
- Add deepseek entries to error parsers/mappers in errors.ts
- Add fetchDeepSeekModels function and registry entries in routes.models.ts
- Add deepseek API key fallback and model fetcher
- Add deepseek to displayNames and providerEnvVars in seed.ts
- Add deepseek to pricesByProvider and rulesByProvider in optimization-rule.ts
- Add deepseek to preferredSourcePrefixes in models-dev-client.ts
1 parent: 059fbfc
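
The mechanism behind "all Record<SupportedProvider> types": once "deepseek" joins the SupportedProvider union, every value typed as Record<SupportedProvider, T> fails to compile until it gains a deepseek key, which is exactly what each file below adds. A minimal sketch with a reduced stand-in union (not the project's real definition):

// Stand-in union for illustration; the real SupportedProvider has more members.
type SupportedProvider = "openai" | "cohere" | "deepseek";

// Record<SupportedProvider, T> is exhaustive: omitting any key, including
// the newly added "deepseek", fails type-checking.
const displayNames: Record<SupportedProvider, string> = {
  openai: "OpenAI",
  cohere: "Cohere",
  deepseek: "DeepSeek", // required as soon as the union gains "deepseek"
};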

5 files changed, +47 −0 lines

platform/backend/src/clients/models-dev-client.ts (1 addition, 0 deletions)

@@ -406,6 +406,7 @@ class ModelsDevClient {
       anthropic: ["anthropic/"],
       cohere: ["cohere/"],
       cerebras: ["cerebras/"],
+      deepseek: ["deepseek/"],
       mistral: ["mistral/"],
       bedrock: ["amazon-bedrock/"],
       ollama: ["ollama/"],

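The preferredSourcePrefixes map pairs each provider with the catalog ID prefixes it prefers when ingesting metadata from models.dev. A hypothetical lookup against IDs such as "deepseek/deepseek-chat" (the helper below is illustrative, not code from models-dev-client.ts):

// Illustrative helper: returns true when a models.dev catalog ID
// matches one of the provider's preferred prefixes.
const preferredSourcePrefixes: Record<string, string[]> = {
  cerebras: ["cerebras/"],
  deepseek: ["deepseek/"],
  mistral: ["mistral/"],
};

function isPreferredSource(provider: string, catalogId: string): boolean {
  return (preferredSourcePrefixes[provider] ?? []).some((prefix) =>
    catalogId.startsWith(prefix),
  );
}

// isPreferredSource("deepseek", "deepseek/deepseek-chat") === true
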
platform/backend/src/database/seed.ts (2 additions, 0 deletions)

@@ -317,6 +317,7 @@ async function seedChatApiKeysFromEnv(): Promise<void> {
       gemini: config.chat.gemini.apiKey,
       cerebras: config.chat.cerebras.apiKey,
       cohere: config.chat.cohere.apiKey,
+      deepseek: config.chat.deepseek.apiKey,
       mistral: config.chat.mistral.apiKey,
       ollama: config.chat.ollama.apiKey,
       vllm: config.chat.vllm.apiKey,

@@ -405,6 +406,7 @@ function getProviderDisplayName(provider: SupportedProvider): string {
       gemini: "Google",
       cerebras: "Cerebras",
       cohere: "Cohere",
+      deepseek: "DeepSeek",
       mistral: "Mistral",
       ollama: "Ollama",
       vllm: "vLLM",

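seedChatApiKeysFromEnv reads each provider's key out of typed config; the commit message also mentions a providerEnvVars map in this file. A sketch of the presumed wiring (the DEEPSEEK_API_KEY variable name is a guess, not confirmed by this diff):

// Assumed mapping; the diff shows only the config.chat.* lookups.
const providerEnvVars: Record<string, string> = {
  cohere: "COHERE_API_KEY",
  deepseek: "DEEPSEEK_API_KEY", // hypothetical env var name
  mistral: "MISTRAL_API_KEY",
};

// Collect the providers whose keys are present in the environment,
// which is what a seed step would then upsert into the database.
const seedable = Object.entries(providerEnvVars)
  .filter(([, envVar]) => Boolean(process.env[envVar]))
  .map(([provider]) => provider);
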
platform/backend/src/models/optimization-rule.ts (2 additions, 0 deletions)

@@ -272,6 +272,7 @@ class OptimizationRuleModel {
       gemini: [],
       cohere: [],
       cerebras: [],
+      deepseek: [], // DeepSeek model pricing varies, so no defaults
       mistral: [],
       vllm: [], // vLLM model pricing varies by deployment, so no defaults
       ollama: [], // Ollama model pricing varies by deployment, so no defaults

@@ -306,6 +307,7 @@ class OptimizationRuleModel {
       gemini: [],
       cohere: [],
       cerebras: [],
+      deepseek: [], // DeepSeek optimization rules are deployment-specific, no defaults
       mistral: [],
       vllm: [], // vLLM optimization rules are deployment-specific, no defaults
       ollama: [], // Ollama optimization rules are deployment-specific, no defaults

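Both maps default to an empty array for providers whose pricing or rules depend on the deployment. For providers that do ship defaults, each entry presumably pairs a model with its token pricing; a hypothetical shape, not taken from optimization-rule.ts:

// Hypothetical rule shape for illustration; the real types in
// optimization-rule.ts may differ, and the numbers are placeholders.
interface PriceDefault {
  model: string;
  inputPerMTok: number; // USD per million input tokens
  outputPerMTok: number; // USD per million output tokens
}

const pricesByProvider: Record<string, PriceDefault[]> = {
  deepseek: [], // pricing varies, so no defaults (matches the diff)
  openai: [{ model: "gpt-4o-mini", inputPerMTok: 0.15, outputPerMTok: 0.6 }],
};
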
platform/backend/src/routes/chat/errors.ts (2 additions, 0 deletions)

@@ -1107,6 +1107,7 @@ const providerParsers: Record<SupportedProvider, ErrorParser> = {
       bedrock: parseBedrockError,
       cerebras: parseOpenAIError, // Cerebras uses OpenAI-compatible API
       cohere: parseCohereError,
+      deepseek: parseOpenAIError, // DeepSeek uses OpenAI-compatible API
       mistral: parseOpenAIError, // Mistral uses OpenAI-compatible API
       vllm: parseVllmError,
       ollama: parseOllamaError,

@@ -1125,6 +1126,7 @@ const providerMappers: Record<SupportedProvider, ErrorMapper> = {
       bedrock: mapBedrockErrorWrapper,
       cerebras: mapOpenAIErrorWrapper, // Cerebras uses OpenAI-compatible API
       cohere: mapCohereErrorWrapper,
+      deepseek: mapOpenAIErrorWrapper, // DeepSeek uses OpenAI-compatible API
       mistral: mapOpenAIErrorWrapper, // Mistral uses OpenAI-compatible API
       vllm: mapVllmErrorWrapper,
       ollama: mapOllamaErrorWrapper,

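Because DeepSeek exposes an OpenAI-compatible API, the existing OpenAI parser and mapper are reused rather than written fresh. A minimal sketch of how a registry like this dispatches (the dispatch function is illustrative; only the registry entries appear in the diff):

// Illustrative dispatch, not the actual code in errors.ts.
type ErrorParser = (body: unknown, status: number) => { message: string };

const parseOpenAIError: ErrorParser = (body, status) => ({
  message: `openai-format error (HTTP ${status})`,
});

const providerParsers: Record<string, ErrorParser> = {
  deepseek: parseOpenAIError, // OpenAI-compatible, so the parser is shared
  mistral: parseOpenAIError,
};

function parseProviderError(provider: string, body: unknown, status: number) {
  const parser = providerParsers[provider];
  return parser ? parser(body, status) : { message: `HTTP ${status}` };
}
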
platform/backend/src/routes/chat/routes.models.ts (40 additions, 0 deletions)

@@ -237,6 +237,44 @@ async function fetchCerebrasModels(apiKey: string): Promise<ModelInfo[]> {
   }));
 }

+/**
+ * Fetch models from DeepSeek API (OpenAI-compatible)
+ */
+async function fetchDeepSeekModels(apiKey: string): Promise<ModelInfo[]> {
+  const baseUrl = config.llm.deepseek.baseUrl;
+  const url = `${baseUrl}/models`;
+
+  const response = await fetch(url, {
+    headers: {
+      Authorization: `Bearer ${apiKey}`,
+    },
+  });
+
+  if (!response.ok) {
+    const errorText = await response.text();
+    logger.error(
+      { status: response.status, error: errorText },
+      "Failed to fetch DeepSeek models",
+    );
+    throw new Error(`Failed to fetch DeepSeek models: ${response.status}`);
+  }
+
+  const data = (await response.json()) as {
+    data: Array<{
+      id: string;
+      created: number;
+      owned_by: string;
+    }>;
+  };
+
+  return data.data.map((model) => ({
+    id: model.id,
+    displayName: model.id,
+    provider: "deepseek" as const,
+    createdAt: new Date(model.created * 1000).toISOString(),
+  }));
+}
+
 /**
  * Fetch models from Mistral API (OpenAI-compatible)
  */

@@ -726,6 +764,7 @@ async function getProviderApiKey({
   anthropic: () => config.chat.anthropic.apiKey || null,
   cerebras: () => config.chat.cerebras.apiKey || null,
   cohere: () => config.chat.cohere?.apiKey || null,
+  deepseek: () => config.chat.deepseek?.apiKey || null,
   gemini: () => config.chat.gemini.apiKey || null,
   mistral: () => config.chat.mistral.apiKey || null,
   ollama: () => config.chat.ollama.apiKey || "", // Ollama typically doesn't require API keys

@@ -746,6 +785,7 @@ const modelFetchers: Record<
   anthropic: fetchAnthropicModels,
   bedrock: fetchBedrockModels,
   cerebras: fetchCerebrasModels,
+  deepseek: fetchDeepSeekModels,
   gemini: fetchGeminiModels,
   mistral: fetchMistralModels,
   openai: fetchOpenAiModels,

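fetchDeepSeekModels codes against the standard OpenAI model-list contract: GET {baseUrl}/models returns { data: [{ id, created, owned_by }] }. A standalone sketch of the same call, with the project's config and logger swapped for plain values (the base URL shown is DeepSeek's documented public endpoint, assumed here rather than read from config.llm.deepseek.baseUrl):

// Assumed default endpoint; the real value comes from config.
const baseUrl = "https://api.deepseek.com";

async function listDeepSeekModelIds(apiKey: string): Promise<string[]> {
  const response = await fetch(`${baseUrl}/models`, {
    headers: { Authorization: `Bearer ${apiKey}` },
  });
  if (!response.ok) {
    throw new Error(`Failed to fetch DeepSeek models: ${response.status}`);
  }
  // OpenAI-style list response: { object: "list", data: [{ id, ... }] }
  const data = (await response.json()) as { data: Array<{ id: string }> };
  return data.data.map((m) => m.id);
}
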