Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions src/renderer/packages/model-setting-utils/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import GroqSettingUtil from './groq-setting-util'
import { ModelSettingUtil } from './interface'
import LMStudioSettingUtil from './lmstudio-setting-util'
import MistralAISettingUtil from './mistral-ai-setting-util'
import NovitaAISettingUtil from './novita-ai-setting-util'
import OllamaSettingUtil from './ollama-setting-util'
import OpenAISettingUtil from './openai-setting-util'
import PerplexitySettingUtil from './perplexity-setting-util'
Expand All @@ -33,6 +34,7 @@ export function getModelSettingUtil(aiProvider: ModelProvider): ModelSettingUtil
[ModelProviderEnum.VolcEngine]: VolcEngineSettingUtil,
[ModelProviderEnum.MistralAI]: MistralAISettingUtil,
[ModelProviderEnum.LMStudio]: LMStudioSettingUtil,
[ModelProviderEnum.NovitaAI]: NovitaAISettingUtil,
[ModelProviderEnum.Perplexity]: PerplexitySettingUtil,
[ModelProviderEnum.XAI]: XAISettingUtil,
[ModelProviderEnum.Custom]: CustomModelSettingUtil,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
import OpenAI from 'src/shared/models/openai'
import { type ModelProvider, ModelProviderEnum, type ProviderSettings } from 'src/shared/types'
import { createModelDependencies } from '@/adapters'
import BaseConfig from './base-config'
import type { ModelSettingUtil } from './interface'

export default class NovitaAISettingUtil extends BaseConfig implements ModelSettingUtil {
  // Provider identity used to key this util in the settings registry.
  public provider: ModelProvider = ModelProviderEnum.NovitaAI

  /**
   * Human-readable label for the currently selected model, shown in the UI.
   */
  async getCurrentModelDisplayName(model: string): Promise<string> {
    return `Novita AI (${model})`
  }

  /**
   * Fetch the provider's model list: chat models via the OpenAI-compatible
   * client, plus embedding models via a direct REST call.
   *
   * Each source is guarded independently so a failure of one does not discard
   * results from the other. (Previously both were wrapped in a single
   * try/catch whose fallback re-invoked `openai.listModels()` — the only call
   * that could have caused the rejection — so the "recovery" usually failed
   * again and the rejection escaped.)
   */
  protected async listProviderModels(settings: ProviderSettings): Promise<string[]> {
    // Fall back to a known-good default model when none is configured yet.
    const model = settings.models?.[0] || { modelId: 'deepseek/deepseek-v3-0324' }
    const dependencies = await createModelDependencies()

    const openai = new OpenAI(
      {
        apiHost: settings.apiHost!,
        apiKey: settings.apiKey!,
        model,
        temperature: 0,
        dalleStyle: 'vivid',
        injectDefaultMetadata: false,
        useProxy: settings.useProxy || false,
      },
      dependencies
    )

    // Run both lookups in parallel; each resolves to [] on failure, so the
    // combined result is the union of whatever succeeded.
    const [chatModels, embeddingModels] = await Promise.all([
      openai.listModels().catch((error: unknown) => {
        console.error('Failed to fetch Novita AI models:', error)
        return [] as string[]
      }),
      this.fetchEmbeddingModels(settings.apiHost!, settings.apiKey!),
    ])

    return [...chatModels, ...embeddingModels]
  }

  /**
   * List embedding-model ids via Novita's OpenAI-style `/models` endpoint.
   * Never rejects: any transport or HTTP error is logged and yields [].
   */
  private async fetchEmbeddingModels(apiHost: string, apiKey: string): Promise<string[]> {
    try {
      // Normalize the host so a trailing slash doesn't produce "…//models".
      const base = apiHost.replace(/\/+$/, '')
      const response = await fetch(`${base}/models?model_type=embedding`, {
        method: 'GET',
        headers: {
          'Authorization': `Bearer ${apiKey}`,
          'Content-Type': 'application/json',
        },
      })

      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`)
      }

      // Minimal shape for the OpenAI-style list response; avoids `any`.
      const data = (await response.json()) as { data?: Array<{ id: string }> }
      return data.data?.map((m) => m.id) ?? []
    } catch (error) {
      console.error('Failed to fetch embedding models:', error)
      return []
    }
  }
}
Binary file added src/renderer/static/icons/providers/novita-ai.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
85 changes: 85 additions & 0 deletions src/shared/defaults.ts
Original file line number Diff line number Diff line change
Expand Up @@ -695,4 +695,89 @@ export const SystemProviders: ProviderBaseInfo[] = [
],
},
},
{
id: ModelProviderEnum.NovitaAI,
name: 'Novita AI',
type: ModelProviderType.OpenAI,
urls: {
website: 'https://novita.ai',
},
defaultSettings: {
apiHost: 'https://api.novita.ai/v3/openai',
apiPath: '/chat/completions',
models: [
{
modelId: 'deepseek/deepseek-v3-0324',
contextWindow: 163840,
maxOutput: 163840,
capabilities: ['tool_use'],
},
{
modelId: 'deepseek/deepseek-r1-0528',
contextWindow: 163840,
maxOutput: 163840,
capabilities: ['tool_use'],
},
{
modelId: 'moonshotai/kimi-k2-instruct',
contextWindow: 131072,
maxOutput: 131072,
capabilities: ['tool_use'],
},
{
modelId: 'zai-org/glm-4.5',
contextWindow: 131072,
maxOutput: 131072,
capabilities: ['tool_use'],
},
{
modelId: 'baidu/ernie-4.5-vl-424b-a47b',
contextWindow: 123000,
maxOutput: 16000,
capabilities: ['tool_use', 'vision'],
},
{
modelId: 'qwen/qwen3-235b-a22b-thinking-2507',
contextWindow: 131072,
maxOutput: 131072,
capabilities: ['tool_use'],
},
{
modelId: 'qwen/qwen3-235b-a22b-instruct-2507',
contextWindow: 262144,
maxOutput: 262144,
capabilities: ['tool_use'],
},
{
modelId: 'qwen/qwen3-30b-a3b-fp8',
contextWindow: 262144,
maxOutput: 262144,
},
{
modelId: 'qwen/qwen2.5-vl-72b-instruct',
contextWindow: 32768,
maxOutput: 32768,
capabilities: ['vision'],
},
{
modelId: 'google/gemma-3-27b-it',
contextWindow: 32000,
maxOutput: 32000,
capabilities: ['tool_use', 'vision'],
},
{
modelId: 'qwen/qwen3-embedding-8b',
contextWindow: 32768,
maxOutput: 4096,
type: 'embedding',
},
{
modelId: 'baai/bge-m3',
contextWindow: 8192,
maxOutput: 96000,
type: 'embedding',
}
],
},
},
]
23 changes: 23 additions & 0 deletions src/shared/models/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -270,6 +270,23 @@ export function getModel(setting: Settings, config: Config, dependencies: ModelD
},
dependencies
)

case ModelProviderEnum.NovitaAI:
return new CustomOpenAI(
{
apiKey: providerSetting.apiKey || '',
apiHost: formattedApiHost,
apiPath: providerSetting.apiPath || '/chat/completions',
model,
temperature: setting.temperature,
topP: setting.topP,
maxTokens: setting.maxTokens,
stream: setting.stream,
useProxy: providerSetting.useProxy,
},
dependencies
)

default:
if (providerBaseInfo.isCustom) {
return new CustomOpenAI(
Expand Down Expand Up @@ -308,6 +325,7 @@ export const aiProviderNameHash: Record<ModelProvider, string> = {
[ModelProviderEnum.LMStudio]: 'LM Studio API',
[ModelProviderEnum.Perplexity]: 'Perplexity API',
[ModelProviderEnum.XAI]: 'xAI API',
[ModelProviderEnum.NovitaAI]: 'Novita AI API',
[ModelProviderEnum.Custom]: 'Custom Provider',
}

Expand Down Expand Up @@ -383,6 +401,11 @@ export const AIModelProviderMenuOptionList = [
label: aiProviderNameHash[ModelProviderEnum.ChatGLM6B],
disabled: false,
},
{
value: ModelProviderEnum.NovitaAI,
label: aiProviderNameHash[ModelProviderEnum.NovitaAI],
disabled: false,
},
// {
// value: 'hunyuan',
// label: '腾讯混元',
Expand Down
1 change: 1 addition & 0 deletions src/shared/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -255,6 +255,7 @@ export enum ModelProviderEnum {
LMStudio = 'lm-studio',
Perplexity = 'perplexity',
XAI = 'xAI',
NovitaAI = 'novita-ai',
Custom = 'custom',
}
export type ModelProvider = ModelProviderEnum | string
Expand Down
1 change: 1 addition & 0 deletions src/shared/utils/llm_utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ export function isOpenAICompatible(providerId: string, modelId: string) {
ModelProviderEnum.Groq,
ModelProviderEnum.DeepSeek,
ModelProviderEnum.LMStudio,
ModelProviderEnum.NovitaAI,
].includes(providerId as ModelProviderEnum) || providerId.startsWith('custom-provider-')
)
}