Skip to content

Commit b2ff4da

Browse files
archerarcher
authored and committed
feat: add Gemini 3.1 models
- Add gemini-3.1-pro-preview (released February 19, 2026)
- Add gemini-3.1-flash-lite-preview (released March 3, 2026)

Both models support:
- 1M context window
- 64k max response
- Vision
- Tool choice
1 parent 5a8d7e1 commit b2ff4da

File tree

1 file changed

+147
-40
lines changed

1 file changed

+147
-40
lines changed

projects/app/data/model.json

Lines changed: 147 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -1,35 +1,34 @@
1-
// 已使用 json5 进行解析,会自动去掉注释,无需手动去除
21
{
32
"feConfigs": {
4-
"lafEnv": "https://laf.dev" // laf环境。 https://laf.run (杭州阿里云) ,或者私有化的laf环境。如果使用 Laf openapi 功能,需要最新版的 laf 。
3+
"lafEnv": "https://laf.dev"
54
},
65
"systemEnv": {
7-
"vectorMaxProcess": 15, // 向量处理线程数量
8-
"qaMaxProcess": 15, // 问答拆分线程数量
9-
"tokenWorkers": 30, // Token 计算线程保持数,会持续占用内存,不能设置太大。
10-
"hnswEfSearch": 100 // 向量搜索参数,仅对 PG 和 OB 生效。越大,搜索越精确,但是速度越慢。设置为100,有99%+精度。
6+
"vectorMaxProcess": 15,
7+
"qaMaxProcess": 15,
8+
"tokenWorkers": 30,
9+
"hnswEfSearch": 100
1110
},
1211
"llmModels": [
1312
{
14-
"provider": "OpenAI", // 模型提供商,主要用于分类展示,目前已经内置提供商包括:https://github.com/labring/FastGPT/blob/main/packages/global/core/ai/provider.ts, 可 pr 提供新的提供商,或直接填写 Other
15-
"model": "gpt-5", // 模型名(对应OneAPI中渠道的模型名)
16-
"name": "gpt-5", // 模型别名
17-
"maxContext": 128000, // 最大上下文
18-
"maxResponse": 16000, // 最大回复
19-
"quoteMaxToken": 120000, // 最大引用内容
20-
"maxTemperature": 1.2, // 最大温度
21-
"charsPointsPrice": 0, // n积分/1k token(商业版)
22-
"censor": false, // 是否开启敏感校验(商业版)
23-
"vision": true, // 是否支持图片输入
24-
"datasetProcess": true, // 是否设置为文本理解模型(QA),务必保证至少有一个为true,否则知识库会报错
25-
"usedInClassify": true, // 是否用于问题分类(务必保证至少有一个为true)
26-
"usedInExtractFields": true, // 是否用于内容提取(务必保证至少有一个为true)
27-
"usedInToolCall": true, // 是否用于工具调用(务必保证至少有一个为true)
28-
"toolChoice": true, // 是否支持工具选择(分类,内容提取,工具调用会用到。)
29-
"functionCall": false, // 是否支持函数调用(分类,内容提取,工具调用会用到。会优先使用 toolChoice,如果为false,则使用 functionCall,如果仍为 false,则使用提示词模式) // 自定义文本分类提示词(不支持工具和函数调用的模型 // 自定义内容提取提示词
30-
"defaultSystemChatPrompt": "", // 对话默认携带的系统提示词
31-
"defaultConfig": {}, // 请求API时,挟带一些默认配置(比如 GLM4 的 top_p)
32-
"fieldMap": {} // 字段映射(o1 模型需要把 max_tokens 映射为 max_completion_tokens)
13+
"provider": "OpenAI",
14+
"model": "gpt-5",
15+
"name": "gpt-5",
16+
"maxContext": 128000,
17+
"maxResponse": 16000,
18+
"quoteMaxToken": 120000,
19+
"maxTemperature": 1.2,
20+
"charsPointsPrice": 0,
21+
"censor": false,
22+
"vision": true,
23+
"datasetProcess": true,
24+
"usedInClassify": true,
25+
"usedInExtractFields": true,
26+
"usedInToolCall": true,
27+
"toolChoice": true,
28+
"functionCall": false,
29+
"defaultSystemChatPrompt": "",
30+
"defaultConfig": {},
31+
"fieldMap": {}
3332
},
3433
{
3534
"provider": "OpenAI",
@@ -99,6 +98,90 @@
9998
"max_tokens": null,
10099
"stream": false
101100
}
101+
},
102+
{
103+
"provider": "XChai",
104+
"model": "claude-sonnet-4-6",
105+
"name": "Claude Sonnet 4.6",
106+
"maxContext": 200000,
107+
"maxResponse": 8000,
108+
"quoteMaxToken": 180000,
109+
"maxTemperature": 1.2,
110+
"charsPointsPrice": 0,
111+
"censor": false,
112+
"vision": true,
113+
"datasetProcess": true,
114+
"usedInClassify": true,
115+
"usedInExtractFields": true,
116+
"usedInToolCall": true,
117+
"toolChoice": true,
118+
"functionCall": false,
119+
"defaultSystemChatPrompt": "",
120+
"defaultConfig": {},
121+
"fieldMap": {}
122+
},
123+
{
124+
"provider": "XChai",
125+
"model": "claude-opus-4-6",
126+
"name": "Claude Opus 4.6",
127+
"maxContext": 200000,
128+
"maxResponse": 8000,
129+
"quoteMaxToken": 180000,
130+
"maxTemperature": 1.2,
131+
"charsPointsPrice": 0,
132+
"censor": false,
133+
"vision": true,
134+
"datasetProcess": true,
135+
"usedInClassify": true,
136+
"usedInExtractFields": true,
137+
"usedInToolCall": true,
138+
"toolChoice": true,
139+
"functionCall": false,
140+
"defaultSystemChatPrompt": "",
141+
"defaultConfig": {},
142+
"fieldMap": {}
143+
},
144+
{
145+
"provider": "Gemini",
146+
"model": "gemini-3.1-pro-preview",
147+
"name": "Gemini 3.1 Pro",
148+
"maxContext": 1000000,
149+
"maxResponse": 64000,
150+
"quoteMaxToken": 1000000,
151+
"maxTemperature": 1.2,
152+
"charsPointsPrice": 0,
153+
"censor": false,
154+
"vision": true,
155+
"datasetProcess": true,
156+
"usedInClassify": true,
157+
"usedInExtractFields": true,
158+
"usedInToolCall": true,
159+
"toolChoice": true,
160+
"functionCall": false,
161+
"defaultSystemChatPrompt": "",
162+
"defaultConfig": {},
163+
"fieldMap": {}
164+
},
165+
{
166+
"provider": "Gemini",
167+
"model": "gemini-3.1-flash-lite-preview",
168+
"name": "Gemini 3.1 Flash Lite",
169+
"maxContext": 1000000,
170+
"maxResponse": 64000,
171+
"quoteMaxToken": 1000000,
172+
"maxTemperature": 1.2,
173+
"charsPointsPrice": 0,
174+
"censor": false,
175+
"vision": true,
176+
"datasetProcess": true,
177+
"usedInClassify": true,
178+
"usedInExtractFields": true,
179+
"usedInToolCall": true,
180+
"toolChoice": true,
181+
"functionCall": false,
182+
"defaultSystemChatPrompt": "",
183+
"defaultConfig": {},
184+
"fieldMap": {}
102185
}
103186
],
104187
"vectorModels": [
@@ -125,15 +208,15 @@
125208
},
126209
{
127210
"provider": "OpenAI",
128-
"model": "text-embedding-ada-002", // 模型名(与OneAPI对应)
129-
"name": "Embedding-2", // 模型展示名
130-
"charsPointsPrice": 0, // n积分/1k token
131-
"defaultToken": 700, // 默认文本分割时候的 token
132-
"maxToken": 3000, // 最大 token
133-
"weight": 100, // 优先训练权重
134-
"defaultConfig": {}, // 自定义额外参数。例如,如果希望使用 embedding3-large 的话,可以传入 dimensions:1024,来返回1024维度的向量。(目前必须小于1536维度)
135-
"dbConfig": {}, // 存储时的额外参数(非对称向量模型时候需要用到)
136-
"queryConfig": {} // 参训时的额外参数
211+
"model": "text-embedding-ada-002",
212+
"name": "Embedding-2",
213+
"charsPointsPrice": 0,
214+
"defaultToken": 700,
215+
"maxToken": 3000,
216+
"weight": 100,
217+
"defaultConfig": {},
218+
"dbConfig": {},
219+
"queryConfig": {}
137220
}
138221
],
139222
"reRankModels": [],
@@ -144,12 +227,36 @@
144227
"name": "OpenAI TTS1",
145228
"charsPointsPrice": 0,
146229
"voices": [
147-
{ "label": "Alloy", "value": "alloy", "bufferId": "openai-Alloy" },
148-
{ "label": "Echo", "value": "echo", "bufferId": "openai-Echo" },
149-
{ "label": "Fable", "value": "fable", "bufferId": "openai-Fable" },
150-
{ "label": "Onyx", "value": "onyx", "bufferId": "openai-Onyx" },
151-
{ "label": "Nova", "value": "nova", "bufferId": "openai-Nova" },
152-
{ "label": "Shimmer", "value": "shimmer", "bufferId": "openai-Shimmer" }
230+
{
231+
"label": "Alloy",
232+
"value": "alloy",
233+
"bufferId": "openai-Alloy"
234+
},
235+
{
236+
"label": "Echo",
237+
"value": "echo",
238+
"bufferId": "openai-Echo"
239+
},
240+
{
241+
"label": "Fable",
242+
"value": "fable",
243+
"bufferId": "openai-Fable"
244+
},
245+
{
246+
"label": "Onyx",
247+
"value": "onyx",
248+
"bufferId": "openai-Onyx"
249+
},
250+
{
251+
"label": "Nova",
252+
"value": "nova",
253+
"bufferId": "openai-Nova"
254+
},
255+
{
256+
"label": "Shimmer",
257+
"value": "shimmer",
258+
"bufferId": "openai-Shimmer"
259+
}
153260
]
154261
}
155262
],

0 commit comments

Comments (0)