diff --git a/src/renderer/components/ReasoningEffortSelect.tsx b/src/renderer/components/ReasoningEffortSelect.tsx
new file mode 100644
index 00000000..95d3b987
--- /dev/null
+++ b/src/renderer/components/ReasoningEffortSelect.tsx
@@ -0,0 +1,45 @@
+import { useEffect } from 'react'
+import { Select, MenuItem, Typography, Box, SelectChangeEvent } from '@mui/material'
+import { useTranslation } from 'react-i18next'
+
+export interface Props {
+ value: string
+ onChange: (value: string) => void
+ className?: string
+}
+
+export default function ReasoningEffortSelect(props: Props) {
+ const { t } = useTranslation()
+
+ useEffect(() => {
+ if (!props.value) {
+ props.onChange('medium')
+ }
+ }, [])
+
+  const handleChange = (event: SelectChangeEvent) => {
+    props.onChange(event.target.value)
+  }
+
+  return (
+    <Box className={props.className}>
+      <Typography gutterBottom>
+        {t('Reasoning Effort')}
+      </Typography>
+      <Select value={props.value || 'medium'} onChange={handleChange} fullWidth size="small">
+        <MenuItem value="low">{t('Low')}</MenuItem>
+        <MenuItem value="medium">{t('Medium')}</MenuItem>
+        <MenuItem value="high">{t('High')}</MenuItem>
+      </Select>
+    </Box>
+  )
+}
\ No newline at end of file
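For context, a minimal usage sketch of the new component. The `useState` host, the `ReasoningEffortDemo` name, and the empty initial value are illustrative assumptions, not part of this patch:

```tsx
import { useState } from 'react'
import ReasoningEffortSelect from './ReasoningEffortSelect'

// Hypothetical host form. The select is controlled, so the parent owns the
// value; ReasoningEffortSelect pushes 'medium' up on mount if the value is empty.
export function ReasoningEffortDemo() {
  const [effort, setEffort] = useState('')
  return <ReasoningEffortSelect value={effort} onChange={setEffort} />
}
```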
diff --git a/src/renderer/packages/models/openai.ts b/src/renderer/packages/models/openai.ts
index a1385c96..f3e552a5 100644
--- a/src/renderer/packages/models/openai.ts
+++ b/src/renderer/packages/models/openai.ts
@@ -8,6 +8,7 @@ interface Options {
apiPath?: string
model: Model | 'custom-model'
openaiCustomModel?: string
+ openaiReasoningEffort: string
temperature: number
topP: number
}
@@ -48,10 +49,25 @@ export default class OpenAI extends Base {
rawMessages = injectModelSystemPrompt(model, rawMessages)
- if (model.startsWith('o1')) {
- const messages = await populateO1Message(rawMessages)
- return this.requestChatCompletionsNotStream({ model, messages }, signal, onResultChange)
+    // o1-mini and o1-preview do not support the reasoning_effort parameter, unlike the full o1 release
+ if (model.startsWith('o1-mini') || model.startsWith('o1-preview')) {
+ const messages = await populateReasoningMessage(rawMessages)
+ return this.requestChatCompletionsNotStream({
+ model,
+ messages,
+ }, signal, onResultChange)
}
+
+ // https://platform.openai.com/docs/guides/reasoning
+ if (model.startsWith('o')) {
+ const messages = await populateReasoningMessage(rawMessages)
+ return this.requestChatCompletionsNotStream({
+ model,
+ messages,
+ reasoning_effort: this.options.openaiReasoningEffort,
+ }, signal, onResultChange)
+ }
+
const messages = await populateGPTMessage(rawMessages)
return this.requestChatCompletionsStream({
messages,
@@ -184,6 +200,15 @@ export const openaiModelConfigs = {
maxContextTokens: 128_000,
},
+ // https://platform.openai.com/docs/models#o1
+ 'o1': {
+ maxTokens: 100_000,
+ maxContextTokens: 200_000,
+ },
+ 'o1-2024-12-17': {
+ maxTokens: 100_000,
+ maxContextTokens: 200_000,
+ },
'o1-preview': {
maxTokens: 32_768,
maxContextTokens: 128_000,
@@ -201,6 +226,16 @@ export const openaiModelConfigs = {
maxContextTokens: 128_000,
},
+ // https://platform.openai.com/docs/models#o3-mini
+ 'o3-mini': {
+ maxTokens: 100_000,
+ maxContextTokens: 200_000,
+ },
+ 'o3-mini-2025-01-31': {
+ maxTokens: 100_000,
+ maxContextTokens: 200_000,
+ },
+
'gpt-4': {
maxTokens: 4_096,
maxContextTokens: 8_192,
@@ -267,7 +302,7 @@ export async function populateGPTMessage(rawMessages: Message[]): Promise<OpenAIMessage[]> {
-export async function populateO1Message(rawMessages: Message[]): Promise<OpenAIMessage[]> {
+export async function populateReasoningMessage(rawMessages: Message[]): Promise<OpenAIMessage[]> {
const messages: OpenAIMessage[] = rawMessages.map((m) => ({
role: m.role === 'system' ? 'user' : m.role,
content: m.content,
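Taken together, the branches above produce three request shapes: o1-mini/o1-preview get a non-streaming request without `reasoning_effort`, other o-series models get a non-streaming request with it, and gpt-* models keep the existing streaming path with `temperature` and `top_p`. A sketch of the middle case's body, with the model name and prompt as illustrative assumptions (`reasoning_effort` takes 'low', 'medium', or 'high' per the linked reasoning guide):

```ts
// Illustrative request body only, not code from this patch.
const body = {
  model: 'o3-mini',
  messages: [{ role: 'user', content: 'Summarize the release notes.' }],
  reasoning_effort: 'medium', // omitted entirely for o1-mini / o1-preview
}
```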
diff --git a/src/renderer/pages/SettingDialog/OpenAISetting.tsx b/src/renderer/pages/SettingDialog/OpenAISetting.tsx
index 137cc39a..50b4f887 100644
--- a/src/renderer/pages/SettingDialog/OpenAISetting.tsx
+++ b/src/renderer/pages/SettingDialog/OpenAISetting.tsx
@@ -2,6 +2,7 @@ import { Typography, Box } from '@mui/material'
import { ModelSettings } from '../../../shared/types'
import { useTranslation } from 'react-i18next'
import { Accordion, AccordionSummary, AccordionDetails } from '../../components/Accordion'
+import ReasoningEffortSelect from '../../components/ReasoningEffortSelect'
import TemperatureSlider from '../../components/TemperatureSlider'
import TopPSlider from '../../components/TopPSlider'
import PasswordTextField from '../../components/PasswordTextField'
@@ -17,6 +18,11 @@ interface ModelConfigProps {
export default function OpenAISetting(props: ModelConfigProps) {
const { settingsEdit, setSettingsEdit } = props
const { t } = useTranslation()
+  const model = settingsEdit.model
+  const isReasoningModel = model?.startsWith('o') &&
+    !model?.startsWith('o1-preview') &&
+    !model?.startsWith('o1-mini')
+
return (
-      <TemperatureSlider
-        value={settingsEdit.temperature}
-        onChange={(value) => setSettingsEdit({ ...settingsEdit, temperature: value })}
-      />
-      <TopPSlider
-        topP={settingsEdit.topP}
-        onChange={(v) => setSettingsEdit({ ...settingsEdit, topP: v })}
-      />
+      {isReasoningModel && (
+        <ReasoningEffortSelect
+          value={settingsEdit.openaiReasoningEffort}
+          onChange={(value) => setSettingsEdit({ ...settingsEdit, openaiReasoningEffort: value })}
+        />
+      )}
+
+      {!model?.startsWith('o') && (
+        <>
+          <TemperatureSlider
+            value={settingsEdit.temperature}
+            onChange={(value) => setSettingsEdit({ ...settingsEdit, temperature: value })}
+          />
+          <TopPSlider
+            topP={settingsEdit.topP}
+            onChange={(v) => setSettingsEdit({ ...settingsEdit, topP: v })}
+          />
+        </>
+      )}
+
       <MaxContextMessageCountSlider
         value={settingsEdit.openaiMaxContextMessageCount}
         onChange={(v) => setSettingsEdit({ ...settingsEdit, openaiMaxContextMessageCount: v })}
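The prefix checks above mirror the branching in openai.ts; written as a standalone helper (hypothetical, shown only to make the truth table explicit):

```ts
// True for models that accept reasoning_effort (o1, o1-2024-12-17, o3-mini, ...),
// false for o1-preview / o1-mini and for all non-o-series models.
function isReasoningModel(model?: string): boolean {
  return Boolean(
    model?.startsWith('o') && !model.startsWith('o1-preview') && !model.startsWith('o1-mini')
  )
}

isReasoningModel('o1')         // true
isReasoningModel('o3-mini')    // true
isReasoningModel('o1-preview') // false
isReasoningModel('gpt-4o')     // false
```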
diff --git a/src/shared/defaults.ts b/src/shared/defaults.ts
index c82422d3..e14883eb 100644
--- a/src/shared/defaults.ts
+++ b/src/shared/defaults.ts
@@ -53,6 +53,7 @@ export function settings(): Settings {
siliconCloudModel: 'THUDM/glm-4-9b-chat',
autoGenerateTitle: true,
+ openaiReasoningEffort: 'medium',
}
}
diff --git a/src/shared/types.ts b/src/shared/types.ts
index fe07c9f6..ce500837 100644
--- a/src/shared/types.ts
+++ b/src/shared/types.ts
@@ -118,6 +118,7 @@ export interface ModelSettings {
temperature: number
topP: number
openaiMaxContextMessageCount: number
+ openaiReasoningEffort: string
}
export interface Settings extends ModelSettings {
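`openaiReasoningEffort` is declared as a plain `string`; since only three values are meaningful, a string-literal union would catch typos at compile time. A possible tightening, not part of this patch:

```ts
export type OpenAIReasoningEffort = 'low' | 'medium' | 'high'

export interface ModelSettings {
  // ...existing fields elided...
  openaiReasoningEffort: OpenAIReasoningEffort
}
```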