Skip to content
31 changes: 9 additions & 22 deletions src/components/StartupScreen.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
* Addresses: https://github.com/Gitlawb/openclaude/issues/55
*/

import { isLocalProviderUrl } from '../services/api/providerConfig.js'
import { isLocalProviderUrl, resolveProviderRequest } from '../services/api/providerConfig.js'
import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'

declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }
Expand Down Expand Up @@ -101,7 +101,11 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc

if (useOpenAI) {
const rawModel = process.env.OPENAI_MODEL || 'gpt-4o'
const baseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1'
const resolvedRequest = resolveProviderRequest({
model: rawModel,
baseUrl: process.env.OPENAI_BASE_URL,
})
const baseUrl = resolvedRequest.baseUrl
const isLocal = isLocalProviderUrl(baseUrl)
let name = 'OpenAI'
if (/deepseek/i.test(baseUrl) || /deepseek/i.test(rawModel)) name = 'DeepSeek'
Expand All @@ -114,26 +118,9 @@ function detectProvider(): { name: string; model: string; baseUrl: string; isLoc
else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)

// Resolve model alias to actual model name + reasoning effort
let displayModel = rawModel
const codexAliases: Record<string, { model: string; reasoningEffort?: string }> = {
codexplan: { model: 'gpt-5.4', reasoningEffort: 'high' },
'gpt-5.4': { model: 'gpt-5.4', reasoningEffort: 'high' },
'gpt-5.3-codex': { model: 'gpt-5.3-codex', reasoningEffort: 'high' },
'gpt-5.3-codex-spark': { model: 'gpt-5.3-codex-spark' },
codexspark: { model: 'gpt-5.3-codex-spark' },
'gpt-5.2-codex': { model: 'gpt-5.2-codex', reasoningEffort: 'high' },
'gpt-5.1-codex-max': { model: 'gpt-5.1-codex-max', reasoningEffort: 'high' },
'gpt-5.1-codex-mini': { model: 'gpt-5.1-codex-mini' },
'gpt-5.4-mini': { model: 'gpt-5.4-mini', reasoningEffort: 'medium' },
'gpt-5.2': { model: 'gpt-5.2', reasoningEffort: 'medium' },
}
const alias = rawModel.toLowerCase()
if (alias in codexAliases) {
const resolved = codexAliases[alias]
displayModel = resolved.model
if (resolved.reasoningEffort) {
displayModel = `${displayModel} (${resolved.reasoningEffort})`
}
let displayModel = resolvedRequest.resolvedModel
if (resolvedRequest.reasoning?.effort) {
displayModel = `${displayModel} (${resolvedRequest.reasoning.effort})`
}

return { name, model: displayModel, baseUrl, isLocal }
Expand Down
30 changes: 26 additions & 4 deletions src/services/api/providerConfig.ts
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,8 @@ const CODEX_ALIAS_MODELS: Record<
type CodexAlias = keyof typeof CODEX_ALIAS_MODELS
type ReasoningEffort = 'low' | 'medium' | 'high' | 'xhigh'

const OPENAI_CODEX_SHORTCUT_ALIASES = new Set(['codexplan', 'codexspark'])

export type ProviderTransport = 'chat_completions' | 'codex_responses'

export type ResolvedProviderRequest = {
Expand Down Expand Up @@ -219,6 +221,12 @@ export function isCodexAlias(model: string): boolean {
return base in CODEX_ALIAS_MODELS
}

/**
 * Returns true when `model` names one of the OpenAI Codex shortcut aliases
 * (see OPENAI_CODEX_SHORTCUT_ALIASES, e.g. "codexplan" / "codexspark").
 * Matching ignores case and surrounding whitespace, and strips any
 * "?"-suffixed option string before the lookup.
 */
function isOpenAICodexShortcutAlias(model: string): boolean {
  const normalized = model.trim().toLowerCase()
  // Drop everything from the first '?' onward so "codexplan?foo" still matches.
  const queryIndex = normalized.indexOf('?')
  const base = queryIndex === -1 ? normalized : normalized.slice(0, queryIndex)
  return OPENAI_CODEX_SHORTCUT_ALIASES.has(base)
}

export function shouldUseCodexTransport(
model: string,
baseUrl: string | undefined,
Expand Down Expand Up @@ -363,10 +371,24 @@ export function resolveProviderRequest(options?: {
options?.fallbackModel?.trim() ||
(isGithubMode ? 'github:copilot' : 'gpt-4o')
const descriptor = parseModelDescriptor(requestedModel)
const rawBaseUrl =
asEnvUrl(options?.baseUrl) ??
const explicitBaseUrl = asEnvUrl(options?.baseUrl)
const envBaseUrlRaw =
asEnvUrl(process.env.OPENAI_BASE_URL) ??
asEnvUrl(process.env.OPENAI_API_BASE)
const envBaseUrl =
isGithubMode && envBaseUrlRaw && getGithubEndpointType(envBaseUrlRaw) === 'custom'
? undefined
: envBaseUrlRaw
const rawBaseUrl = explicitBaseUrl ?? envBaseUrl

const shellModel = process.env.OPENAI_MODEL?.trim() ?? ''
const isCodexAliasModel =
isOpenAICodexShortcutAlias(requestedModel) ||
isOpenAICodexShortcutAlias(shellModel)
const finalBaseUrl =
Comment on lines +396 to +408
Copy link

Copilot AI Apr 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

isCodexAliasModel currently considers process.env.OPENAI_MODEL (shellModel) even when the caller passes an explicit options.model. This can incorrectly force finalBaseUrl to DEFAULT_CODEX_BASE_URL for non-Codex requests (e.g., resolveProviderRequest({ model: 'gpt-4o' }) while the environment has OPENAI_MODEL=codexplan), which would misroute traffic to the Codex endpoint. Consider basing the Codex-shortcut check only on requestedModel, or only consulting shellModel when options?.model is not provided and requestedModel came from the env var.

Copilot uses AI. Check for mistakes.
!isGithubMode && isCodexAliasModel && !explicitBaseUrl
? DEFAULT_CODEX_BASE_URL
: rawBaseUrl
Comment on lines +408 to +411
Copy link

Copilot AI Apr 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The finalBaseUrl override for Codex shortcuts is currently gated on !explicitBaseUrl (i.e., whether the caller passed options.baseUrl), not on whether the user configured a non-Codex endpoint. This makes behavior inconsistent across call sites (e.g., openaiShim calls resolveProviderRequest without baseUrl, so a locally configured OPENAI_BASE_URL=http://127.0.0.1:8080/v1 would be ignored for codexplan/codexspark and silently switched to the Codex endpoint). Consider basing the override on rawBaseUrl's value (e.g., only override when no base URL is set / it’s empty/"undefined" / it’s the official OpenAI v1 base URL), rather than on whether it was passed via options.

Copilot uses AI. Check for mistakes.
Comment on lines +407 to +411
Copy link

Copilot AI Apr 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

hasUserSetBaseUrl compares rawBaseUrl to DEFAULT_OPENAI_BASE_URL without normalizing. If a user sets OPENAI_BASE_URL to an equivalent value like https://api.openai.com/v1/ (trailing slash) or different casing, this will be treated as “custom”, preventing the Codex shortcut override and potentially flipping transport back to chat_completions. Consider normalizing before comparison (e.g., trimming trailing slashes or parsing with new URL).

Copilot uses AI. Check for mistakes.
Comment on lines +405 to +411
Copy link

Copilot AI Apr 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

hasUserSetBaseUrl treats rawBaseUrl === DEFAULT_OPENAI_BASE_URL as “not user set”, so an explicit OPENAI_BASE_URL=https://api.openai.com/v1 (or options.baseUrl) will still be overridden to the Codex endpoint for shortcut aliases. This also creates inconsistent behavior where a trailing slash (e.g. .../v1/) avoids the override. Consider treating any explicitly provided base URL (options/env) as user-set, or normalize/compare URLs rather than raw strings, and only apply the Codex default when no base URL was provided at all.

Copilot uses AI. Check for mistakes.
Comment on lines +406 to +411
Copy link

Copilot AI Apr 12, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

resolveProviderRequest can override an explicitly provided baseUrl when it equals DEFAULT_OPENAI_BASE_URL. Example: calling resolveProviderRequest({ model: 'codexplan', baseUrl: DEFAULT_OPENAI_BASE_URL }) will currently force finalBaseUrl to DEFAULT_CODEX_BASE_URL because hasUserSetBaseUrl is false for the default URL. This breaks the expected precedence where an explicit option should always win. Consider treating explicitBaseUrl presence as “user-set” (even if it equals the default) and/or adding a regression test for this case.

Copilot uses AI. Check for mistakes.

const githubEndpointType = isGithubMode
? getGithubEndpointType(rawBaseUrl)
Expand All @@ -380,7 +402,7 @@ export function resolveProviderRequest(options?: {
: requestedModel

const transport: ProviderTransport =
shouldUseCodexTransport(requestedModel, rawBaseUrl) ||
shouldUseCodexTransport(requestedModel, finalBaseUrl) ||
(isGithubCopilot && shouldUseGithubResponsesApi(githubResolvedModel))
? 'codex_responses'
: 'chat_completions'
Expand All @@ -404,7 +426,7 @@ export function resolveProviderRequest(options?: {
requestedModel,
resolvedModel,
baseUrl:
(rawBaseUrl ??
(finalBaseUrl ??
(isGithubCopilot && transport === 'codex_responses'
? GITHUB_COPILOT_BASE_URL
: (isGithubMode
Expand Down
Loading