Skip to content

Commit 0067d06

Browse files
committed
Fix effort test provider isolation
Replace leaking effort test provider mocks with env-driven provider setup, expand provider env isolation, and keep the shared mutation lock so the focused tests do not leak state across Bun module imports.
1 parent 4fdc0a7 commit 0067d06

1 file changed

Lines changed: 96 additions & 31 deletions

File tree

src/utils/effort.codex.test.ts

Lines changed: 96 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -3,46 +3,108 @@ import {
33
acquireSharedMutationLock,
44
releaseSharedMutationLock,
55
} from '../test/sharedMutationLock.js'
6-
// Import the real auth.js and providerConfig.js up front so we can spread
7-
// their export surfaces into mock factories. `mock.module()` is process-global
8-
// in bun:test and `mock.restore()` does not undo it (see user.test.ts), so
9-
// any module we mock here needs to keep the full original export shape — or
10-
// downstream tests that load it via openaiShim/client/codexShim crash with
11-
// "Export named 'X' not found in module".
6+
// Import the real modules up front so we can spread their export surfaces into
7+
// mock factories. `mock.module()` is process-global in bun:test and
8+
// `mock.restore()` does not undo it (see user.test.ts), so mocked modules need
9+
// to keep the full original export shape.
1210
import * as actualAuth from './auth.js'
1311
import * as actualProviderConfig from '../services/api/providerConfig.js'
1412
import * as actualThinking from './thinking.js'
1513
import * as actualGrowthbook from 'src/services/analytics/growthbook.js'
1614
import * as actualProviders from './model/providers.js'
1715
import * as actualModelSupportOverrides from './model/modelSupportOverrides.js'
1816

17+
const ENV_KEYS = [
18+
'CLAUDE_CODE_USE_BEDROCK',
19+
'CLAUDE_CODE_USE_FOUNDRY',
20+
'CLAUDE_CODE_USE_GEMINI',
21+
'CLAUDE_CODE_USE_GITHUB',
22+
'CLAUDE_CODE_USE_MISTRAL',
23+
'CLAUDE_CODE_USE_OPENAI',
24+
'CLAUDE_CODE_USE_VERTEX',
25+
'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED',
26+
'MISTRAL_BASE_URL',
27+
'OPENAI_BASE_URL',
28+
'OPENAI_MODEL',
29+
'OPENAI_API_BASE',
30+
'XAI_API_KEY',
31+
'MINIMAX_API_KEY',
32+
] as const
33+
34+
const originalEnv: Record<string, string | undefined> = {}
35+
type TestProvider = 'codex' | 'openai' | 'firstParty'
36+
37+
function resolveProviderFromEnv(): TestProvider {
38+
if (process.env.CLAUDE_CODE_USE_OPENAI) {
39+
const baseUrl = process.env.OPENAI_BASE_URL ?? ''
40+
const model = process.env.OPENAI_MODEL ?? ''
41+
return baseUrl.includes('/backend-api/codex') || model.startsWith('codex')
42+
? 'codex'
43+
: 'openai'
44+
}
45+
return 'firstParty'
46+
}
47+
48+
function installProviderMock(provider?: TestProvider): void {
49+
mock.module('./model/providers.js', () => ({
50+
...actualProviders,
51+
getAPIProvider: () => provider ?? resolveProviderFromEnv(),
52+
getAPIProviderForStatsig: () => provider ?? resolveProviderFromEnv(),
53+
isFirstPartyAnthropicBaseUrl: () => true,
54+
isGithubNativeAnthropicMode: () => false,
55+
usesAnthropicAccountFlow: () =>
56+
(provider ?? resolveProviderFromEnv()) === 'firstParty',
57+
}))
58+
}
59+
1960
beforeEach(async () => {
2061
await acquireSharedMutationLock('utils/effort.codex.test.ts')
62+
mock.restore()
63+
installProviderMock()
64+
for (const key of ENV_KEYS) {
65+
originalEnv[key] = process.env[key]
66+
}
2167
})
2268

2369
afterEach(() => {
2470
try {
71+
for (const key of ENV_KEYS) {
72+
if (originalEnv[key] === undefined) {
73+
delete process.env[key]
74+
} else {
75+
process.env[key] = originalEnv[key]
76+
}
77+
}
2578
mock.restore()
2679
} finally {
2780
releaseSharedMutationLock()
2881
}
2982
})
3083

3184
async function importFreshEffortModule(options: {
32-
provider: 'codex' | 'openai'
33-
supportsCodexReasoningEffort: boolean
85+
provider: TestProvider
86+
supportsCodexReasoningEffort?: boolean
3487
}) {
35-
mock.module('./model/providers.js', () => ({
36-
...actualProviders,
37-
getAPIProvider: () => options.provider,
38-
}))
88+
for (const key of ENV_KEYS) {
89+
delete process.env[key]
90+
}
91+
installProviderMock(options.provider)
92+
if (options.provider === 'codex') {
93+
process.env.CLAUDE_CODE_USE_OPENAI = '1'
94+
process.env.OPENAI_MODEL = 'gpt-5.4'
95+
} else if (options.provider === 'openai') {
96+
process.env.CLAUDE_CODE_USE_OPENAI = '1'
97+
process.env.OPENAI_BASE_URL = 'https://api.openai.com/v1'
98+
process.env.OPENAI_MODEL = 'gpt-5.4'
99+
}
39100
mock.module('./model/modelSupportOverrides.js', () => ({
40101
...actualModelSupportOverrides,
41102
get3PModelCapabilityOverride: () => undefined,
42103
}))
43104
mock.module('../services/api/providerConfig.js', () => ({
44105
...actualProviderConfig,
45-
supportsCodexReasoningEffort: () => options.supportsCodexReasoningEffort,
106+
supportsCodexReasoningEffort: () =>
107+
options.supportsCodexReasoningEffort ?? true,
46108
}))
47109
mock.module('./auth.js', () => ({
48110
...actualAuth,
@@ -63,21 +125,25 @@ async function importFreshEffortModule(options: {
63125
return import(`./effort.js?ts=${Date.now()}-${Math.random()}`)
64126
}
65127

66-
test('gpt-5.4 on the ChatGPT Codex backend supports effort selection', async () => {
67-
const { getAvailableEffortLevels, modelSupportsEffort } =
68-
await importFreshEffortModule({
69-
provider: 'codex',
70-
supportsCodexReasoningEffort: true,
71-
})
128+
test(
129+
'gpt-5.4 on the ChatGPT Codex backend supports effort selection',
130+
async () => {
131+
const { getAvailableEffortLevels, modelSupportsEffort } =
132+
await importFreshEffortModule({
133+
provider: 'codex',
134+
supportsCodexReasoningEffort: true,
135+
})
72136

73-
expect(modelSupportsEffort('gpt-5.4')).toBe(true)
74-
expect(getAvailableEffortLevels('gpt-5.4')).toEqual([
75-
'low',
76-
'medium',
77-
'high',
78-
'xhigh',
79-
])
80-
})
137+
expect(modelSupportsEffort('gpt-5.4')).toBe(true)
138+
expect(getAvailableEffortLevels('gpt-5.4')).toEqual([
139+
'low',
140+
'medium',
141+
'high',
142+
'xhigh',
143+
])
144+
},
145+
15_000,
146+
)
81147

82148
test('gpt-5.4 on the OpenAI provider still supports effort selection', async () => {
83149
const { getAvailableEffortLevels, modelSupportsEffort } =
@@ -133,7 +199,7 @@ test('standardEffortToOpenAI maps max to xhigh for shim payload', async () => {
133199
expect(openAIEffortToStandard('high')).toBe('high')
134200
})
135201

136-
test('e2e: xhigh → persisted max → resolveAppliedEffort → wire xhigh on OpenAI/Codex (no high clamp)', async () => {
202+
test('e2e: xhigh -> persisted max -> resolveAppliedEffort -> wire xhigh on OpenAI/Codex (no high clamp)', async () => {
137203
const {
138204
toPersistableEffort,
139205
resolveAppliedEffort,
@@ -148,7 +214,7 @@ test('e2e: xhigh → persisted max → resolveAppliedEffort → wire xhigh on Op
148214
expect(persisted).toBe('max')
149215

150216
// App state holds 'max'. Non-Opus 'max' must NOT be downgraded to 'high'
151-
// when the model uses the OpenAI effort scheme the shim converts back
217+
// when the model uses the OpenAI effort scheme; the shim converts back
152218
// to 'xhigh' on the wire.
153219
const applied = resolveAppliedEffort('gpt-5.4', persisted)
154220
expect(applied).toBe('max')
@@ -159,14 +225,13 @@ test('e2e: xhigh → persisted max → resolveAppliedEffort → wire xhigh on Op
159225

160226
test('e2e: max on non-Opus Anthropic model still clamps to high', async () => {
161227
const { resolveAppliedEffort } = await importFreshEffortModule({
162-
provider: 'firstParty' as unknown as 'openai',
228+
provider: 'firstParty',
163229
supportsCodexReasoningEffort: false,
164230
})
165231

166232
expect(resolveAppliedEffort('claude-sonnet-4-6', 'max')).toBe('high')
167233
})
168234

169-
170235
test('getEffortSuffix shows the effective displayed effort for supported models', async () => {
171236
const { getDisplayedEffortLevel, getEffortSuffix } =
172237
await importFreshEffortModule({

0 commit comments

Comments
 (0)