1- import { afterEach , expect , mock , test } from 'bun:test'
2- // Import the real auth.js and providerConfig.js up front so we can spread
3- // their export surfaces into mock factories. `mock.module()` is process-global
4- // in bun:test and `mock.restore()` does not undo it (see user.test.ts), so
5- // any module we mock here needs to keep the full original export shape — or
6- // downstream tests that load it via openaiShim/client/codexShim crash with
7- // "Export named 'X' not found in module".
8- import * as actualAuth from './auth.js'
9- import * as actualProviderConfig from '../services/api/providerConfig.js'
10- import * as actualThinking from './thinking.js'
11- import * as actualGrowthbook from 'src/services/analytics/growthbook.js'
12- import * as actualProviders from './model/providers.js'
13- import * as actualModelSupportOverrides from './model/modelSupportOverrides.js'
1+ import { afterEach , beforeEach , expect , mock , test } from 'bun:test'
2+
// Every provider-selection / provider-config environment variable that the
// effort module reads at import time. beforeEach snapshots these and
// afterEach restores them, so tests that mutate the environment cannot leak
// state into each other. Kept `as const` so the key union stays literal.
const ENV_KEYS = [
  'CLAUDE_CODE_USE_BEDROCK',
  'CLAUDE_CODE_USE_FOUNDRY',
  'CLAUDE_CODE_USE_GEMINI',
  'CLAUDE_CODE_USE_GITHUB',
  'CLAUDE_CODE_USE_MISTRAL',
  'CLAUDE_CODE_USE_OPENAI',
  'CLAUDE_CODE_USE_VERTEX',
  'CLAUDE_CODE_PROVIDER_PROFILE_ENV_APPLIED',
  'MISTRAL_BASE_URL',
  'OPENAI_BASE_URL',
  'OPENAI_MODEL',
  'OPENAI_API_BASE',
  'XAI_API_KEY',
  'MINIMAX_API_KEY',
] as const
19+
20+ const originalEnv : Record < string , string | undefined > = { }
21+
22+ beforeEach ( ( ) => {
23+ mock . restore ( )
24+ for ( const key of ENV_KEYS ) {
25+ originalEnv [ key ] = process . env [ key ]
26+ }
27+ } )
1428
1529afterEach ( ( ) => {
16- mock . restore ( )
30+ for ( const key of ENV_KEYS ) {
31+ if ( originalEnv [ key ] === undefined ) {
32+ delete process . env [ key ]
33+ } else {
34+ process . env [ key ] = originalEnv [ key ]
35+ }
36+ }
1737} )
1838
1939async function importFreshEffortModule ( options : {
20- provider : 'codex' | 'openai'
21- supportsCodexReasoningEffort : boolean
40+ provider : 'codex' | 'openai' | 'firstParty'
2241} ) {
23- mock . module ( './model/providers.js' , ( ) => ( {
24- ...actualProviders ,
25- getAPIProvider : ( ) => options . provider ,
26- } ) )
27- mock . module ( './model/modelSupportOverrides.js' , ( ) => ( {
28- ...actualModelSupportOverrides ,
29- get3PModelCapabilityOverride : ( ) => undefined ,
30- } ) )
31- mock . module ( '../services/api/providerConfig.js' , ( ) => ( {
32- ...actualProviderConfig ,
33- supportsCodexReasoningEffort : ( ) => options . supportsCodexReasoningEffort ,
34- } ) )
35- mock . module ( './auth.js' , ( ) => ( {
36- ...actualAuth ,
37- isProSubscriber : ( ) => false ,
38- isMaxSubscriber : ( ) => false ,
39- isTeamSubscriber : ( ) => false ,
40- } ) )
41- mock . module ( './thinking.js' , ( ) => ( {
42- ...actualThinking ,
43- isUltrathinkEnabled : ( ) => false ,
44- } ) )
45- mock . module ( 'src/services/analytics/growthbook.js' , ( ) => ( {
46- ...actualGrowthbook ,
47- getFeatureValue_CACHED_MAY_BE_STALE : ( _key : string , fallback : unknown ) =>
48- fallback ,
49- } ) )
42+ for ( const key of ENV_KEYS ) {
43+ delete process . env [ key ]
44+ }
45+ if ( options . provider === 'codex' ) {
46+ process . env . CLAUDE_CODE_USE_OPENAI = '1'
47+ process . env . OPENAI_MODEL = 'gpt-5.4'
48+ } else if ( options . provider === 'openai' ) {
49+ process . env . CLAUDE_CODE_USE_OPENAI = '1'
50+ process . env . OPENAI_BASE_URL = 'https://api.openai.com/v1'
51+ process . env . OPENAI_MODEL = 'gpt-5.4'
52+ }
5053
5154 return import ( `./effort.js?ts=${ Date . now ( ) } -${ Math . random ( ) } ` )
5255}
5356
54- test ( 'gpt-5.4 on the ChatGPT Codex backend supports effort selection' , async ( ) => {
55- const { getAvailableEffortLevels, modelSupportsEffort } =
56- await importFreshEffortModule ( {
57- provider : 'codex' ,
58- supportsCodexReasoningEffort : true ,
59- } )
60-
61- expect ( modelSupportsEffort ( 'gpt-5.4' ) ) . toBe ( true )
62- expect ( getAvailableEffortLevels ( 'gpt-5.4' ) ) . toEqual ( [
63- 'low' ,
64- 'medium' ,
65- 'high' ,
66- 'xhigh' ,
67- ] )
68- } )
57+ test (
58+ 'gpt-5.4 on the ChatGPT Codex backend supports effort selection' ,
59+ async ( ) => {
60+ const { getAvailableEffortLevels, modelSupportsEffort } =
61+ await importFreshEffortModule ( {
62+ provider : 'codex' ,
63+ } )
64+
65+ expect ( modelSupportsEffort ( 'gpt-5.4' ) ) . toBe ( true )
66+ expect ( getAvailableEffortLevels ( 'gpt-5.4' ) ) . toEqual ( [
67+ 'low' ,
68+ 'medium' ,
69+ 'high' ,
70+ 'xhigh' ,
71+ ] )
72+ } ,
73+ 15_000 ,
74+ )
6975
7076test ( 'gpt-5.4 on the OpenAI provider still supports effort selection' , async ( ) => {
7177 const { getAvailableEffortLevels, modelSupportsEffort } =
7278 await importFreshEffortModule ( {
7379 provider : 'openai' ,
74- supportsCodexReasoningEffort : true ,
7580 } )
7681
7782 expect ( modelSupportsEffort ( 'gpt-5.4' ) ) . toBe ( true )
@@ -87,7 +92,6 @@ test('gpt-5.3-codex-spark stays without effort controls', async () => {
8792 const { getAvailableEffortLevels, modelSupportsEffort } =
8893 await importFreshEffortModule ( {
8994 provider : 'codex' ,
90- supportsCodexReasoningEffort : false ,
9195 } )
9296
9397 expect ( modelSupportsEffort ( 'gpt-5.3-codex-spark' ) ) . toBe ( false )
@@ -97,7 +101,6 @@ test('gpt-5.3-codex-spark stays without effort controls', async () => {
97101test ( 'toPersistableEffort normalizes xhigh to max so it survives settings write' , async ( ) => {
98102 const { toPersistableEffort } = await importFreshEffortModule ( {
99103 provider : 'openai' ,
100- supportsCodexReasoningEffort : true ,
101104 } )
102105
103106 expect ( toPersistableEffort ( 'xhigh' ) ) . toBe ( 'max' )
@@ -112,7 +115,6 @@ test('standardEffortToOpenAI maps max to xhigh for shim payload', async () => {
112115 const { standardEffortToOpenAI, openAIEffortToStandard } =
113116 await importFreshEffortModule ( {
114117 provider : 'openai' ,
115- supportsCodexReasoningEffort : true ,
116118 } )
117119
118120 expect ( standardEffortToOpenAI ( 'max' ) ) . toBe ( 'xhigh' )
@@ -128,7 +130,6 @@ test('e2e: xhigh → persisted max → resolveAppliedEffort → wire xhigh on Op
128130 standardEffortToOpenAI,
129131 } = await importFreshEffortModule ( {
130132 provider : 'openai' ,
131- supportsCodexReasoningEffort : true ,
132133 } )
133134
134135 // Picker writes the OpenAI-shaped value; toPersistableEffort normalizes.
@@ -147,19 +148,16 @@ test('e2e: xhigh → persisted max → resolveAppliedEffort → wire xhigh on Op
147148
148149test ( 'e2e: max on non-Opus Anthropic model still clamps to high' , async ( ) => {
149150 const { resolveAppliedEffort } = await importFreshEffortModule ( {
150- provider : 'firstParty' as unknown as 'openai' ,
151- supportsCodexReasoningEffort : false ,
151+ provider : 'firstParty' ,
152152 } )
153153
154154 expect ( resolveAppliedEffort ( 'claude-sonnet-4-6' , 'max' ) ) . toBe ( 'high' )
155155} )
156156
157-
158157test ( 'getEffortSuffix shows the effective displayed effort for supported models' , async ( ) => {
159158 const { getDisplayedEffortLevel, getEffortSuffix } =
160159 await importFreshEffortModule ( {
161160 provider : 'openai' ,
162- supportsCodexReasoningEffort : true ,
163161 } )
164162
165163 expect ( getEffortSuffix ( 'gpt-5.4' , 'medium' ) ) . toBe ( ' with medium effort' )
@@ -173,7 +171,6 @@ test('getEffortSuffix shows the effective displayed effort for supported models'
173171test ( 'getEffortSuffix stays hidden for models without effort controls' , async ( ) => {
174172 const { getEffortSuffix } = await importFreshEffortModule ( {
175173 provider : 'codex' ,
176- supportsCodexReasoningEffort : false ,
177174 } )
178175
179176 expect ( getEffortSuffix ( 'gpt-5.3-codex-spark' , 'medium' ) ) . toBe ( '' )
0 commit comments