Skip to content

Commit 7def95e

Browse files
committed
feat(models): update Gemini model context windows and output limits
1 parent f6a4455 commit 7def95e

1 file changed

Lines changed: 8 additions & 4 deletions

File tree

src/utils/model/openaiContextWindows.ts

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -123,9 +123,11 @@ const OPENAI_CONTEXT_WINDOWS: Record<string, number> = {
123123
'google/gemini-2.5-pro': 1_048_576,
124124

125125
// Google (native via CLAUDE_CODE_USE_GEMINI)
126-
'gemini-2.0-flash': 1_048_576,
127-
'gemini-2.5-pro': 1_048_576,
128-
'gemini-2.5-flash': 1_048_576,
126+
'gemini-2.0-flash': 1_048_576,
127+
'gemini-2.5-flash': 1_048_576,
128+
'gemini-2.5-pro': 1_048_576,
129+
'gemini-3-flash': 1_048_576,
130+
'gemini-3.1-pro': 1_048_576,
129131

130132
// Ollama local models
131133
// Llama 3.1+ models support 128k context natively (Meta official specs).
@@ -253,8 +255,10 @@ const OPENAI_MAX_OUTPUT_TOKENS: Record<string, number> = {
253255

254256
// Google (native via CLAUDE_CODE_USE_GEMINI)
255257
'gemini-2.0-flash': 8_192,
256-
'gemini-2.5-pro': 65_536,
257258
'gemini-2.5-flash': 65_536,
259+
'gemini-2.5-pro': 65_536,
260+
'gemini-3-flash': 65_536,
261+
'gemini-3.1-pro': 65_536,
258262

259263
// Ollama local models (conservative safe defaults)
260264
'llama3.3:70b': 4_096,

0 commit comments

Comments (0)