
Commit e1e1254

Authored by Trevor Grant
add gemini models (#415)
Signed-off-by: Trevor Grant <[email protected]>
1 parent 2d84a6a commit e1e1254

File tree

4 files changed: +372 −3 lines changed

webapp/packages/api/user-service/agent_factory/__init__.py

Lines changed: 14 additions & 0 deletions

@@ -110,6 +110,20 @@ async def generate_agent_code(request: GenerateCodeRequest):
     model = request.composer_model_config.model
     provider = request.composer_model_config.provider

+    config = request.composer_model_config.parameters.copy()  # Make a copy to avoid modifying the original request object
+
+    # --- Special handling for Gemini tools ---
+    if provider == "gemini" and "tools" in config:
+        selected_gemini_tools = config["tools"]
+        if isinstance(selected_gemini_tools, list) and all(isinstance(t, str) for t in selected_gemini_tools):
+            # Transform ["toolName"] to [{"toolName": {}}]
+            config["tools"] = [{tool_name: {}} for tool_name in selected_gemini_tools]
+        else:
+            # If it's not the expected list of strings, log a warning
+            print(f"Warning: Gemini tools parameter has unexpected format: {selected_gemini_tools}. Skipping transformation.")
+            # Optionally, remove it if it's malformed to prevent errors in litellm
+            # del config["tools"]
+
     # ---- Code Generation Task ----
     code_gen_messages = [
         {"role": "system", "content": system_prompt},
webapp/packages/api/user-service/config/gemini.py (new file; path inferred from the "from .gemini import models as gemini_models" import in provider_config.py below)

Lines changed: 313 additions & 0 deletions

@@ -0,0 +1,313 @@
models = {
    "gemini-2.5-pro": {
        "parameters": {
            "temperature": {
                "type": "float",
                "default": 1.0,
                "min": 0.0,
                "max": 2.0,
                "description": "Temperature - Controls randomness"
            },
            # "top_p": {
            #     "type": "float",
            #     "default": None,
            #     "min": 0.0,
            #     "max": 1.0,
            #     "description": "TopP - Nucleus sampling"
            # },
            # "max_tokens": {  # TODO: Verify max tokens for Gemini
            #     "type": "integer",
            #     "default": None,
            #     "min": 1,
            #     "max": 1048576,
            #     "description": "Maximum tokens in response"
            # },
            # "max_completion_tokens": {
            #     "type": "integer",
            #     "default": 65536,
            #     "min": 1,
            #     "max": 65536,
            #     "description": "Maximum completion tokens"
            # },
            # "tools": {
            #     "type": "list_choice",
            #     "default": [],
            #     "choices": ["googleSearch", "googleMaps", "urlContext", "codeExecution"],
            #     "description": "Gemini Tools to enable during generation"
            # },
            # "tool_choice": {  # if you don't want it calling tools, don't give it any tools...
            #     "type": "choice",
            #     "default": "auto",
            #     "choices": ["auto", "none"],
            #     "description": "Tool selection mode"
            # },

            # "response_format": {  # Need to set response_format: {"type": ...} in request body
            #     "type": "choice",
            #     "default": "text",
            #     "choices": ["text", "json_object"],
            #     "description": "Format of the response"
            # },
            # "n": {
            #     "type": "integer",
            #     "default": 1,
            #     "min": 1,
            #     "max": 5,
            #     "description": "Number of completions to generate"
            # },
            # "stop": {
            #     "type": "list_string",
            #     "default": [],
            #     "description": "Sequences where the API will stop generating."
            # },
            # "logprobs": {
            #     "type": "integer",
            #     "default": 0,
            #     "min": 0,
            #     "max": 5,
            #     "description": "Include logprobs on the logprobs most likely tokens"
            # },
            # "frequency_penalty": {
            #     "type": "float",
            #     "default": 0.0,
            #     "min": -2.0,
            #     "max": 2.0,
            #     "description": "Penalty for token repetition"
            # },
            # "modalities": {
            #     "type": "list_choice",
            #     "default": ["text"],
            #     "choices": ["text", "image", "video", "audio"],
            #     "description": "Modalities to include in generation"
            # },
            # "parallel_tool_calls": {
            #     "type": "boolean",
            #     "default": False,
            #     "description": "Enable parallel tool calls"
            # },
            # "web_search_options": {
            #     "type": "object",
            #     "default": {
            #         "num_results": 3,
            #         "region": "us",
            #         "language": "en"
            #     },
            #     "description": "Options for web search tool"
            # },
            "reasoning_effort": {
                "type": "choice",
                "default": "disable",
                "choices": ["disable", "low", "medium", "high"],
                "description": "Reasoning Effort: Effort level for reasoning during generation"
            },
        }
    },
    "gemini-2.5-flash": {
        "parameters": {
            "temperature": {
                "type": "float",
                "default": 1.0,
                "min": 0.0,
                "max": 2.0,
                "description": "Temperature - Controls randomness"
            },
            # "top_p": {
            #     "type": "float",
            #     "default": None,
            #     "min": 0.0,
            #     "max": 1.0,
            #     "description": "TopP - Nucleus sampling"
            # },
            # "max_tokens": {  # TODO: Verify max tokens for Gemini
            #     "type": "integer",
            #     "default": None,
            #     "min": 1,
            #     "max": 1048576,
            #     "description": "Maximum tokens in response"
            # },
            # "max_completion_tokens": {
            #     "type": "integer",
            #     "default": 65536,
            #     "min": 1,
            #     "max": 65536,
            #     "description": "Maximum completion tokens"
            # },
            # "tools": {
            #     "type": "list_choice",
            #     "default": [],
            #     "choices": ["googleSearch", "googleMaps", "urlContext", "codeExecution"],
            #     "description": "Gemini Tools to enable during generation"
            # },
            # "tool_choice": {  # if you don't want it calling tools, don't give it any tools...
            #     "type": "choice",
            #     "default": "auto",
            #     "choices": ["auto", "none"],
            #     "description": "Tool selection mode"
            # },

            # "response_format": {  # Need to set response_format: {"type": ...} in request body
            #     "type": "choice",
            #     "default": "text",
            #     "choices": ["text", "json_object"],
            #     "description": "Format of the response"
            # },
            # "n": {
            #     "type": "integer",
            #     "default": 1,
            #     "min": 1,
            #     "max": 5,
            #     "description": "Number of completions to generate"
            # },
            # "stop": {
            #     "type": "list_string",
            #     "default": [],
            #     "description": "Sequences where the API will stop generating."
            # },
            # "logprobs": {
            #     "type": "integer",
            #     "default": 0,
            #     "min": 0,
            #     "max": 5,
            #     "description": "Include logprobs on the logprobs most likely tokens"
            # },
            # "frequency_penalty": {
            #     "type": "float",
            #     "default": 0.0,
            #     "min": -2.0,
            #     "max": 2.0,
            #     "description": "Penalty for token repetition"
            # },
            # "modalities": {
            #     "type": "list_choice",
            #     "default": ["text"],
            #     "choices": ["text", "image", "video", "audio"],
            #     "description": "Modalities to include in generation"
            # },
            # "parallel_tool_calls": {
            #     "type": "boolean",
            #     "default": False,
            #     "description": "Enable parallel tool calls"
            # },
            # "web_search_options": {
            #     "type": "object",
            #     "default": {
            #         "num_results": 3,
            #         "region": "us",
            #         "language": "en"
            #     },
            #     "description": "Options for web search tool"
            # },
            "reasoning_effort": {
                "type": "choice",
                "default": "disable",
                "choices": ["disable", "low", "medium", "high"],
                "description": "Reasoning Effort: Effort level for reasoning during generation"
            },
        }
    },
    "gemini-2.5-flash-lite": {
        "parameters": {
            "temperature": {
                "type": "float",
                "default": 1.0,
                "min": 0.0,
                "max": 2.0,
                "description": "Temperature - Controls randomness"
            },
            # "top_p": {
            #     "type": "float",
            #     "default": None,
            #     "min": 0.0,
            #     "max": 1.0,
            #     "description": "TopP - Nucleus sampling"
            # },
            # "max_tokens": {  # TODO: Verify max tokens for Gemini
            #     "type": "integer",
            #     "default": None,
            #     "min": 1,
            #     "max": 1048576,
            #     "description": "Maximum tokens in response"
            # },
            # "max_completion_tokens": {
            #     "type": "integer",
            #     "default": 65536,
            #     "min": 1,
            #     "max": 65536,
            #     "description": "Maximum completion tokens"
            # },
            # "tools": {
            #     "type": "list_choice",
            #     "default": [],
            #     "choices": ["googleSearch", "googleMaps", "urlContext", "codeExecution"],
            #     "description": "Gemini Tools to enable during generation"
            # },
            # "tool_choice": {  # if you don't want it calling tools, don't give it any tools...
            #     "type": "choice",
            #     "default": "auto",
            #     "choices": ["auto", "none"],
            #     "description": "Tool selection mode"
            # },

            # "response_format": {  # Need to set response_format: {"type": ...} in request body
            #     "type": "choice",
            #     "default": "text",
            #     "choices": ["text", "json_object"],
            #     "description": "Format of the response"
            # },
            # "n": {
            #     "type": "integer",
            #     "default": 1,
            #     "min": 1,
            #     "max": 5,
            #     "description": "Number of completions to generate"
            # },
            # "stop": {
            #     "type": "list_string",
            #     "default": [],
            #     "description": "Sequences where the API will stop generating."
            # },
            # "logprobs": {
            #     "type": "integer",
            #     "default": 0,
            #     "min": 0,
            #     "max": 5,
            #     "description": "Include logprobs on the logprobs most likely tokens"
            # },
            # "frequency_penalty": {
            #     "type": "float",
            #     "default": 0.0,
            #     "min": -2.0,
            #     "max": 2.0,
            #     "description": "Penalty for token repetition"
            # },
            # "modalities": {
            #     "type": "list_choice",
            #     "default": ["text"],
            #     "choices": ["text", "image", "video", "audio"],
            #     "description": "Modalities to include in generation"
            # },
            # "parallel_tool_calls": {
            #     "type": "boolean",
            #     "default": False,
            #     "description": "Enable parallel tool calls"
            # },
            # "web_search_options": {
            #     "type": "object",
            #     "default": {
            #         "num_results": 3,
            #         "region": "us",
            #         "language": "en"
            #     },
            #     "description": "Options for web search tool"
            # },
            "reasoning_effort": {
                "type": "choice",
                "default": "disable",
                "choices": ["disable", "low", "medium", "high"],
                "description": "Reasoning Effort: Effort level for reasoning during generation"
            },
        }
    }
}
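Only temperature and reasoning_effort are active for each of the three models at this point; the remaining OpenAI-style parameters are kept as commented-out placeholders pending verification. As a rough illustration, a hypothetical helper (not part of the commit) could read per-model defaults out of this dict:

# Hypothetical helper showing one way the models dict above might be consumed,
# e.g. to pre-populate the composer UI with each model's default parameters.
def default_parameters(model_name: str) -> dict:
    spec = models[model_name]["parameters"]
    return {name: meta["default"] for name, meta in spec.items()}

print(default_parameters("gemini-2.5-flash"))
# {'temperature': 1.0, 'reasoning_effort': 'disable'}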

webapp/packages/api/user-service/config/provider_config.py

Lines changed: 2 additions & 0 deletions

@@ -1,4 +1,6 @@
 
+from .gemini import models as gemini_models
+
 PROVIDER_CONFIG = {
     "openai": {
         "api_key_env_var": "OPENAI_API_KEY",
