1919
2020
# Maps each supported LLM provider's display name to the environment
# variable that is expected to hold its API key.
# NOTE(review): the scraped source showed trailing spaces inside the
# provider-name strings (e.g. "Google ") — assumed to be an extraction
# artifact; confirm the keys match the provider names the frontend sends.
PROVIDER_ENV_VAR_MAP = {
    "Google": "GOOGLE_API_KEY",
    "Anthropic": "ANTHROPIC_API_KEY",
    "OpenAI": "OPENAI_API_KEY",
    "xAI": "XAI_API_KEY",
    "Glama": "GLAMA_API_KEY",
}
2828
2929PROVIDER_API_CONFIG = {
30- "google " : {
30+ "Google " : {
3131 "url_template" : "https://generativelanguage.googleapis.com/v1beta/models?key={api_key}" ,
3232 "method" : "GET" ,
3333 "auth_type" : "query_param" , # Key goes in URL
3434 },
35- "anthropic " : {
35+ "Anthropic " : {
3636 "url" : "https://api.anthropic.com/v1/models" ,
3737 "method" : "GET" ,
3838 "auth_type" : "header" ,
4242 "Content-Type" : "application/json" ,
4343 }
4444 },
45- "chatgpt " : { # OpenAI
45+ "OpenAI " : { # OpenAI
4646 "url" : "https://api.openai.com/v1/models" ,
4747 "method" : "GET" ,
4848 "auth_type" : "bearer" , # Authorization: Bearer {api_key}
4949 },
50- "grok " : { # xAI
50+ "xAI " : { # xAI
5151 "url" : "https://api.x.ai/v1/models" , # Updated to correct xAI URL
5252 "method" : "GET" ,
5353 "auth_type" : "bearer" ,
5454 },
55- "glama " : {
55+ "Glama " : {
5656 "url" : "https://glama.ai/api/gateway/openai/v1/models" , # Glama's specific endpoint
5757 "method" : "GET" ,
5858 "auth_type" : "bearer" ,
@@ -95,7 +95,7 @@ def fetch_and_format_models(provider: str, api_key: str) -> List[Dict[str, str]]
9595
9696 # Parse the response based on provider structure
9797 models_data = []
98- if provider == "google " :
98+ if provider == "Google " :
9999 models_data = data .get ("models" , [])
100100 # Filter and map Google models
101101 formatted_models = [
@@ -106,7 +106,7 @@ def fetch_and_format_models(provider: str, api_key: str) -> List[Dict[str, str]]
106106 for m in models_data
107107 if m .get ("name" ) and "generateContent" in m .get ("supportedGenerationMethods" , [])
108108 ]
109- elif provider == "anthropic " :
109+ elif provider == "Anthropic " :
110110 models_data = data .get ("data" , [])
111111 # Map Anthropic models
112112 formatted_models = [
@@ -189,26 +189,26 @@ def get_models():
189189
190190def create_llm_provider (provider_name : str , api_key : str ):
191191 """Factory function to create LLM provider instances."""
192- if provider_name == "google " :
192+ if provider_name == "Google " :
193193 if not GeminiProvider .validate_api_key (api_key ):
194194 raise ValueError ("Invalid Google API Key" )
195195 return GeminiProvider (api_key )
196- elif provider_name == "chatgpt " : # Assuming maps to OpenAIProvider
196+ elif provider_name == "OpenAI " : # Assuming maps to OpenAIProvider
197197 # Make sure OpenAIProvider exists and follows the pattern
198198 if not OpenAIProvider .validate_api_key (api_key ):
199199 raise ValueError ("Invalid OpenAI API Key" )
200200 return OpenAIProvider (api_key )
201- elif provider_name == "grok " :
201+ elif provider_name == "xAI " :
202202 # Make sure XAIProvider exists and follows the pattern
203203 if not XAIProvider .validate_api_key (api_key ):
204204 raise ValueError ("Invalid xAI API Key" )
205205 return XAIProvider (api_key )
206- elif provider_name == "anthropic " :
206+ elif provider_name == "Anthropic " :
207207 # Make sure AnthropicProvider exists and follows the pattern
208208 if not AnthropicProvider .validate_api_key (api_key ):
209209 raise ValueError ("Invalid Anthropic API Key" )
210210 return AnthropicProvider (api_key )
211- elif provider_name == "glama " : # <-- Add Glama case
211+ elif provider_name == "Glama " : # <-- Add Glama case
212212 if not GlamaProvider .validate_api_key (api_key ):
213213 raise ValueError ("Invalid Glama API Key" )
214214 return GlamaProvider (api_key )
@@ -243,7 +243,7 @@ def chat():
243243
244244 # Handle potential screenshot (currently only for Google Gemini)
245245 image_part = None
246- if screenshot_base64 and provider_name == "google " :
246+ if screenshot_base64 and provider_name == "Google " :
247247 try :
248248 # Ensure correct padding for base64
249249 screenshot_data = screenshot_base64 .split (',' , 1 )[1 ]
@@ -265,13 +265,13 @@ def chat():
265265 return jsonify ({"error" : f"Invalid message format at index { i } : { message } " }), 400
266266
267267 # Adjust roles if needed (e.g., Gemini specific adjustments)
268- if provider_name == "google " :
268+ if provider_name == "Google " :
269269 # Gemini uses 'user' and 'model'. Map 'system' to 'user'.
270270 # The first message can often be 'system', treat it as 'user'.
271271 # Subsequent system messages might need careful handling depending on context.
272272 # Let's map system -> user for simplicity here.
273273 target_role = 'user' if role in ['user' , 'system' ] else 'model'
274- elif provider_name == "glama " or provider_name == "chatgpt " :
274+ elif provider_name == "Glama " or provider_name == "OpenAI " :
275275 # OpenAI/Glama typically use 'user', 'assistant', 'system'
276276 # Keep roles as they are, assuming frontend sends compatible roles
277277 target_role = role
@@ -285,10 +285,10 @@ def chat():
285285 message_dict = {"role" : target_role }
286286
287287 # Add image part to the *last* message if it exists (Gemini logic)
288- if image_part and i == len (messages_input ) - 1 and provider_name == "google " :
288+ if image_part and i == len (messages_input ) - 1 and provider_name == "Google " :
289289 message_dict ["parts" ] = [{"text" : content }, image_part ]
290290 else :
291- if provider_name == "google " :
291+ if provider_name == "Google " :
292292 # Gemini needs parts structure
293293 message_dict ["parts" ] = [{"text" : content }]
294294 # If image is present and it's the last message, add it
@@ -325,7 +325,7 @@ def chat():
325325
326326 # --- Provider Instantiation and Call ---
327327 # Check for Glama required option *before* creating provider
328- if provider_name == "glama " and not options .get ('model' ):
328+ if provider_name == "Glama " and not options .get ('model' ):
329329 return jsonify ({"error" : "Missing 'model' in options for Glama provider" }), 400
330330
331331 llm_provider = create_llm_provider (provider_name , api_key )
0 commit comments