Commit d7cdd9b

Fix naming of the LLM providers
1 parent f93e880 commit d7cdd9b

File tree

- backend/main.py
- provisioning/dashboards/dashboard.json
- src/module.ts

3 files changed: +36 -36 lines changed


backend/main.py

Lines changed: 23 additions & 23 deletions
@@ -19,20 +19,20 @@
 
 
 PROVIDER_ENV_VAR_MAP = {
-    "google": "GOOGLE_API_KEY",
-    "anthropic": "ANTHROPIC_API_KEY",
-    "chatgpt": "OPENAI_API_KEY",
-    "grok": "XAI_API_KEY",
-    "glama": "GLAMA_API_KEY",
+    "Google": "GOOGLE_API_KEY",
+    "Anthropic": "ANTHROPIC_API_KEY",
+    "OpenAI": "OPENAI_API_KEY",
+    "xAI": "XAI_API_KEY",
+    "Glama": "GLAMA_API_KEY",
 }
 
 PROVIDER_API_CONFIG = {
-    "google": {
+    "Google": {
         "url_template": "https://generativelanguage.googleapis.com/v1beta/models?key={api_key}",
         "method": "GET",
         "auth_type": "query_param", # Key goes in URL
     },
-    "anthropic": {
+    "Anthropic": {
         "url": "https://api.anthropic.com/v1/models",
         "method": "GET",
         "auth_type": "header",
@@ -42,17 +42,17 @@
             "Content-Type": "application/json",
         }
     },
-    "chatgpt": { # OpenAI
+    "OpenAI": { # OpenAI
         "url": "https://api.openai.com/v1/models",
         "method": "GET",
         "auth_type": "bearer", # Authorization: Bearer {api_key}
     },
-    "grok": { # xAI
+    "xAI": { # xAI
         "url": "https://api.x.ai/v1/models", # Updated to correct xAI URL
         "method": "GET",
         "auth_type": "bearer",
     },
-    "glama": {
+    "Glama": {
         "url": "https://glama.ai/api/gateway/openai/v1/models", # Glama's specific endpoint
         "method": "GET",
         "auth_type": "bearer",
@@ -95,7 +95,7 @@ def fetch_and_format_models(provider: str, api_key: str) -> List[Dict[str, str]]
 
     # Parse the response based on provider structure
     models_data = []
-    if provider == "google":
+    if provider == "Google":
         models_data = data.get("models", [])
         # Filter and map Google models
         formatted_models = [
@@ -106,7 +106,7 @@ def fetch_and_format_models(provider: str, api_key: str) -> List[Dict[str, str]]
             for m in models_data
             if m.get("name") and "generateContent" in m.get("supportedGenerationMethods", [])
         ]
-    elif provider == "anthropic":
+    elif provider == "Anthropic":
         models_data = data.get("data", [])
         # Map Anthropic models
        formatted_models = [
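These two comparisons are the only provider-specific branches in the model-list parser. The payload shapes they rely on look roughly like the abridged examples below; the concrete model names and the label/value output format are illustrative assumptions based on how the panel builds its model dropdown.

# Abridged shape the "Google" branch expects from the models endpoint:
google_data = {
    "models": [
        {"name": "models/gemini-1.5-pro", "supportedGenerationMethods": ["generateContent"]},
        {"name": "models/embedding-001", "supportedGenerationMethods": ["embedContent"]},  # filtered out
    ]
}

# Abridged shape the "Anthropic" branch expects:
anthropic_data = {
    "data": [
        {"id": "claude-3-5-sonnet-latest", "display_name": "Claude 3.5 Sonnet"},
    ]
}

# Both branches flatten into a list the panel can render,
# e.g. [{"label": "...", "value": "..."}] (exact field names assumed).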
@@ -189,26 +189,26 @@ def get_models():
 
 def create_llm_provider(provider_name: str, api_key: str):
     """Factory function to create LLM provider instances."""
-    if provider_name == "google":
+    if provider_name == "Google":
         if not GeminiProvider.validate_api_key(api_key):
             raise ValueError("Invalid Google API Key")
         return GeminiProvider(api_key)
-    elif provider_name == "chatgpt": # Assuming maps to OpenAIProvider
+    elif provider_name == "OpenAI": # Assuming maps to OpenAIProvider
         # Make sure OpenAIProvider exists and follows the pattern
         if not OpenAIProvider.validate_api_key(api_key):
             raise ValueError("Invalid OpenAI API Key")
         return OpenAIProvider(api_key)
-    elif provider_name == "grok":
+    elif provider_name == "xAI":
         # Make sure XAIProvider exists and follows the pattern
         if not XAIProvider.validate_api_key(api_key):
             raise ValueError("Invalid xAI API Key")
         return XAIProvider(api_key)
-    elif provider_name == "anthropic":
+    elif provider_name == "Anthropic":
         # Make sure AnthropicProvider exists and follows the pattern
         if not AnthropicProvider.validate_api_key(api_key):
             raise ValueError("Invalid Anthropic API Key")
         return AnthropicProvider(api_key)
-    elif provider_name == "glama": # <-- Add Glama case
+    elif provider_name == "Glama": # <-- Add Glama case
         if not GlamaProvider.validate_api_key(api_key):
             raise ValueError("Invalid Glama API Key")
         return GlamaProvider(api_key)
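Since the frontend now sends these capitalized values verbatim, every string compared in the factory must use the same spelling. An equivalent, more compact formulation (a sketch only, not part of this commit) keeps the names in one table so a future rename touches a single place:

# Sketch only: same behaviour as the if/elif chain above.
PROVIDER_CLASSES = {
    "Google": GeminiProvider,
    "OpenAI": OpenAIProvider,
    "xAI": XAIProvider,
    "Anthropic": AnthropicProvider,
    "Glama": GlamaProvider,
}

def create_llm_provider(provider_name: str, api_key: str):
    """Factory function to create LLM provider instances."""
    provider_cls = PROVIDER_CLASSES.get(provider_name)
    if provider_cls is None:
        raise ValueError(f"Unsupported provider: {provider_name}")
    if not provider_cls.validate_api_key(api_key):
        raise ValueError(f"Invalid {provider_name} API Key")
    return provider_cls(api_key)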
@@ -243,7 +243,7 @@ def chat():
 
     # Handle potential screenshot (currently only for Google Gemini)
     image_part = None
-    if screenshot_base64 and provider_name == "google":
+    if screenshot_base64 and provider_name == "Google":
         try:
             # Ensure correct padding for base64
             screenshot_data = screenshot_base64.split(',', 1)[1]
@@ -265,13 +265,13 @@ def chat():
             return jsonify({"error": f"Invalid message format at index {i}: {message}"}), 400
 
         # Adjust roles if needed (e.g., Gemini specific adjustments)
-        if provider_name == "google":
+        if provider_name == "Google":
             # Gemini uses 'user' and 'model'. Map 'system' to 'user'.
             # The first message can often be 'system', treat it as 'user'.
             # Subsequent system messages might need careful handling depending on context.
             # Let's map system -> user for simplicity here.
             target_role = 'user' if role in ['user', 'system'] else 'model'
-        elif provider_name == "glama" or provider_name == "chatgpt":
+        elif provider_name == "Glama" or provider_name == "OpenAI":
             # OpenAI/Glama typically use 'user', 'assistant', 'system'
             # Keep roles as they are, assuming frontend sends compatible roles
             target_role = role
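The role adjustment above depends only on the provider name and the incoming role. Pulled out on its own it amounts to roughly the following sketch; the pass-through for providers not shown in this hunk is an assumption.

def map_role(provider_name: str, role: str) -> str:
    # Sketch of the inline role adjustment in chat().
    if provider_name == "Google":
        # Gemini only understands 'user' and 'model'; fold 'system' into 'user'.
        return 'user' if role in ('user', 'system') else 'model'
    # OpenAI and Glama (and, assumed here, the remaining providers) keep roles as-is.
    return role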
@@ -285,10 +285,10 @@ def chat():
         message_dict = {"role": target_role}
 
         # Add image part to the *last* message if it exists (Gemini logic)
-        if image_part and i == len(messages_input) - 1 and provider_name == "google":
+        if image_part and i == len(messages_input) - 1 and provider_name == "Google":
             message_dict["parts"] = [{"text": content}, image_part]
         else:
-            if provider_name == "google":
+            if provider_name == "Google":
                 # Gemini needs parts structure
                 message_dict["parts"] = [{"text": content}]
                 # If image is present and it's the last message, add it
@@ -325,7 +325,7 @@ def chat():
 
     # --- Provider Instantiation and Call ---
     # Check for Glama required option *before* creating provider
-    if provider_name == "glama" and not options.get('model'):
+    if provider_name == "Glama" and not options.get('model'):
         return jsonify({"error": "Missing 'model' in options for Glama provider"}), 400
 
     llm_provider = create_llm_provider(provider_name, api_key)
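End to end, a chat request now carries the capitalized provider name from the panel options all the way into create_llm_provider. The payload below is purely illustrative: the route and field names are assumptions, and only the provider values and the Glama 'model' requirement follow from this diff.

import requests

# Hypothetical request; field names and the /chat route are assumptions.
payload = {
    "provider": "Glama",                    # was "glama" before this commit
    "apiKey": "",                           # blank -> backend may fall back to env vars
    "messages": [{"role": "user", "content": "Summarize the last hour of metrics."}],
    "options": {"model": "some-model-id"},  # required when the provider is "Glama"
}
response = requests.post("http://localhost:5000/chat", json=payload, timeout=30)
print(response.status_code, response.json())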

provisioning/dashboards/dashboard.json

Lines changed: 2 additions & 2 deletions
@@ -39,12 +39,12 @@
       "id": 1,
       "options": {
         "apiKey": "",
-        "backendAddr": "localhost",
+        "backendAddr": "http://localhost:5000",
         "context": "",
         "controlEndpointHeaders": "",
         "controlEndpointMethod": "POST",
        "controlEndpointUrl": "",
-        "llmProvider": "google",
+        "llmProvider": "Google",
         "model": ""
       },
       "pluginVersion": "1.0.0",

src/module.ts

Lines changed: 11 additions & 11 deletions
@@ -11,7 +11,7 @@ const fetchModelsFromBackend = async (
 
   // Basic validation before calling backend
   if (!provider) { return [{ label: 'Select a provider', value: '' }]; }
-  if (!apiKey) { return [{ label: 'Enter API Key', value: '' }]; }
+  // if (!apiKey) { return [{ label: 'Enter API Key', value: '' }]; } No longer required as backend handles it
   if (!backendAddr) { return [{ label: 'Enter Backend Address', value: '', description: 'Required to fetch models' }]; }
 
   // Construct URL to your backend's new /models endpoint
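Commenting out the apiKey check assumes the backend can supply a key on its own. A minimal sketch of that fallback on the Python side, assuming it consults PROVIDER_ENV_VAR_MAP from backend/main.py (the helper name is hypothetical):

import os

def resolve_api_key(provider_name: str, key_from_request: str) -> str:
    # Hypothetical helper: prefer a key sent by the panel, else read the env var.
    if key_from_request:
        return key_from_request
    env_var = PROVIDER_ENV_VAR_MAP.get(provider_name, "")  # e.g. "Google" -> "GOOGLE_API_KEY"
    return os.environ.get(env_var, "")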
@@ -71,34 +71,34 @@ const fetchModelsFromBackend = async (
 };
 
 export const plugin = new PanelPlugin<SimpleOptions>(LLMPanel).setPanelOptions((builder, context) => {
-  const providersWithModels = ['glama', 'google', 'chatgpt', 'grok', 'anthropic'];
+  const providersWithModels = ['Glama', 'Google', 'OpenAI', 'xAI', 'Anthropic'];
 
   return builder
     .addTextInput({
       path: 'backendAddr',
       name: 'Backend Address',
       description: 'Enter the address of the backend server. (e.g, http://localhost:5000)',
-      defaultValue: 'localhost', // Default to localhost
+      defaultValue: 'http://localhost:5000',
     })
     .addSelect({
       path: 'llmProvider',
       name: 'API Provider',
       description: 'Select the API Provider',
       settings: {
         options: [
-          { label: 'Glama', value: 'glama' },
-          { label: 'xAI (Grok)', value: 'grok' },
-          { label: 'OpenAI', value: 'chatgpt' },
-          { label: 'Google', value: 'google' },
-          { label: 'Anthropic', value: 'anthropic' },
+          { label: 'Glama', value: 'Glama' },
+          { label: 'xAI (Grok)', value: 'xAI' },
+          { label: 'OpenAI', value: 'OpenAI' },
+          { label: 'Google', value: 'Google' },
+          { label: 'Anthropic', value: 'Anthropic' },
         ],
       },
-      defaultValue: 'google',
+      defaultValue: 'Google',
     })
     .addTextInput({
       path: 'apiKey',
-      name: 'API Key',
-      description: 'Enter your API key',
+      name: 'API Key (Optional)',
+      description: 'Leave blank to use the environment variables in the backend (Recommended). Keys entered here are not secure and are accessible by other users of the dashboard. Only use for testing purposes.',
       defaultValue: '',
       settings: {
         secure: true,
