-
Notifications
You must be signed in to change notification settings - Fork 539
Multiple LLM model selection in DashBot #704
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 1 commit
8caa03c
c15e819
87b4d03
5bccded
08487c4
157969f
19da75e
906843f
a440308
e219891
b01b1ba
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,24 +1,64 @@ | ||
import 'package:apidash/dashbot/features/debug.dart'; | ||
import 'package:ollama_dart/ollama_dart.dart'; | ||
import 'package:openai_dart/openai_dart.dart'; | ||
import 'package:flutter_gemini/flutter_gemini.dart'; | ||
import '../../consts.dart'; | ||
import '../features/explain.dart'; | ||
import 'package:apidash/models/request_model.dart'; | ||
|
||
|
||
class DashBotService { | ||
final OllamaClient _client; | ||
late final OllamaClient _ollamaClient; | ||
late final OpenAIClient _openAiClient; | ||
late final ExplainFeature _explainFeature; | ||
late final DebugFeature _debugFeature; | ||
|
||
LLMProvider _selectedModel = LLMProvider.ollama; | ||
|
||
DashBotService() | ||
: _client = OllamaClient(baseUrl: 'http://127.0.0.1:11434/api') { | ||
: _ollamaClient = OllamaClient(baseUrl: 'http://127.0.0.1:11434/api'), | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer note: Add a provision to change the base URL via LLMProvider settings. |
||
//TODO: Add API key to .env file | ||
_openAiClient = OpenAIClient(apiKey: "your_openai_api_key") { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer note: Add a provision to supply the API key via LLMProvider settings. |
||
_explainFeature = ExplainFeature(this); | ||
_debugFeature = DebugFeature(this); | ||
} | ||
|
||
void setModel(LLMProvider model) { | ||
_selectedModel = model; | ||
} | ||
|
||
Future<String> generateResponse(String prompt) async { | ||
final response = await _client.generateCompletion( | ||
request: GenerateCompletionRequest(model: 'llama3.2:3b', prompt: prompt), | ||
); | ||
return response.response.toString(); | ||
try { | ||
switch (_selectedModel) { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer note: `LLMProvider` is a provider, not a model — the variable should not be named `_selectedModel`. |
||
case LLMProvider.gemini: | ||
final response = await Gemini.instance.chat([ | ||
Content(parts: [Part.text(prompt)], role: 'user') | ||
]); | ||
return response?.output ?? "Error: No response from Gemini."; | ||
|
||
case LLMProvider.ollama: | ||
final response = await _ollamaClient.generateCompletion( | ||
request: GenerateCompletionRequest(model: 'llama3.2:3b', prompt: prompt), | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer note: `llama3.2:3b` is a hard-coded model name. Add a provision to change it via LLMProvider settings, based on the list of models installed on the system. |
||
); | ||
return response.response.toString(); | ||
|
||
case LLMProvider.openai: | ||
final response = await _openAiClient.createChatCompletion( | ||
request: CreateChatCompletionRequest( | ||
model: ChatCompletionModel.modelId('gpt-4o'), | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer note: `gpt-4o` is a hard-coded model name. Add a provision to change it via LLMProvider settings, based on the list of available OpenAI models. |
||
messages: [ | ||
ChatCompletionMessage.user( | ||
content: ChatCompletionUserMessageContent.string(prompt), | ||
), | ||
], | ||
temperature: 0, | ||
), | ||
); | ||
return response.choices?.first.message?.content ?? "Error: No response from OpenAI."; | ||
} | ||
} catch (e) { | ||
return "Error: ${e.toString()}"; | ||
} | ||
} | ||
|
||
Future<String> handleRequest( | ||
|
@@ -33,4 +73,4 @@ class DashBotService { | |
|
||
return generateResponse(input); | ||
} | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,6 @@ | ||
import 'package:apidash_design_system/apidash_design_system.dart'; | ||
import 'package:flutter/material.dart'; | ||
import 'package:flutter_gemini/flutter_gemini.dart'; | ||
import 'package:flutter_riverpod/flutter_riverpod.dart'; | ||
import 'models/models.dart'; | ||
import 'providers/providers.dart'; | ||
|
@@ -9,6 +10,8 @@ import 'app.dart'; | |
|
||
void main() async { | ||
WidgetsFlutterBinding.ensureInitialized(); | ||
//TODO: Add API key to .env file | ||
Gemini.init(apiKey: "apiKey"); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer note: `Gemini.init` must not be called until the user has added an API key via LLMProvider settings. |
||
var settingsModel = await getSettingsFromSharedPrefs(); | ||
final initStatus = await initApp( | ||
kIsDesktop, | ||
|
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Each LLMProvider should have some sort of setting where the user can click on a small settings button to change the parameters like API URL, model, etc.