@@ -122,22 +122,43 @@ def select_research_depth() -> int:
     return choice


-def select_shallow_thinking_agent() -> str:
+def select_shallow_thinking_agent(provider) -> str:
     """Select shallow thinking llm engine using an interactive selection."""

     # Define shallow thinking llm engine options with their corresponding model names
-    SHALLOW_AGENT_OPTIONS = [
-        ("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
-        ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
-        ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
-        ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
-    ]
+    SHALLOW_AGENT_OPTIONS = {
+        "openai": [
+            ("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
+            ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
+            ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
+            ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
+        ],
+        "anthropic": [
+            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
+            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
+            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
+            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
+        ],
+        "google": [
+            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
+            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
+            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
+        ],
+        "openrouter": [
+            ("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
+            ("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
+            ("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"),
+        ],
+        "ollama": [
+            ("llama3.2 local", "llama3.2"),
+        ]
+    }

     choice = questionary.select(
         "Select Your [Quick-Thinking LLM Engine]:",
         choices=[
             questionary.Choice(display, value=value)
-            for display, value in SHALLOW_AGENT_OPTIONS
+            for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(
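
Note: the new comprehension above indexes the options table directly with SHALLOW_AGENT_OPTIONS[provider.lower()], so an unrecognized provider string surfaces as a bare KeyError. A minimal defensive lookup (illustrative only, not part of this diff; the error message text is an assumption) could be:

    options = SHALLOW_AGENT_OPTIONS.get(provider.lower())
    if options is None:
        console.print(f"\n[red]Unsupported provider: {provider}. Exiting...[/red]")
        exit(1)

The same guard would apply to DEEP_AGENT_OPTIONS in the next hunk.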
@@ -158,25 +179,47 @@ def select_shallow_thinking_agent() -> str:
     return choice


-def select_deep_thinking_agent() -> str:
+def select_deep_thinking_agent(provider) -> str:
     """Select deep thinking llm engine using an interactive selection."""

     # Define deep thinking llm engine options with their corresponding model names
-    DEEP_AGENT_OPTIONS = [
-        ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
-        ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
-        ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
-        ("o4-mini - Specialized reasoning model (compact)", "o4-mini"),
-        ("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
-        ("o3 - Full advanced reasoning model", "o3"),
-        ("o1 - Premier reasoning and problem-solving model", "o1"),
-    ]
-
+    DEEP_AGENT_OPTIONS = {
+        "openai": [
+            ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
+            ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
+            ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
+            ("o4-mini - Specialized reasoning model (compact)", "o4-mini"),
+            ("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
+            ("o3 - Full advanced reasoning model", "o3"),
+            ("o1 - Premier reasoning and problem-solving model", "o1"),
+        ],
+        "anthropic": [
+            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
+            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
+            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
+            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
+            ("Claude Opus 4 - Most powerful Anthropic model", "claude-opus-4-0"),
+        ],
+        "google": [
+            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
+            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
+            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
+            ("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
+        ],
+        "openrouter": [
+            ("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
+            ("DeepSeek - latest iteration of the flagship chat model family from the DeepSeek team", "deepseek/deepseek-chat-v3-0324:free"),
+        ],
+        "ollama": [
+            ("qwen3", "qwen3"),
+        ]
+    }
+
     choice = questionary.select(
         "Select Your [Deep-Thinking LLM Engine]:",
         choices=[
             questionary.Choice(display, value=value)
-            for display, value in DEEP_AGENT_OPTIONS
+            for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(
@@ -193,3 +236,39 @@ def select_deep_thinking_agent() -> str:
         exit(1)

     return choice
+
+def select_llm_provider() -> tuple[str, str]:
+    """Select the LLM provider and its API endpoint using an interactive selection."""
+    # Define provider options with their corresponding API endpoints
+    BASE_URLS = [
+        ("OpenAI", "https://api.openai.com/v1"),
+        ("Anthropic", "https://api.anthropic.com/"),
+        ("Google", "https://generativelanguage.googleapis.com/v1"),
+        ("Openrouter", "https://openrouter.ai/api/v1"),
+        ("Ollama", "http://localhost:11434/v1"),
+    ]
+
+    choice = questionary.select(
+        "Select your LLM Provider:",
+        choices=[
+            questionary.Choice(display, value=(display, value))
+            for display, value in BASE_URLS
+        ],
+        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
+        style=questionary.Style(
+            [
+                ("selected", "fg:magenta noinherit"),
+                ("highlighted", "fg:magenta noinherit"),
+                ("pointer", "fg:magenta noinherit"),
+            ]
+        ),
+    ).ask()
+
+    if choice is None:
+        console.print("\n[red]No LLM provider selected. Exiting...[/red]")
+        exit(1)
+
+    display_name, url = choice
+    print(f"You selected: {display_name}\tURL: {url}")
+
+    return display_name, url
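
The call sites are not part of this diff; a plausible wiring from the CLI entry point (hypothetical, assuming the three selectors are invoked in sequence) would be:

    # Pick the provider first, then key both engine menus off the same provider string.
    provider, backend_url = select_llm_provider()
    shallow_llm = select_shallow_thinking_agent(provider)
    deep_llm = select_deep_thinking_agent(provider)
    # backend_url would presumably be passed on to the LLM client configuration.
    print(f"{provider} @ {backend_url}: quick={shallow_llm}, deep={deep_llm}")

Since select_llm_provider returns the display name (e.g. "OpenAI"), the .lower() calls in the two selectors normalize it before the dictionary lookup.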