44from langchain .prompts import PromptTemplate
55from langchain_core .output_parsers import StrOutputParser
66
# Canonical identifiers for each supported LLM backend, plus the mapping
# from command-line flag to identifier (consumed by get_llm_from_argv).

LLM_GPT35 = "GPT3.5"
LLM_GPT4 = "GPT4"
LLM_GPT4o = "GPT4o"

LLM_LLAMA2 = "LLAMA2"
LLM_GEMINI = "gemini-2.0-flash"
LLM_AI21 = "AI21"
LLM_CLAUDE37 = "claude-3-7-sonnet-20250219"
LLM_COHERE = "Cohere"

# Locally served models; the text after the "Ollama:" prefix is the
# model tag handed to the Ollama backend.
LLM_OLLAMA_LLAMA32 = "Ollama:llama3.2"
LLM_OLLAMA_LLAMA32_1B = "Ollama:llama3.2:1b"
LLM_OLLAMA_MISTRAL = "Ollama:mistral"
LLM_OLLAMA_TINYLLAMA = "Ollama:tinyllama"
LLM_OLLAMA_PHI3 = "Ollama:phi3:latest"
LLM_OLLAMA_PHI4 = "Ollama:phi4:latest"
LLM_OLLAMA_GEMMA = "Ollama:gemma:7b"
LLM_OLLAMA_GEMMA2 = "Ollama:gemma2:latest"
LLM_OLLAMA_GEMMA3 = "Ollama:gemma3:4b"
LLM_OLLAMA_DEEPSEEK = "Ollama:deepseek-r1:7b"
LLM_OLLAMA_QWEN = "Ollama:qwen:4b"
LLM_OLLAMA_CODELLAMA = "Ollama:codellama:latest"  # no CLI flag defined
LLM_OLLAMA_FALCON2 = "Ollama:falcon2:latest"  # no CLI flag defined

# Flag -> model-version table as a single dict literal instead of sixteen
# item assignments interleaved with the constants above; same key/value
# pairs and same insertion order as the incremental version.
LLM_CMD_LINE_ARGS = {
    "-l": LLM_LLAMA2,
    "-g": LLM_GEMINI,
    "-a": LLM_AI21,
    "-c": LLM_CLAUDE37,
    "-co": LLM_COHERE,
    "-o-l32": LLM_OLLAMA_LLAMA32,
    "-o-l321b": LLM_OLLAMA_LLAMA32_1B,
    "-o-m": LLM_OLLAMA_MISTRAL,
    "-o-t": LLM_OLLAMA_TINYLLAMA,
    "-o-phi3": LLM_OLLAMA_PHI3,
    "-o-phi4": LLM_OLLAMA_PHI4,
    "-ge": LLM_OLLAMA_GEMMA,
    "-ge2": LLM_OLLAMA_GEMMA2,
    "-ge3": LLM_OLLAMA_GEMMA3,
    "-o-dsr1": LLM_OLLAMA_DEEPSEEK,
    "-qw": LLM_OLLAMA_QWEN,
}
@@ -99,17 +118,14 @@ def get_cohere_key():
99118
100119
101120def get_llm (llm_ver , temperature ):
102- print (f"Debug: get_llm received { llm_ver } " )
103121 if llm_ver == LLM_GPT35 :
104- print ("Debug: Using GPT-3.5" )
105122 from langchain_openai import OpenAI
106123
107124 return OpenAI (temperature = temperature , openai_api_key = get_openai_api_key ())
108125
109126 elif llm_ver == LLM_GPT4 :
110127 from langchain_openai import ChatOpenAI
111128
112- print ("Debug: Using GPT-4" )
113129 return ChatOpenAI (
114130 model_name = "gpt-4" ,
115131 openai_api_key = get_openai_api_key (),
@@ -118,7 +134,6 @@ def get_llm(llm_ver, temperature):
118134 elif llm_ver == LLM_GPT4o :
119135 from langchain_openai import ChatOpenAI
120136
121- print ("Debug: Using GPT-4o" )
122137 return ChatOpenAI (
123138 model_name = "gpt-4o" ,
124139 openai_api_key = get_openai_api_key (),
@@ -144,7 +159,6 @@ def get_llm(llm_ver, temperature):
144159 elif llm_ver == LLM_GEMINI :
145160 from langchain_google_genai import ChatGoogleGenerativeAI
146161
147- print ("Debug: Using Gemini Flash" )
148162 return ChatGoogleGenerativeAI (
149163 model = "gemini-2.0-flash" ,
150164 google_api_key = get_gemini_api_key (), # Retrieve API key
@@ -159,7 +173,6 @@ def get_llm(llm_ver, temperature):
159173 elif llm_ver == LLM_CLAUDE37 :
160174 from langchain_anthropic import ChatAnthropic
161175
162- print ("Debug: Using Claude 3.7 Sonnet" )
163176 return ChatAnthropic (
164177 model_name = "claude-3-7-sonnet-20250219" ,
165178 anthropic_api_key = get_anthropic_key (), # Retrieve API key
@@ -171,75 +184,23 @@ def get_llm(llm_ver, temperature):
171184
172185 llm = ChatCohere ()
173186
174- elif llm_ver == LLM_OLLAMA_LLAMA32_1B :
175- from langchain_ollama .llms import OllamaLLM
176-
177- llm = OllamaLLM (model = "llama3.2:1b" )
178-
179- elif llm_ver == LLM_OLLAMA_MISTRAL :
180- from langchain_ollama .llms import OllamaLLM
181-
182- llm = OllamaLLM (model = "mistral" )
183-
184- elif llm_ver == LLM_OLLAMA_TINYLLAMA :
185- from langchain_ollama .llms import OllamaLLM
186-
187- llm = OllamaLLM (model = "tinyllama" )
188-
189- elif llm_ver == LLM_OLLAMA_PHI3 :
190- from langchain_ollama .llms import OllamaLLM
191-
192- print ("Debug: Using Phi-3" )
193- llm = OllamaLLM (model = "phi3:latest" , temperature = temperature )
194-
195- elif llm_ver == LLM_OLLAMA_PHI4 :
196- from langchain_ollama .llms import OllamaLLM
197-
198- print ("Debug: Using Phi-4" )
199- llm = OllamaLLM (model = "phi4:latest" , temperature = temperature )
200-
201- elif llm_ver == LLM_OLLAMA_GEMMA2 :
202- from langchain_ollama .llms import OllamaLLM
203-
204- print ("Debug: Using Gemma-2" )
205- llm = OllamaLLM (model = "gemma2:latest" , temperature = temperature )
206-
207- elif llm_ver == LLM_OLLAMA_GEMMA3 :
208- from langchain_ollama .llms import OllamaLLM
209-
210- print ("Debug: Using Gemma-3" )
211- llm = OllamaLLM (model = "gemma3:4b" , temperature = temperature )
212-
213- elif llm_ver == LLM_OLLAMA_DEEPSEEK :
214- from langchain_ollama .llms import OllamaLLM
215-
216- print ("Debug: Using DeepSeek" )
217- return OllamaLLM (model = "deepseek-r1:7b" , temperature = temperature )
218-
219- elif llm_ver == LLM_OLLAMA_GEMMA :
220- from langchain_ollama .llms import OllamaLLM
221-
222- print ("Debug: Using Gemma" )
223- return OllamaLLM (model = "gemma:7b" , temperature = temperature )
224-
225- elif llm_ver == LLM_OLLAMA_QWEN :
226- from langchain_ollama .llms import OllamaLLM
227-
228- print ("Debug: Using Qwen" )
229- return OllamaLLM (model = "qwen:4b" , temperature = temperature )
230-
231- elif llm_ver == LLM_OLLAMA_CODELLAMA :
232- from langchain_ollama .llms import OllamaLLM
233-
234- print ("Debug: Using CodeLlama" )
235- return OllamaLLM (model = "codellama:latest" , temperature = temperature )
236-
237- elif llm_ver == LLM_OLLAMA_FALCON2 :
187+ elif llm_ver in [
188+ LLM_OLLAMA_LLAMA32_1B ,
189+ LLM_OLLAMA_MISTRAL ,
190+ LLM_OLLAMA_TINYLLAMA ,
191+ LLM_OLLAMA_PHI3 ,
192+ LLM_OLLAMA_PHI4 ,
193+ LLM_OLLAMA_GEMMA ,
194+ LLM_OLLAMA_GEMMA2 ,
195+ LLM_OLLAMA_GEMMA3 ,
196+ LLM_OLLAMA_DEEPSEEK ,
197+ LLM_OLLAMA_QWEN ,
198+ LLM_OLLAMA_CODELLAMA ,
199+ LLM_OLLAMA_FALCON2 ,
200+ ]:
238201 from langchain_ollama .llms import OllamaLLM
239202
240- print ("Debug: Using Falcon2" )
241- return OllamaLLM (model = "falcon2:latest" , temperature = temperature )
242-
203+ llm = OllamaLLM (model = llm_ver .split (":" , 1 )[1 ])
243204 return llm
244205
245206
@@ -340,66 +301,16 @@ def generate_panel_response(input_text, llm_panelists, llm_panel_chair, temperat
def get_llm_from_argv(argv):
    """Pick the LLM version selected by command-line flags.

    Scans ``argv`` for every flag registered in ``LLM_CMD_LINE_ARGS``
    (exact-element match, not substring). If several flags are present,
    the one registered last in the table wins, matching the original
    sequential-``if`` behavior.

    :param argv: sequence of command-line arguments (e.g. ``sys.argv``)
    :return: a model-version string; defaults to ``LLM_GPT4o`` when no
        known flag is present
    """
    llm_ver = LLM_GPT4o

    # Iterate items() so each matching flag costs one lookup instead of
    # re-indexing LLM_CMD_LINE_ARGS[arg] inside the loop.
    for flag, version in LLM_CMD_LINE_ARGS.items():
        if flag in argv:
            llm_ver = version

    return llm_ver
397309
398310
399311def ask_question_get_response (
400312 question , llm_ver , temperature = 0 , only_celegans = False , print_question = True
401313):
402- print (f"Debug: ask_question_get_response received llm_ver={ llm_ver } " )
403314 print ("--------------------------------------------------------" )
404315 if print_question :
405316 print ("Asking question:\n %s" % question )
0 commit comments