import logging
import os
import warnings
from typing import Any, Dict, List, Optional, Union, Literal, Callable
from pydantic import BaseModel
import time
import json
from ..main import (
    display_error,
    display_tool_call,
    display_instruction,
    display_interaction,
    display_generating,
    display_self_reflection,
    ReflectionOutput,
)
from rich.console import Console
from rich.live import Live
# Disable litellm telemetry before litellm itself is imported (the import happens lazily in LLM.__init__)
os.environ["LITELLM_TELEMETRY"] = "False"
# TODO: Include in-build tool calling in LLM class
# TODO: Restructure so that duplicate calls are not made (Sync with agent.py)

class LLMContextLengthExceededException(Exception):
    """Raised when the LLM context length is exceeded"""
    def __init__(self, message: str):
        self.message = message
        super().__init__(self.message)

    def _is_context_limit_error(self, error_message: str) -> bool:
        """Check whether an error message is related to context length"""
        context_limit_phrases = [
            "maximum context length",
            "context window is too long",
            "context length exceeded",
            "context_length_exceeded"
        ]
        return any(phrase in error_message.lower() for phrase in context_limit_phrases)
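
# Usage sketch (illustrative, not part of the library): a caller can wrap a
# provider error and check whether trimming history and retrying makes sense.
# "provider_error" below is a hypothetical variable.
#
#     exc = LLMContextLengthExceededException(str(provider_error))
#     if exc._is_context_limit_error(exc.message):
#         ...  # trim chat_history and retry the request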

class LLM:
    """
    Easy-to-use wrapper for language models. Supports multiple providers such as
    OpenAI, Anthropic, and others through LiteLLM.
    """

    # Default context window sizes for different models (75% of actual, to be safe)
    MODEL_WINDOWS = {
        # OpenAI
        "gpt-4": 6144,                      # 8,192 actual
        "gpt-4o": 96000,                    # 128,000 actual
        "gpt-4o-mini": 96000,               # 128,000 actual
        "gpt-4-turbo": 96000,               # 128,000 actual
        "o1-preview": 96000,                # 128,000 actual
        "o1-mini": 96000,                   # 128,000 actual
        # Anthropic
        "claude-3-5-sonnet": 12288,         # 16,384 actual
        "claude-3-sonnet": 12288,           # 16,384 actual
        "claude-3-opus": 96000,             # 128,000 actual
        "claude-3-haiku": 96000,            # 128,000 actual
        # Gemini
        "gemini-2.0-flash": 786432,         # 1,048,576 actual
        "gemini-1.5-pro": 1572864,          # 2,097,152 actual
        "gemini-1.5-flash": 786432,         # 1,048,576 actual
        "gemini-1.5-flash-8b": 786432,      # 1,048,576 actual
        # Deepseek
        "deepseek-chat": 96000,             # 128,000 actual
        # Groq
        "gemma2-9b-it": 6144,               # 8,192 actual
        "gemma-7b-it": 6144,                # 8,192 actual
        "llama3-70b-8192": 6144,            # 8,192 actual
        "llama3-8b-8192": 6144,             # 8,192 actual
        "mixtral-8x7b-32768": 24576,        # 32,768 actual
        "llama-3.3-70b-versatile": 96000,   # 128,000 actual
        "llama-3.3-70b-instruct": 96000,    # 128,000 actual
        # Other llama models
        "llama-3.1-70b-versatile": 98304,   # 131,072 actual
        "llama-3.1-8b-instant": 98304,      # 131,072 actual
        "llama-3.2-1b-preview": 6144,       # 8,192 actual
        "llama-3.2-3b-preview": 6144,       # 8,192 actual
        "llama-3.2-11b-text-preview": 6144, # 8,192 actual
        "llama-3.2-90b-text-preview": 6144  # 8,192 actual
    }
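
    # Lookup sketch (illustrative): the safe window for a known model is a plain
    # dict lookup with a conservative fallback; the fallback value here is an
    # assumption, not a constant defined by this class.
    #
    #     window = LLM.MODEL_WINDOWS.get("gpt-4o-mini", 4096)  # -> 96000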

    def _log_llm_config(self, method_name: str, **config):
        """Centralized debug logging for LLM configuration and parameters.

        Args:
            method_name: The name of the method calling this logger (e.g., '__init__', 'get_response')
            **config: Configuration parameters to log
        """
        # Check for debug logging - either global debug level OR explicit verbose mode
        verbose = config.get('verbose', self.verbose if hasattr(self, 'verbose') else False)
        should_log = logging.getLogger().getEffectiveLevel() == logging.DEBUG or (not isinstance(verbose, bool) and verbose >= 10)
        if should_log:
            # Mask sensitive information
            safe_config = config.copy()
            if 'api_key' in safe_config:
                safe_config['api_key'] = "***" if safe_config['api_key'] is not None else None
            if 'extra_settings' in safe_config and isinstance(safe_config['extra_settings'], dict):
                safe_config['extra_settings'] = {k: v for k, v in safe_config['extra_settings'].items() if k not in ["api_key"]}
            # Handle special formatting for certain fields
            if 'prompt' in safe_config:
                prompt = safe_config['prompt']
                # Convert to string first for consistent logging behavior
                prompt_str = str(prompt) if not isinstance(prompt, str) else prompt
                if len(prompt_str) > 100:
                    safe_config['prompt'] = prompt_str[:100] + "..."
                else:
                    safe_config['prompt'] = prompt_str
            if 'system_prompt' in safe_config:
                sp = safe_config['system_prompt']
                if sp and isinstance(sp, str) and len(sp) > 100:
                    safe_config['system_prompt'] = sp[:100] + "..."
            if 'chat_history' in safe_config:
                ch = safe_config['chat_history']
                safe_config['chat_history'] = f"[{len(ch)} messages]" if ch else None
            if 'tools' in safe_config:
                tools = safe_config['tools']
                # Check if tools is iterable before processing
                if tools and hasattr(tools, '__iter__') and not isinstance(tools, str):
                    safe_config['tools'] = [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
                else:
                    safe_config['tools'] = None
            if 'output_json' in safe_config:
                oj = safe_config['output_json']
                safe_config['output_json'] = str(oj.__class__.__name__) if oj else None
            if 'output_pydantic' in safe_config:
                op = safe_config['output_pydantic']
                safe_config['output_pydantic'] = str(op.__class__.__name__) if op else None
            # Log based on method name - check more specific conditions first
            if method_name == '__init__':
                logging.debug(f"LLM instance initialized with: {json.dumps(safe_config, indent=2, default=str)}")
            elif "parameters" in method_name:
                logging.debug(f"{method_name}: {json.dumps(safe_config, indent=2, default=str)}")
            elif "_async" in method_name:
                logging.debug(f"LLM async instance configuration: {json.dumps(safe_config, indent=2, default=str)}")
            else:
                logging.debug(f"{method_name} configuration: {json.dumps(safe_config, indent=2, default=str)}")

    def __init__(
        self,
        model: str,
        timeout: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        n: Optional[int] = None,
        max_tokens: Optional[int] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        logit_bias: Optional[Dict[int, float]] = None,
        response_format: Optional[Dict[str, Any]] = None,
        seed: Optional[int] = None,
        logprobs: Optional[bool] = None,
        top_logprobs: Optional[int] = None,
        api_version: Optional[str] = None,
        stop_phrases: Optional[Union[str, List[str]]] = None,
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        events: Optional[List[Any]] = None,
        **extra_settings
    ):
        # Use None as the default for events to avoid sharing a mutable default list
        events = events if events is not None else []
        try:
            import litellm
            # Disable telemetry
            litellm.telemetry = False
            # Set litellm options globally
            litellm.set_verbose = False
            litellm.success_callback = []
            litellm._async_success_callback = []
            litellm.callbacks = []
            verbose = extra_settings.get('verbose', True)
            # Enable detailed debug logging when verbose is a numeric level >= 10;
            # otherwise suppress noisy logs for normal operation
            if not isinstance(verbose, bool) and verbose >= 10:
                logging.getLogger("asyncio").setLevel(logging.DEBUG)
                logging.getLogger("selector_events").setLevel(logging.DEBUG)
                logging.getLogger("litellm.utils").setLevel(logging.DEBUG)
                logging.getLogger("litellm.main").setLevel(logging.DEBUG)
                litellm.suppress_debug_messages = False
                litellm.set_verbose = True
            else:
                logging.getLogger("asyncio").setLevel(logging.WARNING)
                logging.getLogger("selector_events").setLevel(logging.WARNING)
                logging.getLogger("litellm.utils").setLevel(logging.WARNING)
                logging.getLogger("litellm.main").setLevel(logging.WARNING)
                litellm.suppress_debug_messages = True
                litellm._logging._disable_debugging()
                warnings.filterwarnings("ignore", category=RuntimeWarning)
        except ImportError:
            raise ImportError(
                "LiteLLM is required but not installed. "
                "Please install with: pip install 'praisonaiagents[llm]'"
            )

        self.model = model
        self.timeout = timeout
        self.temperature = temperature
        self.top_p = top_p
        self.n = n
        self.max_tokens = max_tokens
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.logit_bias = logit_bias
        self.response_format = response_format
        self.seed = seed
        self.logprobs = logprobs
        self.top_logprobs = top_logprobs
        self.api_version = api_version
        self.stop_phrases = stop_phrases
        self.api_key = api_key
        self.base_url = base_url
        self.events = events
        self.extra_settings = extra_settings
        self.console = Console()
        self.chat_history = []
        self.verbose = verbose
        self.markdown = extra_settings.get('markdown', True)
        self.self_reflect = extra_settings.get('self_reflect', False)
        self.max_reflect = extra_settings.get('max_reflect', 3)
        self.min_reflect = extra_settings.get('min_reflect', 1)
        self.reasoning_steps = extra_settings.get('reasoning_steps', False)

        # Drop parameters that the target provider does not support instead of erroring
        litellm.drop_params = True
        # Enable parameter modification for providers like Anthropic
        litellm.modify_params = True
        self._setup_event_tracking(events)

        # Log all initialization parameters when in debug mode or verbose >= 10
        self._log_llm_config(
            '__init__',
            model=self.model,
            timeout=self.timeout,
            temperature=self.temperature,
            top_p=self.top_p,
            n=self.n,
            max_tokens=self.max_tokens,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            logit_bias=self.logit_bias,
            response_format=self.response_format,
            seed=self.seed,
            logprobs=self.logprobs,
            top_logprobs=self.top_logprobs,
            api_version=self.api_version,
            stop_phrases=self.stop_phrases,
            api_key=self.api_key,
            base_url=self.base_url,
            verbose=self.verbose,
            markdown=self.markdown,
            self_reflect=self.self_reflect,
            max_reflect=self.max_reflect,
            min_reflect=self.min_reflect,
            reasoning_steps=self.reasoning_steps,
            extra_settings=self.extra_settings
        )
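
    # Construction sketch (illustrative; the model name and settings are
    # assumptions, not defaults of this class):
    #
    #     llm = LLM(model="gpt-4o-mini", temperature=0.2, max_tokens=512)
    #     reply = llm.get_response("Say hello in French.", verbose=False)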

    def _is_ollama_provider(self) -> bool:
        """Detect whether this is an Ollama provider, regardless of naming convention"""
        if not self.model:
            return False
        # Direct ollama/ prefix
        if self.model.startswith("ollama/"):
            return True
        # Check environment variables for an Ollama base URL
        base_url = os.getenv("OPENAI_BASE_URL", "")
        api_base = os.getenv("OPENAI_API_BASE", "")
        # Common Ollama endpoints
        ollama_endpoints = ["localhost:11434", "127.0.0.1:11434", ":11434"]
        return any(endpoint in base_url or endpoint in api_base for endpoint in ollama_endpoints)
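
    # Detection sketch (illustrative): even without the "ollama/" prefix, a model
    # is treated as Ollama when the OpenAI-compatible base URL points at the
    # default Ollama port:
    #
    #     os.environ["OPENAI_BASE_URL"] = "http://localhost:11434/v1"
    #     LLM(model="llama3")._is_ollama_provider()  # -> True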

    def _process_stream_delta(self, delta, response_text: str, tool_calls: List[Dict], formatted_tools: Optional[List] = None) -> tuple:
        """
        Process a streaming delta chunk to extract content and tool calls.

        Args:
            delta: The delta object from a streaming chunk
            response_text: The accumulated response text so far
            tool_calls: The accumulated tool calls list so far
            formatted_tools: Optional list of formatted tools for the tool call support check

        Returns:
            tuple: (updated_response_text, updated_tool_calls)
        """
        # Process content
        if delta.content:
            response_text += delta.content

        # Capture tool calls from streaming chunks if the provider supports it
        if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
            for tc in delta.tool_calls:
                if tc.index >= len(tool_calls):
                    tool_calls.append({
                        "id": tc.id,
                        "type": "function",
                        "function": {"name": "", "arguments": ""}
                    })
                if tc.function.name:
                    tool_calls[tc.index]["function"]["name"] = tc.function.name
                if tc.function.arguments:
                    tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
        return response_text, tool_calls
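
    # Accumulation sketch: providers stream tool-call arguments in fragments, so
    # two deltas carrying (index=0, arguments='{"ci') and then
    # (index=0, arguments='ty": "Paris"}') grow a single tool_calls entry whose
    # arguments string ends up as '{"city": "Paris"}'. (Values are illustrative.)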

    def _parse_tool_call_arguments(self, tool_call: Dict, is_ollama: bool = False) -> tuple:
        """
        Safely parse tool call arguments with proper error handling.

        Returns:
            tuple: (function_name, arguments, tool_call_id)
        """
        try:
            if is_ollama:
                # Special handling for the Ollama provider, which may use a different structure
                if "function" in tool_call and isinstance(tool_call["function"], dict):
                    function_name = tool_call["function"]["name"]
                    arguments = json.loads(tool_call["function"]["arguments"])
                else:
                    # Try the alternative format that Ollama might return
                    function_name = tool_call.get("name", "unknown_function")
                    arguments_str = tool_call.get("arguments", "{}")
                    arguments = json.loads(arguments_str) if arguments_str else {}
                tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
            else:
                # Standard format for other providers, with error handling
                function_name = tool_call["function"]["name"]
                arguments_str = tool_call["function"]["arguments"]
                arguments = json.loads(arguments_str) if arguments_str else {}
                tool_call_id = tool_call["id"]
        except (KeyError, json.JSONDecodeError, TypeError) as e:
            logging.error(f"Error parsing tool call arguments: {e}")
            function_name = tool_call.get("name", "unknown_function")
            arguments = {}
            tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
        return function_name, arguments, tool_call_id
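
    # Parsing sketch for the standard provider shape (payload is illustrative):
    #
    #     tool_call = {"id": "call_1",
    #                  "function": {"name": "get_weather",
    #                               "arguments": '{"city": "Paris"}'}}
    #     self._parse_tool_call_arguments(tool_call)
    #     # -> ("get_weather", {"city": "Paris"}, "call_1")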

    def _needs_system_message_skip(self) -> bool:
        """Check whether this model requires skipping system messages"""
        if not self.model:
            return False
        # Only skip for specific legacy o1 models that don't support system messages
        legacy_o1_models = [
            "o1-preview",          # 2024-09-12 version
            "o1-mini",             # 2024-09-12 version
            "o1-mini-2024-09-12"   # Explicit dated version
        ]
        return self.model in legacy_o1_models

    def _supports_streaming_tools(self) -> bool:
        """
        Check whether the current provider supports streaming with tools.

        Most providers that support tool calling also support streaming with tools,
        but some providers (like Ollama and certain local models) require non-streaming
        calls when tools are involved.

        Returns:
            bool: True if the provider supports streaming with tools, False otherwise
        """
        if not self.model:
            return False

        # Ollama doesn't reliably support streaming with tools
        if self._is_ollama_provider():
            return False

        # Import the capability check function
        from .model_capabilities import supports_streaming_with_tools

        # Check if this model supports streaming with tools
        if supports_streaming_with_tools(self.model):
            return True

        # Anthropic Claude models support streaming with tools
        if self.model.startswith("claude-"):
            return True

        # Google Gemini models support streaming with tools
        if any(self.model.startswith(prefix) for prefix in ["gemini-", "gemini/"]):
            return True

        # For other providers, default to False to be safe. This ensures we make a
        # single non-streaming call rather than risk missing tool calls or making
        # duplicate calls.
        return False
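
    # Illustrative outcomes (assuming model_capabilities lists no extra models):
    #
    #     LLM(model="ollama/llama3")._supports_streaming_tools()      # False
    #     LLM(model="claude-3-5-sonnet")._supports_streaming_tools()  # True
    #     LLM(model="some-local-model")._supports_streaming_tools()   # False (safe default)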

    def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None, tools=None):
        """Build messages list for LLM completion. Works for both sync and async.

        Args:
            prompt: The user prompt (str or list)
            system_prompt: Optional system prompt
            chat_history: Optional list of previous messages
            output_json: Optional Pydantic model for JSON output
            output_pydantic: Optional Pydantic model for JSON output (alias)
            tools: Optional list of tools available

        Returns:
            tuple: (messages list, original prompt)
        """
        messages = []

        # Handle system prompt
        if system_prompt:
            # Append the JSON schema if structured output was requested
            if output_json:
                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
            elif output_pydantic:
                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
            # Skip system messages for legacy o1 models, which don't support them
            if not self._needs_system_message_skip():
                messages.append({"role": "system", "content": system_prompt})

        # Add chat history if provided
        if chat_history:
            messages.extend(chat_history)

        # Handle prompt modifications for JSON output
        original_prompt = prompt
        if output_json or output_pydantic:
            if isinstance(prompt, str):
                prompt = prompt + "\nReturn ONLY a valid JSON object. No other text or explanation."
            elif isinstance(prompt, list):
                # Copy the list and its item dicts so the caller's prompt is not
                # mutated (a plain list.copy() would still share the item dicts)
                prompt = [item.copy() if isinstance(item, dict) else item for item in prompt]
                for item in prompt:
                    if item.get("type") == "text":
                        item["text"] = item["text"] + "\nReturn ONLY a valid JSON object. No other text or explanation."
                        break

        # Add the prompt to messages (as-is for both string and multimodal list content)
        messages.append({"role": "user", "content": prompt})

        return messages, original_prompt
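
    # Resulting shape (illustrative, assuming a model that accepts system
    # messages) for _build_messages("Hi", system_prompt="Be brief"):
    #
    #     ([{"role": "system", "content": "Be brief"},
    #       {"role": "user", "content": "Hi"}], "Hi")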

    def _fix_array_schemas(self, schema: Dict) -> Dict:
        """
        Recursively fix array schemas by adding a missing 'items' attribute.

        This ensures compatibility with OpenAI's function calling format, which
        requires array types to specify the type of items they contain.

        Args:
            schema: The schema dictionary to fix

        Returns:
            dict: The fixed schema
        """
        if not isinstance(schema, dict):
            return schema

        # Create a copy to avoid modifying the original
        fixed_schema = schema.copy()

        # Fix array types at the current level
        if fixed_schema.get("type") == "array" and "items" not in fixed_schema:
            # Add a default items schema for arrays without one
            fixed_schema["items"] = {"type": "string"}

        # Recursively fix nested schemas in properties
        if "properties" in fixed_schema and isinstance(fixed_schema["properties"], dict):
            fixed_properties = {}
            for prop_name, prop_schema in fixed_schema["properties"].items():
                if isinstance(prop_schema, dict):
                    fixed_properties[prop_name] = self._fix_array_schemas(prop_schema)
                else:
                    fixed_properties[prop_name] = prop_schema
            fixed_schema["properties"] = fixed_properties

        # Fix the items schema if it exists
        if "items" in fixed_schema and isinstance(fixed_schema["items"], dict):
            fixed_schema["items"] = self._fix_array_schemas(fixed_schema["items"])

        return fixed_schema
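
    # Before/after sketch (illustrative): an array property missing "items"
    # gains a default string item type, which OpenAI-style function schemas
    # require:
    #
    #     {"type": "object", "properties": {"tags": {"type": "array"}}}
    #     # becomes
    #     {"type": "object", "properties": {"tags": {"type": "array",
    #                                                "items": {"type": "string"}}}}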

    def _format_tools_for_litellm(self, tools: Optional[List[Any]]) -> Optional[List[Dict]]:
        """Format tools for LiteLLM - handles all tool formats.

        Supports:
        - Pre-formatted OpenAI tools (dicts with type='function')
        - Lists of pre-formatted tools
        - Callable functions
        - String function names

        Args:
            tools: List of tools in various formats

        Returns:
            List of formatted tools or None
        """
        if not tools:
            return None

        formatted_tools = []
        for tool in tools:
            # Check if the tool is already in OpenAI format (e.g. from MCP.to_openai_tool())
            if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
                # Validate the nested dictionary structure before accessing it
                if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
                    logging.debug(f"Using pre-formatted OpenAI tool: {tool['function']['name']}")
                    # Fix array schemas in the tool parameters; copy the nested
                    # 'function' dict too so the caller's tool is not mutated
                    fixed_tool = tool.copy()
                    if 'parameters' in fixed_tool['function']:
                        fixed_tool['function'] = fixed_tool['function'].copy()
                        fixed_tool['function']['parameters'] = self._fix_array_schemas(fixed_tool['function']['parameters'])
                    formatted_tools.append(fixed_tool)
                else:
                    logging.debug("Skipping malformed OpenAI tool: missing function or name")
            # Handle lists of tools (e.g. from MCP.to_openai_tool())
            elif isinstance(tool, list):
                for subtool in tool:
                    if isinstance(subtool, dict) and 'type' in subtool and subtool['type'] == 'function':
                        # Validate the nested dictionary structure before accessing it
                        if 'function' in subtool and isinstance(subtool['function'], dict) and 'name' in subtool['function']:
                            logging.debug(f"Using pre-formatted OpenAI tool from list: {subtool['function']['name']}")
                            # Fix array schemas in the tool parameters (copy nested dict as above)
                            fixed_tool = subtool.copy()
                            if 'parameters' in fixed_tool['function']:
                                fixed_tool['function'] = fixed_tool['function'].copy()
                                fixed_tool['function']['parameters'] = self._fix_array_schemas(fixed_tool['function']['parameters'])
                            formatted_tools.append(fixed_tool)
                        else:
                            logging.debug("Skipping malformed OpenAI tool in list: missing function or name")
            elif callable(tool):
                tool_def = self._generate_tool_definition(tool)
                if tool_def:
                    formatted_tools.append(tool_def)
            elif isinstance(tool, str):
                tool_def = self._generate_tool_definition(tool)
                if tool_def:
                    formatted_tools.append(tool_def)
            else:
                logging.debug(f"Skipping tool of unsupported type: {type(tool)}")

        # Validate JSON serialization before returning (json is imported at module level)
        if formatted_tools:
            try:
                json.dumps(formatted_tools)  # Validate serialization
            except (TypeError, ValueError) as e:
                logging.error(f"Tools are not JSON serializable: {e}")
                return None

        return formatted_tools if formatted_tools else None
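
    # Formatting sketch (illustrative): callables are converted through
    # _generate_tool_definition (defined later in this file), while pre-formatted
    # OpenAI-style dicts pass through with their array schemas fixed.
    #
    #     def get_weather(city: str) -> str:
    #         """Get the weather for a city."""
    #         return "sunny"
    #     llm._format_tools_for_litellm([get_weather])
    #     # -> [{"type": "function", "function": {"name": "get_weather", ...}}]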

    def get_response(
        self,
        prompt: Union[str, List[Dict]],
        system_prompt: Optional[str] = None,
        chat_history: Optional[List[Dict]] = None,
        temperature: float = 0.2,
        tools: Optional[List[Any]] = None,
        output_json: Optional[BaseModel] = None,
        output_pydantic: Optional[BaseModel] = None,
        verbose: bool = True,
        markdown: bool = True,
        self_reflect: bool = False,
        max_reflect: int = 3,
        min_reflect: int = 1,
        console: Optional[Console] = None,
        agent_name: Optional[str] = None,
        agent_role: Optional[str] = None,
        agent_tools: Optional[List[str]] = None,
        execute_tool_fn: Optional[Callable] = None,
        stream: bool = True,
        **kwargs
    ) -> str:
        """Enhanced get_response with all OpenAI-like features"""
        logging.info(f"Getting response from {self.model}")
        # Log all self values when in debug mode
        self._log_llm_config(
            'LLM instance',
            model=self.model,
            timeout=self.timeout,
            temperature=self.temperature,
            top_p=self.top_p,
            n=self.n,
            max_tokens=self.max_tokens,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            logit_bias=self.logit_bias,
            response_format=self.response_format,
            seed=self.seed,
            logprobs=self.logprobs,
            top_logprobs=self.top_logprobs,
            api_version=self.api_version,
            stop_phrases=self.stop_phrases,
            api_key=self.api_key,
            base_url=self.base_url,
            verbose=self.verbose,
            markdown=self.markdown,
            self_reflect=self.self_reflect,
            max_reflect=self.max_reflect,
            min_reflect=self.min_reflect,
            reasoning_steps=self.reasoning_steps
        )
        # Log the parameter values passed to get_response
        self._log_llm_config(
            'get_response parameters',
            prompt=prompt,
            system_prompt=system_prompt,
            chat_history=chat_history,
            temperature=temperature,
            tools=tools,
            output_json=output_json,
            output_pydantic=output_pydantic,
            verbose=verbose,
            markdown=markdown,
            self_reflect=self_reflect,
            max_reflect=max_reflect,
            min_reflect=min_reflect,
            agent_name=agent_name,
            agent_role=agent_role,
            agent_tools=agent_tools,
            kwargs=str(kwargs)
        )

        try:
            import litellm
            # kwargs is passed straight through to litellm.completion(), so pop
            # reasoning_steps (a wrapper-only flag) before forwarding it
            reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
            # Disable litellm debug messages
            litellm.set_verbose = False

            # Format tools if provided
            formatted_tools = self._format_tools_for_litellm(tools)

            # Build messages list using the shared helper
            messages, original_prompt = self._build_messages(
                prompt=prompt,
                system_prompt=system_prompt,
                chat_history=chat_history,
                output_json=output_json,
                output_pydantic=output_pydantic
            )

            start_time = time.time()
            reflection_count = 0

            # Display the initial instruction once
            if verbose:
                display_text = prompt
                if isinstance(prompt, list):
                    display_text = next((item["text"] for item in prompt if item.get("type") == "text"), "")
                if display_text and str(display_text).strip():
                    display_instruction(
                        f"Agent {agent_name} is processing prompt: {display_text}",
                        console=console,
                        agent_name=agent_name,
                        agent_role=agent_role,
                        agent_tools=agent_tools
                    )

            # Sequential tool calling loop - similar to agent.py
            max_iterations = 10  # Prevent infinite loops
            iteration_count = 0
            final_response_text = ""

            while iteration_count < max_iterations:
                try:
                    # Get response from LiteLLM
                    current_time = time.time()

                    # If reasoning_steps is True, do a single non-streaming call
                    if reasoning_steps:
                        resp = litellm.completion(
                            **self._build_completion_params(
                                messages=messages,
                                temperature=temperature,
                                stream=False,  # force non-streaming
                                tools=formatted_tools,
                                **{k: v for k, v in kwargs.items() if k != 'reasoning_steps'}
                            )
                        )
                        reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
                        response_text = resp["choices"][0]["message"]["content"]
                        final_response = resp

                        # Optionally display reasoning if present
                        if verbose and reasoning_content:
                            display_interaction(
                                original_prompt,
                                f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                                markdown=markdown,
                                generation_time=time.time() - current_time,
                                console=console
                            )
                        else:
                            display_interaction(
                                original_prompt,
                                response_text,
                                markdown=markdown,
                                generation_time=time.time() - current_time,
                                console=console
                            )

                    # Otherwise do the existing streaming approach
                    else:
                        # Determine whether to use streaming based on tool support
                        use_streaming = stream
                        if formatted_tools and not self._supports_streaming_tools():
                            # Provider doesn't support streaming with tools, so use non-streaming
                            use_streaming = False

                        if use_streaming:
                            # Streaming approach (with or without tools)
                            tool_calls = []
                            response_text = ""

                            if verbose:
                                with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
                                    for chunk in litellm.completion(
                                        **self._build_completion_params(
                                            messages=messages,
                                            tools=formatted_tools,
                                            temperature=temperature,
                                            stream=True,
                                            **kwargs
                                        )
                                    ):
                                        if chunk and chunk.choices and chunk.choices[0].delta:
                                            delta = chunk.choices[0].delta
                                            response_text, tool_calls = self._process_stream_delta(
                                                delta, response_text, tool_calls, formatted_tools
                                            )
                                            if delta.content:
                                                live.update(display_generating(response_text, current_time))
                            else:
                                # Non-verbose streaming
                                for chunk in litellm.completion(
                                    **self._build_completion_params(
                                        messages=messages,
                                        tools=formatted_tools,
                                        temperature=temperature,
                                        stream=True,
                                        **kwargs
                                    )
                                ):
                                    if chunk and chunk.choices and chunk.choices[0].delta:
                                        delta = chunk.choices[0].delta
                                        if delta.content:
                                            response_text += delta.content
                                        # Capture tool calls from streaming chunks if the provider supports it
                                        if formatted_tools and self._supports_streaming_tools():
                                            tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

                            response_text = response_text.strip()

                            # Create a mock final_response with the captured data
                            final_response = {
                                "choices": [{
                                    "message": {
                                        "content": response_text,
                                        "tool_calls": tool_calls if tool_calls else None
                                    }
                                }]
                            }
                        else:
                            # Non-streaming approach (when tools require it or streaming is disabled)
                            final_response = litellm.completion(
                                **self._build_completion_params(
                                    messages=messages,
                                    tools=formatted_tools,
                                    temperature=temperature,
                                    stream=False,
                                    **kwargs
                                )
                            )
                            response_text = final_response["choices"][0]["message"]["content"]

                            if verbose:
                                # Display the complete response at once
                                display_interaction(
                                    original_prompt,
                                    response_text,
                                    markdown=markdown,
                                    generation_time=time.time() - current_time,
                                    console=console
                                )

                    tool_calls = final_response["choices"][0]["message"].get("tool_calls")

                    # Handle tool calls - sequential tool calling logic
                    if tool_calls and execute_tool_fn:
                        # Convert tool_calls to a serializable format for all providers
                        serializable_tool_calls = self._serialize_tool_calls(tool_calls)
                        messages.append({
                            "role": "assistant",
                            "content": response_text,
                            "tool_calls": serializable_tool_calls
                        })

                        should_continue = False
                        tool_results = []  # Store all tool results
                        for tool_call in tool_calls:
                            # Handle both object and dict access patterns
                            is_ollama = self._is_ollama_provider()
                            function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)

                            logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
                            tool_result = execute_tool_fn(function_name, arguments)
                            logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
                            tool_results.append(tool_result)  # Store the result

                            if verbose:
                                display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
                                if tool_result:
                                    display_message += f"Function returned: {tool_result}"
                                    logging.debug(f"[TOOL_EXEC_DEBUG] Display message with result: {display_message}")
                                else:
                                    display_message += "Function returned no output"
                                    logging.debug("[TOOL_EXEC_DEBUG] Tool returned no output")

                                logging.debug(f"[TOOL_EXEC_DEBUG] About to display tool call with message: {display_message}")
                                display_tool_call(display_message, console=console)

                            messages.append({
                                "role": "tool",
                                "tool_call_id": tool_call_id,
                                "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
                            })

                            # Check whether we should continue (for tools like sequential thinking)
                            # This mimics the logic from agent.py lines 1004-1007
                            if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
                                should_continue = True

                        # If we should continue, increment the iteration count and continue the loop
                        if should_continue:
                            iteration_count += 1
                            continue

                        # If we reach here, no more tool calls are needed; make one more
                        # call to get the final summary response.
                        # Special handling for Ollama models that don't automatically process tool results
                        ollama_handled = False
                        ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)

                        if ollama_params:
                            # Get the follow-up response with streaming
                            if verbose:
                                with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
                                    response_text = ""
                                    for chunk in litellm.completion(
                                        **self._build_completion_params(
                                            messages=ollama_params["follow_up_messages"],
                                            temperature=temperature,
                                            stream=stream
                                        )
                                    ):
                                        if chunk and chunk.choices and chunk.choices[0].delta.content:
                                            content = chunk.choices[0].delta.content
                                            response_text += content
                                            live.update(display_generating(response_text, start_time))
                            else:
                                response_text = ""
                                for chunk in litellm.completion(
                                    **self._build_completion_params(
                                        messages=ollama_params["follow_up_messages"],
                                        temperature=temperature,
                                        stream=stream
                                    )
                                ):
                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
                                        response_text += chunk.choices[0].delta.content

                            # Set the flag to indicate Ollama was handled
                            ollama_handled = True
                            final_response_text = response_text.strip()
                            logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")

                            # Display the response if we got one
                            if final_response_text and verbose:
                                display_interaction(
                                    ollama_params["original_prompt"],
                                    final_response_text,
                                    markdown=markdown,
                                    generation_time=time.time() - start_time,
                                    console=console
                                )

                            # Return the final response after processing Ollama's follow-up
                            if final_response_text:
                                return final_response_text
                            else:
                                logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned an empty response")

                        # If reasoning_steps is True and Ollama wasn't handled already, do a single non-streaming call
                        if reasoning_steps and not ollama_handled:
                            resp = litellm.completion(
                                **self._build_completion_params(
                                    messages=messages,
                                    temperature=temperature,
                                    stream=False,  # force non-streaming
                                    **{k: v for k, v in kwargs.items() if k != 'reasoning_steps'}
                                )
                            )
                            reasoning_content = resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
                            response_text = resp["choices"][0]["message"]["content"]

                            # Optionally display reasoning if present
                            if verbose and reasoning_content:
                                display_interaction(
                                    original_prompt,
                                    f"Reasoning:\n{reasoning_content}\n\nAnswer:\n{response_text}",
                                    markdown=markdown,
                                    generation_time=time.time() - start_time,
                                    console=console
                                )
                            else:
                                display_interaction(
                                    original_prompt,
                                    response_text,
                                    markdown=markdown,
                                    generation_time=time.time() - start_time,
                                    console=console
                                )
                        # Otherwise do the existing streaming approach if not already handled
                        elif not ollama_handled:
                            # Get the response after tool calls, with streaming
                            if verbose:
                                with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
                                    final_response_text = ""
                                    for chunk in litellm.completion(
                                        **self._build_completion_params(
                                            messages=messages,
                                            tools=formatted_tools,
                                            temperature=temperature,
                                            stream=True,
                                            **kwargs
                                        )
                                    ):
                                        if chunk and chunk.choices and chunk.choices[0].delta.content:
                                            content = chunk.choices[0].delta.content
                                            final_response_text += content
                                            live.update(display_generating(final_response_text, current_time))
                            else:
                                final_response_text = ""
                                for chunk in litellm.completion(
                                    **self._build_completion_params(
                                        messages=messages,
                                        tools=formatted_tools,
                                        temperature=temperature,
                                        stream=stream,
                                        **kwargs
                                    )
                                ):
                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
                                        final_response_text += chunk.choices[0].delta.content

                            final_response_text = final_response_text.strip()

                            # Display the final response
                            if verbose:
                                display_interaction(
                                    original_prompt,
                                    final_response_text,
                                    markdown=markdown,
                                    generation_time=time.time() - start_time,
                                    console=console
                                )

                            return final_response_text
                    else:
                        # No tool calls, so we're done with this iteration
                        break

                except Exception as e:
                    logging.error(f"Error in LLM iteration {iteration_count}: {e}")
                    break

            # End of while loop - return the final response