Skip to content

Commit 1a8c7ef

Browse files
committed
Merge commit 1a8c7ef (2 parents: cefad2a + 89c1415)

1 file changed

Lines changed: 9 additions & 5 deletions

File tree

  • src/praisonai-agents/praisonaiagents/llm

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -863,6 +863,8 @@ def get_response(
             ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)

             if ollama_params:
+                # The new messages should be added to the existing history
+                follow_up_messages = messages + ollama_params["follow_up_messages"]
                 # Get response based on streaming mode
                 if stream:
                     # Streaming approach
@@ -871,7 +873,7 @@ def get_response(
                     response_text = ""
                     for chunk in litellm.completion(
                         **self._build_completion_params(
-                            messages=ollama_params["follow_up_messages"],
+                            messages=follow_up_messages,
                             temperature=temperature,
                             stream=True
                         )
@@ -884,7 +886,7 @@ def get_response(
                     response_text = ""
                     for chunk in litellm.completion(
                         **self._build_completion_params(
-                            messages=ollama_params["follow_up_messages"],
+                            messages=follow_up_messages,
                             temperature=temperature,
                             stream=True
                         )
@@ -895,7 +897,7 @@ def get_response(
                     # Non-streaming approach
                     resp = litellm.completion(
                         **self._build_completion_params(
-                            messages=ollama_params["follow_up_messages"],
+                            messages=follow_up_messages,
                             temperature=temperature,
                             stream=False
                         )
@@ -1435,12 +1437,14 @@ async def get_response_async(
             ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)

             if ollama_params:
+                # The new messages should be added to the existing history
+                follow_up_messages = messages + ollama_params["follow_up_messages"]
                 # Get response with streaming
                 if verbose:
                     response_text = ""
                     async for chunk in await litellm.acompletion(
                         **self._build_completion_params(
-                            messages=ollama_params["follow_up_messages"],
+                            messages=follow_up_messages,
                             temperature=temperature,
                             stream=stream
                         )
@@ -1454,7 +1458,7 @@ async def get_response_async(
                     response_text = ""
                     async for chunk in await litellm.acompletion(
                         **self._build_completion_params(
-                            messages=ollama_params["follow_up_messages"],
+                            messages=follow_up_messages,
                             temperature=temperature,
                             stream=stream
                         )

0 commit comments

Comments
 (0)