Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 51 additions & 25 deletions src/praisonai-agents/praisonaiagents/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -864,32 +864,44 @@ def get_response(
ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)

if ollama_params:
# Get response with streaming
if verbose:
with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
# Get response based on streaming mode
if stream:
# Streaming approach
if verbose:
with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
response_text = ""
for chunk in litellm.completion(
**self._build_completion_params(
messages=ollama_params["follow_up_messages"],
temperature=temperature,
stream=True
)
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
content = chunk.choices[0].delta.content
response_text += content
live.update(display_generating(response_text, start_time))
else:
response_text = ""
for chunk in litellm.completion(
**self._build_completion_params(
messages=ollama_params["follow_up_messages"],
temperature=temperature,
stream=stream
stream=True
)
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
content = chunk.choices[0].delta.content
response_text += content
live.update(display_generating(response_text, start_time))
response_text += chunk.choices[0].delta.content
else:
response_text = ""
for chunk in litellm.completion(
# Non-streaming approach
resp = litellm.completion(
**self._build_completion_params(
messages=ollama_params["follow_up_messages"],
temperature=temperature,
stream=stream
stream=False
)
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
response_text += chunk.choices[0].delta.content
)
response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""

# Set flag to indicate Ollama was handled
ollama_handled = True
Expand Down Expand Up @@ -945,9 +957,26 @@ def get_response(

# Otherwise do the existing streaming approach if not already handled
elif not ollama_handled:
# Get response after tool calls with streaming
if verbose:
with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
# Get response after tool calls
if stream:
# Streaming approach
if verbose:
with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
final_response_text = ""
for chunk in litellm.completion(
**self._build_completion_params(
messages=messages,
tools=formatted_tools,
temperature=temperature,
stream=True,
**kwargs
)
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
content = chunk.choices[0].delta.content
final_response_text += content
live.update(display_generating(final_response_text, current_time))
else:
final_response_text = ""
for chunk in litellm.completion(
**self._build_completion_params(
Expand All @@ -959,22 +988,19 @@ def get_response(
)
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
content = chunk.choices[0].delta.content
final_response_text += content
live.update(display_generating(final_response_text, current_time))
final_response_text += chunk.choices[0].delta.content
else:
final_response_text = ""
for chunk in litellm.completion(
# Non-streaming approach
resp = litellm.completion(
**self._build_completion_params(
messages=messages,
tools=formatted_tools,
temperature=temperature,
stream=stream,
stream=False,
**kwargs
)
):
if chunk and chunk.choices and chunk.choices[0].delta.content:
final_response_text += chunk.choices[0].delta.content
)
final_response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""

final_response_text = final_response_text.strip()

Expand Down
49 changes: 49 additions & 0 deletions src/praisonai-agents/test_sequential_tool_calling.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
"""Test sequential tool calling fix"""
from praisonaiagents import Agent

def get_stock_price(company_name: str) -> str:
    """Return a fixed demo stock-price quote for *company_name*.

    Args:
        company_name (str): Name of the company to look up.

    Returns:
        str: A sentence stating the (hard-coded) stock price of 100.
    """
    # Announce the invocation so sequential tool calls are visible in the run log.
    call_note = f"Tool called: get_stock_price({company_name})"
    print(call_note)
    quote = f"The stock price of {company_name} is 100"
    return quote

def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the product.

    Args:
        a (int): First factor.
        b (int): Second factor.

    Returns:
        int: The product ``a * b``.
    """
    # Announce the invocation so tool-use sequencing can be observed.
    print(f"Tool called: multiply({a}, {b})")
    product = a * b
    return product

# Test with streaming disabled to verify the fix.
# This is the regression scenario: sequential tool calling previously
# returned None when stream=False, because the non-streaming response
# was never read back into response_text.
print("Testing sequential tool calling with stream=False...")
agent = Agent(
    instructions="You are a helpful assistant. You can use the tools provided to you to help the user.",
    llm="gemini/gemini-2.5-flash-lite-preview-06-17",
    self_reflect=False,
    verbose=True,
    tools=[get_stock_price, multiply],
    stream=False  # Force non-streaming mode - use stream parameter directly
)

result = agent.chat("Get the stock price of Google and multiply it by 2")
print(f"\nFinal result: {result}")

# Validate the result: the tools return 100, so the answer should mention 200.
if result is None:
    print("FAILED: Result is None - sequential tool calling not working in non-streaming mode")
elif "200" in str(result):
    print("PASSED: Sequential tool calling working correctly in non-streaming mode")
else:
    # Model phrasing may vary; surface the raw result for manual inspection.
    print(f"UNEXPECTED: Got result '{result}' - please verify manually")
Comment on lines +24 to +36
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Add result validation to verify the fix works correctly.

The non-streaming test configuration looks good, but the test lacks validation to ensure the sequential tool calling produces the expected result. Based on the PR comments mentioning that the final response was None, this validation is crucial.

Add validation after the result to verify the fix:

 result = agent.chat("Get the stock price of Google and multiply it by 2")
 print(f"\nFinal result: {result}")
+
+# Validate the result
+if result is None:
+    print("❌ FAILED: Result is None - sequential tool calling not working in non-streaming mode")
+elif "200" in str(result):
+    print("✅ PASSED: Sequential tool calling working correctly in non-streaming mode")
+else:
+    print(f"⚠️  UNEXPECTED: Got result '{result}' - please verify manually")
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Test with streaming disabled to verify the fix
print("Testing sequential tool calling with stream=False...")
agent = Agent(
instructions="You are a helpful assistant. You can use the tools provided to you to help the user.",
llm="gemini/gemini-2.5-flash-lite-preview-06-17",
self_reflect=False,
verbose=True,
tools=[get_stock_price, multiply],
stream=False # Force non-streaming mode - use stream parameter directly
)
result = agent.chat("Get the stock price of Google and multiply it by 2")
print(f"\nFinal result: {result}")
# Test with streaming disabled to verify the fix
print("Testing sequential tool calling with stream=False...")
agent = Agent(
instructions="You are a helpful assistant. You can use the tools provided to you to help the user.",
llm="gemini/gemini-2.5-flash-lite-preview-06-17",
self_reflect=False,
verbose=True,
tools=[get_stock_price, multiply],
stream=False # Force non-streaming mode - use stream parameter directly
)
result = agent.chat("Get the stock price of Google and multiply it by 2")
print(f"\nFinal result: {result}")
# Validate the result
if result is None:
print("❌ FAILED: Result is None - sequential tool calling not working in non-streaming mode")
elif "200" in str(result):
print("✅ PASSED: Sequential tool calling working correctly in non-streaming mode")
else:
print(f"⚠️ UNEXPECTED: Got result '{result}' - please verify manually")
🤖 Prompt for AI Agents
In src/praisonai-agents/test_sequential_tool_calling.py around lines 24 to 36,
the test for sequential tool calling with stream=False lacks validation of the
result. Add an assertion or conditional check after obtaining the result to
verify it matches the expected output, ensuring the fix works correctly and the
final response is not None.


# Test with default streaming mode.
# Complements the stream=False case: verifies the streaming code path still
# completes sequential tool calls and produces a non-None final answer.
print("\n\nTesting sequential tool calling with default streaming...")
agent2 = Agent(
    instructions="You are a helpful assistant. You can use the tools provided to you to help the user.",
    llm="gemini/gemini-2.5-flash-lite-preview-06-17",
    self_reflect=False,
    verbose=True,
    tools=[get_stock_price, multiply]
)

result2 = agent2.chat("Get the stock price of Google and multiply it by 2")
print(f"\nFinal result: {result2}")

# Validate the result: the tools return 100, so the answer should mention 200.
if result2 is None:
    print("FAILED: Result is None - sequential tool calling not working in streaming mode")
elif "200" in str(result2):
    print("PASSED: Sequential tool calling working correctly in streaming mode")
else:
    # Model phrasing may vary; surface the raw result for manual inspection.
    print(f"UNEXPECTED: Got result '{result2}' - please verify manually")
Comment on lines +38 to +49
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Add result validation and consider error handling.

Similar to the non-streaming test, this streaming test needs validation to confirm the expected behavior. Additionally, consider adding basic error handling.

Add validation and basic error handling:

 result2 = agent2.chat("Get the stock price of Google and multiply it by 2")
 print(f"\nFinal result: {result2}")
+
+# Validate the result
+if result2 is None:
+    print("❌ FAILED: Result is None - sequential tool calling not working in streaming mode")
+elif "200" in str(result2):
+    print("✅ PASSED: Sequential tool calling working correctly in streaming mode")
+else:
+    print(f"⚠️  UNEXPECTED: Got result '{result2}' - please verify manually")
+
+# Basic error handling for the entire test
+if __name__ == "__main__":
+    try:
+        # Move the existing test code here
+        pass
+    except Exception as e:
+        print(f"❌ TEST FAILED with exception: {e}")

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In src/praisonai-agents/test_sequential_tool_calling.py around lines 38 to 49,
the test for sequential tool calling with default streaming lacks validation of
the result and error handling. Add assertions to verify that the result matches
the expected output to confirm correct behavior. Wrap the chat call in a
try-except block to catch and log any exceptions, ensuring the test handles
errors gracefully without crashing.

Loading