Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 18 additions & 2 deletions src/praisonai-agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -731,8 +731,24 @@ def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pyda
if self.use_system_prompt:
system_prompt = f"""{self.backstory}\n
Your Role: {self.role}\n
Your Goal: {self.goal}
"""
Your Goal: {self.goal}"""

# Add tool usage instructions if tools are available
if self.tools:
tool_names = []
for tool in self.tools:
try:
if callable(tool) and hasattr(tool, '__name__'):
tool_names.append(tool.__name__)
elif isinstance(tool, dict) and 'function' in tool and 'name' in tool['function']:
tool_names.append(tool['function']['name'])
elif isinstance(tool, str):
tool_names.append(tool)
except Exception as e:
logging.debug(f"Could not extract tool name from {tool}: {e}")

if tool_names:
system_prompt += f"\n\nYou have access to the following tools: {', '.join(tool_names)}. Use these tools when appropriate to help complete your tasks. Always use tools when they can help provide accurate information or perform actions."

# Use openai_client's build_messages method if available
if self._openai_client is not None:
Expand Down
10 changes: 9 additions & 1 deletion src/praisonai-agents/praisonaiagents/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -406,7 +406,7 @@ def _supports_streaming_tools(self) -> bool:
# missing tool calls or making duplicate calls
return False

def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None):
def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None, tools=None):
"""Build messages list for LLM completion. Works for both sync and async.

Args:
Expand All @@ -415,6 +415,7 @@ def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_
chat_history: Optional list of previous messages
output_json: Optional Pydantic model for JSON output
output_pydantic: Optional Pydantic model for JSON output (alias)
tools: Optional list of tools available

Returns:
tuple: (messages list, original prompt)
Expand Down Expand Up @@ -1858,6 +1859,13 @@ def _build_completion_params(self, **override_params) -> Dict[str, Any]:
# Override with any provided parameters
params.update(override_params)

# Add tool_choice="auto" when tools are provided (unless already specified)
if 'tools' in params and params['tools'] and 'tool_choice' not in params:
# For Gemini models, use tool_choice to encourage tool usage
if self.model.startswith(('gemini-', 'gemini/')):
params['tool_choice'] = 'auto'
logging.debug(f"Setting tool_choice='auto' for Gemini model '{self.model}' with {len(params['tools'])} tools")

return params

def _prepare_response_logging(self, temperature: float, stream: bool, verbose: bool, markdown: bool, **kwargs) -> Optional[Dict[str, Any]]:
Expand Down
64 changes: 64 additions & 0 deletions test_gemini_tool_choice.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
"""
Simple test to verify that tool_choice='auto' is set for Gemini models
"""
import logging
from praisonaiagents.llm.llm import LLM

# Enable debug logging to see our log message
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')

# Test different Gemini model formats
test_models = [
"gemini/gemini-1.5-flash-8b",
"gemini-1.5-flash-8b",
"gemini/gemini-pro",
"gpt-4", # Non-Gemini model for comparison
]

# Mock tools
mock_tools = [
{
"type": "function",
"function": {
"name": "search",
"description": "Search for information",
"parameters": {
"type": "object",
"properties": {
"query": {"type": "string"}
}
}
}
}
]

print("Testing tool_choice setting for different models:\n")

for model in test_models:
print(f"\nTesting model: {model}")
try:
llm = LLM(model=model)
params = llm._build_completion_params(
messages=[{"role": "user", "content": "test"}],
tools=mock_tools
)

tool_choice = params.get('tool_choice', 'NOT SET')
print(f" tool_choice: {tool_choice}")

# Verify behavior
if model.startswith(('gemini-', 'gemini/')):
if tool_choice == 'auto':
print(f" ✅ CORRECT: Gemini model has tool_choice='auto'")
else:
print(f" ❌ ERROR: Gemini model should have tool_choice='auto'")
else:
if tool_choice == 'NOT SET':
print(f" ✅ CORRECT: Non-Gemini model doesn't have tool_choice set")
else:
print(f" ⚠️ WARNING: Non-Gemini model has tool_choice set to '{tool_choice}'")

except Exception as e:
print(f" ❌ ERROR: {e}")

print("\nTest complete!")
115 changes: 115 additions & 0 deletions test_tool_fix_documentation.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
# Tool Call Fix Documentation

## Issue
Agents using Gemini models (`gemini/gemini-1.5-flash-8b`) were not calling provided tools, instead responding with "I do not have access to the internet" when tasked with searching.

## Root Cause
The Gemini model through LiteLLM was not being properly instructed to use the available tools. The system prompt didn't mention the tools, and the tool_choice parameter wasn't being set.

## Fix Applied

### 1. Enhanced System Prompt (agent.py)
When tools are available, the agent's system prompt now explicitly mentions them:

```python
# In _build_messages method
if self.tools:
tool_names = []
for tool in self.tools:
Comment on lines +26 to +29
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue

Fix inconsistent variable usage in tool iteration.

The code sets tools_to_use but then iterates over self.tools instead, which defeats the purpose of the parameter logic.

-    # Use provided tools or fall back to self.tools
-    tools_to_use = tools if tools is not None else self.tools
-    if tools_to_use:
-    tool_names = []
-    for tool in self.tools:
+    # Use provided tools or fall back to self.tools
+    tools_to_use = tools if tools is not None else self.tools
+    if tools_to_use:
+        tool_names = []
+        for tool in tools_to_use:
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
tools_to_use = tools if tools is not None else self.tools
if tools_to_use:
tool_names = []
for tool in self.tools:
# Use provided tools or fall back to self.tools
tools_to_use = tools if tools is not None else self.tools
if tools_to_use:
tool_names = []
for tool in tools_to_use:
🤖 Prompt for AI Agents
In test_tool_fix_documentation.md around lines 26 to 29, the code assigns
tools_to_use based on the tools parameter but then incorrectly iterates over
self.tools instead of tools_to_use. To fix this, change the iteration to loop
over tools_to_use so that the correct set of tools is used according to the
parameter logic.

if callable(tool) and hasattr(tool, '__name__'):
tool_names.append(tool.__name__)
elif isinstance(tool, dict) and 'function' in tool and 'name' in tool['function']:
tool_names.append(tool['function']['name'])
elif isinstance(tool, str):
tool_names.append(tool)

if tool_names:
system_prompt += f"\n\nYou have access to the following tools: {', '.join(tool_names)}. Use these tools when appropriate to help complete your tasks. Always use tools when they can help provide accurate information or perform actions."
```

### 2. Tool Choice Parameter (llm.py)
For Gemini models, we now set `tool_choice='auto'` to encourage tool usage:

```python
# In _build_completion_params method
if 'tools' in params and params['tools'] and 'tool_choice' not in params:
# For Gemini models, use tool_choice to encourage tool usage
if self.model.startswith(('gemini-', 'gemini/')):
params['tool_choice'] = 'auto'
```

## Testing the Fix

To test the fix, use the following code:

```python
import asyncio
from praisonaiagents import Agent, Task, PraisonAIAgents

# Define a simple tool
async def search_tool(query: str) -> str:
"""Search for information on the internet"""
return f"Search results for: {query}"

# Create agent with Gemini model
agent = Agent(
name="SearchAgent",
role="Information Researcher",
goal="Find accurate information using search tools",
backstory="Expert at finding and analyzing information",
tools=[search_tool],
llm={"model": "gemini/gemini-1.5-flash-8b"}
)

# Create task
task = Task(
description="Search for information about AI breakthroughs in 2024",
expected_output="Summary of AI breakthroughs",
agent=agent
)

# Run
async def test():
agents = PraisonAIAgents(agents=[agent], tasks=[task])
result = await agents.astart()
print(result)

asyncio.run(test())
```

## Backward Compatibility
- The fix only adds to existing functionality without modifying core behavior
- Tools continue to work exactly as before for all other models
- The system prompt enhancement only occurs when tools are present
- The tool_choice parameter is only added for Gemini models

## Additional Recommendations

If issues persist with specific models:

1. **Explicit Tool Instructions in Task Description**:
```python
task = Task(
description="Use the tavily_search tool to find information about AI breakthroughs",
# ... rest of task config
)
```

2. **Use OpenAI Models for Tool-Heavy Tasks**:
OpenAI models (gpt-4, gpt-4o) have better native tool calling support.

3. **Debug Tool Registration**:
Enable debug logging to see tool registration:
```python
import logging
logging.basicConfig(level=logging.DEBUG)
```

## Verification Steps

1. Check that tools are properly formatted by the agent
2. Verify the system prompt includes tool instructions
3. Confirm tool_choice is set for Gemini models
4. Monitor LLM responses for tool_calls in the response

The fix ensures that Gemini models are properly instructed to use available tools, resolving the issue where agents would claim they don't have internet access despite having search tools available.
89 changes: 89 additions & 0 deletions test_tool_fix_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
"""
Test example demonstrating the tool call fix for Gemini models.

This example shows how agents now properly use tools instead of saying
"I do not have access to the internet".
"""
import logging
from praisonaiagents import Agent, Task, PraisonAIAgents

# Enable debug logging to see tool processing
logging.basicConfig(level=logging.DEBUG)

# Define a simple search tool (synchronous version)
def mock_search(query: str) -> str:
    """Search for information on the internet.

    Args:
        query: The search query string

    Returns:
        Mock search results for the query
    """
    # Canned response template; the query is substituted in two places.
    template = (
        "Mock search results for '{q}': Found 10 relevant articles about {q}. "
        "Top result: Latest developments and breakthroughs in this field..."
    )
    return template.format(q=query)

# Create agent with Gemini model and the search tool.
# NOTE(review): the goal text names mock_search explicitly to nudge the
# model toward calling the tool — presumably intentional; confirm.
search_agent = Agent(
    name="SearchAgent",
    role="Information Researcher",
    goal="Find accurate information using the mock_search tool",
    backstory="Expert researcher skilled at finding and analyzing information from various sources",
    tools=[mock_search],
    llm={"model": "gemini/gemini-1.5-flash-8b"},
    verbose=True
)

# Create a task that should trigger tool usage
search_task = Task(
    name="search_ai_breakthroughs",
    description="Search for information about latest AI breakthroughs in 2024",
    expected_output="A comprehensive summary of AI breakthroughs found through search",
    agent=search_agent
)

def test_tool_usage():
    """Test that the agent uses tools instead of saying it has no internet access."""
    divider = "=" * 60

    print(divider)
    print("Testing Tool Usage with Gemini Model")
    print(divider)

    # Wire the single agent and its task into a workflow.
    pipeline = PraisonAIAgents(
        agents=[search_agent],
        tasks=[search_task],
        verbose=True
    )

    # Execute the workflow synchronously.
    print("\nExecuting task...")
    outcome = pipeline.start()

    print("\n" + divider)
    print("RESULT:")
    print(divider)

    has_task_results = isinstance(outcome, dict) and 'task_results' in outcome
    if not has_task_results:
        print(f"Result: {outcome}")
    else:
        first_result = outcome['task_results'][0]
        print(f"Task Output: {first_result}")

        # Classify the output: tool refusal, tool success, or ambiguous.
        lowered = str(first_result).lower()
        if "do not have access" in lowered:
            print("\n❌ FAILED: Agent still claims no internet access")
        elif "mock search results" in lowered:
            print("\n✅ SUCCESS: Agent used the search tool!")
        else:
            print("\n⚠️ UNCLEAR: Check if agent used the tool properly")

    print("\n" + divider)
    print("Test Complete")
    print(divider)

if __name__ == "__main__":
# Run the test
test_tool_usage()

print("\n\nNOTE: With the fix applied, the agent should use the mock_search tool")
print("instead of saying 'I do not have access to the internet'.")
Loading