Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
65 changes: 54 additions & 11 deletions src/praisonai-agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -713,26 +713,69 @@ def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, to
)

return current_response

def _build_system_prompt(self, tools=None):
"""Build the system prompt with tool information.

Args:
tools: Optional list of tools to use (defaults to self.tools)

Returns:
str: The system prompt or None if use_system_prompt is False
"""
if not self.use_system_prompt:
return None

system_prompt = f"""{self.backstory}\n
Your Role: {self.role}\n
Your Goal: {self.goal}"""

# Add tool usage instructions if tools are available
# Use provided tools or fall back to self.tools
tools_to_use = tools if tools is not None else self.tools
if tools_to_use:
tool_names = []
for tool in tools_to_use:
try:
if callable(tool) and hasattr(tool, '__name__'):
tool_names.append(tool.__name__)
elif isinstance(tool, dict) and isinstance(tool.get('function'), dict) and 'name' in tool['function']:
tool_names.append(tool['function']['name'])
elif isinstance(tool, str):
tool_names.append(tool)
elif hasattr(tool, "to_openai_tool"):
# Handle MCP tools
openai_tools = tool.to_openai_tool()
if isinstance(openai_tools, list):
for t in openai_tools:
if isinstance(t, dict) and 'function' in t and 'name' in t['function']:
tool_names.append(t['function']['name'])
elif isinstance(openai_tools, dict) and 'function' in openai_tools:
tool_names.append(openai_tools['function']['name'])
except (AttributeError, KeyError, TypeError) as e:
logging.warning(f"Could not extract tool name from {tool}: {e}")
continue

if tool_names:
system_prompt += f"\n\nYou have access to the following tools: {', '.join(tool_names)}. Use these tools when appropriate to help complete your tasks. Always use tools when they can help provide accurate information or perform actions."

return system_prompt

def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None):
def _build_messages(self, prompt, temperature=0.2, output_json=None, output_pydantic=None, tools=None):
"""Build messages list for chat completion.

Args:
prompt: The user prompt (str or list)
temperature: Temperature for the chat
output_json: Optional Pydantic model for JSON output
output_pydantic: Optional Pydantic model for JSON output (alias)
tools: Optional list of tools to use (defaults to self.tools)

Returns:
tuple: (messages list, original prompt)
"""
# Build system prompt if enabled
system_prompt = None
if self.use_system_prompt:
system_prompt = f"""{self.backstory}\n
Your Role: {self.role}\n
Your Goal: {self.goal}
"""
# Build system prompt using the helper method
system_prompt = self._build_system_prompt(tools)

# Use openai_client's build_messages method if available
if self._openai_client is not None:
Expand Down Expand Up @@ -1176,7 +1219,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
# Pass everything to LLM class
response_text = self.llm_instance.get_response(
prompt=prompt,
system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
system_prompt=self._build_system_prompt(tools),
chat_history=self.chat_history,
temperature=temperature,
tools=tool_param,
Expand Down Expand Up @@ -1492,7 +1535,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
try:
response_text = await self.llm_instance.get_response_async(
prompt=prompt,
system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
system_prompt=self._build_system_prompt(tools),
chat_history=self.chat_history,
temperature=temperature,
tools=tools,
Expand All @@ -1506,7 +1549,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
console=self.console,
agent_name=self.name,
agent_role=self.role,
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in self.tools],
agent_tools=[t.__name__ if hasattr(t, '__name__') else str(t) for t in (tools if tools is not None else self.tools)],
execute_tool_fn=self.execute_tool_async,
reasoning_steps=reasoning_steps
)
Expand Down
18 changes: 17 additions & 1 deletion src/praisonai-agents/praisonaiagents/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -406,7 +406,7 @@ def _supports_streaming_tools(self) -> bool:
# missing tool calls or making duplicate calls
return False

def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None):
def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None, tools=None):
"""Build messages list for LLM completion. Works for both sync and async.

Args:
Expand All @@ -415,6 +415,7 @@ def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_
chat_history: Optional list of previous messages
output_json: Optional Pydantic model for JSON output
output_pydantic: Optional Pydantic model for JSON output (alias)
tools: Optional list of tools available

Returns:
tuple: (messages list, original prompt)
Expand Down Expand Up @@ -1858,6 +1859,21 @@ def _build_completion_params(self, **override_params) -> Dict[str, Any]:
# Override with any provided parameters
params.update(override_params)

# Add tool_choice="auto" when tools are provided (unless already specified)
if 'tools' in params and params['tools'] and 'tool_choice' not in params:
# For Gemini models, use tool_choice to encourage tool usage
# More comprehensive Gemini model detection
if any(prefix in self.model.lower() for prefix in ['gemini', 'gemini/', 'google/gemini']):
try:
import litellm
# Check if model supports function calling before setting tool_choice
if litellm.supports_function_calling(model=self.model):
params['tool_choice'] = 'auto'
except Exception as e:
# If check fails, still set tool_choice for known Gemini models
logging.debug(f"Could not verify function calling support: {e}. Setting tool_choice anyway.")
params['tool_choice'] = 'auto'

return params

def _prepare_response_logging(self, temperature: float, stream: bool, verbose: bool, markdown: bool, **kwargs) -> Optional[Dict[str, Any]]:
Expand Down
163 changes: 163 additions & 0 deletions src/praisonai/tests/unit/test_async_gemini_fix.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
"""
Test to verify that async agents with Gemini models properly use tools
after the fix for issue #818
"""
import asyncio
import logging
from praisonaiagents import Agent, Task, PraisonAIAgents

# Enable logging to see tool calls made by the agents while the workflow runs
logging.basicConfig(level=logging.INFO)

# Mock search tool
async def mock_search(query: str) -> dict:
    """Mock search tool for testing"""
    print(f"[TOOL CALLED] Searching for: {query}")
    # Two canned snippets, numbered 1..2 to match the titles/urls.
    snippets = [
        f"This is a mock result about {query}",
        f"Another mock result about {query}",
    ]
    results = [
        {
            "title": f"Result {idx} for {query}",
            "snippet": text,
            "url": f"https://example.com/{idx}",
        }
        for idx, text in enumerate(snippets, start=1)
    ]
    return {"query": query, "results": results, "status": "success"}

async def test_async_gemini_tools():
    """Test async agents with Gemini models use tools correctly"""

    # Researcher: Gemini-backed agent that should call mock_search.
    researcher = Agent(
        name="AsyncSearcher",
        role="Research Assistant",
        goal="Find information using the search tool",
        backstory="You are an expert at finding information online",
        tools=[mock_search],
        llm={"model": "gemini/gemini-1.5-flash-latest"},
        verbose=True
    )

    # Summarizer: no tools, only analyzes the researcher's output.
    summarizer = Agent(
        name="Analyzer",
        role="Data Analyst",
        goal="Analyze search results",
        backstory="You excel at analyzing and summarizing information",
        llm={"model": "gemini/gemini-1.5-flash-latest"},
        verbose=True
    )

    # Async search task feeds the synchronous analysis task via context.
    fetch_task = Task(
        name="search_task",
        description="Search for information about 'quantum computing breakthroughs 2024'",
        expected_output="Search results with at least 2 relevant findings",
        agent=researcher,
        async_execution=True
    )

    summary_task = Task(
        name="analysis_task",
        description="Analyze the search results and provide a summary",
        expected_output="A concise summary of the findings",
        agent=summarizer,
        context=[fetch_task],
        async_execution=False
    )

    pipeline = PraisonAIAgents(
        agents=[researcher, summarizer],
        tasks=[fetch_task, summary_task],
        verbose=True
    )

    print("\n🚀 Starting async agent test with Gemini models...")
    outcome = await pipeline.astart()

    print("\n✅ Test Results:")
    print("-" * 50)

    # Tool usage leaves either the mock snippet or the tool-call banner
    # somewhere in the aggregated output string.
    lowered = str(outcome).lower()
    if any(marker in lowered for marker in ("mock result", "tool called")):
        print("✅ SUCCESS: Search agent properly used the mock_search tool!")
    else:
        print("❌ FAILURE: Search agent did NOT use the tool (claimed no internet access)")

    print("\nFinal output:")
    print(outcome)

    return outcome

async def test_multiple_async_agents():
    """Test multiple async agents running in parallel.

    Spawns three identical Gemini-backed research agents, one async task
    each, runs them through a single PraisonAIAgents workflow, and reports
    whether the mock search tool was used.

    Returns:
        The aggregated results from workflow.astart().
    """
    agents = []
    tasks = []

    # Create 3 search agents, each paired with its own async search task.
    for i in range(3):
        agent = Agent(
            name=f"AsyncAgent{i}",
            role="Researcher",
            goal="Search for information",
            backstory="Expert researcher",
            tools=[mock_search],
            llm={"model": "gemini/gemini-1.5-flash-latest"}
        )

        task = Task(
            name=f"task_{i}",
            description=f"Search for 'AI advancement #{i+1}'",
            expected_output="Search results",
            agent=agent,
            async_execution=True
        )

        agents.append(agent)
        tasks.append(task)

    # Execute all tasks in parallel under one workflow.
    workflow = PraisonAIAgents(agents=agents, tasks=tasks)

    print("\n🚀 Testing multiple async agents in parallel...")
    results = await workflow.astart()

    # FIX: the previous loop re-evaluated the same aggregate condition once
    # per task (and never used its loop variables), which made the count look
    # per-agent when it was really all-or-nothing. Evaluate it once. The
    # aggregated results string cannot attribute tool calls to individual
    # tasks, so the count remains all-or-nothing by design.
    tools_used = "mock result" in str(results).lower()
    success_count = len(tasks) if tools_used else 0

    print(f"\n✅ {success_count}/{len(tasks)} agents successfully used tools")

    return results

async def main():
    """Run all async tests"""
    scenarios = (
        test_async_gemini_tools,     # Test 1: single async agent with tools
        test_multiple_async_agents,  # Test 2: multiple async agents in parallel
    )
    try:
        for scenario in scenarios:
            await scenario()

        print("\n🎉 All async tests completed!")

    except Exception as e:
        print(f"\n❌ Error during testing: {e}")
        raise

# Entry point: allows running this file directly as a manual integration check.
if __name__ == "__main__":
    asyncio.run(main())
64 changes: 64 additions & 0 deletions src/praisonai/tests/unit/test_gemini_tool_choice.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
"""
Simple test to verify that tool_choice='auto' is set for Gemini models
"""
import logging
from praisonaiagents.llm.llm import LLM

# Enable debug logging to see our log message
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')

# Test different Gemini model formats (plus one non-Gemini control).
test_models = [
    "gemini/gemini-1.5-flash-8b",
    "gemini-1.5-flash-8b",
    "gemini/gemini-pro",
    "gpt-4",  # Non-Gemini model for comparison
]

# Minimal OpenAI-style tool schema so _build_completion_params sees tools.
mock_tools = [
    {
        "type": "function",
        "function": {
            "name": "search",
            "description": "Search for information",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string"}
                }
            }
        }
    }
]

print("Testing tool_choice setting for different models:\n")

for model in test_models:
    print(f"\nTesting model: {model}")
    try:
        llm = LLM(model=model)
        params = llm._build_completion_params(
            messages=[{"role": "user", "content": "test"}],
            tools=mock_tools
        )

        tool_choice = params.get('tool_choice', 'NOT SET')
        print(f"  tool_choice: {tool_choice}")

        # FIX: mirror the detection in LLM._build_completion_params, which
        # substring-matches 'gemini' against the lowercased model id. The old
        # startswith(('gemini-', 'gemini/')) check disagreed with the
        # implementation for ids such as 'google/gemini-pro'.
        if 'gemini' in model.lower():
            if tool_choice == 'auto':
                print("  ✅ CORRECT: Gemini model has tool_choice='auto'")
            else:
                print("  ❌ ERROR: Gemini model should have tool_choice='auto'")
        else:
            if tool_choice == 'NOT SET':
                print("  ✅ CORRECT: Non-Gemini model doesn't have tool_choice set")
            else:
                print(f"  ⚠️ WARNING: Non-Gemini model has tool_choice set to '{tool_choice}'")

    except Exception as e:
        print(f"  ❌ ERROR: {e}")

print("\nTest complete!")
Loading
Loading