Skip to content

Commit 7dfaace

Browse files
committed
Add MCP Integration and Update Documentation
- Added `mcp` dependency to `pyproject.toml` for enhanced functionality.
- Introduced new example scripts for various MCP tools, including `fetch-mcp.py`, `git-mcp.py`, `ollama.py`, `sentry-mcp.py`, and `time-mcp.py`, to demonstrate their usage.
- Updated `airbnb.mdx` to reflect the addition of the Airbnb tool to the AI agent.
- Enhanced the `Agent` class to support MCP tools and ensure proper integration with custom LLMs.
- Improved logging for better debugging and tool management.
- Incremented version number to 0.0.68 in `pyproject.toml` for release clarity.
1 parent fdf44b2 commit 7dfaace

13 files changed

Lines changed: 284 additions & 27 deletions

File tree

docs/mcp/airbnb.mdx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ description: "Guide for integrating Airbnb booking capabilities with PraisonAI a
55
icon: "airbnb"
66
---
77

8-
# Airbnb MCP Integration
8+
## Add Airbnb Tool to AI Agent
99

1010
```mermaid
1111
flowchart LR

examples/mcp/fetch-mcp.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
from praisonaiagents import Agent, MCP
2+
import os
3+
4+
# pip install mcp-server-fetch
5+
# Use a single string command with Fetch configuration
6+
fetch_agent = Agent(
7+
instructions="""You are a helpful assistant that can fetch and process web content.
8+
Use the available tools when relevant to retrieve and convert web pages to markdown.""",
9+
llm="gpt-4o-mini",
10+
tools=MCP("python -m mcp_server_fetch")
11+
)
12+
13+
fetch_agent.start("Fetch and convert the content from https://example.com to markdown")

examples/mcp/git-mcp.py

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
from praisonaiagents import Agent, MCP
2+
import os
3+
4+
# pip install mcp-server-git
5+
# Get Git credentials from environment
6+
git_username = os.getenv("GIT_USERNAME")
7+
git_email = os.getenv("GIT_EMAIL")
8+
git_token = os.getenv("GIT_TOKEN") # For private repos
9+
10+
# Use a single string command with Git configuration
11+
git_agent = Agent(
12+
instructions="""You are a helpful assistant that can perform Git operations.
13+
Use the available tools when relevant to manage repositories, commits, and branches.""",
14+
llm="gpt-4o-mini",
15+
tools=MCP("python -m mcp_server_git",
16+
env={
17+
"GIT_USERNAME": git_username,
18+
"GIT_EMAIL": git_email,
19+
"GIT_TOKEN": git_token
20+
})
21+
)
22+
23+
git_agent.start("Clone and analyze the repository at https://github.com/modelcontextprotocol/servers")

examples/mcp/ollama.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
from praisonaiagents import Agent, MCP
2+
3+
search_agent = Agent(
4+
instructions="""You help book apartments on Airbnb.""",
5+
llm="ollama/llama3.2",
6+
tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
7+
)
8+
9+
search_agent.start("MUST USE airbnb_search Tool to Search. Search for Apartments in Paris for 2 nights. 04/28 - 04/30 for 2 adults. All Your Preference")

examples/mcp/sentry-mcp.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
from praisonaiagents import Agent, MCP
2+
import os
3+
4+
# pip install mcp-server-sentry
5+
# Get Sentry auth token from environment
6+
sentry_token = os.getenv("SENTRY_AUTH_TOKEN")
7+
8+
# Use a single string command with Sentry configuration
9+
sentry_agent = Agent(
10+
instructions="""You are a helpful assistant that can analyze Sentry error reports.
11+
Use the available tools when relevant to inspect and debug application issues.""",
12+
llm="gpt-4o-mini",
13+
tools=MCP("python -m mcp_server_sentry --auth-token", args=[sentry_token])
14+
)
15+
16+
sentry_agent.start("Analyze the most recent critical error in Sentry")

examples/mcp/time-mcp.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
from praisonaiagents import Agent, MCP
2+
import os
3+
4+
# pip install mcp-server-time
5+
# Use a single string command with Time Server configuration
6+
time_agent = Agent(
7+
instructions="""You are a helpful assistant that can handle time-related operations.
8+
Use the available tools when relevant to manage timezone conversions and time information.""",
9+
llm="gpt-4o-mini",
10+
tools=MCP("python -m mcp_server_time --local-timezone=America/New_York")
11+
)
12+
13+
time_agent.start("Get the current time in New York and convert it to UTC")

pyproject.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ dependencies = [
1616
"python-dotenv>=0.19.0",
1717
"instructor>=1.3.3",
1818
"PyYAML>=6.0",
19+
"mcp==1.6.0",
1920
]
2021

2122
[project.optional-dependencies]
@@ -106,6 +107,7 @@ praisonaiagents = ">=0.0.67"
106107
python-dotenv = ">=0.19.0"
107108
instructor = ">=1.3.3"
108109
PyYAML = ">=6.0"
110+
mcp = "==1.6.0"
109111
pyautogen = {version = ">=0.2.19", optional = true}
110112
crewai = {version = ">=0.32.0", optional = true}
111113
praisonai-tools = {version = ">=0.0.7", optional = true}

src/praisonai-agents/mcp-ollama.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
from praisonaiagents import Agent, MCP
2+
3+
search_agent = Agent(
4+
instructions="""You help book apartments on Airbnb.""",
5+
llm="ollama/llama3.2",
6+
tools=MCP("npx -y @openbnb/mcp-server-airbnb --ignore-robots-txt")
7+
)
8+
9+
search_agent.start("Search for Apartments in Paris for 2 nights. 04/28 - 04/30 for 2 adults. All Your Preference. After searching Give me summary")

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 89 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -421,6 +421,12 @@ def __init__(
421421
# Pass the entire string so LiteLLM can parse provider/model
422422
self.llm_instance = LLM(model=llm)
423423
self._using_custom_llm = True
424+
425+
# Ensure tools are properly accessible when using custom LLM
426+
if tools:
427+
logging.debug(f"Tools passed to Agent with custom LLM: {tools}")
428+
# Store the tools for later use
429+
self.tools = tools
424430
except ImportError as e:
425431
raise ImportError(
426432
"LLM features requested but dependencies not installed. "
@@ -519,9 +525,20 @@ def execute_tool(self, function_name, arguments):
519525
"""
520526
logging.debug(f"{self.name} executing tool {function_name} with arguments: {arguments}")
521527

528+
# Special handling for MCP tools
529+
# Check if tools is an MCP instance with the requested function name
530+
from ..mcp.mcp import MCP
531+
if isinstance(self.tools, MCP):
532+
logging.debug(f"Looking for MCP tool {function_name}")
533+
# Check if any of the MCP tools match the function name
534+
for mcp_tool in self.tools.runner.tools:
535+
if hasattr(mcp_tool, 'name') and mcp_tool.name == function_name:
536+
logging.debug(f"Found matching MCP tool: {function_name}")
537+
return self.tools.runner.call_tool(function_name, arguments)
538+
522539
# Try to find the function in the agent's tools list first
523540
func = None
524-
for tool in self.tools:
541+
for tool in self.tools if isinstance(self.tools, (list, tuple)) else []:
525542
if (callable(tool) and getattr(tool, '__name__', '') == function_name) or \
526543
(inspect.isclass(tool) and tool.__name__ == function_name):
527544
func = tool
@@ -643,24 +660,64 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
643660
logging.warning(f"Tool {tool} not recognized")
644661

645662
try:
646-
if stream:
647-
# Process as streaming response with formatted tools
648-
final_response = self._process_stream_response(
649-
messages,
650-
temperature,
651-
start_time,
652-
formatted_tools=formatted_tools if formatted_tools else None,
653-
reasoning_steps=reasoning_steps
654-
)
663+
# Use the custom LLM instance if available
664+
if self._using_custom_llm and hasattr(self, 'llm_instance'):
665+
if stream:
666+
# Debug logs for tool info
667+
if formatted_tools:
668+
logging.debug(f"Passing {len(formatted_tools)} formatted tools to LLM instance: {formatted_tools}")
669+
670+
# Use the LLM instance for streaming responses
671+
final_response = self.llm_instance.get_response(
672+
prompt=messages[1:], # Skip system message as LLM handles it separately
673+
system_prompt=messages[0]['content'] if messages and messages[0]['role'] == 'system' else None,
674+
temperature=temperature,
675+
tools=formatted_tools if formatted_tools else None,
676+
verbose=self.verbose,
677+
markdown=self.markdown,
678+
stream=True,
679+
console=self.console,
680+
execute_tool_fn=self.execute_tool,
681+
agent_name=self.name,
682+
agent_role=self.role,
683+
reasoning_steps=reasoning_steps
684+
)
685+
else:
686+
# Non-streaming with custom LLM
687+
final_response = self.llm_instance.get_response(
688+
prompt=messages[1:],
689+
system_prompt=messages[0]['content'] if messages and messages[0]['role'] == 'system' else None,
690+
temperature=temperature,
691+
tools=formatted_tools if formatted_tools else None,
692+
verbose=self.verbose,
693+
markdown=self.markdown,
694+
stream=False,
695+
console=self.console,
696+
execute_tool_fn=self.execute_tool,
697+
agent_name=self.name,
698+
agent_role=self.role,
699+
reasoning_steps=reasoning_steps
700+
)
655701
else:
656-
# Process as regular non-streaming response
657-
final_response = client.chat.completions.create(
658-
model=self.llm,
659-
messages=messages,
660-
temperature=temperature,
661-
tools=formatted_tools if formatted_tools else None,
662-
stream=False
663-
)
702+
# Use the standard OpenAI client approach
703+
if stream:
704+
# Process as streaming response with formatted tools
705+
final_response = self._process_stream_response(
706+
messages,
707+
temperature,
708+
start_time,
709+
formatted_tools=formatted_tools if formatted_tools else None,
710+
reasoning_steps=reasoning_steps
711+
)
712+
else:
713+
# Process as regular non-streaming response
714+
final_response = client.chat.completions.create(
715+
model=self.llm,
716+
messages=messages,
717+
temperature=temperature,
718+
tools=formatted_tools if formatted_tools else None,
719+
stream=False
720+
)
664721

665722
tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
666723

@@ -748,13 +805,26 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
748805

749806
if self._using_custom_llm:
750807
try:
808+
# Special handling for MCP tools when using provider/model format
809+
tool_param = self.tools if tools is None else tools
810+
811+
# Convert MCP tool objects to OpenAI format if needed
812+
if tool_param is not None:
813+
from ..mcp.mcp import MCP
814+
if isinstance(tool_param, MCP) and hasattr(tool_param, 'to_openai_tool'):
815+
logging.debug("Converting MCP tool to OpenAI format")
816+
openai_tool = tool_param.to_openai_tool()
817+
if openai_tool:
818+
tool_param = [openai_tool]
819+
logging.debug(f"Converted MCP tool: {tool_param}")
820+
751821
# Pass everything to LLM class
752822
response_text = self.llm_instance.get_response(
753823
prompt=prompt,
754824
system_prompt=f"{self.backstory}\n\nYour Role: {self.role}\n\nYour Goal: {self.goal}" if self.use_system_prompt else None,
755825
chat_history=self.chat_history,
756826
temperature=temperature,
757-
tools=self.tools if tools is None else tools,
827+
tools=tool_param,
758828
output_json=output_json,
759829
output_pydantic=output_pydantic,
760830
verbose=self.verbose,

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -289,15 +289,21 @@ def get_response(
289289
if tools:
290290
formatted_tools = []
291291
for tool in tools:
292-
if callable(tool):
292+
# Check if the tool is already in OpenAI format (e.g. from MCP.to_openai_tool())
293+
if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
294+
logging.debug(f"Using pre-formatted OpenAI tool: {tool['function']['name']}")
295+
formatted_tools.append(tool)
296+
elif callable(tool):
293297
tool_def = self._generate_tool_definition(tool.__name__)
298+
if tool_def:
299+
formatted_tools.append(tool_def)
294300
elif isinstance(tool, str):
295301
tool_def = self._generate_tool_definition(tool)
302+
if tool_def:
303+
formatted_tools.append(tool_def)
296304
else:
297-
continue
305+
logging.debug(f"Skipping tool of unsupported type: {type(tool)}")
298306

299-
if tool_def:
300-
formatted_tools.append(tool_def)
301307
if not formatted_tools:
302308
formatted_tools = None
303309

0 commit comments

Comments
 (0)