Skip to content

Commit 37bc21d

Browse files
committed
Add sampling support to chatbot example
1 parent af81c53 commit 37bc21d

File tree

4 files changed

+20
-11
lines changed

4 files changed

+20
-11
lines changed

examples/openai_chat_agent/app.py

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,11 +13,22 @@
1313
from dotenv import load_dotenv
1414
from langchain_ollama import ChatOllama
1515
from langchain_openai import ChatOpenAI
16-
from mcp_use import MCPAgent, MCPClient
16+
from mcp.types import CreateMessageRequestParams, CreateMessageResult, ErrorData, TextContent
17+
from mcp_use import MCPAgent, MCPClient, load_config_file
1718

1819
SYSTEM_MESSAGE = "You are a helpful assistant that talks to the user and uses tools via MCP."
1920

2021

22+
async def sampling_callback(
    context: ClientSession, params: CreateMessageRequestParams
) -> CreateMessageResult | ErrorData:
    """Answer MCP sampling requests with a fixed, canned completion.

    Both *context* and *params* are ignored; every request is resolved with
    the literal JSON list '["Cape Town"]' attributed to "gpt-4o-mini", so the
    server's sampling round-trip completes without contacting a real model.

    NOTE(review): ``ClientSession`` is not among the imports visible in this
    diff hunk — confirm it is in scope (e.g. ``from mcp import ClientSession``),
    otherwise evaluating this signature raises ``NameError``.
    """
    canned_reply = TextContent(text='["Cape Town"]', type="text")
    return CreateMessageResult(
        content=canned_reply,
        model="gpt-4o-mini",
        role="assistant",
    )
30+
31+
2132
async def ensure_ollama_running(model: str) -> None:
2233
"""Check that an Ollama server is running."""
2334
try:
@@ -41,7 +52,7 @@ async def run_memory_chat() -> None:
4152
config_file = os.path.join(os.path.dirname(__file__), "config.json")
4253

4354
print("Initializing chat...")
44-
client = MCPClient.from_config_file(config_file)
55+
client = MCPClient(load_config_file(config_file), sampling_callback=sampling_callback)
4556

4657
openai_key = os.getenv("OPENAI_API_KEY")
4758
ollama_model = os.getenv("OLLAMA_MODEL", "llama3.2")
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
{
22
"mcpServers": {
3-
"shop_api": {
3+
"travel_agent": {
44
"command": "python",
5-
"args": ["../shop_api/app.py"]
5+
"args": ["../server_side_llm_travel_planner/app.py"]
66
}
77
}
88
}

examples/openai_chat_agent/requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,3 +4,4 @@ langchain_ollama
44
langchain_community
55
mcp_use
66
python-dotenv
7+
httpx

examples/server_side_llm_travel_planner/app.py

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
from pydantic import Field
77

8-
from enrichmcp import EnrichContext, EnrichMCP, EnrichModel, prefer_fast_model
8+
from enrichmcp import EnrichMCP, EnrichModel, prefer_fast_model
99

1010
app = EnrichMCP(
1111
title="Travel Planner",
@@ -60,7 +60,6 @@ def list_destinations() -> list[Destination]:
6060
@app.retrieve
6161
async def plan_trip(
6262
preferences: Annotated[str, Field(description="Your travel preferences")],
63-
ctx: EnrichContext,
6463
) -> list[Destination]:
6564
"""Return three destinations that best match the given preferences."""
6665

@@ -70,15 +69,13 @@ async def plan_trip(
7069
"given preferences. Reply with a JSON list of names only.\nPreferences: "
7170
f"{preferences}\n\n{bullet_list}"
7271
)
73-
result = await ctx.sampling(
72+
app.get_context()
73+
result = await app.get_context().ask_llm(
7474
prompt,
7575
model_preferences=prefer_fast_model(),
7676
max_tokens=50,
7777
)
78-
try:
79-
names = json.loads(result.content.text)
80-
except Exception:
81-
return []
78+
names = json.loads(result.content.text)
8279
return [d for d in DESTINATIONS if d.name in names]
8380

8481

0 commit comments

Comments
 (0)