-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagent.py
More file actions
446 lines (362 loc) · 17.5 KB
/
agent.py
File metadata and controls
446 lines (362 loc) · 17.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
# =============================================================================
# agent.py — The AI Agent that connects Claude to the MCP Weather Server
# =============================================================================
#
# WHAT IS THIS FILE?
# This is the "brain" of the system. It:
# 1. Launches weather_server.py as a background process
# 2. Asks it "what tools do you have?"
# 3. Passes those tools to Claude
# 4. Runs a conversation loop in the terminal
# 5. When Claude wants to use a tool, this file runs it and feeds results back
#
# THE AGENTIC LOOP (most important concept in this file):
#
# You type a message
# ↓
# Agent sends it to Claude (with the list of available tools)
# ↓
# Claude responds with EITHER:
# (a) A plain text answer → print it, done
# (b) A tool_use request → run the tool, send result back to Claude
# ↓
# Claude reads the result and gives a final answer
# ↓
# Back to the top — waiting for your next message
#
# This loop is the foundation of every AI agent, no matter how complex.
#
# =============================================================================
# --- IMPORTS -----------------------------------------------------------------
import asyncio
import os
import sys
from dotenv import load_dotenv # Reads API keys from a .env file
import anthropic # The official Anthropic Python SDK
from anthropic import Anthropic # The main client class
# These MCP imports let us launch and talk to an MCP server
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
# --- CONFIGURATION -----------------------------------------------------------
# load_dotenv() reads a file called ".env" in the same folder and loads its
# contents as environment variables. Your .env file should look like this:
#
# ANTHROPIC_API_KEY=sk-ant-...
#
# This keeps your secret key out of the source code.
load_dotenv()
# os.environ.get() reads an environment variable.
# If ANTHROPIC_API_KEY isn't set, it returns None (not a crash) —
# main() checks for None before doing any work.
ANTHROPIC_API_KEY = os.environ.get("ANTHROPIC_API_KEY")
# The Claude model we'll use. claude-haiku is a small, fast, low-cost model —
# plenty for routing weather questions to tools and formatting the results.
MODEL = "claude-haiku-4-5-20251001"
# The system prompt is a hidden message sent to Claude at the start of every
# conversation. It sets Claude's personality and tells it what it can do.
# It also pins an exact plain-text output template (no Markdown) so the
# terminal output stays aligned.
SYSTEM_PROMPT = """You are a helpful weather assistant with access to real-time
weather data. You can look up current conditions and multi-day forecasts for
any city in the world.
When a user asks about weather, always use your tools to fetch live data rather
than guessing. Be conversational and helpful — mention things like whether to
bring an umbrella or wear a coat when relevant.
Always use °F unless user asks for it in °C
Always use weather emojis liberally throughout your responses to make them
visually engaging. Use emojis like:
- ☀️ for clear/sunny conditions
- 🌤️ for mainly clear or partly cloudy
- ⛅ for partly cloudy
- 🌥️ for mostly cloudy
- ☁️ for overcast
- 🌫️ for fog
- 🌦️ for light rain or showers
- 🌧️ for moderate/heavy rain
- ⛈️ for thunderstorms
- 🌨️ for snow
- ❄️ for heavy snow or freezing conditions
- 💨 for windy conditions
- 🥶 for very cold temperatures (below 32°F)
- 🥵 for very hot temperatures (above 80°F)
- 🌡️ for temperature readings
- 💧 for humidity or precipitation
- 🌬️ for wind
Use them in headers, bullet points, and inline with text — not just once, but
wherever they naturally fit to bring the forecast to life.
Do NOT use Markdown formatting. No **bold**, no *italic*, no # headers, no
backticks. Plain text and emojis only.
Always format your response using this exact template:
╭─────────────────────────────────╮
│ {City Name} {temp emoji} │
│ {weather emoji} {Conditions} │
╰─────────────────────────────────╯
🌡️ Temp {temp}°F (feels {feels}°F)
💧 Humidity {humidity}%
🌬️ Wind {speed} mph {direction}
🌧️ Precip {precip}
📅 {N}-Day Forecast
{Day Date} {emoji} {min} → {max}°F 💧 {precip}mm
{Day Date} {emoji} {min} → {max}°F 💧 {precip}mm
...
Then add a one or two sentence plain-text tip about what to wear or expect.
Keep the box width consistent at 35 characters. Align the value columns."""
# --- THE WEATHER AGENT CLASS -------------------------------------------------
# A "class" in Python is a blueprint for an object. It bundles related data
# (attributes) and functions (methods) together. Our WeatherAgent class holds
# everything the agent needs: the Claude client, MCP session, conversation
# history, and the methods to run the loop.
class WeatherAgent:
    """
    A conversational weather agent that uses Claude + MCP tools.

    Each question is treated as stateless — history is reset on every message.
    Weather questions are self-contained, so there's no need to accumulate
    history across turns (and it keeps API costs near zero).

    Attributes:
        client: The Anthropic API client (used to call Claude).
        mcp_session: The active connection to the MCP weather server.
        tools: The list of tools discovered from the MCP server.
        history: Reused within a single chat() call (tool loop), then discarded.
    """

    def __init__(self):
        """
        Constructor — sets up empty/default values only. The real setup
        happens in connect(), because it involves async operations.
        """
        self.client = Anthropic(api_key=ANTHROPIC_API_KEY)
        self.mcp_session = None  # Set when we connect to the MCP server
        self.tools = []          # Filled with tools from the MCP server
        self.history = []        # Reset on every new question — see chat()
        # BUGFIX: initialize the async context-manager handles up front.
        # main()'s `finally` block calls disconnect() even when connect()
        # raised before assigning these (or was never called at all);
        # previously that produced an AttributeError instead of a clean
        # shutdown. With None defaults, disconnect() is always safe.
        self._streams_context = None
        self._session_context = None

    async def connect(self, server_script_path: str):
        """
        Launches the MCP weather server and connects to it.

        Uses StdioServerParameters to launch the server script as a
        subprocess and communicate with it via stdin/stdout — just like two
        people passing notes through a slot in a wall.

        Args:
            server_script_path: The file path to weather_server.py
        """
        print("🌤 Connecting to weather server...")
        # StdioServerParameters tells MCP how to launch the server:
        # effectively `python weather_server.py`.
        server_params = StdioServerParameters(
            command="python",
            args=[server_script_path],
        )
        # stdio_client launches the server process and opens communication
        # streams. The context managers are stored on self (not locals) so
        # the connection stays alive for the lifetime of the agent; locals
        # would be garbage collected and the connection would close.
        self._streams_context = stdio_client(server_params)
        streams = await self._streams_context.__aenter__()
        # ClientSession is the MCP "handshake" — it initialises the
        # connection and confirms both sides speak the same protocol version.
        self._session_context = ClientSession(*streams)
        self.mcp_session = await self._session_context.__aenter__()
        await self.mcp_session.initialize()
        # Ask the MCP server: "what tools do you have?"
        # response.tools is a list of Tool objects (name, description,
        # inputSchema).
        response = await self.mcp_session.list_tools()
        raw_tools = response.tools
        # Claude expects tools in a specific format. Convert from MCP's
        # format to Anthropic's here — similar but not identical
        # (Anthropic uses snake_case "input_schema").
        self.tools = [
            {
                "name": tool.name,
                "description": tool.description,
                "input_schema": tool.inputSchema,
            }
            for tool in raw_tools
        ]
        tool_names = [t["name"] for t in self.tools]
        print(f"✅ Connected! Available tools: {', '.join(tool_names)}\n")

    async def run_tool(self, tool_name: str, tool_input: dict) -> str:
        """
        Executes a single tool call on the MCP server and returns the result.

        When Claude says "I want to call get_current_weather with
        city=London", this method actually does that and returns what the
        server sends back.

        Args:
            tool_name: e.g. "get_current_weather"
            tool_input: e.g. {"city": "London"}

        Returns:
            The tool result as a plain string.
        """
        print(f" 🔧 Calling tool: {tool_name}({tool_input})")
        result = await self.mcp_session.call_tool(tool_name, tool_input)
        # result.content is a list of content blocks. Join all text blocks
        # into one string. (Our server always returns one TextContent, but
        # handling multiple is good practice.)
        text_parts = [
            block.text
            for block in result.content
            if hasattr(block, "text")  # Skip non-text content blocks
        ]
        return "\n".join(text_parts)

    async def chat(self, user_message: str) -> str:
        """
        The core agentic loop. Sends a message to Claude, handles any tool
        calls it requests, and returns the final text response.

        This is the most important method in the file.

        Args:
            user_message: What the user typed.

        Returns:
            Claude's final response as a string.
        """
        # Step 1: Reset history and start fresh for this question.
        # Weather questions are self-contained — there's no value in sending
        # previous exchanges to Claude. The only history that matters is the
        # within-turn tool loop (user → tool call → tool result → answer),
        # built below and discarded when this method returns.
        self.history = []
        self.history.append({
            "role": "user",
            "content": user_message
        })
        # Step 2: THE AGENTIC LOOP.
        # We loop because Claude might call multiple tools in sequence, e.g.
        # "Compare weather in London and Tokyo" → tool call 1 → tool call 2
        # → answer. Keep looping until Claude returns a plain text response
        # (stop_reason == "end_turn").
        while True:
            # Step 3: Call Claude with the in-turn history + tools.
            # NOTE(review): this SDK call is synchronous and blocks the event
            # loop while Claude responds — acceptable for a single-user CLI.
            response = self.client.messages.create(
                model=MODEL,
                max_tokens=4096,
                system=SYSTEM_PROMPT,
                tools=self.tools,       # Tools discovered from the MCP server
                messages=self.history
            )
            # Step 4: What did Claude decide to do?
            # stop_reason tells us WHY Claude stopped generating:
            #   "end_turn" → done; response is a final text answer
            #   "tool_use" → Claude wants to call one or more tools
            # response.content is a list of blocks: TextBlock (words) or
            # ToolUseBlock (a tool call request: name + input arguments).
            if response.stop_reason == "end_turn":
                # Claude is done. Extract and return the text.
                final_text = ""
                for block in response.content:
                    if hasattr(block, "text"):
                        final_text += block.text
                # Record the assistant turn to keep the in-turn transcript
                # well-formed (history itself is discarded on the next call).
                self.history.append({
                    "role": "assistant",
                    "content": response.content
                })
                return final_text
            elif response.stop_reason == "tool_use":
                # Claude wants tools — possibly several in one response
                # (e.g. compare two cities at once).
                # First, append Claude's response (containing the tool_use
                # blocks) to history. The Anthropic API requires this before
                # the matching tool_result message.
                self.history.append({
                    "role": "assistant",
                    "content": response.content
                })
                # Execute each requested tool call.
                tool_results = []
                for block in response.content:
                    if block.type == "tool_use":
                        tool_result = await self.run_tool(block.name, block.input)
                        # Format the result the way Anthropic's API expects.
                        tool_results.append({
                            "type": "tool_result",
                            "tool_use_id": block.id,  # Links result to request
                            "content": tool_result
                        })
                # Feed all tool results back as a "user" message. That might
                # seem odd, but it's how the Anthropic API works: tool
                # results come in on the user role so Claude can read them.
                self.history.append({
                    "role": "user",
                    "content": tool_results
                })
                # Loop again — Claude reads the results and either answers
                # or makes more tool calls.
                continue
            else:
                # Unexpected stop reason (e.g. "max_tokens") — surface it
                # rather than crashing.
                return f"Unexpected stop reason: {response.stop_reason}"

    async def run(self):
        """
        The terminal interaction loop. Prints a welcome message, then
        repeatedly reads user input, sends it to chat(), and prints the
        response.
        """
        print("=" * 55)
        print(" 🌦 Weather Agent — powered by Claude + Open-Meteo")
        print("=" * 55)
        print("Ask me anything about the weather!")
        print("Type 'quit' or 'exit' to stop.\n")
        while True:
            try:
                user_input = input("You: ").strip()
            except (EOFError, KeyboardInterrupt):
                # EOFError: input stream closed (e.g. piped input ended).
                # KeyboardInterrupt: user pressed Ctrl+C.
                print("\n👋 Goodbye!")
                break
            # Skip empty input (user just pressed Enter).
            if not user_input:
                continue
            # Allow the user to quit gracefully.
            if user_input.lower() in ("quit", "exit", "q"):
                print("👋 Goodbye!")
                break
            # Send to Claude and print the response.
            # flush=True forces the prefix to appear before the await.
            print("\nAgent: ", end="", flush=True)
            try:
                response = await self.chat(user_input)
                print(response)
            except Exception as e:
                # Keep the REPL alive on API/tool errors; show the message.
                print(f"[Error: {e}]")
            print()  # Blank line for readability

    async def disconnect(self):
        """
        Cleanly shuts down the MCP server connection.

        Safe to call at any time — including when connect() was never run or
        failed partway — because __init__ defaults both handles to None.
        """
        # Exit in reverse order of entry: session first, then streams.
        if self._session_context:
            await self._session_context.__aexit__(None, None, None)
            self._session_context = None
        if self._streams_context:
            await self._streams_context.__aexit__(None, None, None)
            self._streams_context = None
        print("🔌 Disconnected from weather server.")
# --- ENTRY POINT -------------------------------------------------------------
async def main():
    """
    The top-level async function. Sets up the agent, runs it, and cleans up.

    Exits with status 1 when the API key or the server script is missing.
    """
    # Validate that the API key is set before doing anything else.
    if not ANTHROPIC_API_KEY:
        print("❌ Error: ANTHROPIC_API_KEY not set.")
        print(" Create a .env file with: ANTHROPIC_API_KEY=sk-ant-...")
        sys.exit(1)  # Exit with error code 1
    # Figure out where weather_server.py lives:
    # __file__ is the path of this script (agent.py); dirname gets its
    # folder; join builds the sibling path to weather_server.py.
    # (Removed a redundant function-local `import os` — os is already
    # imported at module level.)
    server_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "weather_server.py")
    if not os.path.exists(server_path):
        print(f"❌ Error: weather_server.py not found at {server_path}")
        sys.exit(1)
    agent = WeatherAgent()
    try:
        # Connect to the MCP server, then start the conversation loop.
        await agent.connect(server_path)
        await agent.run()
    finally:
        # 'finally' always runs — even on exceptions — guaranteeing the
        # MCP connection is cleaned up.
        await agent.disconnect()
# Run the agent only when this file is executed directly (not when imported).
# asyncio.run() creates the event loop, runs main() to completion, closes it.
if __name__ == "__main__":
    asyncio.run(main())