forked from microsoft/agent-governance-toolkit
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: chat.py
More file actions
141 lines (108 loc) · 3.72 KB
/
chat.py
File metadata and controls
141 lines (108 loc) · 3.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Chat Agent - Interactive Conversational Agent with Memory
Run with: python chat.py
"""
import asyncio
import os
from typing import List, Dict, Optional
from agent_os import KernelSpace, AgentSignal
from memory import EpisodicMemory
# Kernel space that enforces runtime policies on agent calls.
# Prefer a local policies.yaml; otherwise fall back to the built-in
# "strict" policy profile.
_policy_source = "policies.yaml" if os.path.exists("policies.yaml") else "strict"
kernel = KernelSpace(policy_file=_policy_source)

# Episodic conversation memory: retains up to 50 turns per conversation
# and triggers summarization once 20 turns have accumulated.
memory = EpisodicMemory(max_turns=50, summarize_after=20)
@kernel.register
async def chat_agent(user_message: str, conversation_id: str = "default") -> str:
    """
    Process a chat message and generate a response.

    Prior turns for this conversation are replayed to the LLM so the
    agent keeps context; the new turn is persisted to memory afterwards.

    Args:
        user_message: The user's input message.
        conversation_id: Unique ID for this conversation.

    Returns:
        The agent's response text (always a str, never None).
    """
    # Get conversation history
    history = memory.get_history(conversation_id)

    # Build messages for LLM
    messages: List[Dict[str, str]] = [
        {
            "role": "system",
            "content": """You are a helpful, friendly assistant.
Be concise but thorough. If you don't know something, say so.
Never provide harmful, illegal, or unethical information."""
        }
    ]

    # Replay prior turns so the model sees the full conversation.
    for turn in history:
        messages.append({"role": "user", "content": turn["user"]})
        messages.append({"role": "assistant", "content": turn["assistant"]})

    # Add current message
    messages.append({"role": "user", "content": user_message})

    # Call LLM
    try:
        from openai import OpenAI
        client = OpenAI()
        response = client.chat.completions.create(
            model=os.environ.get("OPENAI_MODEL", "gpt-4"),
            messages=messages,
            max_tokens=500,
            temperature=0.7
        )
        # message.content may be None (e.g. tool-call or filtered
        # responses); coerce to "" so the declared -> str contract holds
        # and we never persist None into memory.
        assistant_message = response.choices[0].message.content or ""
    except ImportError:
        # Fallback for demo without OpenAI
        assistant_message = f"Echo: {user_message} (Install openai package for real responses)"

    # Store in memory
    memory.add_turn(conversation_id, user_message, assistant_message)

    return assistant_message
async def interactive_chat():
    """Run the interactive chat loop until the user quits or stdin closes.

    Commands:
        quit  -- exit the loop
        clear -- reset this conversation's memory
    """
    print("🤖 Chat Agent")
    print("=" * 40)
    print("Type 'quit' to exit, 'clear' to reset memory")
    print("=" * 40)
    print()

    conversation_id = "interactive"

    while True:
        try:
            # Get user input
            user_input = input("You: ").strip()

            if not user_input:
                continue

            if user_input.lower() == "quit":
                print("\nGoodbye! 👋")
                break

            if user_input.lower() == "clear":
                memory.clear(conversation_id)
                print("Memory cleared.\n")
                continue

            # Process through kernel so governance policies apply.
            try:
                response = await kernel.execute(
                    chat_agent,
                    user_input,
                    conversation_id
                )
                print(f"\nAgent: {response}\n")
            except Exception as e:
                # NOTE(review): detecting kernel signals by substring
                # match on the exception text is fragile -- confirm
                # whether agent_os exposes typed exceptions instead.
                if "SIGSTOP" in str(e):
                    print("\n⚠️ Response flagged for review. Skipping.\n")
                elif "SIGKILL" in str(e):
                    print("\n🛑 Response blocked by policy.\n")
                else:
                    print(f"\n❌ Error: {e}\n")
        except (KeyboardInterrupt, EOFError):
            # EOFError: stdin closed (Ctrl-D, or piped input exhausted).
            # Without catching it, input() crashes the loop with a
            # traceback instead of exiting cleanly.
            print("\n\nGoodbye! 👋")
            break
async def main():
    """Entry point: hand off to the interactive chat loop."""
    await interactive_chat()


if __name__ == "__main__":
    asyncio.run(main())