"""
FAQ Agent -- answers user questions using vector search via tool calling.
Uses the LangGraph-idiomatic pattern: ChatOpenAI.bind_tools() lets the LLM
decide *when* to search the FAQ knowledge base, and ToolNode executes the
call automatically. The agent loops (LLM -> tools -> LLM) until the model
produces a final text answer.
"""
import logging
import os
from pathlib import Path
import yaml
from langchain_core.messages import AIMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode
from .base import AgentNode
from ..db.config.database import get_async_session
from ..graph.state import ConversationState
from ..services import conversation_service
from ..tools import search_faqs
logger = logging.getLogger(__name__)

# Agent configuration (name, description, model params, system prompts) is
# loaded once at import time from the YAML file that sits next to this module.
_AGENTS_DIR = Path(__file__).parent
_config = yaml.safe_load((_AGENTS_DIR / "config" / "faq.yaml").read_text())

# Tools the LLM may request via bind_tools(); ToolNode executes them.
_TOOLS = [search_faqs]
class FAQAgent(AgentNode):
    """Answers frequently-asked questions using a tool-calling loop.

    The LLM is bound to the FAQ search tool(s); on each turn it either
    answers directly or requests a tool call, which ``ToolNode`` executes.
    Results are fed back to the LLM until it produces a final text answer.
    The user/assistant turn is then persisted best-effort to the database.
    """

    name: str = _config["name"]
    description: str = _config["description"]

    def __init__(self):
        # Model name is overridable via env; sampling parameters and the
        # system prompt come from the per-agent YAML config loaded above.
        model_name = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
        model_cfg = _config["model"]
        self.llm = ChatOpenAI(
            model=model_name,
            temperature=model_cfg["temperature_contextual"],
            max_tokens=model_cfg["max_tokens_contextual"],
        ).bind_tools(_TOOLS)
        self.tool_node = ToolNode(_TOOLS)
        self.system_message = SystemMessage(
            content=_config["system_prompts"]["contextual"]
        )

    async def __call__(self, state: ConversationState) -> ConversationState:
        """Run the tool-calling loop and return the updated conversation state.

        Any exception (including a conversation with no human message) is
        caught and converted into a Spanish-language fallback response, so the
        graph always receives a usable ``AIMessage``.
        """
        logger.debug("=" * 60)
        logger.debug("[FAQ] __call__ invoked (tool-calling pattern)")
        try:
            messages = list(state.get("messages", []))
            # Latest human turn; raises ValueError (-> fallback) if absent.
            user_message = self._last_human_content(messages)
            if not any(isinstance(m, SystemMessage) for m in messages):
                messages = [self.system_message] + messages
            response = await self._tool_calling_loop(messages)
            conv_id = await self._persist(
                state.get("conversation_id"), user_message, response
            )
            return {
                "agent_context": {
                    "response": response,
                    "query_processed": True,
                },
                "messages": [AIMessage(content=response)],
                "conversation_id": conv_id,
            }
        except Exception as e:
            logger.error("Error in FAQ agent: %s", e, exc_info=True)
            fallback = (
                "Lo siento, tuve un problema técnico procesando tu consulta.\n\n"
                "Mientras tanto, puedes:\n"
                "• Contactarnos directamente por nuestras redes sociales\n"
                "• Visitar nuestro sitio web para más información\n"
                "• Reformular tu pregunta de manera más específica\n\n"
                "¿Hay algo más en lo que pueda ayudarte?"
            )
            return {
                "agent_context": {
                    "response": fallback,
                    "error": True,
                    "query_processed": False,
                },
                "messages": [AIMessage(content=fallback)],
            }

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------
    @staticmethod
    def _last_human_content(messages: list) -> str:
        """Return the content of the most recent human message.

        Raises:
            ValueError: if the conversation contains no human message.
        """
        for m in reversed(messages):
            if m.type == "human":
                return m.content
        raise ValueError("no human message found in conversation state")

    async def _persist(self, conv_id, user_message: str, response: str):
        """Best-effort persistence of one user/assistant exchange.

        Returns the (possibly newly created) conversation id; on any database
        failure the error is logged and the original ``conv_id`` is returned
        unchanged so a DB outage never breaks the conversation flow.
        """
        result_id = conv_id
        async for session in get_async_session():
            try:
                result_id = await conversation_service.get_or_create_conversation(
                    session, conv_id
                )
                await conversation_service.save_message(
                    session, result_id, "user", user_message
                )
                await conversation_service.save_message(
                    session, result_id, "assistant", response
                )
                await session.commit()
            except Exception as db_err:
                await session.rollback()
                logger.error(
                    "[FAQ] Error persisting messages to DB: %s",
                    db_err,
                    exc_info=True,
                )
                result_id = conv_id
        return result_id

    async def _tool_calling_loop(
        self,
        messages: list,
        max_iterations: int = 5,
    ) -> str:
        """Call the LLM, execute any tool calls, feed results back, repeat.

        Stops as soon as the model produces a plain text answer. If
        ``max_iterations`` is exhausted, falls back to the content of the last
        message in the transcript (which may be a tool result) or "".
        """
        for _ in range(max_iterations):
            ai_msg = await self.llm.ainvoke(messages)
            messages.append(ai_msg)
            if not ai_msg.tool_calls:
                # Final answer — no further tool use requested.
                return ai_msg.content or ""
            logger.debug(
                "[FAQ] LLM requested %d tool call(s)", len(ai_msg.tool_calls)
            )
            # ToolNode inspects the trailing AIMessage and returns the
            # resulting ToolMessages, which we append for the next turn.
            tool_result = await self.tool_node.ainvoke({"messages": messages})
            messages.extend(tool_result["messages"])
        last = messages[-1]
        return last.content if hasattr(last, "content") and last.content else ""
# Module-level singleton: one shared agent instance reused by every request.
faq_agent = FAQAgent()


async def handle_faq_query(state: ConversationState) -> ConversationState:
    """LangGraph node entry point: delegate to the shared FAQAgent instance."""
    return await faq_agent(state)