3 changes: 3 additions & 0 deletions .env.example
@@ -12,6 +12,9 @@ POSTGRES_DB=ithaka
# Option with roles (token:role format, comma-separated):
# AUTH_TOKENS=token_admin_1:admin,token_lector_1:reader

# External Ithaka API (for wizard postulation submission)
ITHAKA_API_BASE_URL=http://localhost:8000

# Other environment variables (add as needed)
# TWILIO_ACCOUNT_SID=your_twilio_sid
# TWILIO_AUTH_TOKEN=your_twilio_token
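
The new ITHAKA_API_BASE_URL variable is consumed by app/services/postulation_service.py, which this diff imports in wizard_node.py but does not show. A minimal sketch of what submit_postulation might look like, assuming httpx; the /postulations endpoint path and payload shape are hypothetical:

# app/services/postulation_service.py -- hypothetical sketch; endpoint path
# and payload shape are assumptions, not confirmed by this diff.
import os

import httpx


async def submit_postulation(wizard_responses: dict) -> dict:
    """POST the collected wizard answers to the external Ithaka API."""
    base_url = os.getenv("ITHAKA_API_BASE_URL", "http://localhost:8000")
    async with httpx.AsyncClient(base_url=base_url, timeout=30.0) as client:
        resp = await client.post("/postulations", json=wizard_responses)
        resp.raise_for_status()  # surface 4xx/5xx so the caller can log them
        return resp.json()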
290 changes: 79 additions & 211 deletions app/agents/faq.py
@@ -1,114 +1,73 @@
"""
FAQ Agent - Answers frequently asked questions using vector search
FAQ Agent -- answers user questions using vector search via tool calling.

Uses the LangGraph-idiomatic pattern: ChatOpenAI.bind_tools() lets the LLM
decide *when* to search the FAQ knowledge base, and ToolNode executes the
call automatically. The agent loops (LLM -> tools -> LLM) until the model
produces a final text answer.
"""

import logging
import os
from pathlib import Path
from typing import Any

import numpy as np
import yaml
from jinja2 import Environment, FileSystemLoader
from langchain_core.messages import AIMessage
from openai import AsyncOpenAI
from langchain_core.messages import AIMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import ToolNode

from .base import AgentNode
from ..db.config.database import get_async_session
from ..graph.state import ConversationState
from ..services.embedding_service import embedding_service
from ..services import conversation_service
from ..tools import search_faqs

logger = logging.getLogger(__name__)

_AGENTS_DIR = Path(__file__).parent
_config = yaml.safe_load((_AGENTS_DIR / "config" / "faq.yaml").read_text())
_prompts = Environment(loader=FileSystemLoader(str(_AGENTS_DIR / "prompts")), keep_trailing_newline=True)


def to_serializable(obj):
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, dict):
return {k: to_serializable(v) for k, v in obj.items()}
if isinstance(obj, list):
return [to_serializable(v) for v in obj]
return obj
_TOOLS = [search_faqs]


class FAQAgent(AgentNode):
"""Agente para responder preguntas frecuentes usando base vectorial"""
"""Answers frequently-asked questions using a tool-calling loop."""

name: str = _config["name"]
description: str = _config["description"]

def __init__(self):
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError("OPENAI_API_KEY environment variable is required")
model_name = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
model_cfg = _config["model"]

self.client = AsyncOpenAI(api_key=api_key)
self.model = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
self.max_results = int(os.getenv("MAX_FAQ_RESULTS", "5"))
self.similarity_threshold = float(
os.getenv("SIMILARITY_THRESHOLD", "0.4"))
self.llm = ChatOpenAI(
model=model_name,
temperature=model_cfg["temperature_contextual"],
max_tokens=model_cfg["max_tokens_contextual"],
).bind_tools(_TOOLS)

async def __call__(self, state: ConversationState) -> ConversationState:
"""Procesa una consulta FAQ del usuario"""
self.tool_node = ToolNode(_TOOLS)
self.system_message = SystemMessage(
content=_config["system_prompts"]["contextual"]
)

messages = state.get("messages", [])
user_message = [m.content for m in messages if m.type == "human"][-1]
# Build conversation history excluding the current user message (last one)
history = messages[:-1] if messages else []
async def __call__(self, state: ConversationState) -> ConversationState:
"""Run the tool-calling loop and return the updated conversation state."""

logger.debug("=" * 60)
logger.debug("[FAQ] __call__ invoked")
logger.debug(f"[FAQ] User message: {user_message!r}")
logger.debug(f"[FAQ] similarity_threshold={self.similarity_threshold}, max_results={self.max_results}")
logger.debug("[FAQ] __call__ invoked (tool-calling pattern)")

try:
# Get a database session
async for session in get_async_session():
# Search for similar FAQs
similar_faqs = await embedding_service.search_similar_faqs(
query=user_message,
session=session,
limit=self.max_results,
similarity_threshold=self.similarity_threshold
)

logger.debug(f"[FAQ] Found {len(similar_faqs)} similar FAQs")
for i, faq in enumerate(similar_faqs):
logger.debug(f"[FAQ] faq[{i}] similarity={faq.get('similarity', '?'):.3f} "
f"q={faq.get('question', '')[:80]!r}")

if similar_faqs:
# Generate a contextualized response from the matching FAQs
response = await self._generate_contextual_response(
user_message, similar_faqs, history
)

state["faq_results"] = to_serializable(similar_faqs)
state["next_action"] = "send_response"
state["should_continue"] = False
messages = list(state.get("messages", []))
user_message = [m.content for m in messages if m.type == "human"][-1]

else:
# No relevant FAQs found
response = await self._generate_no_results_response(user_message, history)
if not any(isinstance(m, SystemMessage) for m in messages):
messages = [self.system_message] + messages

state["faq_results"] = []
state["next_action"] = "send_response"
state["should_continue"] = False

# Update state with the response
state["agent_context"] = {
"response": response,
"found_faqs": len(similar_faqs),
"query_processed": True
}
response = await self._tool_calling_loop(messages)

# --- DB persistence ---
conv_id = state.get("conversation_id")
conv_id = state.get("conversation_id")
async for session in get_async_session():
try:
conv_id = await conversation_service.get_or_create_conversation(
session, conv_id
@@ -118,161 +77,70 @@ async def __call__(self, state: ConversationState) -> ConversationState:
await session.commit()
except Exception as db_err:
await session.rollback()
logger.error(f"[FAQ] Error al persistir mensajes en DB: {db_err}", exc_info=True)
logger.error(f"[FAQ] Error persisting messages to DB: {db_err}", exc_info=True)
conv_id = state.get("conversation_id")

# Return the messages delta so add_messages appends it
return {
"agent_context": state["agent_context"],
"faq_results": state.get("faq_results", []),
"next_action": state["next_action"],
"should_continue": state["should_continue"],
"messages": [AIMessage(content=response)],
"conversation_id": conv_id,
}

except Exception as e:
logger.error(f"Error in FAQ query processing: {e}")

# Fallback response in case of error
fallback_response = """
Lo siento, tuve un problema técnico procesando tu consulta.

Mientras tanto, puedes:
• Contactarnos directamente por nuestras redes sociales
• Visitar nuestro sitio web para más información
• Reformular tu pregunta de manera más específica

¿Hay algo más en lo que pueda ayudarte?
"""

state["agent_context"] = {
"response": fallback_response,
"error": True,
"query_processed": False
}
state["next_action"] = "send_response"
state["should_continue"] = False
return {
"agent_context": state["agent_context"],
"next_action": state["next_action"],
"should_continue": state["should_continue"],
"messages": [AIMessage(content=fallback_response)]
"agent_context": {
"response": response,
"query_processed": True,
},
"messages": [AIMessage(content=response)],
"conversation_id": conv_id,
}

async def _generate_contextual_response(
self,
user_query: str,
similar_faqs: list[dict[str, Any]],
history: list = None,
) -> str:
"""Genera una respuesta contextualizada basada en FAQs similares"""

try:
faq_context = ""
for i, faq in enumerate(similar_faqs, 1):
faq_context += (
f"\nFAQ {i} (similitud: {faq['similarity']:.2f}):\n"
f"Pregunta: {faq['question']}\n"
f"Respuesta: {faq['answer']}\n"
)

prompt = _prompts.get_template("faq_contextual.j2").render(
user_query=user_query,
faq_context=faq_context,
)

system_content = _config["system_prompts"]["contextual"]
logger.debug("-" * 60)
logger.debug("[FAQ] Contextual LLM call")
logger.debug(f"[FAQ] FAQ context passed to prompt:\n{faq_context}")
logger.debug(f"[FAQ] System prompt:\n{system_content}")
logger.debug(f"[FAQ] User prompt:\n{prompt}")

chat_messages = [{"role": "system", "content": system_content}]
for msg in (history or []):
if msg.type == "human":
chat_messages.append({"role": "user", "content": msg.content})
elif msg.type == "ai" and msg.content:
chat_messages.append({"role": "assistant", "content": msg.content})
chat_messages.append({"role": "user", "content": prompt})

model_cfg = _config["model"]
response = await self.client.chat.completions.create(
model=self.model,
messages=chat_messages,
temperature=model_cfg["temperature_contextual"],
max_tokens=model_cfg["max_tokens_contextual"],
)

answer = response.choices[0].message.content
logger.debug(f"[FAQ] LLM contextual response:\n{answer}")
return answer

except Exception as e:
logger.error(f"Error generating contextual response: {e}")
logger.error(f"Error in FAQ agent: {e}", exc_info=True)
fallback = (
"Lo siento, tuve un problema técnico procesando tu consulta.\n\n"
"Mientras tanto, puedes:\n"
"• Contactarnos directamente por nuestras redes sociales\n"
"• Visitar nuestro sitio web para más información\n"
"• Reformular tu pregunta de manera más específica\n\n"
"¿Hay algo más en lo que pueda ayudarte?"
)
return {
"agent_context": {
"response": fallback,
"error": True,
"query_processed": False,
},
"messages": [AIMessage(content=fallback)],
}

best_faq = similar_faqs[0] if similar_faqs else None
if best_faq:
return (
f"Basándome en tu consulta, creo que esto te puede ayudar:\n\n"
f"**{best_faq['question']}**\n\n"
f"{best_faq['answer']}\n\n"
f"¿Esto responde a tu pregunta o necesitas información adicional?"
)
# ------------------------------------------------------------------
# Internal helpers
# ------------------------------------------------------------------

return "Lo siento, no pude procesar tu consulta correctamente. ¿Podrías reformularla?"
async def _tool_calling_loop(
self,
messages: list,
max_iterations: int = 5,
) -> str:
"""Call the LLM, execute any tool calls, feed results back, repeat."""

async def _generate_no_results_response(self, user_query: str, history: list = None) -> str:
"""Genera respuesta cuando no se encuentran FAQs relevantes"""
for _ in range(max_iterations):
ai_msg = await self.llm.ainvoke(messages)
messages.append(ai_msg)

try:
prompt = _prompts.get_template("faq_no_results.j2").render(
user_query=user_query,
)
if not ai_msg.tool_calls:
return ai_msg.content or ""

system_content = _config["system_prompts"]["no_results"]
logger.debug("-" * 60)
logger.debug("[FAQ] No-results LLM call")
logger.debug(f"[FAQ] System prompt:\n{system_content}")
logger.debug(f"[FAQ] User prompt:\n{prompt}")

chat_messages = [{"role": "system", "content": system_content}]
for msg in (history or []):
if msg.type == "human":
chat_messages.append({"role": "user", "content": msg.content})
elif msg.type == "ai" and msg.content:
chat_messages.append({"role": "assistant", "content": msg.content})
chat_messages.append({"role": "user", "content": prompt})

model_cfg = _config["model"]
response = await self.client.chat.completions.create(
model=self.model,
messages=chat_messages,
temperature=model_cfg["temperature_no_results"],
max_tokens=model_cfg["max_tokens_no_results"],
logger.debug(
"[FAQ] LLM requested %d tool call(s)", len(ai_msg.tool_calls)
)

answer = response.choices[0].message.content
logger.debug(f"[FAQ] LLM no-results response:\n{answer}")
return answer
tool_result = await self.tool_node.ainvoke({"messages": messages})
tool_messages = tool_result["messages"]
messages.extend(tool_messages)

except Exception as e:
logger.error(f"Error generating no results response: {e}")
return (
"No encontré información específica sobre tu consulta en nuestras FAQs.\n\n"
"Te sugiero:\n"
"• Contactar directamente al equipo de Ithaka\n"
"• Revisar nuestro sitio web oficial\n"
"• Seguirnos en redes sociales para estar al día\n\n"
"¿Hay algo más sobre emprendimiento o nuestros programas en lo que pueda ayudarte?"
)
last = messages[-1]
return last.content if hasattr(last, "content") and last.content else ""


# Global agent instance
faq_agent = FAQAgent()


async def handle_faq_query(state: ConversationState) -> ConversationState:
"""Función wrapper para LangGraph"""
"""Wrapper function for LangGraph."""
return await faq_agent(state)
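
The rewritten agent delegates retrieval to the search_faqs tool imported from app.tools, which this diff does not include. A plausible sketch, assuming LangChain's @tool decorator and reusing the embedding_service call that the removed inline code made (its limit and similarity_threshold defaults are carried over):

# app/tools.py -- hypothetical sketch; defaults mirror the env-var values
# the removed inline code used (MAX_FAQ_RESULTS=5, SIMILARITY_THRESHOLD=0.4).
from langchain_core.tools import tool

from .db.config.database import get_async_session
from .services.embedding_service import embedding_service


@tool
async def search_faqs(query: str) -> str:
    """Search the FAQ knowledge base for entries similar to the query."""
    async for session in get_async_session():
        faqs = await embedding_service.search_similar_faqs(
            query=query,
            session=session,
            limit=5,
            similarity_threshold=0.4,
        )
        if not faqs:
            return "No matching FAQs found."
        return "\n\n".join(
            f"FAQ (similarity {f['similarity']:.2f}):\nQ: {f['question']}\nA: {f['answer']}"
            for f in faqs
        )

Because the tool is async, ToolNode executes it with ainvoke inside the agent's LLM -> tools -> LLM loop.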
12 changes: 11 additions & 1 deletion app/agents/wizard_node.py
@@ -16,7 +16,7 @@
from ..graph.state import ConversationState
from .wizard_workflow.wizard_graph import wizard_graph
from ..db.config.database import SessionLocal
from ..services import conversation_service
from ..services import conversation_service, postulation_service

logger = logging.getLogger(__name__)

@@ -79,6 +79,16 @@ async def __call__(self, state: ConversationState) -> ConversationState:
logger.debug(f"[WIZARD_NODE] wizard_status={result.get('wizard_status')}")
logger.debug(f"[WIZARD_NODE] response (first 200 chars): {response_content[:200]!r}")

# --- Submit to external API ---
if result.get("completed"):
try:
submission = await postulation_service.submit_postulation(
result.get("wizard_responses", {})
)
logger.info(f"[WIZARD_NODE] Postulation submitted to external API: {submission}")
except Exception as e:
logger.error(f"[WIZARD_NODE] Error submitting postulation to external API: {e}", exc_info=True)

# --- DB persistence ---
conv_id = state.get("conversation_id")
wizard_responses = result.get("wizard_responses", {})
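
Both handle_faq_query and this wizard node are plain LangGraph node callables over ConversationState. For context, a hypothetical wiring sketch; the real graph, with its router and wizard node, lives elsewhere in the repo and is not part of this diff:

# Hypothetical wiring sketch -- node names and routing are assumptions.
from langgraph.graph import END, StateGraph

from app.agents.faq import handle_faq_query
from app.graph.state import ConversationState

builder = StateGraph(ConversationState)
builder.add_node("faq", handle_faq_query)
builder.set_entry_point("faq")
builder.add_edge("faq", END)
graph = builder.compile()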