Skip to content

Commit 3f171e6

Browse files
committed
feat: implement context_builder node with Primary LLM integration
- Add configuration from settings for OpenAI API key - Implement enriched query construction combining paraphrased text and relevant chunks - Add Primary LLM call with specialized system prompt for nutrition/culinary topics - Update state with enriched_query, primary_response, and generated_response - Add error handling and logging - Set generated_response for guard_final compatibility (Nodo 7 Generator not yet implemented)
1 parent 597f594 commit 3f171e6

File tree

1 file changed

+81
-20
lines changed

1 file changed

+81
-20
lines changed
Lines changed: 81 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,19 @@
11
"""Nodo 6: Context Builder - Enriches query with retrieved context."""

import logging

from app.agents.state import AgentState
from app.core.config import settings
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)

# Primary LLM client, configured once at import time from application settings.
llm = ChatOpenAI(
    model="gpt-4.1-mini",  # Primary LLM model for context-aware responses
    openai_api_key=settings.openai_api_key,
)
817

918

1019
def context_builder(state: AgentState) -> AgentState:
    """Build an enriched query from retrieved context and call the Primary LLM.

    Combines the paraphrased user question with the relevant chunks retrieved
    from the knowledge base into an enriched query, sends it to the Primary
    LLM with a nutrition/culinary system prompt, and stores the results in
    the state.

    Args:
        state: Current agent state. Reads ``paraphrased_text``,
            ``relevant_chunks``, ``messages``, and ``prompt``.

    Returns:
        Updated state with enriched_query and primary_response set.
        ``generated_response`` is also set (mirroring ``primary_response``)
        so guard_final can validate it until Nodo 7 (Generator) exists.
        On LLM failure, ``error_message`` is set and the response fields
        are ``None``.
    """
    updated_state = state.copy()

    # Get paraphrased text and relevant chunks from state
    paraphrased = state.get("paraphrased_text", "")
    chunks = state.get("relevant_chunks", [])

    # Fallback if paraphrased_text is not available: use the latest message
    # content, or the raw prompt as a last resort.
    if not paraphrased:
        logger.warning("No paraphrased text found in state. Using original prompt.")
        messages = state.get("messages", [])
        if messages:
            # Messages may be LangChain message objects (with .content) or raw values.
            paraphrased = messages[-1].content if hasattr(messages[-1], "content") else str(messages[-1])
        else:
            paraphrased = state.get("prompt", "")

    # Build the context section: numbered chunks separated by dividers, or an
    # explicit "nothing found" marker so the LLM knows the knowledge base was empty.
    if chunks:
        context_section = "\n\n---\n\n".join([f"Context {i+1}:\n{chunk}" for i, chunk in enumerate(chunks)])
    else:
        context_section = "No relevant context found in the knowledge base."
        logger.warning("No relevant chunks found for context building")

    # Create enriched query combining paraphrased text and context
    enriched_query = f"""User Question: {paraphrased}

Relevant Context from Knowledge Base:
{context_section}

Please provide a comprehensive answer based on the context provided above. If the context does not contain enough information to answer the question, please indicate that clearly."""

    # System prompt for the Primary LLM
    system_content = """You are a helpful assistant specialized in providing accurate, context-based answers about nutrition and culinary topics.

Your task is to:
1. Use the provided context to answer the user's question accurately
2. If the context contains relevant information, provide a comprehensive answer
3. If the context does not contain enough information, clearly state that you don't have sufficient information in the knowledge base
4. Always base your answer on the provided context - do not make up information
5. If the question is not related to the context, politely redirect the conversation

Be concise, accurate, and helpful."""

    # Prepare messages for LLM
    messages_for_llm = [
        SystemMessage(content=system_content),
        HumanMessage(content=enriched_query),
    ]

    try:
        # Call Primary LLM
        logger.info("Calling Primary LLM with enriched query")
        response = llm.invoke(messages_for_llm)

        # Extract response content (LangChain responses carry .content)
        primary_response = response.content if hasattr(response, "content") else str(response)

        # Update state with enriched query and primary response (as defined in state.py)
        updated_state["enriched_query"] = enriched_query
        updated_state["primary_response"] = primary_response

        # Also set generated_response for guard_final (Nodo 7: Generator not yet implemented).
        # For now, generated_response = primary_response so guard_final can validate it.
        updated_state["generated_response"] = primary_response

        # Also update messages for LangGraph compatibility
        updated_state["messages"] = state.get("messages", []) + [response]

        logger.info("Successfully generated primary response from LLM")

    except Exception as e:
        # logger.exception records the traceback automatically; lazy %-args
        # avoid formatting the message when the log level is disabled.
        logger.exception("Error calling Primary LLM: %s", e)
        # Set error state; keep the enriched query for debugging downstream.
        updated_state["error_message"] = f"Error in context builder: {str(e)}"
        updated_state["enriched_query"] = enriched_query
        updated_state["primary_response"] = None
        updated_state["generated_response"] = None

    return updated_state

0 commit comments

Comments
 (0)