# dtsense_agent.py (forked from dtsense-ai/DTSense_Agent)
import os
from typing import TypedDict, List, Literal, Optional

import streamlit as st
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_pinecone import PineconeVectorStore
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, END
from pinecone import Pinecone
st.set_page_config(page_title="Agentic RAG", layout="wide")

# Load environment variables
load_dotenv()

# --- PINECONE SETUP ---
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index_name = "medical-index"
@st.cache_resource
def load_vectorstore():
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    # Verify the index exists; stop with setup guidance if it does not
    try:
        index = pc.Index(index_name)
        index.describe_index_stats()
    except Exception:
        st.warning(f"Index '{index_name}' not found. Please create it in Pinecone first.")
        st.info("Steps: Go to https://app.pinecone.io → Create Index → Name: 'medical-index' → Dimension: 384 → Create")
        st.stop()
    vectorstore = PineconeVectorStore(index=index, embedding=embeddings)
    return vectorstore
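
# If you'd rather create the index from code than in the Pinecone console, a
# minimal sketch with the serverless SDK might look like the following (the
# ServerlessSpec cloud/region values here are assumptions; adjust them to
# your project):
#
#     from pinecone import ServerlessSpec
#
#     if index_name not in pc.list_indexes().names():
#         pc.create_index(
#             name=index_name,
#             dimension=384,  # must match all-MiniLM-L6-v2's embedding size
#             metric="cosine",
#             spec=ServerlessSpec(cloud="aws", region="us-east-1"),
#         )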
vectorstore = load_vectorstore()
retriever = vectorstore.as_retriever()

# --- Tools and LLM ---
search_tool = TavilySearchResults(max_results=3, api_key=os.environ.get("TAVILY_API_KEY"))
llm = ChatGroq(model_name="openai/gpt-oss-20b")
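# Note: "openai/gpt-oss-20b" is served through Groq here; any Groq-hosted chat
# model name should work in its place (e.g. "llama-3.1-8b-instant", though
# availability on Groq changes over time, so check their current model list).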
# --- Graph State ---
class GraphState(TypedDict):
    query: str
    docs: Optional[List[Document]]
    result: Optional[str]
    source: Literal["auto", "search", "vectorstore"]
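
# Each node below returns a *partial* state dict; LangGraph merges it into
# GraphState key by key. For example, route() returning {"source": "search"}
# only overwrites "source" and leaves "query", "docs", and "result" intact.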
# --- Nodes ---
def route(state: GraphState):
    if state["source"] == "auto":
        query = state["query"].lower()
        # Recency keywords signal a web-search intent (the last three are
        # Indonesian for "news", "today", and "latest")
        if any(word in query for word in ["latest", "current", "today", "news", "berita", "hari ini", "terbaru"]):
            return {"source": "search"}
        return {"source": "vectorstore"}
    else:
        return {"source": state["source"]}
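
# Tavily returns a list of dicts roughly shaped like {"url": ..., "content": ...};
# tavily_node wraps each hit in a Document so downstream nodes can treat web
# results and vectorstore results uniformly.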
def tavily_node(state: GraphState):
    query = state["query"]
    results = search_tool.invoke(query)
    docs = [Document(page_content=res["content"], metadata={"source": res["url"]}) for res in results]
    return {"docs": docs}
def vectorstore_node(state: GraphState):
    query = state["query"]
    docs = retriever.invoke(query)
    return {"docs": docs}
combine_prompt = ChatPromptTemplate.from_messages([
    ("system", "Answer the question based only on the provided context. If the information is not available in the context, do not make up an answer; reply with 'I don't know'."),
    ("human", "Question: {query}\n\nContext:\n{context}")
])
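
# `combine_prompt | llm` below is LangChain's LCEL composition: invoke() fills
# the prompt template with {query, context}, sends it to the Groq model, and
# returns an AIMessage whose text is in `.content`.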
def combine_node(state: GraphState):
    context = "\n\n".join(doc.page_content for doc in state["docs"])
    chain = combine_prompt | llm
    result = chain.invoke({"query": state["query"], "context": context})
    return {"result": result.content}
# --- Build Graph ---
graph = StateGraph(GraphState)
graph.add_node("router", route)
graph.add_node("search", tavily_node)
graph.add_node("vectorstore", vectorstore_node)
graph.add_node("combine", combine_node)
graph.set_entry_point("router")
graph.add_conditional_edges("router", lambda x: x["source"], {
    "search": "search",
    "vectorstore": "vectorstore"
})
graph.add_edge("search", "combine")
graph.add_edge("vectorstore", "combine")
graph.add_edge("combine", END)

# --- Memory ---
# The checkpointer must be passed to compile() for per-thread memory to work;
# it is keyed by the thread_id supplied in the invoke() config below.
memory = MemorySaver()
agentic_rag = graph.compile(checkpointer=memory)
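
# Compiled workflow, for reference:
#
#              ┌─▶ search ──────┐
#   router ────┤                ├─▶ combine ─▶ END
#              └─▶ vectorstore ─┘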
# --- Streamlit UI ---
st.title("🤖 Agentic RAG with LangGraph")

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
with st.sidebar:
    st.image("data/dtsense.png", use_container_width=True)
    # Bio information
    st.markdown(
        """
        <style>
        .center-text {
            text-align: center;
        }
        </style>
        <h2 class="center-text">DTSense AI Agent</h2>
        """,
        unsafe_allow_html=True
    )
    with st.expander("ℹ️ About DTSense"):
        st.markdown(
            """
            <div style="text-align: justify;">
            DTSense is a platform that provides Generative AI and Agentic AI solutions
            for industries including finance, healthcare, and education. Our mission is
            to empower businesses with AI-driven insights and automation that enhance
            decision-making and operational efficiency.
            </div>
            """,
            unsafe_allow_html=True
        )
        st.markdown(
            """
            [Website](https://dtsense.id)
            [GitHub](https://github.com/dtsense)
            """,
            unsafe_allow_html=True
        )
with st.sidebar:
    with st.expander("🔍 Source Options"):
        st.markdown("## Select Retrieval Source")
        retrieval_method = st.radio("Choose retrieval source:", ("Auto", "Pinecone", "Tavily"))
    with st.expander("Show Workflow Diagram"):
        st.markdown("## LangGraph Workflow")
        try:
            st.image(agentic_rag.get_graph().draw_mermaid_png())
        except Exception:
            st.warning("⚠️ Workflow diagram temporarily unavailable. The mermaid.ink API may be down.")
    with st.expander("💬 Conversation History", expanded=True):
        search = st.text_input("Search history")
        filtered_history = [
            msg for msg in st.session_state.chat_history
            if search.lower() in msg["content"].lower()
        ] if search else st.session_state.chat_history
        for msg in filtered_history:
            role = "👤" if msg["role"] == "user" else "🤖"
            st.markdown(f"**{role}**: {msg['content']}")
        if st.button("🗑️ Clear History"):
            st.session_state.chat_history = []
        st.download_button(
            "⬇️ Export History",
            data="\n".join(f"{msg['role']}: {msg['content']}" for msg in st.session_state.chat_history),
            file_name="chat_history.txt"
        )
# --- Input ---
query = st.chat_input("Enter your question:")
if query:
    with st.spinner("Thinking..."):
        st.session_state.chat_history.append({"role": "user", "content": query})
        if retrieval_method == "Pinecone":
            source = "vectorstore"
        elif retrieval_method == "Auto":
            source = "auto"
        else:
            source = "search"
        # The checkpointer was attached at compile time, so only the thread_id
        # goes in the config; invoke() returns the final GraphState dict
        result = agentic_rag.invoke(
            {"query": query, "source": source},
            config={"configurable": {"thread_id": "session-001"}}
        )
        # The answer is rendered by the chat-history loop below
        st.session_state.chat_history.append({"role": "assistant", "content": result["result"]})
for msg in st.session_state.chat_history:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

if query:
    st.markdown("---")
    st.subheader("Retrieved Context Sources")
    for i, doc in enumerate(result["docs"]):
        st.markdown(f"**Doc {i+1}** ({doc.metadata.get('source', 'Unknown')}): {doc.page_content[:200]}...")
# Display the most recent turns in the sidebar
if st.session_state.chat_history:
    st.sidebar.subheader("💬 History")
    for i, entry in enumerate(reversed(st.session_state.chat_history[-10:]), 1):
        st.sidebar.markdown(f"**{i}.** {entry['role']}:")
        st.sidebar.markdown(f"↳ {entry['content'][:100]}...")