-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstreamlit_app.py
More file actions
382 lines (329 loc) Β· 13.1 KB
/
streamlit_app.py
File metadata and controls
382 lines (329 loc) Β· 13.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
"""Streamlit demo: McKesson-branded RAG chat over uploaded documents.
Run with:
streamlit run streamlit_app.py
Required environment (see .env):
MCK_CLIENT_ID, MCK_CLIENT_SECRET - Okta credentials for MuleSoft auth
LANGSMITH_API_KEY - to send traces to LangSmith
LANGSMITH_TRACING=true - enable tracing
LANGSMITH_PROJECT=document-rag-poc - LangSmith project name (optional)
"""
from __future__ import annotations
import base64
import os
import tempfile
import uuid
from pathlib import Path
from dotenv import load_dotenv
# Load env BEFORE importing anything that builds clients.
load_dotenv(Path(__file__).parent / ".env")
import streamlit as st
from agents.chat import analyze_with_agents, answer_question, index_document
def get_image_base64(image_path):
"""Convert image to base64 string for embedding in HTML"""
try:
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode()
except Exception:
return None
# ---------------------------------------------------------------------------
# Page config + McKesson-style chrome
# ---------------------------------------------------------------------------
# NOTE: st.set_page_config must be the first Streamlit call in the script,
# per Streamlit's API contract.
st.set_page_config(
    page_title="McKesson Document Assistant",
    page_icon="π©Ί",
    layout="wide",
    initial_sidebar_state="expanded",
)
# Brand palette, interpolated into the injected CSS below.
_MCK_BLUE = "#007DBA"  # primary brand blue: header band, buttons
_MCK_NAVY = "#002F6C"  # darker accent: button hover, sidebar headings
_MCK_BG_SOFT = "#F4F6F9"  # soft neutral: sidebar background, source pills
_CUSTOM_CSS = f"""
<style>
/* Hide Streamlit default chrome */
#MainMenu {{visibility: hidden;}}
footer {{visibility: hidden;}}
/* Header band */
.mck-header {{
display: flex;
align-items: center;
gap: 1.5rem;
background: {_MCK_BLUE};
color: white;
padding: 1.5rem 2rem;
border-radius: 8px;
margin-bottom: 2rem;
}}
.mck-logo {{
width: 120px;
height: auto;
border-radius: 4px;
background: white;
padding: 0.5rem;
}}
.mck-title {{
font-size: 1.8rem;
font-weight: 600;
margin: 0;
}}
.mck-sub {{
font-size: 0.9rem;
opacity: 0.85;
margin: 0;
}}
/* Chat bubbles */
[data-testid="stChatMessage"] {{
border-radius: 12px;
padding: 0.25rem 0.5rem;
}}
/* Buttons */
.stButton > button {{
background-color: {_MCK_BLUE};
color: white;
border: none;
border-radius: 6px;
font-weight: 600;
}}
.stButton > button:hover {{
background-color: {_MCK_NAVY};
color: white;
}}
/* Sidebar polish */
section[data-testid="stSidebar"] {{
background-color: {_MCK_BG_SOFT};
}}
section[data-testid="stSidebar"] h2 {{
color: {_MCK_NAVY};
}}
/* Source pill */
.src-pill {{
display: inline-block;
background: {_MCK_BG_SOFT};
color: {_MCK_NAVY};
border: 1px solid #d6dde6;
border-radius: 999px;
padding: 2px 10px;
margin: 2px 4px 2px 0;
font-size: 0.78rem;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', monospace;
}}
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
# Header
st.markdown(
f"""
<div class="mck-header">
<div>
<p class="mck-title">Document Assistant</p>
<p class="mck-sub">Upload a document and ask questions β answers are grounded in your file.</p>
</div>
</div>
""",
unsafe_allow_html=True,
)
# ---------------------------------------------------------------------------
# Session state
# ---------------------------------------------------------------------------
# Seed first-run defaults for this browser session. Streamlit re-executes
# the whole script on every interaction, so values already present in
# st.session_state must survive untouched — only missing keys are filled.
_STATE_DEFAULTS = {
    "messages": lambda: [],        # chat transcript: {"role", "content", "sources"}
    "indexed_files": lambda: [],   # bare filenames shown in the sidebar
    "indexed_paths": lambda: [],   # full absolute paths for the agent
    "session_id": lambda: str(uuid.uuid4())[:8],  # short per-session id
    "mode": lambda: "Multi-agent (orchestrator + fact-checker)",
    "summary_type": lambda: "executive",
}
for _key, _make_default in _STATE_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _make_default()
# ---------------------------------------------------------------------------
# Sidebar β upload + status
# ---------------------------------------------------------------------------
with st.sidebar:
    # Add McKesson logo at the top of sidebar.
    # The logo is inlined as a base64 data URI; if the file is missing the
    # helper returns None and the sidebar simply renders without a logo.
    logo_base64 = get_image_base64("mc_logo.png")
    if logo_base64:
        st.markdown(
            f'<div style="text-align: center; margin-bottom: 20px;"><img src="data:image/png;base64,{logo_base64}" alt="McKesson Logo" style="width: 120px; height: auto; border-radius: 4px;"></div>',
            unsafe_allow_html=True,
        )
    st.markdown("## Document")
    uploaded = st.file_uploader(
        "Upload a PDF, TXT, or Markdown file",
        type=["pdf", "txt", "md"],
        accept_multiple_files=False,
    )
    if uploaded is not None:
        # Persist upload to a stable temp path so the indexer can read it.
        upload_dir = Path(tempfile.gettempdir()) / "mck_docs"
        upload_dir.mkdir(parents=True, exist_ok=True)
        dest = upload_dir / uploaded.name
        # Dedup by filename only — re-uploading a changed file with the
        # same name is NOT re-indexed. Also guards against re-indexing on
        # every Streamlit rerun while the uploader still holds the file.
        if dest.name not in st.session_state.indexed_files:
            dest.write_bytes(uploaded.getbuffer())
            with st.spinner(f"Indexing **{uploaded.name}**..."):
                try:
                    result = index_document(str(dest))
                    # result["indexed"] is False when the vector store
                    # already holds this document.
                    if result["indexed"]:
                        st.success(
                            f"Indexed **{uploaded.name}** β {result['chunks']} chunks"
                        )
                    else:
                        st.info(f"**{uploaded.name}** already indexed β ready.")
                    # Track both the display name and the absolute path
                    # (the analysis agent needs the full path).
                    st.session_state.indexed_files.append(dest.name)
                    st.session_state.indexed_paths.append(str(dest.resolve()))
                except Exception as e:
                    # On failure the file is written to disk but NOT
                    # tracked, so the next rerun retries indexing.
                    st.error(f"Failed to index: {e}")
    st.markdown("---")
    st.markdown("### Session")
    # Short id generated at session start; also used as the agent thread id.
    st.code(st.session_state.session_id, language=None)
    st.markdown("### Indexed files")
    if st.session_state.indexed_files:
        for f in st.session_state.indexed_files:
            st.markdown(f"- `{f}`")
    else:
        st.caption("No files indexed yet.")
    st.markdown("---")
    st.markdown("### Analysis Mode")
    # Mode selection with clearer descriptions: (display label, internal value).
    mode_options = [
        ("Document Analysis (AI + Fact-Checker)", "Multi-agent (orchestrator + fact-checker)"),
        ("Quick Q&A (Direct Answers)", "Quick Q&A (single-shot RAG)")
    ]
    selected_display = st.radio(
        "",
        options=[option[0] for option in mode_options],
        index=0 if st.session_state.mode.startswith("Multi") else 1,
        label_visibility="collapsed",
    )
    # Map display selection back to internal mode value.
    for display, value in mode_options:
        if selected_display == display:
            st.session_state.mode = value
            break
    # Show description for selected mode.
    if st.session_state.mode.startswith("Multi"):
        st.caption("π Generate comprehensive document summaries with fact verification")
    else:
        st.caption("β Get direct answers to specific questions with sources")
    # Show summary type options only for document analysis mode.
    if st.session_state.mode.startswith("Multi"):
        st.markdown("**Summary Type:**")
        st.session_state.summary_type = st.selectbox(
            "",
            options=["executive", "detailed", "bullet"],
            index=["executive", "detailed", "bullet"].index(
                st.session_state.summary_type
            ),
            label_visibility="collapsed",
            format_func=lambda x: {
                "executive": "π Executive Summary (3-5 sentences)",
                "detailed": "π Detailed Analysis (structured sections)",
                "bullet": "πΈ Bullet Points (key facts & findings)"
            }[x]
        )
        st.caption("π‘ Your question guides what to focus on in the summary")
    else:
        st.caption("π‘ Ask specific questions and get direct answers with sources")
    st.markdown("---")
    st.markdown("### Tracing")
    # Surface whether LangSmith tracing is active, read from env vars
    # loaded via .env at startup.
    tracing_on = os.getenv("LANGSMITH_TRACING", "").lower() in {"true", "1", "yes"}
    project = os.getenv("LANGSMITH_PROJECT", "(default)")
    if tracing_on:
        st.success(f"LangSmith ON Β· project `{project}`")
    else:
        st.warning("LangSmith tracing disabled")
    if st.button("Clear chat history"):
        # Clears only the transcript; indexed documents remain available.
        st.session_state.messages = []
        st.rerun()
# ---------------------------------------------------------------------------
# Chat area
# ---------------------------------------------------------------------------
# Replay the stored transcript. Streamlit re-runs this script on every
# interaction, so prior turns must be re-rendered from session state.
for message in st.session_state.messages:
    role = message["role"]
    avatar = "π©Ί" if role == "assistant" else None
    with st.chat_message(role, avatar=avatar):
        st.markdown(message["content"])
        sources = message.get("sources")
        if sources:
            pill_fragments = []
            for src in sources:
                pill_fragments.append(
                    f'<span class="src-pill">{src["source"]} Β· p.{src["page"]}</span>'
                )
            st.markdown("".join(pill_fragments), unsafe_allow_html=True)
# Adaptive interface based on mode
if st.session_state.mode.startswith("Multi"):
    # Document Analysis Mode - Show button instead of chat input.
    st.markdown("---")
    st.markdown("### π Document Analysis")
    if not st.session_state.indexed_paths:
        st.warning("π Please upload a document first to begin analysis.")
    else:
        if st.button("π Analyze Document", type="primary", use_container_width=True):
            # Create a user message for the analysis so it appears in the
            # replayed transcript on subsequent reruns.
            analysis_prompt = f"Generate a {st.session_state.summary_type} analysis of the document"
            st.session_state.messages.append({"role": "user", "content": analysis_prompt})
            with st.chat_message("user"):
                st.markdown(f"π Analyze document ({st.session_state.summary_type} summary)")
            with st.chat_message("assistant", avatar="π©Ί"):
                try:
                    # Use the most recently uploaded document for analysis.
                    doc_path = st.session_state.indexed_paths[-1]
                    with st.status(
                        "Running document analysis...", expanded=True
                    ) as status:
                        # Progress narration for the user; the real work
                        # happens in the single analyze_with_agents call.
                        st.write("**1.** Orchestrator analyzing document")
                        st.write(
                            f"**2.** Generating {st.session_state.summary_type} summary"
                        )
                        st.write("**3.** Fact-checking the analysis")
                        result = analyze_with_agents(
                            document_path=doc_path,
                            query="comprehensive document analysis",
                            summary_type=st.session_state.summary_type,
                            thread_id=st.session_state.session_id,
                        )
                        status.update(
                            label="Document analysis complete", state="complete"
                        )
                    # Render the answer after the status box collapses.
                    st.markdown(result["answer"])
                    # Multi-agent results carry no per-chunk sources, so
                    # the transcript entry stores an empty list.
                    st.session_state.messages.append(
                        {
                            "role": "assistant",
                            "content": result["answer"],
                            "sources": [],
                        }
                    )
                except Exception as e:
                    # Record the failure in the transcript too, so the
                    # error survives the next rerun.
                    err = f"Sorry β something went wrong: `{type(e).__name__}: {e}`"
                    st.error(err)
                    st.session_state.messages.append(
                        {"role": "assistant", "content": err, "sources": []}
                    )
else:
    # Quick Q&A Mode - Traditional chat interface.
    prompt = st.chat_input("Ask a question about the uploaded document...")
    if prompt:
        # Persist the user turn before rendering so it replays on rerun.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant", avatar="π©Ί"):
            try:
                with st.spinner("Thinking..."):
                    # Single-shot RAG answer; result carries "answer" and
                    # a list of {"source", "page"} citation dicts.
                    result = answer_question(prompt)
                st.markdown(result["answer"])
                if result["sources"]:
                    # Render citations as styled pills (see .src-pill CSS).
                    pills = "".join(
                        f'<span class="src-pill">{s["source"]} Β· p.{s["page"]}</span>'
                        for s in result["sources"]
                    )
                    st.markdown(pills, unsafe_allow_html=True)
                st.session_state.messages.append(
                    {
                        "role": "assistant",
                        "content": result["answer"],
                        "sources": result["sources"],
                    }
                )
            except Exception as e:
                # Record the failure in the transcript so it survives rerun.
                err = f"Sorry β something went wrong: `{type(e).__name__}: {e}`"
                st.error(err)
                st.session_state.messages.append(
                    {"role": "assistant", "content": err, "sources": []}
                )