
Commit 89c45f0

v (4.3)

Maxed out UI/UX. Integrates AI chat with the specific document in focus, with 3 different modes. Smoother data flow for the local implementation.
1 parent ab3bdf1 commit 89c45f0

File tree

7 files changed: +1204 additions, -223 deletions


backend/app.py

Lines changed: 22 additions & 6 deletions
@@ -1,14 +1,16 @@
 from fastapi import FastAPI, File, UploadFile, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, FileResponse
+from fastapi.staticfiles import StaticFiles
 import os
 from pathlib import Path
 import uuid
 from routes.documents import router as documents_router
 from routes.search import router as search_router
 from routes.vector_search import router as vector_search_router
+from routes.chat import router as chat_router
 
-app = FastAPI(title="Document Research Assistant", version="0.3.0")
+app = FastAPI(title="Document Research Assistant", version="4.0.0")
 
 app.add_middleware(
     CORSMiddleware,
@@ -21,15 +23,29 @@
 UPLOAD_DIR = Path("uploads")
 UPLOAD_DIR.mkdir(exist_ok=True)
 
+# Get the root directory (parent of backend)
+root_dir = Path(__file__).parent.parent
+
+# Mount static files to serve the frontend
+app.mount("/static", StaticFiles(directory=root_dir), name="static")
+
+# Include routers with API prefix
 app.include_router(documents_router, prefix="/api")
 app.include_router(search_router, prefix="/api")
 app.include_router(vector_search_router, prefix="/api")
+app.include_router(chat_router, prefix="/api")
 
 @app.get("/")
-async def root():
-    return {"message": "Document Research Assistant API", "version": "0.3.0"}
+async def serve_frontend():
+    """Serve the main frontend HTML file."""
+    return FileResponse(root_dir / "index.html")
+
+@app.get("/api")
+async def api_root():
+    """API root endpoint providing information."""
+    return {"message": "Document Research Assistant API", "version": "4.0.0", "features": ["document_upload", "vector_search", "ai_chat"]}
 
-@app.post("/upload")
+@app.post("/api/upload")
 async def upload_document(file: UploadFile = File(...)):
     if not file.filename.endswith(('.pdf', '.docx', '.txt')):
         raise HTTPException(400, "Unsupported file type. Use PDF, DOCX, or TXT.")
@@ -49,7 +65,7 @@ async def upload_document(file: UploadFile = File(...)):
         "status": "uploaded"
     }
 
-@app.get("/documents")
+@app.get("/api/documents")
 async def list_documents():
     documents = []
     for file_path in UPLOAD_DIR.glob("*"):
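
Since this change moves every route under the /api prefix, existing clients need updated URLs. A minimal sketch of exercising the relocated endpoints with the requests library; the base address and the sample file name are assumptions for illustration, not part of this commit:

import requests  # third-party HTTP client, assumed installed

BASE = "http://localhost:8000"  # assumed local dev address

# The old root JSON message now lives at /api; "/" serves index.html instead.
print(requests.get(f"{BASE}/api").json())

# Upload moved from /upload to /api/upload.
with open("notes.txt", "rb") as f:  # hypothetical sample file
    resp = requests.post(f"{BASE}/api/upload", files={"file": ("notes.txt", f, "text/plain")})
print(resp.json())

# Listing moved from /documents to /api/documents.
print(requests.get(f"{BASE}/api/documents").json())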

backend/requirements.txt

Lines changed: 5 additions & 1 deletion
@@ -8,6 +8,10 @@ sqlalchemy==2.0.23
 psycopg2-binary==2.9.9
 pydantic==2.5.0
 pandas==2.1.4
-sentence-transformers==2.2.2
+sentence-transformers>=2.7.0
 faiss-cpu==1.7.4
 numpy==1.25.2
+torch>=2.0.0
+transformers>=4.51.0
+accelerate>=0.20.0
+huggingface-hub>=0.20.0,<1.0.0

backend/routes/chat.py

Lines changed: 179 additions & 0 deletions
@@ -0,0 +1,179 @@
+from fastapi import APIRouter, HTTPException, Query
+from pydantic import BaseModel
+from typing import List, Optional
+import sys
+import os
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from services.llm_service import llm_service
+from services.conversation_manager import conversation_manager
+from services.vector_store import VectorStore
+
+router = APIRouter()
+vector_store = VectorStore()
+
+class ChatRequest(BaseModel):
+    question: str
+    document_ids: Optional[List[str]] = None
+    conversation_id: Optional[str] = None
+    context_limit: Optional[int] = 5
+
+class ChatResponse(BaseModel):
+    answer: str
+    sources: List[dict]
+    confidence: float
+    conversation_id: str
+    tokens_used: Optional[int] = None
+
+class ConversationCreate(BaseModel):
+    document_ids: Optional[List[str]] = None
+
+@router.post("/chat/ask", response_model=ChatResponse)
+async def ask_question(request: ChatRequest):
+    if not request.question.strip():
+        raise HTTPException(400, "Question cannot be empty")
+
+    # Create conversation if none provided
+    if not request.conversation_id:
+        request.conversation_id = conversation_manager.create_conversation(request.document_ids)
+
+    # Verify conversation exists
+    conversation = conversation_manager.get_conversation(request.conversation_id)
+    if not conversation:
+        raise HTTPException(404, "Conversation not found")
+
+    try:
+        # Get context from vector search
+        if request.document_ids:
+            # Search within specific documents
+            context_chunks = []
+            for doc_id in request.document_ids:
+                if vector_store.document_exists(doc_id):
+                    doc_results = vector_store.search_by_document(
+                        doc_id, request.question, top_k=request.context_limit // len(request.document_ids) + 1
+                    )
+                    context_chunks.extend(doc_results)
+        else:
+            # Search across all documents
+            context_chunks = vector_store.search(request.question, top_k=request.context_limit)
+
+        # Get conversation history for context
+        conversation_history = conversation_manager.get_conversation_history(
+            request.conversation_id, limit=5
+        )
+
+        # Generate answer using LLM
+        llm_response = await llm_service.generate_answer(
+            request.question,
+            context_chunks,
+            conversation_history
+        )
+
+        # Save to conversation
+        conversation_manager.add_message(
+            request.conversation_id,
+            request.question,
+            llm_response["answer"],
+            llm_response["sources"],
+            llm_response["confidence"]
+        )
+
+        return ChatResponse(
+            answer=llm_response["answer"],
+            sources=llm_response["sources"],
+            confidence=llm_response["confidence"],
+            conversation_id=request.conversation_id,
+            tokens_used=llm_response.get("tokens_used")
+        )
+
+    except Exception as e:
+        raise HTTPException(500, f"Error processing question: {str(e)}")
+
+@router.post("/chat/conversations")
+async def create_conversation(request: ConversationCreate):
+    conversation_id = conversation_manager.create_conversation(request.document_ids)
+    return {
+        "conversation_id": conversation_id,
+        "document_ids": request.document_ids or [],
+        "status": "created"
+    }
+
+@router.get("/chat/conversations/{conversation_id}")
+async def get_conversation(conversation_id: str, limit: int = Query(10, ge=1, le=50)):
+    conversation = conversation_manager.get_conversation(conversation_id)
+    if not conversation:
+        raise HTTPException(404, "Conversation not found")
+
+    messages = conversation_manager.get_conversation_history(conversation_id, limit)
+
+    return {
+        "conversation_id": conversation_id,
+        "created_at": conversation["created_at"],
+        "document_ids": conversation.get("document_ids", []),
+        "total_messages": conversation["metadata"]["total_questions"],
+        "messages": messages
+    }
+
+@router.get("/chat/conversations")
+async def list_conversations(limit: int = Query(20, ge=1, le=100)):
+    conversations = conversation_manager.list_conversations(limit)
+    return {
+        "conversations": conversations,
+        "total": len(conversations)
+    }
+
+@router.delete("/chat/conversations/{conversation_id}")
+async def delete_conversation(conversation_id: str):
+    success = conversation_manager.delete_conversation(conversation_id)
+    if not success:
+        raise HTTPException(404, "Conversation not found")
+
+    return {"status": "deleted", "conversation_id": conversation_id}
+
+@router.get("/chat/status")
+async def get_chat_status():
+    return {
+        "llm_available": llm_service.enabled,
+        "model_name": llm_service.model_name if llm_service.enabled else None,
+        "vector_search_available": vector_store.enabled,
+        "total_documents": len(vector_store.documents),
+        "total_chunks": len(vector_store.chunks)
+    }
+
+@router.get("/search/enhanced")
+async def enhanced_search(
+    q: str = Query(..., min_length=1),
+    generate_answer: bool = Query(False),
+    document_ids: Optional[List[str]] = Query(None),
+    limit: int = Query(5, ge=1, le=20)
+):
+    try:
+        # Get search results
+        if document_ids:
+            search_results = []
+            for doc_id in document_ids:
+                if vector_store.document_exists(doc_id):
+                    doc_results = vector_store.search_by_document(doc_id, q, top_k=limit // len(document_ids) + 1)
+                    search_results.extend(doc_results)
+        else:
+            search_results = vector_store.search(q, top_k=limit)
+
+        response = {
+            "query": q,
+            "results": search_results,
+            "total_results": len(search_results)
+        }
+
+        # Generate AI answer if requested
+        if generate_answer and llm_service.enabled:
+            llm_response = await llm_service.generate_answer(q, search_results)
+            response["ai_answer"] = {
+                "answer": llm_response["answer"],
+                "confidence": llm_response["confidence"],
+                "sources": llm_response["sources"]
+            }
+
+        return response
+
+    except Exception as e:
+        raise HTTPException(500, f"Error performing enhanced search: {str(e)}")
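
A sketch of one chat round-trip against these new endpoints, assuming the router is mounted under /api (as app.py above does) on a locally running server; the base address, question text, and scoping choices are placeholders for illustration:

import requests  # assumed installed

BASE = "http://localhost:8000/api"  # assumed local dev address

# Check that the LLM and vector index are ready before chatting.
status = requests.get(f"{BASE}/chat/status").json()
print(status["llm_available"], status["total_chunks"])

# Ask a question; omitting conversation_id makes the server create one.
body = {
    "question": "What does the report conclude?",  # placeholder question
    "document_ids": None,  # or a list of uploaded document ids to scope the search
    "context_limit": 5,
}
reply = requests.post(f"{BASE}/chat/ask", json=body).json()
print(reply["answer"], reply["confidence"])

# Reuse the returned conversation_id for follow-ups and to fetch history.
conv_id = reply["conversation_id"]
history = requests.get(f"{BASE}/chat/conversations/{conv_id}", params={"limit": 10}).json()
print(history["total_messages"])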
