from fastapi import APIRouter, HTTPException, Query
from pydantic import BaseModel
from typing import List, Optional
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from services.llm_service import llm_service
from services.conversation_manager import conversation_manager
from services.vector_store import VectorStore

router = APIRouter()
vector_store = VectorStore()

class ChatRequest(BaseModel):
    question: str
    document_ids: Optional[List[str]] = None
    conversation_id: Optional[str] = None
    context_limit: Optional[int] = 5

class ChatResponse(BaseModel):
    answer: str
    sources: List[dict]
    confidence: float
    conversation_id: str
    tokens_used: Optional[int] = None

class ConversationCreate(BaseModel):
    document_ids: Optional[List[str]] = None

@router.post("/chat/ask", response_model=ChatResponse)
async def ask_question(request: ChatRequest):
    """Answer a question using retrieved document context, conversation history, and the LLM."""
    if not request.question.strip():
        raise HTTPException(400, "Question cannot be empty")

    # Create conversation if none provided
    if not request.conversation_id:
        request.conversation_id = conversation_manager.create_conversation(request.document_ids)

    # Verify conversation exists
    conversation = conversation_manager.get_conversation(request.conversation_id)
    if not conversation:
        raise HTTPException(404, "Conversation not found")

    try:
        # Get context from vector search
        if request.document_ids:
            # Search within specific documents
            context_chunks = []
            for doc_id in request.document_ids:
                if vector_store.document_exists(doc_id):
                    doc_results = vector_store.search_by_document(
                        doc_id, request.question,
                        top_k=request.context_limit // len(request.document_ids) + 1
                    )
                    context_chunks.extend(doc_results)
        else:
            # Search across all documents
            context_chunks = vector_store.search(request.question, top_k=request.context_limit)

        # Get conversation history for context
        conversation_history = conversation_manager.get_conversation_history(
            request.conversation_id, limit=5
        )

        # Generate answer using LLM
        llm_response = await llm_service.generate_answer(
            request.question,
            context_chunks,
            conversation_history
        )

        # Save to conversation
        conversation_manager.add_message(
            request.conversation_id,
            request.question,
            llm_response["answer"],
            llm_response["sources"],
            llm_response["confidence"]
        )

        return ChatResponse(
            answer=llm_response["answer"],
            sources=llm_response["sources"],
            confidence=llm_response["confidence"],
            conversation_id=request.conversation_id,
            tokens_used=llm_response.get("tokens_used")
        )

    except Exception as e:
        raise HTTPException(500, f"Error processing question: {str(e)}")

@router.post("/chat/conversations")
async def create_conversation(request: ConversationCreate):
    conversation_id = conversation_manager.create_conversation(request.document_ids)
    return {
        "conversation_id": conversation_id,
        "document_ids": request.document_ids or [],
        "status": "created"
    }

@router.get("/chat/conversations/{conversation_id}")
async def get_conversation(conversation_id: str, limit: int = Query(10, ge=1, le=50)):
    conversation = conversation_manager.get_conversation(conversation_id)
    if not conversation:
        raise HTTPException(404, "Conversation not found")

    messages = conversation_manager.get_conversation_history(conversation_id, limit)

    return {
        "conversation_id": conversation_id,
        "created_at": conversation["created_at"],
        "document_ids": conversation.get("document_ids", []),
        "total_messages": conversation["metadata"]["total_questions"],
        "messages": messages
    }

@router.get("/chat/conversations")
async def list_conversations(limit: int = Query(20, ge=1, le=100)):
    conversations = conversation_manager.list_conversations(limit)
    return {
        "conversations": conversations,
        "total": len(conversations)
    }

@router.delete("/chat/conversations/{conversation_id}")
async def delete_conversation(conversation_id: str):
    success = conversation_manager.delete_conversation(conversation_id)
    if not success:
        raise HTTPException(404, "Conversation not found")

    return {"status": "deleted", "conversation_id": conversation_id}

@router.get("/chat/status")
async def get_chat_status():
    """Report availability of the LLM and vector store, plus basic corpus counts."""
    return {
        "llm_available": llm_service.enabled,
        "model_name": llm_service.model_name if llm_service.enabled else None,
        "vector_search_available": vector_store.enabled,
        "total_documents": len(vector_store.documents),
        "total_chunks": len(vector_store.chunks)
    }

@router.get("/search/enhanced")
async def enhanced_search(
    q: str = Query(..., min_length=1),
    generate_answer: bool = Query(False),
    document_ids: Optional[List[str]] = Query(None),
    limit: int = Query(5, ge=1, le=20)
):
    """Run a vector search and, if requested, generate an LLM answer from the results."""
    try:
        # Get search results
        if document_ids:
            search_results = []
            for doc_id in document_ids:
                if vector_store.document_exists(doc_id):
                    doc_results = vector_store.search_by_document(doc_id, q, top_k=limit // len(document_ids) + 1)
                    search_results.extend(doc_results)
        else:
            search_results = vector_store.search(q, top_k=limit)

        response = {
            "query": q,
            "results": search_results,
            "total_results": len(search_results)
        }

        # Generate AI answer if requested
        if generate_answer and llm_service.enabled:
            llm_response = await llm_service.generate_answer(q, search_results)
            response["ai_answer"] = {
                "answer": llm_response["answer"],
                "confidence": llm_response["confidence"],
                "sources": llm_response["sources"]
            }

        return response

    except Exception as e:
        raise HTTPException(500, f"Error performing enhanced search: {str(e)}")
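
# Usage sketch: mounting this router on a FastAPI app. The import path below
# ("routers.chat") is an assumption about the project layout, not something this
# file defines; adjust it to wherever this module actually lives.
#
#     from fastapi import FastAPI
#     from routers.chat import router as chat_router
#
#     app = FastAPI()
#     app.include_router(chat_router)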