import json
import logging
import time
import uuid
from contextlib import asynccontextmanager

import joblib
import pandas as pd
import uvicorn
from fastapi import BackgroundTasks, FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field

from src.models.predict import HeartDiseasePredictor
from src.monitoring.engine import MonitoringEngine
from src.monitoring.logger import MonitoringLogger
from src.utils.logger import setup_logger
from src.utils.version_utils import get_model_version

# Initialize Production-Grade Logger
# Module-level logger shared by every endpoint and the lifespan hooks.
logger = setup_logger("API-ENGINE")

18- app = FastAPI (
19- title = "CardioSense AI Inference Gateway" ,
20- description = "Production-grade clinical API for real-time risk assessment with traceability and audit logging." ,
21- version = "2.1.0"
22- )
23-
24- # Configuration & Loaders
25- MODEL_PATH = "models/heart_disease_model.joblib"
26- PREPROCESSOR_PATH = "models/preprocessor.joblib"
27- METADATA_PATH = "models/model_metadata.json"
28-
29- predictor = None
30- metadata = None
31- model_version = "Unknown"
32-
33- @app .on_event ("startup" )
34- def startup_event ():
23+ @asynccontextmanager
24+ async def lifespan (app : FastAPI ):
25+ """
26+ Lifespan context manager for initializing clinical model artifacts
27+ and cleaning up resources on shutdown.
28+ """
3529 global predictor , metadata , model_version
3630 logger .info ("Initializing Clinical Intelligence System..." )
3731
@@ -46,10 +40,34 @@ def startup_event():
4640 try :
4741 with open (METADATA_PATH , 'r' ) as f :
4842 metadata = json .load (f )
49- model_version = metadata .get ("version" , "1.0.0" )
43+ model_version = metadata .get ("version" , "2.4.0" )
44+ app .version = model_version # Sync FastAPI version if metadata updated
5045 logger .info (f"Clinical metadata loaded. Active Model Version: { model_version } " )
5146 except Exception as e :
5247 logger .error (f"Startup Warning: Could not parse metadata. { e } " )
48+
49+ yield
50+ # Cleanup logic (if needed) can go here
51+ logger .info ("Shutting down Clinical Intelligence System..." )
52+
# FastAPI application. Startup/shutdown logic runs via the `lifespan`
# context manager; the advertised version comes from the model metadata
# helper so the API version tracks the deployed model.
app = FastAPI(
    title="CardioSense AI: Clinical Decision Support API",
    description="Medical-grade cardiovascular risk stratification engine with integrated ACC/AHA safety guardrails and multi-modal explainability.",
    version=get_model_version(),
    lifespan=lifespan
)

# Configuration & Loaders
# Paths to the serialized model artifacts loaded during startup.
MODEL_PATH = "models/heart_disease_model.joblib"
PREPROCESSOR_PATH = "models/preprocessor.joblib"
METADATA_PATH = "models/model_metadata.json"

# Module-level state populated by the lifespan startup hook.
predictor = None       # inference engine; None until startup succeeds
metadata = None        # parsed model metadata (dict) or None
model_version = "Unknown"
# Monitoring collaborators: mon_logger persists predictions/feedback,
# mon_engine computes drift and performance reports over them.
mon_logger = MonitoringLogger()
mon_engine = MonitoringEngine()

5371
5472# --- PRODUCTION MIDDLEWARE ---
5573
@@ -129,11 +147,11 @@ def health_check():
129147 }
130148
131149@app .post ("/predict" )
132- def predict_risk (request : Request , data : PatientData ):
150+ def predict_risk (request : Request , data : PatientData , background_tasks : BackgroundTasks ):
133151 """
134- Executes real-time clinical risk prediction.
152+ Executes real-time clinical risk prediction with background monitoring .
135153 """
136- request_id = request .headers .get ("X-Request-ID" , "N/A" )
154+ request_id = request .headers .get ("X-Request-ID" , str ( uuid . uuid4 ()) )
137155
138156 if not predictor :
139157 logger .warning (f"REQ [{ request_id } ] | Inference attempted while model is offline." )
@@ -142,16 +160,49 @@ def predict_risk(request: Request, data: PatientData):
142160 input_df = pd .DataFrame ([data .dict ()])
143161 prediction , probability = predictor .predict (input_df )
144162
163+ prob_val = round (float (probability [0 ][1 ]), 4 )
145164 result = {
146165 "prediction" : int (prediction [0 ]),
147- "risk_probability" : round ( float ( probability [ 0 ][ 1 ]), 4 ) ,
166+ "risk_probability" : prob_val ,
148167 "status" : "Positive (High Risk)" if prediction [0 ] == 1 else "Negative (Low Risk)" ,
149168 "model_version" : model_version ,
150169 "request_id" : request_id
151170 }
152171
153- logger .info (f"REQ [{ request_id } ] | Inference Successful | Result: { result ['status' ]} ({ result ['risk_probability' ]} )" )
172+ # Async Persistence for Drift Monitoring
173+ background_tasks .add_task (
174+ mon_logger .log_prediction ,
175+ request_id , input_df , prediction [0 ], prob_val , model_version
176+ )
177+
178+ logger .info (f"REQ [{ request_id } ] | Inference Successful | Result: { result ['status' ]} " )
154179 return result
155180
@app.post("/feedback/{request_id}")
def submit_feedback(request_id: str, actual_outcome: int):
    """
    Clinician endpoint to submit the ground-truth outcome for a prior
    prediction, used for Concept Drift monitoring.

    Args:
        request_id: Identifier of the original prediction request.
        actual_outcome: Ground-truth label; must be 0 (Healthy) or 1 (Disease).

    Returns:
        dict: Confirmation payload echoing the request_id.

    Raises:
        HTTPException: 422 if actual_outcome is not a valid binary label,
            500 if persisting the feedback fails.
    """
    # Enforce the documented {0, 1} domain up front so bad input is
    # rejected as a client error instead of being persisted silently.
    if actual_outcome not in (0, 1):
        raise HTTPException(
            status_code=422,
            detail="actual_outcome must be 0 (Healthy) or 1 (Disease)."
        )
    try:
        mon_logger.log_feedback(request_id, actual_outcome)
    except Exception as e:
        # Log the full traceback server-side; return a generic message so
        # internal details (paths, stack info) are not leaked to clients.
        logger.exception(f"Failed to record feedback for REQ [{request_id}]")
        raise HTTPException(status_code=500, detail="Could not record feedback.") from e
    return {"status": "Feedback recorded", "request_id": request_id}

192+
@app.get("/monitoring/status")
def get_monitoring_status():
    """
    Report a high-level snapshot of model health: data/concept drift
    statistics over the most recent window plus a performance audit,
    stamped with the current epoch time.
    """
    # Evaluation order matches the monitoring pipeline: drift first,
    # then the performance audit, then the report timestamp.
    report = {
        "drift": mon_engine.run_drift_analysis(window_size=100),
        "performance": mon_engine.run_performance_audit(),
        "timestamp": time.time(),
    }
    return report

206+
# Script entry point: launch the API with uvicorn, bound to all
# interfaces on port 8000.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)