|
2 | 2 | import joblib |
3 | 3 | import pandas as pd |
4 | 4 | import logging |
5 | | -import sys |
| 5 | +import sys |
6 | 6 | from pydantic import ValidationError |
7 | 7 | from src.schemas import PredictRequest |
8 | 8 |
|
9 | 9 |
|
# --- Logging setup -------------------------------------------------------
# Messages are mirrored to a file on disk and to the terminal so local runs
# and container log collectors see the same stream.
log_file_path = "api_logs.log"

_log_handlers = [
    logging.FileHandler(log_file_path),
    # Explicitly target sys.stdout so output reaches the terminal / stdout
    # collector rather than the default stderr.
    logging.StreamHandler(sys.stdout),
]

logging.basicConfig(
    level=logging.INFO,
    format="*** FLASK API LOG: %(asctime)s - %(levelname)s - %(message)s ***",
    handlers=_log_handlers,
)
20 | 22 |
|
# Flask application instance; the route handlers below register against it.
app = Flask(__name__)
22 | 24 |
|
# Path to the trained model
# Relative to the process working directory; serialized with joblib —
# presumably produced by the project's training pipeline (TODO confirm).
MODEL_PATH = "models/model.joblib"
|
26 | 28 | try: |
27 | 29 | model = joblib.load(MODEL_PATH) |
|
33 | 35 | logging.error(f"Error loading model: {e}") |
34 | 36 | model = None |
35 | 37 |
|
| 38 | + |
# Endpoints
@app.route("/", methods=["GET"])
def health_check():
    """Liveness probe: report service status and whether the model loaded."""
    logging.info("Health check requested.")
    payload = {"status": "healthy", "model_loaded": model is not None}
    return jsonify(payload), 200
42 | 46 |
|
@app.route("/predict", methods=["POST"])
def predict():
    """Run inference on a single JSON payload.

    Expects a JSON body matching ``PredictRequest``. Returns the predicted
    class and both class probabilities (per the original comments: class 0
    is benign, class 1 is malignant).

    Responses:
        200 -- successful prediction
        400 -- body is not valid JSON
        422 -- JSON parsed but failed schema validation
        500 -- model unavailable or inference error
    """
    logging.info("Prediction endpoint hit.")
    if model is None:
        logging.error("Prediction requested but model is not loaded.")
        return jsonify(
            {
                "error": "Model not loaded. Please ensure the model is trained and available."
            }
        ), 500

    # Parse JSON. silent=True makes get_json return None on a malformed
    # body instead of raising, replacing the previous broad bare
    # `except Exception` around the parse.
    data = request.get_json(force=True, silent=True)
    if data is None:
        logging.warning("Invalid JSON body.")
        return jsonify({"error": "Invalid JSON body."}), 400

    # Schema validation using Pydantic (v2 API).
    try:
        payload = PredictRequest.model_validate(data)
    except ValidationError as e:
        logging.warning(f"Validation error: {e}")
        return jsonify({"error": "Invalid input", "details": e.errors()}), 422

    # Build a one-row DataFrame using field aliases so column names match
    # the names the model was trained with.
    input_df = pd.DataFrame([payload.model_dump(by_alias=True)])

    # Feature alignment: reorder to the model's training-time column order.
    # Missing columns are zero-filled, but no longer silently — unlogged
    # imputation can mask schema drift between client and model.
    if hasattr(model, "feature_names_in_"):
        expected_features = list(model.feature_names_in_)  # scikit-learn feature names
        missing_features = set(expected_features) - set(input_df.columns)
        if missing_features:
            logging.warning(
                f"Zero-filling features absent from request: {sorted(missing_features)}"
            )
        for feature in missing_features:
            input_df[feature] = 0
        input_df = input_df[expected_features]
    else:
        logging.warning(
            "model.feature_names_in_ not found. Relying on input JSON for feature order."
        )

    # Inference. Broad catch is deliberate here: this is the top-level
    # request boundary, and the error is logged with a traceback.
    try:
        prediction = model.predict(input_df)
        prediction_proba = model.predict_proba(input_df)
    except Exception as e:
        logging.error(f"Error during prediction: {e}", exc_info=True)
        return jsonify({"error": f"An internal error occurred: {e}"}), 500

    result = {
        "prediction": int(prediction[0]),
        "probability_benign": float(prediction_proba[0][0]),  # Class 0 is benign
        "probability_malignant": float(prediction_proba[0][1]),  # Class 1 is malignant
    }
    logging.info(f"Prediction successful: {result}")
    return jsonify(result), 200
92 | 102 |
|
if __name__ == "__main__":
    # Dev entry point: bind on all interfaces; the reloader is disabled so
    # the model is loaded exactly once per process.
    app.run(host="0.0.0.0", port=5000, debug=False, use_reloader=False)
0 commit comments