from networksecurity.entity.artifact_entity import DataIngestionArtifact
from networksecurity.entity.artifact_entity import DataValidationArtifact
from networksecurity.entity.config_entity import DataValidationConfig
from networksecurity.constant.training_pipeline import SCHEMA_FILE_PATH

from networksecurity.exception.exception import NetworkSecurityException
from networksecurity.logging.logger import logging
# write_yaml_file is assumed to live alongside read_yaml_file in the project's
# main_utils module; it is used below to persist the drift report.
from networksecurity.utils.main_utils.utils import read_yaml_file, write_yaml_file

from scipy.stats import ks_2samp
import os
import sys
import pandas as pd
import numpy as np

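# Data-validation step of the pipeline: checks that the ingested train/test
# splits carry the column set declared in the schema YAML and runs a
# per-column two-sample KS test to flag distribution drift between them.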
class DataValidation:

    def __init__(self, data_ingestion_artifact: DataIngestionArtifact,
                 data_validation_config: DataValidationConfig):
        try:
            self.data_ingestion_artifact = data_ingestion_artifact
            self.data_validation_config = data_validation_config
            self._schema_config = read_yaml_file(SCHEMA_FILE_PATH)
        except Exception as e:
            raise NetworkSecurityException(e, sys)

    @staticmethod
    def read_data(file_path) -> pd.DataFrame:
        try:
            return pd.read_csv(file_path)
        except Exception as e:
            raise NetworkSecurityException(e, sys)

    def validate_number_of_columns(self, dataframe: pd.DataFrame) -> bool:
        try:
            # Assumes each top-level entry in the schema YAML describes one
            # expected column, so its length is the required column count.
            number_of_columns = len(self._schema_config)
            logging.info(f"Required number of columns: {number_of_columns}")
            logging.info(f"Dataframe has columns: {len(dataframe.columns)}")
            return len(dataframe.columns) == number_of_columns
        except Exception as e:
            raise NetworkSecurityException(e, sys)

    def detect_dataset_drift(self, base_df, current_df, threshold=0.05) -> bool:
        try:
            status = True
            report = {}

            # Two-sample Kolmogorov-Smirnov test per column: a p-value below
            # the threshold means the column's distribution has drifted.
            for column in base_df.columns:
                d1 = base_df[column]
                d2 = current_df[column]
                is_same_dist = ks_2samp(d1, d2)

                if threshold <= is_same_dist.pvalue:
                    is_found = False
                else:
                    is_found = True
                    status = False
                report.update({column: {
                    "p_value": float(is_same_dist.pvalue),
                    "drift_status": is_found
                }})

            drift_report_file_path = self.data_validation_config.data_drift_report_file_path

            # create the report directory and persist the per-column report
            # (write_yaml_file is assumed from the project's utils)
            dir_path = os.path.dirname(drift_report_file_path)
            os.makedirs(dir_path, exist_ok=True)
            write_yaml_file(file_path=drift_report_file_path, content=report)

            return status
        except Exception as e:
            raise NetworkSecurityException(e, sys)

    def initiate_data_validation(self) -> DataValidationArtifact:
        try:
            train_file_path = self.data_ingestion_artifact.trained_file_path
            test_file_path = self.data_ingestion_artifact.test_file_path

            # read data from train and test
            train_dataframe = DataValidation.read_data(train_file_path)
            test_dataframe = DataValidation.read_data(test_file_path)

            # validate number of columns for the train and test sets,
            # accumulating every failure before raising
            error_message = ""
            status = self.validate_number_of_columns(dataframe=train_dataframe)
            if not status:
                error_message += "Train dataframe does not contain all columns.\n"

            status = self.validate_number_of_columns(dataframe=test_dataframe)
            if not status:
                error_message += "Test dataframe does not contain all columns.\n"

            if error_message:
                raise Exception(error_message)

            # check the data drift, treating the train set as the base distribution
            drift_status = self.detect_dataset_drift(base_df=train_dataframe,
                                                     current_df=test_dataframe)

            # Build the artifact. The field names below are an assumption about
            # the DataValidationArtifact dataclass; adjust to its definition.
            return DataValidationArtifact(
                validation_status=drift_status,
                valid_train_file_path=train_file_path,
                valid_test_file_path=test_file_path,
                drift_report_file_path=self.data_validation_config.data_drift_report_file_path,
            )
        except Exception as e:
            raise NetworkSecurityException(e, sys)

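# ---------------------------------------------------------------------------
# Standalone sketch of the drift check above (illustrative assumption, not
# project code): samples drawn from the same distribution keep the KS p-value
# above the 0.05 threshold, while a mean-shifted sample drives it below and
# would be reported as drift.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    base = pd.Series(rng.normal(loc=0.0, scale=1.0, size=1000))
    same = pd.Series(rng.normal(loc=0.0, scale=1.0, size=1000))
    shifted = pd.Series(rng.normal(loc=0.5, scale=1.0, size=1000))

    print(f"same distribution p-value: {ks_2samp(base, same).pvalue:.4f}")     # high -> no drift
    print(f"mean-shifted p-value:      {ks_2samp(base, shifted).pvalue:.3g}")  # low  -> drift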