#!/usr/bin/env python3
# PacketValidator - Independent script to validate generated network packets
# For ACI IoT 2023 Dataset
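"""
PacketValidator: standalone validation of generated network packets against
the original ACI IoT 2023 dataset, using per-class One-Class SVM and
Isolation Forest anomaly detectors.

Example invocation (paths are illustrative, not the shipped defaults):

    python NetworkPacketValidatorGA.py \
        --original data/ACI-IoT-2023.csv \
        --generated generated_packets \
        --report reports
"""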
import argparse
import glob
import os
import traceback
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import StandardScaler
from sklearn.svm import OneClassSVM

# Suppress warnings
warnings.filterwarnings('ignore')
class PacketValidator:
    def __init__(self, original_dataset_path, generated_packets_dir, report_path):
        """
        Initialize the packet validator.

        Args:
            original_dataset_path: Path to the original ACI IoT 2023 dataset
            generated_packets_dir: Directory containing generated packet CSV files
            report_path: Directory to save validation reports
        """
        self.original_dataset_path = original_dataset_path
        self.generated_packets_dir = generated_packets_dir
        self.report_path = report_path
        self.flow_id_column = 'Flow ID'
        self.drop_columns = ['Src IP', 'Dst IP', 'Timestamp']
        self.scaler = None
        self.classes = []
        self.models = {}
        # Create the report directory if it doesn't exist
        if not os.path.exists(report_path):
            os.makedirs(report_path)
        print("Packet Validator initialized.")
    def load_original_data(self):
        """
        Load the original ACI IoT 2023 dataset.
        """
        print(f"Loading original dataset from {self.original_dataset_path}...")
        try:
            # Check that the file exists
            if not os.path.exists(self.original_dataset_path):
                print(f"Error: Original dataset file not found at {self.original_dataset_path}")
                return False
            # Load data
            self.original_data = pd.read_csv(self.original_dataset_path)
            print(f"Original dataset loaded with shape: {self.original_data.shape}")
            # Check whether the dataset is empty
            if self.original_data.empty:
                print("Error: Original dataset is empty")
                return False
            # Identify columns to drop
            columns_to_drop = [col for col in self.drop_columns if col in self.original_data.columns]
            # Check whether a 'Label' column exists; if not, try to find another label column
            if 'Label' not in self.original_data.columns:
                label_candidates = ['label', 'class', 'Class', 'category', 'Category', 'Attack', 'attack_type']
                label_found = False
                for candidate in label_candidates:
                    if candidate in self.original_data.columns:
                        self.original_data.rename(columns={candidate: 'Label'}, inplace=True)
                        label_found = True
                        break
                if not label_found:
                    print("Error: No label column found in original dataset")
                    return False
            # Extract classes
            self.classes = self.original_data['Label'].unique()
            print(f"Found {len(self.classes)} classes: {self.classes}")
            # Check whether the flow ID column exists
            if self.flow_id_column not in self.original_data.columns:
                print(f"Warning: {self.flow_id_column} column not found in original dataset")
                # Try to find another suitable column
                flow_id_candidates = ['Flow ID', 'flow_id', 'FlowID', 'flow-id', 'flow.id']
                flow_id_found = False
                for candidate in flow_id_candidates:
                    if candidate in self.original_data.columns and candidate != self.flow_id_column:
                        print(f"Using {candidate} as Flow ID column")
                        self.original_data.rename(columns={candidate: self.flow_id_column}, inplace=True)
                        flow_id_found = True
                        break
                if not flow_id_found:
                    print("Warning: No Flow ID column found. Creating a dummy one.")
                    self.original_data[self.flow_id_column] = [f"flow_{i}" for i in range(len(self.original_data))]
            # Preprocess data (drop columns, handle missing values)
            self.original_data = self.original_data.drop(columns=columns_to_drop, errors='ignore')
            # Replace infinities with NaN, then fill NaN values
            self.original_data = self.original_data.replace([np.inf, -np.inf], np.nan)
            # Check data quality
            nan_counts = self.original_data.isna().sum()
            if nan_counts.sum() > 0:
                print(f"Found {nan_counts.sum()} missing values in the original dataset")
                print(nan_counts[nan_counts > 0])
                # Fill numeric columns with the mean, non-numeric with the most frequent value
                numeric_cols = self.original_data.select_dtypes(include=np.number).columns
                for col in self.original_data.columns:
                    if col in numeric_cols:
                        col_mean = self.original_data[col].mean()
                        if pd.isna(col_mean):  # If the mean is NaN, use 0
                            self.original_data[col] = self.original_data[col].fillna(0)
                        else:
                            self.original_data[col] = self.original_data[col].fillna(col_mean)
                    else:
                        # Only fill non-numeric columns that aren't Flow ID or Label
                        if col not in [self.flow_id_column, 'Label']:
                            if self.original_data[col].mode().shape[0] > 0:
                                self.original_data[col] = self.original_data[col].fillna(self.original_data[col].mode()[0])
                            else:
                                self.original_data[col] = self.original_data[col].fillna("Unknown")
            # Identify non-numeric columns
            non_numeric_cols = self.original_data.select_dtypes(exclude=np.number).columns.tolist()
            # Remove Label from non_numeric_cols if present
            if 'Label' in non_numeric_cols:
                non_numeric_cols.remove('Label')
            # Remove Flow ID from non_numeric_cols if present
            if self.flow_id_column in non_numeric_cols:
                non_numeric_cols.remove(self.flow_id_column)
            # One-hot encode non-numeric columns that aren't Flow ID or Label
            if non_numeric_cols:
                print(f"One-hot encoding non-numeric columns: {non_numeric_cols}")
                try:
                    self.original_data = pd.get_dummies(self.original_data, columns=non_numeric_cols)
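                    # Illustration (hypothetical column): a 'Protocol' column with
                    # values {'TCP', 'UDP'} becomes 0/1 indicator columns
                    # Protocol_TCP and Protocol_UDP, one per distinct value.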
                except Exception as e:
                    print(f"Error one-hot encoding: {e}")
                    # Alternative approach: encode each column separately
                    for col in non_numeric_cols:
                        try:
                            # Get dummies for this column
                            dummies = pd.get_dummies(self.original_data[col], prefix=col)
                            # Drop the original column and join with the dummies
                            self.original_data = self.original_data.drop(columns=[col]).join(dummies)
                        except Exception as e2:
                            print(f"Error encoding column {col}: {e2}")
                            # Last resort: drop the column
                            self.original_data = self.original_data.drop(columns=[col], errors='ignore')
            print("Original data preprocessing completed.")
            return True
        except Exception as e:
            print(f"Error loading original dataset: {e}")
            print(traceback.format_exc())
            return False
    def prepare_training_data(self):
        """
        Prepare training data for the anomaly detection models.
        """
        print("Preparing training data...")
        try:
            # Work on a copy of the data
            X = self.original_data.copy()
            # Check that the Label column exists
            if 'Label' not in X.columns:
                print("Error: Label column not found")
                return None
            # Save the labels, then drop them from the training set
            self.labels = X['Label'].copy()
            X = X.drop('Label', axis=1)
            # Keep Flow ID for reference but don't use it for training
            if self.flow_id_column in X.columns:
                X = X.drop(self.flow_id_column, axis=1)
            # Handle any remaining non-numeric columns
            non_numeric_cols = X.select_dtypes(exclude=np.number).columns.tolist()
            if non_numeric_cols:
                print(f"Warning: Found additional non-numeric columns: {non_numeric_cols}")
                print("Attempting to convert to numeric or drop...")
                for col in non_numeric_cols:
                    try:
                        X[col] = pd.to_numeric(X[col], errors='coerce')
                        X[col] = X[col].fillna(X[col].mean() if not pd.isna(X[col].mean()) else 0)
                    except Exception as e:
                        print(f"Error converting column {col}: {e}")
                        X = X.drop(columns=[col])
            # Drop any constant (zero-variance) columns
            constant_cols = [col for col in X.columns if X[col].nunique() <= 1]
            if constant_cols:
                print(f"Dropping {len(constant_cols)} constant columns: {constant_cols}")
                X = X.drop(columns=constant_cols)
            # Check that columns remain after preprocessing
            if X.empty or X.shape[1] == 0:
                print("Error: No features left after preprocessing")
                return None
            # Final check for any remaining issues
            X = X.replace([np.inf, -np.inf], np.nan)
            if X.isna().any().any():
                print("Warning: Missing values remain after preprocessing. Filling with zeros.")
                X = X.fillna(0)
            # Scale the features
            try:
                self.scaler = StandardScaler()
                X_scaled = self.scaler.fit_transform(X)
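                # StandardScaler standardizes each feature column-wise as
                # z = (x - mean) / std, with mean and std estimated from the
                # original dataset here and reused later via transform() on
                # the generated packets.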
            except Exception as e:
                print(f"Error scaling features: {e}")
                print("Using unscaled data...")
                # Create a dummy scaler that just returns its input
                class DummyScaler:
                    def transform(self, X):
                        return X
                    def inverse_transform(self, X):
                        return X
                self.scaler = DummyScaler()
                X_scaled = X.values
            # Save the feature names for later use
            self.feature_names = X.columns.tolist()
            print(f"Training data prepared with {X_scaled.shape[1]} features.")
            return X_scaled
        except Exception as e:
            print(f"Error preparing training data: {e}")
            print(traceback.format_exc())
            return None
    def train_anomaly_models(self, X_scaled):
        """
        Train anomaly detection models for each class.
        """
        print("Training anomaly detection models for each class...")
        for cls in self.classes:
            print(f"\nTraining models for class: {cls}")
            # Filter data for this class
            class_indices = np.where(self.labels == cls)[0]
            X_class = X_scaled[class_indices]
            # Skip if there is no data for this class
            if len(X_class) == 0:
                print(f"Warning: No data found for class {cls}. Skipping model training.")
                continue
            # Adjust parameters based on the class size
            nu = min(0.1, 1.0 / (len(X_class) + 1))  # Avoid nu > 1/n, which causes an error
            contamination = min(0.1, 1.0 / (len(X_class) + 1))
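            # Worked example (illustrative): with 50 samples in a class,
            # 1 / (50 + 1) ≈ 0.0196 < 0.1, so nu = contamination ≈ 0.0196;
            # with 5 samples, 1 / 6 ≈ 0.167 exceeds the cap, so both fall
            # back to 0.1.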
            # Train the One-Class SVM model with error handling
            print(f"Training One-Class SVM for class {cls}...")
            try:
                ocsvm = OneClassSVM(nu=nu, kernel="rbf", gamma='scale')
                ocsvm.fit(X_class)
            except Exception as e:
                print(f"Error training One-Class SVM for class {cls}: {e}")
                # Fall back to a more robust setting
                try:
                    ocsvm = OneClassSVM(nu=0.01, kernel="linear")
                    ocsvm.fit(X_class)
                except Exception as e2:
                    print(f"Fallback One-Class SVM failed for class {cls}: {e2}")
                    # Create a dummy model that treats everything as normal
                    class DummyPositiveModel:
                        def predict(self, X):
                            return np.ones(len(X))
                    ocsvm = DummyPositiveModel()
            # Train the Isolation Forest model with error handling
            print(f"Training Isolation Forest for class {cls}...")
            try:
                isolation_forest = IsolationForest(contamination=contamination, random_state=42)
                isolation_forest.fit(X_class)
            except Exception as e:
                print(f"Error training Isolation Forest for class {cls}: {e}")
                # Create a dummy model that treats everything as normal
                class DummyPositiveModel:
                    def predict(self, X):
                        return np.ones(len(X))
                isolation_forest = DummyPositiveModel()
            # Store the models
            self.models[cls] = {
                'ocsvm': ocsvm,
                'isolation_forest': isolation_forest
            }
        print("Model training completed.")
    def load_generated_packets(self):
        """
        Load generated packet files for validation.
        """
        print(f"Loading generated packets from {self.generated_packets_dir}...")
        self.generated_packets = {}
        try:
            # Find all generated packet files
            pattern = os.path.join(self.generated_packets_dir, '*_generated.csv')
            packet_files = glob.glob(pattern)
            if not packet_files:
                print(f"Error: No generated packet files found in {self.generated_packets_dir}")
                return False
            print(f"Found {len(packet_files)} generated packet files")
            # Load each file
            for file_path in packet_files:
                try:
                    # Extract the class name from the file name
                    file_name = os.path.basename(file_path)
                    class_name = file_name.replace('_generated.csv', '')
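                    # Illustration (hypothetical file name): "Port_Scan_generated.csv"
                    # yields class_name "Port_Scan", which is matched against the
                    # sanitized class labels below.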
                    # For file names that don't match class names directly, try to map them
                    mapped_class = None
                    for cls in self.classes:
                        safe_cls_name = cls.replace('/', '_').replace(' ', '_').replace('\\', '_')
                        if safe_cls_name == class_name:
                            mapped_class = cls
                            break
                    if mapped_class is None:
                        print(f"Warning: Could not map file {file_name} to any class. Using the name as is.")
                        mapped_class = class_name
                    # Load data
                    df = pd.read_csv(file_path)
                    # Check whether the dataframe is empty
                    if df.empty:
                        print(f"Warning: Empty dataframe for file {file_path}")
                        continue
                    # Check for the required columns
                    required_cols = [self.flow_id_column, 'Label']
                    missing_cols = [col for col in required_cols if col not in df.columns]
                    if missing_cols:
                        print(f"Warning: Missing required columns in {file_path}: {missing_cols}")
                        # Add missing columns with default values if needed
                        for col in missing_cols:
                            if col == 'Label':
                                df[col] = mapped_class
                            elif col == self.flow_id_column:
                                df[col] = [f"generated_flow_{i}" for i in range(len(df))]
                    # Store the data
                    self.generated_packets[mapped_class] = df
                    print(f"Loaded {len(df)} packets for class {mapped_class}")
                except Exception as e:
                    print(f"Error loading file {file_path}: {e}")
            if not self.generated_packets:
                print("Error: No valid generated packet files could be loaded")
                return False
            print("Generated packets loaded successfully.")
            return True
        except Exception as e:
            print(f"Error loading generated packets: {e}")
            print(traceback.format_exc())
            return False
    def validate_generated_packets(self):
        """
        Validate generated packets using the trained anomaly detection models.
        """
        print("Validating generated packets...")
        validation_results = []
        detailed_results = {}
        for cls, df in self.generated_packets.items():
            print(f"\nValidating packets for class: {cls}")
            # Check that models exist for this class
            if cls not in self.models:
                print(f"Warning: No models found for class {cls}")
                continue
            # Remove Flow ID and Label for validation if they exist
            X = df.drop([col for col in [self.flow_id_column, 'Label'] if col in df.columns], axis=1)
            # Check that features remain
            if X.empty:
                print(f"Warning: No features left after dropping required columns for class {cls}")
                continue
            # Check for non-numeric columns
            non_numeric_cols = X.select_dtypes(exclude=np.number).columns.tolist()
            if non_numeric_cols:
                print(f"Warning: Non-numeric columns found: {non_numeric_cols}. Converting to numeric.")
                for col in non_numeric_cols:
                    X[col] = pd.to_numeric(X[col], errors='coerce')
                X = X.fillna(0)  # Fill NaN values with 0
            # Align feature columns with the training data
            missing_cols = set(self.feature_names) - set(X.columns)
            extra_cols = set(X.columns) - set(self.feature_names)
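            # Illustration (hypothetical feature names): if training used
            # features {a, b, c} and the generated file provides {a, b, d},
            # then c is added below as an all-zero column and d is dropped,
            # so the column sets match before scaling.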
            if missing_cols:
                print(f"Warning: Missing columns in generated data: {missing_cols}")
                # Add the missing columns filled with zeros
                for col in missing_cols:
                    X[col] = 0
            if extra_cols:
                print(f"Warning: Extra columns in generated data: {extra_cols}")
                # Drop the extra columns
                X = X.drop(columns=extra_cols)
            # Reorder columns to match the training data
            try:
                X = X[self.feature_names]
            except Exception as e:
                print(f"Error reordering columns: {e}")
                # Try to continue with the available features
                common_cols = list(set(X.columns) & set(self.feature_names))
                if not common_cols:
                    print(f"Error: No common features between original and generated data for class {cls}")
                    continue
                X = X[common_cols]
                # Use only the common columns for validation
                print(f"Using {len(common_cols)} common features for validation")
            # Scale the features
            try:
                X_scaled = self.scaler.transform(X)
            except Exception as e:
                print(f"Error scaling features: {e}")
                # Try to continue with unscaled data
                X_scaled = X.values
            # Get the models for this class
            models = self.models[cls]
            ocsvm = models['ocsvm']
            isolation_forest = models['isolation_forest']
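            # Note: scikit-learn's OneClassSVM and IsolationForest return +1
            # for inliers and -1 for outliers from predict(), so anomalies are
            # counted as -1 predictions below.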
            # Validate with the One-Class SVM
            try:
                ocsvm_preds = ocsvm.predict(X_scaled)
                ocsvm_anomaly_count = np.sum(ocsvm_preds == -1)
                ocsvm_anomaly_rate = (ocsvm_anomaly_count / len(ocsvm_preds)) * 100
                print(f"One-Class SVM - Anomaly Rate: {ocsvm_anomaly_rate:.2f}%")
            except Exception as e:
                print(f"Error validating with One-Class SVM: {e}")
                ocsvm_preds = np.ones(len(X_scaled))  # Treat all packets as normal
                ocsvm_anomaly_count = 0
                ocsvm_anomaly_rate = 0
            # Validate with the Isolation Forest
            try:
                if_preds = isolation_forest.predict(X_scaled)
                if_anomaly_count = np.sum(if_preds == -1)
                if_anomaly_rate = (if_anomaly_count / len(if_preds)) * 100
                print(f"Isolation Forest - Anomaly Rate: {if_anomaly_rate:.2f}%")
            except Exception as e:
                print(f"Error validating with Isolation Forest: {e}")
                if_preds = np.ones(len(X_scaled))  # Treat all packets as normal
                if_anomaly_count = 0
                if_anomaly_rate = 0
            # Store the validation results
            validation_results.append({
                'Class': cls,
                'Total Packets': len(X_scaled),
                'OCSVM Anomaly Count': ocsvm_anomaly_count,
                'OCSVM Anomaly Rate (%)': ocsvm_anomaly_rate,
                'OCSVM Status': 'PASS' if ocsvm_anomaly_rate < 30 else 'FAIL',
                'Isolation Forest Anomaly Count': if_anomaly_count,
                'Isolation Forest Anomaly Rate (%)': if_anomaly_rate,
                'Isolation Forest Status': 'PASS' if if_anomaly_rate < 30 else 'FAIL',
                'Overall Status': 'PASS' if (ocsvm_anomaly_rate < 30 and if_anomaly_rate < 30) else 'FAIL'
            })
            # Store the per-packet results
            packet_results = []
            if self.flow_id_column in df.columns:
                flow_ids = df[self.flow_id_column].values
            else:
                flow_ids = [f"flow_{i}" for i in range(len(X_scaled))]
            for i in range(len(X_scaled)):
                packet_results.append({
                    'Flow ID': flow_ids[i],
                    'OCSVM Prediction': 'Anomaly' if ocsvm_preds[i] == -1 else 'Normal',
                    'Isolation Forest Prediction': 'Anomaly' if if_preds[i] == -1 else 'Normal'
                })
            detailed_results[cls] = packet_results
            # Check whether the anomaly rate is below 30% (as per requirements)
            if ocsvm_anomaly_rate < 30 and if_anomaly_rate < 30:
                print(f"Success: Generated packets for class {cls} pass the anomaly detection test")
            else:
                print(f"Warning: Generated packets for class {cls} have a high anomaly rate (> 30%)")
        # Create a summary dataframe and save it to CSV
        summary_df = pd.DataFrame(validation_results)
        # Calculate overall statistics
        if not summary_df.empty:
            total_packets = summary_df['Total Packets'].sum()
            total_ocsvm_anomaly = summary_df['OCSVM Anomaly Count'].sum()
            total_if_anomaly = summary_df['Isolation Forest Anomaly Count'].sum()
            overall_ocsvm_rate = (total_ocsvm_anomaly / total_packets) * 100 if total_packets > 0 else 0
            overall_if_rate = (total_if_anomaly / total_packets) * 100 if total_packets > 0 else 0
            # Add a row for the overall statistics
            overall_stats = {
                'Class': 'OVERALL',
                'Total Packets': total_packets,
                'OCSVM Anomaly Count': total_ocsvm_anomaly,
                'OCSVM Anomaly Rate (%)': overall_ocsvm_rate,
                'OCSVM Status': 'PASS' if overall_ocsvm_rate < 30 else 'FAIL',
                'Isolation Forest Anomaly Count': total_if_anomaly,
                'Isolation Forest Anomaly Rate (%)': overall_if_rate,
                'Isolation Forest Status': 'PASS' if overall_if_rate < 30 else 'FAIL',
                'Overall Status': 'PASS' if (overall_ocsvm_rate < 30 and overall_if_rate < 30) else 'FAIL'
            }
            summary_df = pd.concat([summary_df, pd.DataFrame([overall_stats])], ignore_index=True)
        # Save the summary to CSV
        summary_path = os.path.join(self.report_path, "anomaly_validation_summary.csv")
        summary_df.to_csv(summary_path, index=False)
        print(f"Saved anomaly validation summary to {summary_path}")
        # Generate visualizations
        self.generate_anomaly_visualization(summary_df)
        return summary_df
    def generate_anomaly_visualization(self, summary_df):
        """
        Generate visualizations of the anomaly detection results.

        Args:
            summary_df: Summary dataframe of validation results
        """
        try:
            # Filter out the OVERALL row for the bar chart
            plot_df = summary_df[summary_df['Class'] != 'OVERALL'].copy()
            # Create a grouped bar chart comparing OCSVM and Isolation Forest
            x = np.arange(len(plot_df))
            width = 0.35
            fig, ax = plt.subplots(figsize=(14, 8))
            rects1 = ax.bar(x - width/2, plot_df['OCSVM Anomaly Rate (%)'], width, label='One-Class SVM')
            rects2 = ax.bar(x + width/2, plot_df['Isolation Forest Anomaly Rate (%)'], width, label='Isolation Forest')
            # Add the 30% threshold line
            ax.axhline(y=30, color='r', linestyle='--', alpha=0.7, label='30% Threshold')
            # Add labels and a title
            ax.set_xlabel('Attack Class')
            ax.set_ylabel('Anomaly Rate (%)')
            ax.set_title('Anomaly Detection Comparison: One-Class SVM vs Isolation Forest')
            ax.set_xticks(x)
            ax.set_xticklabels(plot_df['Class'])
            plt.xticks(rotation=45, ha='right')
            ax.legend()
            # Add text labels above the bars
            def autolabel(rects):
                for rect in rects:
                    height = rect.get_height()
                    ax.annotate(f'{height:.1f}%',
                                xy=(rect.get_x() + rect.get_width() / 2, height),
                                xytext=(0, 3),  # 3-point vertical offset
                                textcoords="offset points",
                                ha='center', va='bottom')
            autolabel(rects1)
            autolabel(rects2)
            plt.tight_layout()
            # Save the plot
            plt.savefig(os.path.join(self.report_path, "anomaly_comparison.png"), dpi=300)
            plt.close()
            # Create pie charts for the overall results
            plt.figure(figsize=(15, 7))
            # Get the overall stats
            overall_row = summary_df[summary_df['Class'] == 'OVERALL'].iloc[0]
            # OCSVM summary
            plt.subplot(1, 2, 1)
            ocsvm_normal = overall_row['Total Packets'] - overall_row['OCSVM Anomaly Count']
            ocsvm_anomaly = overall_row['OCSVM Anomaly Count']
            if ocsvm_anomaly > 0:
                plt.pie([ocsvm_normal, ocsvm_anomaly],
                        labels=['Normal', 'Anomaly'],
                        autopct='%1.1f%%',
                        colors=['#4CAF50', '#F44336'],
                        explode=(0, 0.1),
                        shadow=True)
            else:
                plt.pie([1], labels=['Normal (100%)'], colors=['#4CAF50'])
            plt.title('One-Class SVM Results')
            # Isolation Forest summary
            plt.subplot(1, 2, 2)
            if_normal = overall_row['Total Packets'] - overall_row['Isolation Forest Anomaly Count']
            if_anomaly = overall_row['Isolation Forest Anomaly Count']
            if if_anomaly > 0:
                plt.pie([if_normal, if_anomaly],
                        labels=['Normal', 'Anomaly'],
                        autopct='%1.1f%%',
                        colors=['#4CAF50', '#F44336'],
                        explode=(0, 0.1),
                        shadow=True)
            else:
                plt.pie([1], labels=['Normal (100%)'], colors=['#4CAF50'])
            plt.title('Isolation Forest Results')
            plt.suptitle('Overall Anomaly Detection Results', fontsize=16)
            plt.tight_layout()
            # Save the figure
            plt.savefig(os.path.join(self.report_path, "anomaly_summary_pie_charts.png"), dpi=300)
            plt.close()
            print("Generated anomaly detection visualizations")
        except Exception as e:
            print(f"Error generating visualizations: {e}")
            print(traceback.format_exc())
    def run(self):
        """
        Run the complete validation pipeline.
        """
        print("Starting the validation pipeline...")
        try:
            # Step 1: Load and preprocess the original data
            if not self.load_original_data():
                print("Error loading original data. Exiting.")
                return
            # Step 2: Prepare the training data
            X_scaled = self.prepare_training_data()
            if X_scaled is None:
                print("Error preparing training data. Exiting.")
                return
            # Step 3: Train the anomaly detection models
            self.train_anomaly_models(X_scaled)
            # Step 4: Load the generated packets
            if not self.load_generated_packets():
                print("Error loading generated packets. Exiting.")
                return
            # Step 5: Validate the generated packets
            self.validate_generated_packets()
            print("Validation pipeline completed successfully!")
        except Exception as e:
            print(f"Error in validation pipeline: {e}")
            print(traceback.format_exc())
if __name__ == "__main__":
    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Validate generated network packets against the ACI IoT 2023 dataset')
    parser.add_argument('--original', type=str,
                        default="/Users/mayankraj/Desktop/RESEARCH/Thesis Codes /archive/ACI-IoT-2023.csv",
                        help='Path to the original ACI IoT 2023 dataset')
    parser.add_argument('--generated', type=str,
                        default="generated_packets",
                        help='Directory containing generated packet CSV files')
    parser.add_argument('--report', type=str,
                        default="/Users/mayankraj/Desktop/RESEARCH/Project 2 V3",
                        help='Directory to save validation reports')
    args = parser.parse_args()
    try:
        # Create the validator and run the validation
        validator = PacketValidator(
            original_dataset_path=args.original,
            generated_packets_dir=args.generated,
            report_path=args.report
        )
        validator.run()
    except Exception as e:
        print(f"Error: {e}")
        print(traceback.format_exc())
        print("Program terminated with errors.")