-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathFeature_selection
More file actions
68 lines (51 loc) · 2.26 KB
/
Feature_selection
File metadata and controls
68 lines (51 loc) · 2.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import folium
# Load the prepared train/test splits written by the upstream pipeline.
train_set = pd.read_csv('teamdakotas/final_train.csv')
test_set = pd.read_csv('teamdakotas/final_test.csv')
train_set.columns
# Columns whose dtype is not numeric cannot feed the regressors directly.
non_numeric_cols = train_set.select_dtypes(exclude=['number']).columns
print(f'Non-Numeric Columns: {non_numeric_cols}')
# Feature matrix: drop the non-numeric columns, then the three target columns.
X = train_set.drop(columns=non_numeric_cols).drop(columns=['YLD_BE', 'TWT', 'MST'])
# Three separate regression targets pulled straight from the training frame.
y_yield = train_set['YLD_BE']   # yield
y_twt = train_set['TWT']        # total weight
y_mst = train_set['MST']        # moisture content
# Coerce every remaining column to numeric; unparseable cells become NaN...
X = X.apply(pd.to_numeric, errors='coerce')
# ...and impute those NaNs with the per-column mean.
X.fillna(X.mean(), inplace=True)
# Sanity check: every dtype printed here should now be numeric.
print(X.dtypes)
from sklearn.ensemble import RandomForestRegressor

def _fit_forest(target):
    """Fit a 100-tree random forest on X against *target* and return it.

    All three models share the same hyper-parameters and random_state so
    their feature importances are comparable.
    """
    forest = RandomForestRegressor(n_estimators=100, random_state=42)
    forest.fit(X, target)
    return forest

model_yield = _fit_forest(y_yield)  # predicts YLD_BE
model_twt = _fit_forest(y_twt)      # predicts TWT
model_mst = _fit_forest(y_mst)      # predicts MST
# Per-feature importance vectors from each fitted forest (aligned with X.columns).
feature_importance_yield = model_yield.feature_importances_
feature_importance_twt = model_twt.feature_importances_
feature_importance_mst = model_mst.feature_importances_
# Blend the three target-specific vectors into one score per feature.
# NOTE(review): moisture importance is SUBTRACTED (favoring features that matter
# for yield/weight but not moisture), yet the result is still divided by 3 as if
# averaging all three — confirm this weighting is intentional.
combined_importance = (feature_importance_yield + feature_importance_twt - feature_importance_mst) / 3
# Tabulate and rank the blended scores, highest first, for inspection.
features = X.columns
importance_df = (
    pd.DataFrame({'Feature': features, 'Importance': combined_importance})
    .sort_values(by='Importance', ascending=False)
)
print(importance_df.head(10))  # show the ten strongest features
# Second ranking pass: use the YIELD importances alone, discarding the blended
# score (this rebinds combined_importance / importance_df from the section above).
combined_importance = feature_importance_yield
features = X.columns
importance_df = pd.DataFrame({'Feature': features, 'Importance': combined_importance})
importance_df = importance_df.sort_values(by='Importance', ascending=False)
print(importance_df.head(20)) # Display the top 20 features
# All feature names ordered by yield importance, most important first.
top_feature_names = importance_df['Feature'].tolist()