@@ -4,37 +4,27 @@
import os
from gymnasium import spaces
import numpy as np
- from gymnasium import Env
- from gymnasium.spaces import Discrete, Box, Dict, Tuple, MultiBinary, MultiDiscrete
- import os, subprocess, time, signal, random
+ import os
import pandas as pd
import logging
- import json
- from typing import Optional, Union
+ from typing import Optional
import math
import pathlib
- import random
- from src.automotive_feature_engineering.sna_handling import SnaHandling
- from src.automotive_feature_engineering.feature_extraction import FeatureExtraction
- from src.automotive_feature_engineering.feature_encoding import FeatureEncoding
- from src.automotive_feature_engineering.feature_selection import FeatureSelection
- from src.automotive_feature_engineering.feature_scaling import FeatureScaling
- from src.automotive_feature_engineering.feature_interactions import FeatureInteractions
- from src.automotive_feature_engineering.main_feature_engineering import (
-     FeatureEngineering,
- )
- from src.automotive_feature_engineering.utils.utils import combine_dfs, get_feature_df
+ from automotive_feature_engineering.sna_handling import SnaHandling
+ from automotive_feature_engineering.feature_extraction import FeatureExtraction
+ from automotive_feature_engineering.feature_encoding import FeatureEncoding
+ from automotive_feature_engineering.feature_selection import FeatureSelection
+ from automotive_feature_engineering.feature_scaling import FeatureScaling
+ from automotive_feature_engineering.feature_interactions import FeatureInteractions
+
+ from automotive_feature_engineering.utils.utils import get_feature_df
from sklearn.model_selection import train_test_split
- import ray
- import src.automotive_feature_engineering.utils.utils as utils
+ import automotive_feature_engineering.utils.utils as utils

# from ray.rllib import agents
from ray.rllib.utils import try_import_tf
- from ray.rllib.examples.models.action_mask_model import (
-     ActionMaskModel,
-     TorchActionMaskModel,
- )
- from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
+
+ from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestRegressor

tf = try_import_tf()

@@ -49,7 +39,6 @@

class EnergaizeEnv2(gym.Env):
    def __init__(self, env_config) -> None:
-         # super(EnergaizeEnv2, self).__init__()

        ### Training and Test Data
        # self.df_list = env_config["df"]
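
Note: `EnergaizeEnv2` subclasses `gym.Env`, so its `reset` and `step` must follow Gymnasium's API: `reset` returns `(obs, info)` and `step` returns the five-tuple `(obs, reward, terminated, truncated, info)` that the `step` signature in the next hunk spells out. A minimal sketch of that contract, with illustrative spaces and shapes that are not the repository's actual ones:

import gymnasium as gym
import numpy as np
from gymnasium import spaces

class SketchEnv(gym.Env):
    """Bare-bones Gymnasium env showing the reset/step contract."""

    def __init__(self) -> None:
        # Illustrative spaces; EnergaizeEnv2 defines its own.
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(4,), dtype=np.float32)
        self.action_space = spaces.Discrete(15)

    def reset(self, *, seed=None, options=None):
        super().reset(seed=seed)
        return np.zeros(4, dtype=np.float32), {}  # (observation, info)

    def step(self, action):
        obs = np.zeros(4, dtype=np.float32)
        # Gymnasium five-tuple: observation, reward, terminated, truncated, info
        return obs, 0.0, False, False, {}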
@@ -128,8 +117,6 @@ def step(
    ) -> tuple[dict[np.array, np.array], float, bool, bool, dict]:
        ### Increase sequence length
        self.current_sequence_length += 1
-         print("In Step")
-         print(f"Current Sequence Length: ", self.current_sequence_length)

        ### Set placeholder for info
        infos = {}

@@ -170,12 +157,8 @@ def step(

        ### Take action
        try:
-             print("in try")
            self.total_steps += 1
-             print("Total Steps: ", self.total_steps)
-             print("take action")
            self._take_action(action)
-             print("action taken")

            if self.df_train_X_train.shape[1] > 20000:
                reward = -1

@@ -188,7 +171,6 @@ def step(
                return obs, reward, terminated, truncated, infos

            # poly features not possible if df too large
-
            if len(self.df_train_X_train.columns) > 200:
                self.action_mask[14] = 0
            elif len(self.df_train_X_train.columns) <= 200 and 14 not in self.state:
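
Note: the two branches above maintain an action mask: once the training frame exceeds 200 columns, action 14 (polynomial feature interactions) is switched off, and the RLlib `ActionMaskModel` import removed in this commit suggests the mask was consumed by the policy. A minimal sketch of sampling under such a mask, with hypothetical names not taken from the repository:

import numpy as np

N_ACTIONS = 15

def sample_valid_action(mask: np.ndarray, rng: np.random.Generator) -> int:
    """Sample uniformly among the actions whose mask entry is still 1."""
    valid = np.flatnonzero(mask)
    return int(rng.choice(valid))

mask = np.ones(N_ACTIONS, dtype=np.int8)
mask[14] = 0  # mirrors the >200-column rule above: poly features disallowed
action = sample_valid_action(mask, np.random.default_rng(0))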
@@ -305,24 +287,19 @@ def reset(
    ##########################################
    def _take_action(self, action: int) -> None:
        if action == 0:
-             print("Platzhalter ")
+             print("Placeholder ")
        # 0 -> remove highly correlated features
        elif action == 1:
            print(f"Take Action {action}")
            feature_selection = FeatureSelection()
-             print("feature_selection object created")
-             # self.df_train = combine_dfs([self.df_train, self.df_train_target])
-             print("Self Dok Path", self.alt_docu)
-             print("Self alt_config", self.alt_config)
-             print("Feture Selection Object", self.feature_selection)
+
            importances = self.feature_selection.calc_globalFeatureImportance(
                self.alt_docu,
                "randomforest",
                self.df_train_X_train,
                self.df_train_y_train,
                self.alt_config,
            )
-             print("importance function called")
            # ### Remove "file" and "I_" before processing
            # self.df, self.df_target = get_feature_df(
            #     self.df, fuse_prefix=self.fuse_prefix
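
Note: `calc_globalFeatureImportance` is defined outside this diff; given the `"randomforest"` argument and the `RandomForestRegressor` import kept above, the step plausibly ranks columns by forest importance. A sketch of such a computation, an assumption rather than the repository's implementation:

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor

def global_feature_importance(X: pd.DataFrame, y: pd.DataFrame) -> pd.Series:
    """Fit a forest and rank columns by impurity-based importance (hypothetical)."""
    regr = RandomForestRegressor(n_estimators=100, n_jobs=-1, random_state=0)
    regr.fit(X, np.ravel(y))
    return pd.Series(regr.feature_importances_, index=X.columns).sort_values(ascending=False)

# Action 1 would then drop the lowest-ranked columns from df_train_X_train.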
@@ -612,7 +589,6 @@ def _take_action(self, action: int) -> None:
    # Calculate reward
    ##########################################
    def _calculate_performance(self) -> float:
-         # print("CAALLLLAAAAAAAAAA")

        ### Add "file" and "I_" before processing
        # self.df = combine_dfs([self.df, self.df_target])

@@ -650,23 +626,6 @@ def _calculate_performance(self) -> float:

        pred_val = regr.predict(X_val).reshape(-1, 1)

-         # ### PAWD
-         # # measured energy [As] on validation data
-         # val_energy_integral = self.__calculate_integral(self.rl_raster, y_val)
-         # val_energy = float(val_energy_integral[-1])
-
-         # # predicted energy [As] on training data
-         # val_predenergy_integral = self.__calculate_integral(
-         #     self.rl_raster, pred_val
-         # )
-         # val_predenergy = float(val_predenergy_integral[-1])
-         # val_energy_percdev = float(100.0 * (val_predenergy / val_energy) - 100.0)
-
-         # model_xval.append(X_val)
-         # val_energy_list.append(val_energy)
-         # val_energy_percdev_list_abs_weighed.append(
-         #     np.abs(val_energy_percdev) * val_energy
-         # )

        ### R2
        valR2.append(r2_score(y_val, pred_val, multioutput="raw_values"))

@@ -675,19 +634,11 @@ def _calculate_performance(self) -> float:
        rewards = []
        ### R2 average
        r2_avg = np.average(valR2)
-         print("r2_avg ", r2_avg)
-         # ### PAWD
-         # pawd = -np.abs(
-         #     np.sum(val_energy_percdev_list_abs_weighed) / np.sum(val_energy_list)
-         # )
+
        print("STATE CALC", self.state)
-         # print("r2_avg ", r2_avg, "PAWD ", pawd)

        transformed_r2 = self.expo_r2(r2_avg)
        rewards.append(transformed_r2)
-         # rewards.append(transformed_r2)
-         # transformed_pawd = self.expo_pawd(pawd)
-         # rewards.append(transformed_pawd)

        ### Signals
        if self.df_train_X_train.shape[1] > 400:
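
Note: `expo_r2` is also defined outside this diff; what is visible here is its role: the per-fold scores collected via `r2_score(..., multioutput="raw_values")` are averaged and passed through an exponential transform before being appended to `rewards`. A hypothetical transform with that shape, an assumption rather than the repository's definition:

import numpy as np

def expo_r2(r2_avg: float, base: float = 10.0) -> float:
    """Hypothetical exponential shaping: reward rises steeply as R^2 approaches 1."""
    r2_clipped = float(np.clip(r2_avg, -1.0, 1.0))
    return float(base ** r2_clipped / base)  # maps [-1, 1] onto [0.01, 1.0]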