@@ -69,15 +69,7 @@ def uploat_to_mlflow(temp_dir, **context):
         print(f"Experiment {experiment_name} was not found, creating new")
         experiment_id = client.create_experiment(experiment_name)
 
-    run = client.create_run(experiment_id)
-    print(f"Uploading to experiment {experiment_name}/{experiment_id}/{run.info.run_id}")
-
-    print("Uploading model")
-    client.log_artifact(
-        run_id=run.info.run_id,
-        local_path=os.path.join(temp_dir, 'model.dat'),
-        artifact_path="model",
-    )
+    print(f"Uploading to experiment {experiment_name}/{experiment_id}")
 
     print("Uploading model search results")
     df = pd.read_csv(os.path.join(temp_dir, 'pd.csv'), index_col=0)
@@ -86,7 +78,7 @@ def uploat_to_mlflow(temp_dir, **context):
     metrics = ['mean_test_score', 'mean_fit_time']
 
     for i, p in enumerate(dct['params'].values()):
-        with mlflow.start_run(experiment_id=experiment_id):
+        with mlflow.start_run(experiment_id=experiment_id) as run:
             p = json.loads(p.replace('\'', '"'))
             for parname, parvalue in p.items():
                 mlflow.log_param(key=parname, value=parvalue)
@@ -98,6 +90,15 @@ def uploat_to_mlflow(temp_dir, **context):
                 print(f"Logging metric {m} {dct[m][i]}")
                 mlflow.log_metric(key=m, value=dct[m][i])
 
+            if dct['rank_test_score'][i] == 1:
+                print('This is the best model')
+                print("Uploading model to run: ", run.info.run_id)
+                mlflow.log_artifact(
+                    local_path=os.path.join(temp_dir, 'model.dat'),
+                    artifact_path="model",
+                )
+
+
     #clean up
     shutil.rmtree(temp_dir)
 
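For reference, the pattern this change converges on, sketched outside the diff: one MLflow run per hyperparameter candidate, with params and metrics logged for every candidate and the serialized model attached only to the best-ranked run. This is an illustrative sketch, not the committed code; cv_results stands in for the dict built from pd.csv, model_path for the model.dat path under temp_dir, and the MLflow tracking URI and experiment lookup are assumed to be configured upstream.

import mlflow

def log_search_results(cv_results, experiment_id, model_path):
    # cv_results is assumed to look like sklearn's GridSearchCV.cv_results_:
    # parallel lists indexed per candidate ('params', 'mean_test_score', ...).
    for i, params in enumerate(cv_results['params']):
        with mlflow.start_run(experiment_id=experiment_id) as run:
            # one logged param per key of this candidate's parameter dict
            for name, value in params.items():
                mlflow.log_param(key=name, value=value)

            # the two metrics the commit logs for every candidate
            for metric in ('mean_test_score', 'mean_fit_time'):
                mlflow.log_metric(key=metric, value=cv_results[metric][i])

            # attach the serialized model only to the best-ranked candidate,
            # mirroring the rank_test_score == 1 check added in this commit
            if cv_results['rank_test_score'][i] == 1:
                print("Uploading model to run:", run.info.run_id)
                mlflow.log_artifact(local_path=model_path, artifact_path="model")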