|
15 | 15 | import matplotlib.pyplot as plt |
16 | 16 | import matplotlib.gridspec as gridspec |
17 | 17 | import numpy as np |
| 18 | +from pathlib import Path |
18 | 19 | from tensorforce.environments import Environment |
19 | 20 |
|
20 | 21 |
|
@@ -105,8 +106,8 @@ def sample_ep_plot(self, states, actions, rewards, ep, savename): |
105 | 106 | ax.grid(alpha=0.5) |
106 | 107 |
|
107 | 108 | plt.tight_layout() |
108 | | - plt.savefig(fr'checkpoints\{savename}_sample.png', dpi=600) |
109 | | - plt.savefig(fr'checkpoints\{savename}_sample.svg') |
| 109 | + plt.savefig(Path(f'checkpoints/{savename}_sample.png'), dpi=600) |
| 110 | + plt.savefig(Path(f'checkpoints/{savename}_sample.svg')) |
110 | 111 | plt.close() |
111 | 112 |
|
112 | 113 | def trainingprogress_plot(self, df, summed_actions, name): |
@@ -142,8 +143,8 @@ def trainingprogress_plot(self, df, summed_actions, name): |
142 | 143 | ax3.set_xlabel('episodes') |
143 | 144 |
|
144 | 145 | plt.tight_layout() |
145 | | - plt.savefig(fr'checkpoints\{name}_progress.png', dpi=600) |
146 | | - plt.savefig(fr'checkpoints\{name}_progress.svg') |
| 146 | + plt.savefig(Path(f'checkpoints/{name}_progress.png'), dpi=600) |
| 147 | + plt.savefig(Path(f'checkpoints/{name}_progress.svg')) |
147 | 148 | plt.close() |
148 | 149 |
|
149 | 150 |
|
@@ -285,7 +286,7 @@ def save(self, AGENT, train_start, ep, states, actions, rewards, df, |
285 | 286 | self.trainingprogress_plot(df, summed_actions, name) |
286 | 287 |
|
287 | 288 | agent.save(directory='checkpoints', filename=name, format='hdf5') |
288 | | - df.to_csv(fr'checkpoints\{name}.csv', index=False) |
| 289 | + df.to_csv(Path(f'checkpoints/{name}.csv'), index=False) |
289 | 290 |
|
290 | 291 |
|
291 | 292 | if __name__ == "__main__": |
@@ -314,7 +315,7 @@ def save(self, AGENT, train_start, ep, states, actions, rewards, df, |
314 | 315 | ax.set_xlabel('cutters to change') |
315 | 316 | ax.set_ylabel('total maintenance time [min]') |
316 | 317 | plt.tight_layout() |
317 | | - plt.savefig(r'graphics\cutter_changing_function.svg') |
| 318 | + plt.savefig(Path('graphics/cutter_changing_function.svg')) |
318 | 319 |
|
319 | 320 | ########################################################################## |
320 | 321 | # 3D plot of reward functions |
@@ -366,4 +367,4 @@ def save(self, AGENT, train_start, ep, states, actions, rewards, df, |
366 | 367 | ax.set_zlabel('reward') |
367 | 368 | ax.set_zlim(top=1) |
368 | 369 |
|
369 | | - plt.savefig(r'graphics\reward_function.svg') |
| 370 | + plt.savefig(Path('graphics/reward_function.svg')) |
0 commit comments