forked from flyingleafe/dqnroute
-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathRun.py
More file actions
1279 lines (1062 loc) · 50.3 KB
/
Run.py
File metadata and controls
1279 lines (1062 loc) · 50.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import os
import argparse
import yaml
import re
import hashlib
import base64
from pathlib import Path
from tqdm import tqdm
from typing import *
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import torch
import networkx as nx
from dqnroute.constants import TORCH_MODELS_DIR
from dqnroute.event_series import split_dataframe
from dqnroute.generator import gen_episodes
from dqnroute.networks.common import get_optimizer
from dqnroute.networks.embeddings import Embedding, LaplacianEigenmap
from dqnroute.networks.q_network import QNetwork
from dqnroute.networks.actor_critic_networks import PPOActor, PPOCritic
from dqnroute.simulation.common import mk_job_id, add_cols, DummyProgressbarQueue
from dqnroute.simulation.conveyors import ConveyorsRunner
from dqnroute.utils import AgentId, get_amatrix_cols, make_batches, stack_batch, mk_num_list
from dqnroute.verification.ml_util import Util
from dqnroute.verification.router_graph import RouterGraph
from dqnroute.verification.adversarial import PGDAdversary
from dqnroute.verification.markov_analyzer import MarkovAnalyzer
from dqnroute.verification.symbolic_analyzer import SymbolicAnalyzer, LipschitzBoundComputer
from dqnroute.verification.nnet_verifier import NNetVerifier, marabou_float2str
from dqnroute.verification.embedding_packer import EmbeddingPacker
# Paths to which the verified network and property are exported for Marabou.
NETWORK_FILENAME = "../network.nnet"
PROPERTY_FILENAME = "../property.txt"

parser = argparse.ArgumentParser(
    description="Script to train, simulate and verify deep neural networks for baggage routing.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# general parameters
parser.add_argument("config_files", type=str, nargs="+",
                    help="YAML config file(s) with the conveyor topology graph, input scenario and settings "
                         "of routing algorithms (all files will be concatenated into one)")
parser.add_argument("--routing_algorithms", type=str, default="dqn_emb,centralized_simple,link_state,simple_q,ppo_emb",
                    help="comma-separated list of routing algorithms to run "
                         "(possible entries: dqn_emb, centralized_simple, link_state, simple_q, ppo_emb, random)")
parser.add_argument("--command", type=str, default="run",
                    help="possible options: run, compute_expected_cost, embedding_adversarial_search, "
                         "embedding_adversarial_verification, q_adversarial_search, q_adversarial_verification")
parser.add_argument("--random_seed", type=int, default=42,
                    help="random seed for pretraining and training")
parser.add_argument("--pretrain_num_episodes", type=int, default=10000,
                    help="number of episodes for supervised pretraining")
# BUG FIX: help text was copy-pasted from --pretrain_num_episodes ("number of episodes").
parser.add_argument("--pretrain_num_epochs", type=int, default=32,
                    help="number of epochs for supervised pretraining")
parser.add_argument("--force_pretrain", action="store_true",
                    help="whether not to load previously saved pretrained models and force recomputation")
# BUG FIX: help text said "supervised pretraining"; this flag controls training episodes.
parser.add_argument("--train_num_episodes", type=int, default=10000,
                    help="number of episodes for training")
parser.add_argument("--force_train", action="store_true",
                    help="whether not to load previously saved trained models and force recomputation")
parser.add_argument("--skip_graphviz", action="store_true",
                    help="do not visualize graphs with Graphviz")
# common verification / adversarial search parameters
parser.add_argument("--cost_bound", type=float, default=100.0,
                    help="upper bound on expected delivery cost to verify")
parser.add_argument("--simple_path_cost", action="store_true",
                    help="use the number of transitions instead of the total conveyor length as path cost")
parser.add_argument("--input_eps_l_inf", type=float, default=0.1,
                    help="maximum L_∞ discrepancy of input embeddings in adversarial robustness "
                         "verification or search (default: 0.1)")
parser.add_argument("--single_source", type=int, default=None,
                    help="index of the single source to consider (if not specified, all sources will "
                         "be considered)")
parser.add_argument("--single_sink", type=int, default=None,
                    help="index of the single sink to consider (if not specified, all sinks will "
                         "be considered)")
parser.add_argument("--learning_step_indices", type=str, default=None,
                    help="in learning step verification, consider only learning steps with these indices "
                         "comma-separated list without spaces; all steps will be considered if not specified)")
# parameters specific to adversarial search with PGD (embedding_adversarial_search)
parser.add_argument("--input_eps_l_2", type=float, default=1.5,
                    help="maximum (scaled by dimension) L_2 discrepancy of input embeddings in "
                         "adversarial search")
parser.add_argument("--adversarial_search_use_l_2", action="store_true",
                    help="use L_2 norm (scaled by dimension) instead of L_∞ norm during adversarial search")
# parameters specific to learning step verification
# (q_adversarial_search, q_adversarial_verification)
parser.add_argument("--verification_lr", type=float, default=0.001,
                    help="learning rate in learning step verification")
parser.add_argument("--input_max_delta_q", type=float, default=10.0,
                    help="maximum ΔQ in learning step verification")
parser.add_argument("--q_adversarial_no_points", type=int, default=351,
                    help="number of points used to create plots in command q_adversarial")
parser.add_argument("--q_adversarial_verification_no_points", type=int, default=351,
                    help="number of points to search for counterexamples before estimating the Lipschitz "
                         "constant in command q_adversarial_lipschitz (setting to less than 2 disables "
                         "this search)")
# parameters specific to verification with Marabou
# (embedding_adversarial_verification, embedding_adversarial_full_verification)
parser.add_argument("--marabou_path", type=str, default=None,
                    help="path to the Marabou executable")
parser.add_argument("--linux_marabou_memory_limit_mb", type=int, default=None,
                    help="set a memory limit in MB for Marabou (use only on Linux; default: no limit)")

args = parser.parse_args()
# dqn_emb = DQNroute-LE, centralized_simple = BSR
router_types_supported = 'dqn_emb ppo_emb centralized_simple link_state simple_q reinforce_emb'.split(' ')
router_types = args.routing_algorithms
# The emptiness check is done on the raw string before splitting it into a list.
assert len(router_types) > 0, '--routing_algorithms cannot be empty'
router_types = re.split(', *', args.routing_algorithms)
assert len(set(router_types) - set(router_types_supported)) == 0, \
    f'unsupported algorithm in --routing_algorithms was found; supported ones: {router_types_supported}'
# Flags selecting which per-algorithm pipelines below are executed.
dqn_emb_exists = 'dqn_emb' in router_types
ppo_emb_exists = 'ppo_emb' in router_types
reinforce_emb_exists = 'reinforce_emb' in router_types
# Neural networks must be loaded for dqn_emb runs and for every non-"run" command.
nn_loading_needed = "dqn_emb" in router_types or args.command != "run"
random_seed = args.random_seed
# Create directories for logs and results
for dirname in ['../logs', '../img']:
    os.makedirs(dirname, exist_ok=True)
# 1. load scenario from one or more config files
# All config files are concatenated into a single YAML document; the filename
# suffix (used to label saved models/plots) is built from their base names.
string_scenario, filename_suffix = [], []
for config_filename in args.config_files:
    filename_suffix += [os.path.split(config_filename)[1].replace(".yaml", "")]
    with open(config_filename, "r") as f:
        string_scenario += f.readlines()
string_scenario = "".join(string_scenario)
scenario = yaml.safe_load(string_scenario)
print(f"Configuration files: {args.config_files}")
router_settings = scenario["settings"]["router"]
emb_dim = router_settings["embedding"]["dim"]
softmax_temperature = router_settings["dqn"]["softmax_temperature"]
probability_smoothing = router_settings["dqn"]["probability_smoothing"]
# graphs size = #sources + #diverters + #sinks + #(conveyors leading to other conveyors)
lengths = [len(scenario["configuration"][x]) for x in ["sources", "diverters", "sinks"]] \
    + [len([c for c in scenario["configuration"]["conveyors"].values()
            if c["upstream"]["type"] == "conveyor"])]
graph_size = sum(lengths)
filename_suffix = "__".join(filename_suffix)
filename_suffix = f"_{emb_dim}_{graph_size}_{filename_suffix}.bin"
print(f"Embedding dimension: {emb_dim}, graph size: {graph_size}")
# pretrain common params and function
pretrain_data_size = args.pretrain_num_episodes  # episodes generated for supervised pretraining
pretrain_epochs_num = args.pretrain_num_epochs   # epochs of supervised pretraining
force_pretrain = args.force_pretrain             # if True, ignore saved pretrained models
def gen_episodes_progress(router_type, num_episodes, **kwargs):
    """Generate routing episodes while displaying a tqdm progress bar."""
    with tqdm(total=num_episodes) as progress:
        return gen_episodes(router_type, bar=progress, num_episodes=num_episodes, **kwargs)
class CachedEmbedding(Embedding):
    """Embedding wrapper that caches one fitted inner embedding per graph.

    Graphs are identified by their hash (see hash_graph), so re-fitting on a
    graph that was already seen is a no-op.
    """

    def __init__(self, InnerEmbedding, dim, **kwargs):
        super().__init__(dim, **kwargs)
        self.InnerEmbedding = InnerEmbedding
        self.inner_kwargs = kwargs
        # graph hash -> fitted inner embedding instance
        self.fit_embeddings = {}

    def fit(self, graph, **kwargs):
        """Fit an inner embedding for *graph* unless one is already cached."""
        key = hash_graph(graph)
        if key not in self.fit_embeddings:
            inner = self.InnerEmbedding(dim=self.dim, **self.inner_kwargs)
            inner.fit(graph, **kwargs)
            self.fit_embeddings[key] = inner

    def transform(self, graph, idx):
        """Return the embedding of node *idx* using the cached fit for *graph*."""
        return self.fit_embeddings[hash_graph(graph)].transform(idx)
def hash_graph(graph):
    """Return a base64-encoded SHA-256 digest identifying *graph*.

    Accepts either an adjacency matrix (numpy array) or a networkx graph;
    a networkx graph is first converted to its adjacency matrix with a
    deterministic (sorted) node order, so equal graphs hash equally.
    """
    # Idiom fix: isinstance instead of `type(...) != np.ndarray`.
    if not isinstance(graph, np.ndarray):
        # NOTE(review): nx.to_numpy_matrix was removed in networkx >= 3.0;
        # kept as-is for compatibility with the project's pinned dependency.
        graph = nx.to_numpy_matrix(graph, nodelist=sorted(graph.nodes))
    digest = hashlib.sha256(graph.tobytes()).digest()
    return base64.b64encode(digest).decode("utf-8")
def add_inp_cols(tag, dim):
    """Column name(s) for network input *tag*: a numbered list of names when
    the input is multi-dimensional, the bare tag otherwise."""
    if dim > 1:
        return mk_num_list(tag + "_", dim)
    return tag
# train common params and function
train_data_size = args.train_num_episodes  # number of episodes simulated during training
force_train = args.force_train             # if True, retrain even when a saved model exists
# TODO check whether setting a random seed makes training deterministic
def run_single(
    run_params: dict,
    router_type: str,
    random_seed: int,
    **kwargs
):
    """Execute one simulation job and return (event series, runner)."""
    job_id = mk_job_id(router_type, random_seed)
    with tqdm(desc=job_id) as bar:
        progress_queue = DummyProgressbarQueue(bar)
        runner = ConveyorsRunner(
            run_params=run_params,
            router_type=router_type,
            random_seed=random_seed,
            progress_queue=progress_queue,
            omit_training=False,
            **kwargs,
        )
        series = runner.run(**kwargs)
    return series, runner
# DQN part (pre-train + train)
def pretrain_dqn(
    generated_data_size: int,
    num_epochs: int,
    dir_with_models: str,
    pretrain_filename: str = None,
    pretrain_dataset_filename: str = None,
    use_full_topology: bool = True,
):
    """Supervised pretraining of the DQN Q-network on generated episodes.

    Generates `generated_data_size` episodes, embeds node indices with a
    cached Laplacian eigenmap, and trains a QNetwork with MSE loss for
    `num_epochs` epochs.  If `pretrain_filename` is given, the trained
    network is saved under that label.  Returns mean loss per epoch.
    Relies on module-level globals: graph_size, emb_dim, router_settings,
    random_seed, scenario.
    """

    def qnetwork_batches(net, data, batch_size=64, embedding=None):
        # Yield (inputs, target) minibatches; when `embedding` is given,
        # node indices are replaced by their per-graph embeddings.
        n = graph_size
        data_cols = []
        amatrix_cols = get_amatrix_cols(n)
        for tag, dim in net.add_inputs:
            data_cols.append(amatrix_cols if tag == "amatrix" else add_inp_cols(tag, dim))
        for a, b in make_batches(data.shape[0], batch_size):
            batch = data[a:b]
            addr = batch["addr"].values
            dst = batch["dst"].values
            nbr = batch["neighbour"].values
            if embedding is not None:
                amatrices = batch[amatrix_cols].values
                new_btch = []
                for addr_, dst_, nbr_, A in zip(addr, dst, nbr, amatrices):
                    # Each row carries a flattened n*n adjacency matrix.
                    A = A.reshape(n, n)
                    embedding.fit(A)
                    new_addr = embedding.transform(A, int(addr_))
                    new_dst = embedding.transform(A, int(dst_))
                    new_nbr = embedding.transform(A, int(nbr_))
                    new_btch.append((new_addr, new_dst, new_nbr))
                [addr, dst, nbr] = stack_batch(new_btch)
            addr_inp = torch.FloatTensor(addr)
            dst_inp = torch.FloatTensor(dst)
            nbr_inp = torch.FloatTensor(nbr)
            inputs = tuple(torch.FloatTensor(batch[cols].values) for cols in data_cols)
            output = torch.FloatTensor(batch["predict"].values)
            yield (addr_inp, dst_inp, nbr_inp) + inputs, output

    def qnetwork_pretrain_epoch(net, optimizer, data, **kwargs):
        # One optimization pass over `data`; yields each batch's loss.
        loss_func = torch.nn.MSELoss()
        for batch, target in qnetwork_batches(net, data, **kwargs):
            optimizer.zero_grad()
            output = net(*batch)
            loss = loss_func(output, target.unsqueeze(1))
            loss.backward()
            optimizer.step()
            yield float(loss)

    def qnetwork_pretrain(net, data, optimizer="rmsprop", **kwargs):
        # Full pretraining loop; returns the mean loss of every epoch and
        # optionally saves the network under `pretrain_filename`.
        optimizer = get_optimizer(optimizer)(net.parameters())
        epochs_losses = []
        for _ in tqdm(range(num_epochs), desc='DQN Pretraining...'):
            sum_loss = 0
            loss_cnt = 0
            for loss in qnetwork_pretrain_epoch(net, optimizer, data, **kwargs):
                sum_loss += loss
                loss_cnt += 1
            epochs_losses.append(sum_loss / loss_cnt)
        if pretrain_filename is not None:
            # label changed by Igor:
            net.change_label(pretrain_filename)
            # net._label = pretrain_filename
            net.save()
        return epochs_losses

    data_conv = gen_episodes_progress(
        'dqn_oneout',  # TODO fix it
        generated_data_size,
        ignore_saved=True,
        context="conveyors",
        random_seed=random_seed,
        run_params=scenario,
        save_path=pretrain_dataset_filename,
        use_full_topology=use_full_topology
    )
    # presumably marks every sample as "conveyor working" — TODO confirm semantics
    data_conv.loc[:, "working"] = 1.0
    shuffled_data = data_conv.sample(frac=1)
    conv_emb = CachedEmbedding(LaplacianEigenmap, dim=emb_dim)
    network_args = {
        'scope': dir_with_models,
        'activation': router_settings['dqn']['activation'],
        'layers': router_settings['dqn']['layers'],
        'embedding_dim': emb_dim,
    }
    conveyor_network_ng_emb = QNetwork(graph_size, **network_args)
    conveyor_network_ng_emb_losses = qnetwork_pretrain(
        conveyor_network_ng_emb,
        shuffled_data,
        embedding=conv_emb
    )
    return conveyor_network_ng_emb_losses
def train_dqn(
    progress_step: int,
    router_type: str,
    dir_with_models: str,
    pretrain_filename: str,
    train_filename: str,
    random_seed: int,
    work_with_files: bool,
    retrain: bool,
    use_reinforce: bool = True,
    use_combined_model: bool = False
):
    """Train (or load) a DQN router in simulation; return (event series, world)."""
    router_cfg = scenario["settings"]["router"][router_type]
    router_cfg["use_reinforce"] = use_reinforce
    router_cfg["use_combined_model"] = use_combined_model
    router_cfg["scope"] = dir_with_models
    router_cfg["load_filename"] = pretrain_filename
    # The simulation reads OMIT_TRAINING from the environment: clear it to
    # train, set it to skip training.
    # TODO get rid of this environmental variable
    if retrain:
        os.environ.pop("OMIT_TRAINING", None)
    else:
        os.environ["OMIT_TRAINING"] = "True"
    event_series, runner = run_single(
        run_params=scenario,
        router_type=router_type,
        progress_step=progress_step,
        ignore_saved=[True],
        random_seed=random_seed,
    )
    world = runner.world
    some_router = next(iter(next(iter(world.handlers.values())).routers.values()))
    brain = some_router.brain
    brain.change_label(train_filename)
    # save or load the trained network
    if work_with_files:
        if not retrain:
            brain.restore()
        elif some_router.use_single_neural_network:
            brain.save()
        else:
            print(
                "Warning: saving/loading models trained in simulation is only implemented "
                "when use_single_neural_network = True. The models were not saved to disk."
            )
    return event_series, world
def dqn_experiments(
    n: int,
    use_combined_model: bool = True,
    use_full_topology: bool = True,
    use_reinforce: bool = True,
    process_pretrain: bool = True,
    process_train: bool = True
):
    """Run *n* independent DQN pretrain+train experiments.

    Returns the list of delivery event series, one per experiment in which
    training was performed.  Relies on the module-level globals set by the
    dqn_emb pipeline (dir_with_models, pretrain_filename, data_path,
    train_filename, random_seed).
    """
    dqn_logs = []
    for _ in range(n):
        if process_pretrain:
            print('Pretraining DQN Models...')
            pretrain_dqn(
                pretrain_data_size,
                pretrain_epochs_num,
                dir_with_models,
                pretrain_filename,
                data_path,
                use_full_topology=use_full_topology,
            )
        else:
            print('Using the already pretrained model...')
        if process_train:
            print('Training DQN Model...')
            dqn_log, dqn_world = train_dqn(
                train_data_size,
                'dqn_emb',
                dir_with_models,
                pretrain_filename,
                train_filename,
                random_seed,
                True,
                True,
                use_reinforce=use_reinforce,
                use_combined_model=use_combined_model
            )
            # BUG FIX: the series was previously appended outside this branch,
            # raising NameError on `dqn_log` when process_train was False.
            dqn_logs.append(dqn_log.getSeries(add_avg=True))
        else:
            print('Skip training process...')
    return dqn_logs
# whole pipeline
if dqn_emb_exists:
    dqn_serieses = []
    dqn_emp_config = scenario['settings']['router']['dqn_emb']
    dir_with_models = 'conveyor_models_dqn'
    pretrain_filename = f'pretrained{filename_suffix}'
    pretrain_path = Path(TORCH_MODELS_DIR) / dir_with_models / pretrain_filename
    data_filename = f'pretrain_data_ppo{filename_suffix}'
    data_path = f'../logs/{data_filename}'
    train_filename = f'trained{filename_suffix}'
    train_path = Path(TORCH_MODELS_DIR) / dir_with_models / train_filename
    # NOTE(review): the trailing `or True` forces both flags to True regardless of
    # --force_pretrain/--force_train or saved models — looks like a debug leftover.
    # Also note that do_pretrain/do_train are not used below: dqn_experiments is
    # called with process_pretrain=False explicitly.
    do_pretrain = force_pretrain or not pretrain_path.exists() or True
    do_train = force_train or not train_path.exists() or args.command == 'run' or True
    print(f'Model: {pretrain_path}')
    # Two experiment batches: with and without the combined model.
    dqn_combined_model_results = dqn_experiments(1, True, True, True, False, True)
    dqn_single_model_results = dqn_experiments(1, False, True, True, False, True)
# PPO part (pre-train + train)
def pretrain_ppo(
    generated_data_size: int,
    num_epochs: int,
    actor_config: dict,
    critic_config: dict,
    dir_with_models: str,
    actor_pretrain_filename: str = None,
    critic_pretrain_filename: str = None,
    pretrain_dataset_filename: str = None
) -> Tuple[np.ndarray, np.ndarray]:
    """Supervised pretraining of the PPO actor and critic on generated episodes.

    Generates `generated_data_size` episodes, embeds node indices with a cached
    Laplacian eigenmap, and trains both networks with MSE loss for `num_epochs`
    epochs each.  Networks are saved under their respective filenames when given.
    Returns (actor losses per epoch, critic losses per epoch).
    """

    def ppo_batches(data, batch_size=64, embedding=None):
        # Yield (net input, actor target, critic target, allowed neighbours)
        # minibatches; embeddings replace raw node indices when provided.
        n = graph_size
        amatrix_cols = get_amatrix_cols(n)
        for a, b in make_batches(data.shape[0], batch_size):
            batch = data[a:b]
            addr = batch["addr"].values
            dst = batch["dst"].values
            new_addr = batch['next_addr'].values
            v_func = batch['addr_v_func'].values
            allowed_neighbours = []
            if embedding is not None:
                amatrices = batch[amatrix_cols].values
                nets_inputs = []
                actor_outputs = []
                for addr_, dst_, new_addr_, A in zip(addr, dst, new_addr, amatrices):
                    A = A.reshape(n, n)
                    embedding.fit(A)
                    # Embeddings of all direct neighbours of the current node.
                    current_neighbours = []
                    for idx, distance in enumerate(A[int(addr_)]):
                        if distance != 0:
                            current_neighbours.append(embedding.transform(A, idx))
                    allowed_neighbours.append(current_neighbours)
                    addr_emb = embedding.transform(A, int(addr_))
                    dst_emb = embedding.transform(A, int(dst_))
                    new_addr_emb = embedding.transform(A, int(new_addr_))
                    nets_inputs.append([addr_emb, dst_emb])
                    actor_outputs.append(new_addr_emb)
                [addr, dst] = stack_batch(nets_inputs)
                new_addr = np.array(actor_outputs)
            net_input = (torch.FloatTensor(addr), torch.FloatTensor(dst))
            actor_output = torch.FloatTensor(new_addr)
            critic_output = torch.FloatTensor(v_func)
            yield net_input, actor_output, critic_output, allowed_neighbours

    def critic_pretrain_epoch(net, data, **kwargs):
        # One optimization pass of the critic; yields each batch's loss.
        loss_func = torch.nn.MSELoss()
        for critic_input, actor_target, critic_target, allowed_neighbours in ppo_batches(data, **kwargs):
            net.optimizer.zero_grad()
            output = net(*critic_input)
            loss = loss_func(output, critic_target.unsqueeze(1))
            loss.backward()
            net.optimizer.step()
            yield float(loss)

    def actor_pretrain_epoch(net, data, **kwargs):
        # One optimization pass of the actor; yields each batch's loss.
        loss_func = torch.nn.MSELoss()
        for actor_input, actor_target, critic_target, allowed_neighbours in ppo_batches(data, **kwargs):
            net.optimizer.zero_grad()
            output = net(*actor_input)
            loss = loss_func(output, actor_target)
            loss.backward()
            net.optimizer.step()
            yield float(loss)

    def critic_pretrain(net, data, **kwargs) -> np.ndarray:
        critic_losses = []
        for _ in tqdm(range(num_epochs), desc='Critic pretrain'):
            sum_loss = 0
            loss_cnt = 0
            for loss in critic_pretrain_epoch(net, data, **kwargs):
                sum_loss += loss
                loss_cnt += 1
            critic_losses.append(sum_loss / loss_cnt)
        if critic_pretrain_filename is not None:
            # BUG FIX: the critic was labeled with the module-level
            # `pretrain_filename` (a DQN model name) instead of its own filename.
            net.change_label(critic_pretrain_filename)
            net.save()
        return np.array(critic_losses, dtype=np.float32)

    def actor_pretrain(net, data, **kwargs) -> np.ndarray:
        actor_losses = []
        for _ in tqdm(range(num_epochs), desc='Actor pretrain'):
            sum_loss = 0
            loss_cnt = 0
            for loss in actor_pretrain_epoch(net, data, **kwargs):
                sum_loss += loss
                loss_cnt += 1
            actor_losses.append(sum_loss / loss_cnt)
        if actor_pretrain_filename is not None:
            # BUG FIX: same mislabeling as in critic_pretrain; use the actor's
            # own filename (matches pretrain_reinforce's actor_pretrain).
            net.change_label(actor_pretrain_filename)
            net.save()
        return np.array(actor_losses, dtype=np.float32)

    def networks_pretrain(
        data: pd.DataFrame,
        actor_model: torch.nn.Module,
        critic_model: torch.nn.Module,
        conv_emb=None,
    ) -> Tuple[np.ndarray, np.ndarray]:
        # Pretrain the actor first, then the critic, on the same data.
        actor_losses = actor_pretrain(
            actor_model, data, embedding=conv_emb
        )
        critic_losses = critic_pretrain(
            critic_model, data, embedding=conv_emb
        )
        return actor_losses, critic_losses

    data = gen_episodes_progress(
        'ppo_emb',  # TODO fix it
        generated_data_size,
        ignore_saved=True,
        context="conveyors",
        random_seed=random_seed,
        run_params=scenario,
        save_path=pretrain_dataset_filename
    )
    shuffled_data = data.sample(frac=1)
    conv_emb = CachedEmbedding(LaplacianEigenmap, dim=emb_dim)
    actor_args = {
        'scope': dir_with_models,
        'embedding_dim': emb_dim
    }
    actor_args = dict(**actor_config, **actor_args)
    actor_model = PPOActor(**actor_args)
    critic_args = {
        'scope': dir_with_models,
        'embedding_dim': emb_dim
    }
    critic_args = dict(**critic_config, **critic_args)
    critic_model = PPOCritic(**critic_args)
    actor_losses, critic_losses = networks_pretrain(shuffled_data, actor_model, critic_model, conv_emb=conv_emb)
    return actor_losses, critic_losses
def train_ppo(
    progress_step: int,
    router_type: str,
    dir_with_models: str,
    actor_pretrain_filename: str,
    critic_pretrain_filename: str,
    actor_train_filename: str,
    critic_train_filename: str,
    random_seed: int,
    work_with_files: bool,
    retrain: bool
):
    """Train (or load) a PPO router in simulation; return (event series, world).

    Injects model locations into the scenario config, runs the simulation via
    run_single, then relabels the actor/critic with the trained filenames.
    """
    scenario["settings"]["router"][router_type]["dir_with_models"] = dir_with_models
    scenario["settings"]["router"][router_type]["actor_load_filename"] = actor_pretrain_filename
    scenario["settings"]["router"][router_type]["critic_load_filename"] = critic_pretrain_filename
    event_series, runner = run_single(
        run_params=scenario,
        router_type=router_type,
        progress_step=progress_step,
        ignore_saved=[True],
        random_seed=random_seed
    )
    world = runner.world
    # Any router instance works here: models are shared (or at least one is taken
    # as representative) — TODO confirm for the non-single-network case.
    some_router = next(iter(next(iter(world.handlers.values())).routers.values()))
    actor_model = some_router.actor
    actor_model.change_label(actor_train_filename)
    critic_model = some_router.critic
    critic_model.change_label(critic_train_filename)
    if work_with_files:
        if retrain:
            # Saving is intentionally disabled until per-router saving is
            # implemented for PPO.
            if False:  # some_router.use_single_neural_network: TODO implement
                actor_model.save()
                critic_model.save()
            else:
                print("Warning: saving/loaded models trained in simulation is only implemented "
                      "when use_single_neural_network = True. The models were not saved to disk.")
        else:
            actor_model.restore()
            critic_model.restore()
    return event_series, world
if ppo_emb_exists:
    # PPO pipeline: resolve model paths, pretrain if no saved models exist
    # (or --force_pretrain), then train in simulation.
    ppo_emb_config = scenario['settings']['router']['ppo_emb']
    actor_config = ppo_emb_config['actor']
    critic_config = ppo_emb_config['critic']
    dir_with_models = 'conveyor_models_ppo'
    actor_pretrain_filename = f'actor_pretrained{filename_suffix}'
    actor_pretrain_path = Path(TORCH_MODELS_DIR) / dir_with_models / actor_pretrain_filename
    critic_pretrain_filename = f'critic_pretrained{filename_suffix}'
    critic_pretrain_path = Path(TORCH_MODELS_DIR) / dir_with_models / critic_pretrain_filename
    actor_trained_filename = f'actor_trained{filename_suffix}'
    actor_trained_path = Path(TORCH_MODELS_DIR) / dir_with_models / actor_trained_filename
    critic_trained_filename = f'critic_trained{filename_suffix}'
    critic_trained_path = Path(TORCH_MODELS_DIR) / dir_with_models / critic_trained_filename
    do_pretrain = force_pretrain or not actor_pretrain_path.exists() or not critic_pretrain_path.exists()
    do_train = force_train or not actor_trained_path.exists() or not critic_trained_path.exists()
    print(f'Actor: {actor_pretrain_path}')
    print(f'Critic: {critic_pretrain_path}')
    if do_pretrain:
        print('Pretraining PPO Models...')
        actor_losses, critic_losses = pretrain_ppo(
            pretrain_data_size,
            pretrain_epochs_num,
            actor_config,
            critic_config,
            dir_with_models,
            actor_pretrain_filename,
            critic_pretrain_filename,
            '../logs/data_conveyor_ppo.csv'
        )
        print(f'Actor loss: {actor_losses.tolist()}')
        print(f'Critic loss: {critic_losses.tolist()}')
    else:
        print('Using already pretrained models')
    if do_train:
        print('Training PPO Model...')
        ppo_log, ppo_world = train_ppo(
            train_data_size,
            'ppo_emb',
            dir_with_models,
            actor_pretrain_filename,
            critic_pretrain_filename,
            actor_trained_filename,
            critic_trained_filename,
            random_seed,
            True,
            True
        )
    else:
        print('Skip training process...')
# REINFORCE part (pre-train + train)
def pretrain_reinforce(
    generated_data_size: int,
    num_epochs: int,
    actor_config: dict,
    dir_with_models: str,
    actor_pretrain_filename: str = None,
    pretrain_dataset_filename: str = None
) -> np.ndarray:
    """Supervised pretraining of the REINFORCE actor on generated episodes.

    Mirrors the actor half of pretrain_ppo: generates episodes, embeds node
    indices with a cached Laplacian eigenmap, and trains a PPOActor with MSE
    loss for `num_epochs` epochs.  Saves the actor under
    `actor_pretrain_filename` when given.  Returns mean loss per epoch.
    """

    def reinforce_batches(data, batch_size=64, embedding=None):
        # Yield (net input, actor target, allowed neighbours) minibatches;
        # embeddings replace raw node indices when provided.
        n = graph_size
        amatrix_cols = get_amatrix_cols(n)
        for a, b in make_batches(data.shape[0], batch_size):
            batch = data[a:b]
            addr = batch["addr"].values
            dst = batch["dst"].values
            new_addr = batch['next_addr'].values
            allowed_neighbours = []
            if embedding is not None:
                amatrices = batch[amatrix_cols].values
                nets_inputs = []
                actor_outputs = []
                for addr_, dst_, new_addr_, A in zip(addr, dst, new_addr, amatrices):
                    A = A.reshape(n, n)
                    embedding.fit(A)
                    # Embeddings of all direct neighbours of the current node.
                    current_neighbours = []
                    for idx, distance in enumerate(A[int(addr_)]):
                        if distance != 0:
                            current_neighbours.append(
                                embedding.transform(A, idx)
                            )
                    allowed_neighbours.append(current_neighbours)
                    addr_emb = embedding.transform(A, int(addr_))
                    dst_emb = embedding.transform(A, int(dst_))
                    new_addr_emb = embedding.transform(A, int(new_addr_))
                    nets_inputs.append([addr_emb, dst_emb])
                    actor_outputs.append(new_addr_emb)
                [addr, dst] = stack_batch(nets_inputs)
                new_addr = np.array(actor_outputs)
            net_input = (torch.FloatTensor(addr), torch.FloatTensor(dst))
            actor_output = torch.FloatTensor(new_addr)
            yield net_input, actor_output, allowed_neighbours

    def actor_pretrain_epoch(net, data, **kwargs):
        # One optimization pass of the actor; yields each batch's loss.
        loss_func = torch.nn.MSELoss()
        for actor_input, actor_target, allowed_neighbours in reinforce_batches(data, **kwargs):
            net.optimizer.zero_grad()
            output = net(*actor_input)
            loss = loss_func(output, actor_target)
            loss.backward()
            net.optimizer.step()
            yield float(loss)

    def actor_pretrain(net, data, **kwargs) -> np.ndarray:
        # Full pretraining loop; returns the mean loss per epoch.
        actor_losses = []
        for _ in tqdm(range(num_epochs), desc='Actor pretrain'):
            sum_loss = 0
            loss_cnt = 0
            for loss in actor_pretrain_epoch(net, data, **kwargs):
                sum_loss += loss
                loss_cnt += 1
            actor_losses.append(sum_loss / loss_cnt)
        if actor_pretrain_filename is not None:
            net.change_label(actor_pretrain_filename)
            # net._label = actor_pretrain_filename
            net.save()
        return np.array(actor_losses, dtype=np.float32)

    data = gen_episodes_progress(
        'ppo_emb',  # TODO fix it
        generated_data_size,
        ignore_saved=True,
        context="conveyors",
        random_seed=random_seed,
        run_params=scenario,
        save_path=pretrain_dataset_filename
    )
    shuffled_data = data.sample(frac=1)
    conv_emb = CachedEmbedding(LaplacianEigenmap, dim=emb_dim)
    actor_args = {
        'scope': dir_with_models,
        'embedding_dim': emb_dim
    }
    actor_args = dict(**actor_config, **actor_args)
    actor_model = PPOActor(**actor_args)
    actor_losses = actor_pretrain(
        actor_model, shuffled_data, embedding=conv_emb
    )
    return actor_losses
def train_reinforce(
    progress_step: int,
    router_type: str,
    dir_with_models: str,
    pretrain_filename: str,
    train_filename: str,
    random_seed: int,
    work_with_files: bool,
    retrain: bool
):
    """Train (or load) a REINFORCE router in simulation; return (series, world)."""
    router_cfg = scenario["settings"]["router"][router_type]
    router_cfg["dir_with_models"] = dir_with_models
    router_cfg["load_filename"] = pretrain_filename
    event_series, runner = run_single(
        run_params=scenario,
        router_type=router_type,
        progress_step=progress_step,
        ignore_saved=[True],
        random_seed=random_seed,
    )
    world = runner.world
    some_router = next(iter(next(iter(world.handlers.values())).routers.values()))
    actor = some_router.actor
    actor.change_label(train_filename)
    if work_with_files:
        if not retrain:
            actor.restore()
        elif some_router.use_single_network:
            actor.save()
        else:
            print("Warning: saving/loaded models trained in simulation is only implemented "
                  "when use_single_neural_network = True. The models were not saved to disk.")
    return event_series, world
# pretrain
if reinforce_emb_exists:
    reinforce_serieses = []
    from dqnroute.agents.routers.reinforce import PackageHistory
    from collections import defaultdict
    reinforce_emb_config = scenario['settings']['router']['reinforce_emb']
    reinforce_config = reinforce_emb_config['actor']
    dir_with_models = 'conveyor_models_reinforce'
    reinforce_pretrain_filename = f'pretrained{filename_suffix}'
    reinforce_pretrain_path = Path(TORCH_MODELS_DIR) / dir_with_models / reinforce_pretrain_filename
    trained_filename = f'actor_trained{filename_suffix}'
    trained_path = Path(TORCH_MODELS_DIR) / dir_with_models / trained_filename
    # NOTE(review): the trailing `or True` forces pretraining and training on
    # every run regardless of saved models — looks like a debug leftover.
    do_pretrain = force_pretrain or not reinforce_pretrain_path.exists() or True
    do_train = force_train or not trained_path.exists() or True
    print(f'Reinforce model: {reinforce_pretrain_path}')
    # 10 repeated experiments; the global per-package bookkeeping in
    # PackageHistory is reset before each run.
    for _ in range(10):
        PackageHistory.routers = defaultdict(dict)
        PackageHistory.rewards = defaultdict(list)
        PackageHistory.log_probs = defaultdict(list)
        PackageHistory.finished_packages = set()
        PackageHistory.started_packages = set()
        if do_pretrain:
            print('Pretraining REINFORCE Models...')
            reinforce_losses = pretrain_reinforce(
                pretrain_data_size,
                pretrain_epochs_num,
                reinforce_config,
                dir_with_models,
                reinforce_pretrain_filename,
                '../logs/data_conveyor_reinforce.csv'
            )
            print(f'Actor loss: {reinforce_losses.tolist()}')
        else:
            print('Using already pretrained models')
        if do_train:
            print('Training REINFORCE Model...')
            reinforce_log, reinforce_world = train_reinforce(
                train_data_size,
                'reinforce_emb',
                dir_with_models,
                reinforce_pretrain_filename,
                trained_filename,
                random_seed,
                True,
                True
            )
        else:
            print('Skip training process...')
        reinforce_serieses.append(reinforce_log.getSeries(add_avg=True))
def train(
    progress_step: int,
    router_type: str,
    random_seed: int,
):
    """Run a plain simulation for *router_type*; the world object is not kept."""
    series, _runner = run_single(
        run_params=scenario,
        router_type=router_type,
        progress_step=progress_step,
        ignore_saved=[True],
        random_seed=random_seed,
    )
    return series, None
# 4. load the router graph
def visualize(g: RouterGraph):
    """Render the router graph with several Graphviz layout programs (pdf + png)."""
    gv_graph = g.to_graphviz()
    base = f"../img/topology_graph{filename_suffix}."
    gv_graph.write(base + "gv")
    for layout in ["dot", "circo", "twopi"]:
        for fmt in ["pdf", "png"]:
            path = f"{base}{layout}.{fmt}"
            print(f"Drawing {path} ...")
            gv_graph.draw(path, prog=layout, args="-Gdpi=300 -Gmargin=0 -Grankdir=LR")
def get_symbolic_analyzer() -> SymbolicAnalyzer:
    """Build a SymbolicAnalyzer from the module-level graph and CLI arguments."""
    return SymbolicAnalyzer(
        g,
        softmax_temperature,
        probability_smoothing,
        args.verification_lr,
        delta_q_max=args.input_max_delta_q,
    )
def get_nnet_verifier() -> NNetVerifier:
    """Build an NNetVerifier backed by the Marabou executable.

    Raises:
        AssertionError: if --marabou_path was not supplied on the command line.
    """
    # BUG FIX: the message referred to a non-existent --verification_marabou_path
    # flag; the actual argument is --marabou_path.
    assert args.marabou_path is not None, \
        "You must specify --marabou_path for command embedding_adversarial_verification."
    return NNetVerifier(g, args.marabou_path, NETWORK_FILENAME, PROPERTY_FILENAME,
                        probability_smoothing, softmax_temperature, emb_dim,
                        args.linux_marabou_memory_limit_mb)
def get_sources(sink: AgentId) -> List[AgentId]:
    """
    :return: the list of all sources that are reachable from the specified sink. If a single source
        was specified in command line arguments, only this source will be returned.
    """
    chosen = args.single_source
    return [
        source for source in g.get_sources_for_node(sink)
        if chosen is None or source[1] == chosen
    ]
def get_sinks() -> List[Tuple[AgentId, torch.Tensor]]:
    """
    :return: the list of all sinks (paired with their embeddings). If a single sink was specified
        in command line arguments, only this sink will be returned.
    """
    chosen = args.single_sink
    return [
        (sink, g.node_to_embeddings(sink, sink)[0])
        for sink in g.sinks
        if chosen is None or sink[1] == chosen
    ]
def get_source_sink_pairs(message: str, ma_verbose: bool = False) -> \
        Generator[Tuple[AgentId, AgentId, torch.Tensor, MarkovAnalyzer], None, None]:
    """Yield (source, sink, sink embedding, MarkovAnalyzer) for every considered
    source/sink pair, printing *message* for each pair as it is produced."""
    for sink, sink_embedding in get_sinks():
        # BUG FIX (dead code): `sink_embeddings = sink_embedding.repeat(2, 1)`
        # was computed here but never used; removed.
        for source in get_sources(sink):
            print(f"{message} from {source} to {sink}...")
            ma = MarkovAnalyzer(g, source, sink, args.simple_path_cost, ma_verbose)
            yield source, sink, sink_embedding, ma
def get_learning_step_indices() -> Optional[Set[int]]: