forked from radixark/miles
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain_async.py
More file actions
77 lines (61 loc) · 3.25 KB
/
train_async.py
File metadata and controls
77 lines (61 loc) · 3.25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import ray
from miles.ray.placement_group import create_placement_groups, create_rollout_manager, create_training_models
from miles.utils.arguments import parse_args
from miles.utils.logging_utils import configure_logger
from miles.utils.misc import should_run_periodic_action
from miles.utils.tracking_utils import init_tracking
# The framework supports other asynchronous approaches such as fully async (which is shown in examples/full_async).
def train(args):
    """Run the one-step-off asynchronous RL training loop.

    Generation for rollout ``i + 1`` is launched before training on rollout
    ``i`` finishes, so rollout generation and training overlap by one step.
    (Other async schemes, e.g. fully async, live in examples/full_async.)

    Args:
        args: Parsed command-line namespace (see
            ``miles.utils.arguments.parse_args``). Must have ``colocate``
            disabled — colocation is not supported for async training.
    """
    assert not args.colocate, "Colocation is not supported for async training."
    configure_logger()
    # Allocate the GPUs.
    pgs = create_placement_groups(args)
    init_tracking(args)
    # Create the rollout manager (with sglang engines inside) first: it is
    # needed to calculate num_rollout_per_epoch.
    rollout_manager, num_rollout_per_epoch = create_rollout_manager(args, pgs["rollout"])
    # Create the actor and critic models.
    actor_model, critic_model = create_training_models(args, pgs, rollout_manager)
    # Always push weights first so that sglang serves the weights loaded by
    # the training side.
    actor_model.update_weights()
    if args.check_weight_update_equal:
        ray.get(rollout_manager.check_weights.remote(action="compare"))

    # Async train loop: keep exactly one generation in flight ahead of training.
    rollout_data_next_future = rollout_manager.generate.remote(args.start_rollout_id)
    for rollout_id in range(args.start_rollout_id, args.num_rollout):
        # Sync the last generation. If the future was consumed early by the
        # weight-update branch below, rollout_data_curr_ref already holds it.
        if rollout_data_next_future is not None:
            rollout_data_curr_ref = ray.get(rollout_data_next_future)
        # Start the next rollout early.
        if rollout_id + 1 < args.num_rollout:
            rollout_data_next_future = rollout_manager.generate.remote(rollout_id + 1)

        if args.use_critic:
            critic_train_handle = critic_model.async_train(rollout_id, rollout_data_curr_ref)
            # During the critic-only warm-up steps, skip actor training.
            if rollout_id >= args.num_critic_only_steps:
                ray.get(actor_model.async_train(rollout_id, rollout_data_curr_ref))
            ray.get(critic_train_handle)
        else:
            ray.get(actor_model.async_train(rollout_id, rollout_data_curr_ref))

        # Periodic checkpointing; force a synchronous save on the final rollout.
        if should_run_periodic_action(rollout_id, args.save_interval, num_rollout_per_epoch, args.num_rollout):
            actor_model.save_model(
                rollout_id,
                force_sync=rollout_id == args.num_rollout - 1,
            )
            if args.use_critic:
                critic_model.save_model(
                    rollout_id,
                    force_sync=rollout_id == args.num_rollout - 1,
                )
            # NOTE(review): indentation reconstructed from a flattened scrape;
            # the dataset save is assumed to belong inside the periodic-save
            # block — confirm against upstream.
            if args.rollout_global_dataset:
                ray.get(rollout_manager.save.remote(rollout_id))

        if (rollout_id + 1) % args.update_weights_interval == 0:
            # Sync the in-flight generation before updating weights, to
            # prevent updating weights in the middle of a generation.
            rollout_data_curr_ref = ray.get(x) if (x := rollout_data_next_future) is not None else None
            rollout_data_next_future = None
            actor_model.update_weights()

        if should_run_periodic_action(rollout_id, args.eval_interval, num_rollout_per_epoch):
            ray.get(rollout_manager.eval.remote(rollout_id))

    ray.get(rollout_manager.dispose.remote())
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and launch the async train loop.
    args = parse_args()
    train(args)