# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FSDP PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with HuggingFace.
"""
import json
import os
import uuid
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from enum import IntEnum, auto
from typing import Any, Dict, List, Optional, Type
import numpy as np
import ray
import torch
from ray.experimental.tqdm_ray import tqdm
from torchdata.stateful_dataloader import StatefulDataLoader
from transformers import PreTrainedTokenizer, ProcessorMixin
from ..protocol import DataProto, pad_dataproto_to_divisor, unpad_dataproto
from ..single_controller.base import Worker
from ..single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
from ..single_controller.ray.base import create_colocated_worker_cls
from ..utils import torch_functional as VF
from ..utils.checkpoint import CHECKPOINT_TRACKER, remove_obsolete_ckpt
from ..utils.logger import Tracker
from ..utils.py_functional import convert_dict_to_str, timer
from ..utils.seqlen_balancing import get_seqlen_balanced_partitions, log_seqlen_unbalance
from ..workers.fsdp_workers import FSDPWorker
from ..workers.reward import FunctionRewardManager
from . import core_algos
from .config import PPOConfig
from .core_algos import AdvantageEstimator, FixedKLController, KLController, compute_kl, get_kl_controller
from .metrics import compute_data_metrics, compute_throughout_metrics, compute_timing_metrics, reduce_metrics
class Role(IntEnum):
"""
To create more roles dynamically, you can subclass Role and add new members
"""
Actor = auto()
Rollout = auto()
ActorRollout = auto()
Critic = auto()
RefPolicy = auto()
RewardModel = auto()
ActorRolloutRef = auto()
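# Usage sketch (hypothetical wiring, not defined in this module): Role members key the worker-class and
# resource-pool mappings handed to the trainer, e.g.
#   role_worker_mapping = {Role.ActorRolloutRef: FSDPWorker, Role.Critic: FSDPWorker}
#   mapping = {Role.ActorRolloutRef: "global_pool", Role.Critic: "global_pool"}
# Whether the worker class is additionally wrapped (e.g. with ray.remote) depends on the driver script.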
@dataclass
class ResourcePoolManager:
"""
Define a resource pool specification. Resource pool will be initialized first.
"""
resource_pool_spec: dict[str, list[int]]
mapping: dict[Role, str]
resource_pool_dict: dict[str, RayResourcePool] = field(default_factory=dict)
def create_resource_pool(self):
for resource_pool_name, process_on_nodes in self.resource_pool_spec.items():
# max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool
# For the FSDP backend, we recommend max_colocate_count=1, which merges all WorkerGroups into one.
# For the Megatron backend, we recommend max_colocate_count>1 so that different WorkerGroups can serve different models.
resource_pool = RayResourcePool(
process_on_nodes=process_on_nodes, use_gpu=True, max_colocate_count=1, name_prefix=resource_pool_name
)
self.resource_pool_dict[resource_pool_name] = resource_pool
self._check_resource_available()
def get_resource_pool(self, role: Role) -> RayResourcePool:
"""Get the resource pool of the worker."""
return self.resource_pool_dict[self.mapping[role]]
def get_num_gpus(self) -> int:
"""Get the number of gpus in this cluster."""
return sum([n_gpus for process_on_nodes in self.resource_pool_spec.values() for n_gpus in process_on_nodes])
def _check_resource_available(self):
"""Check if the resource pool can be satisfied in this ray cluster."""
gpus_available = ray.available_resources().get("GPU", 0)
gpus_required = self.get_num_gpus()
if gpus_available < gpus_required:
raise ValueError(f"Total available GPUs {gpus_available} is less than total desired GPUs {gpus_required}.")
def apply_kl_penalty(data: DataProto, kl_ctrl: KLController, kl_penalty="kl"):
token_level_scores = data.batch["token_level_scores"]
batch_size = data.batch.batch_size[0]
response_mask = data.batch["response_mask"]
# compute kl between ref_policy and current policy
kld = compute_kl(data.batch["old_log_probs"], data.batch["ref_log_probs"], kl_penalty=kl_penalty)
kld = kld * response_mask # (batch_size, response_length)
data.batch["token_level_rewards"] = token_level_scores - kl_ctrl.kl_coef * kld
current_kl = VF.masked_mean(kld, mask=response_mask, dim=-1) # average over sequence
current_kl = torch.mean(current_kl, dim=0).item()
metrics = {"critic/kl": current_kl, "critic/kl_coef": kl_ctrl.kl_coef}
# According to https://github.com/huggingface/trl/blob/v0.11.0/trl/trainer/ppo_trainer.py#L880
kl_ctrl.update(current_kl=current_kl, n_steps=batch_size)
return data, metrics
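# Worked example (hypothetical numbers): with kl_coef=0.01 and a per-token KL of 0.5 on a response token
# whose score is 1.0, the penalized reward is 1.0 - 0.01 * 0.5 = 0.995. Prompt tokens are zeroed out by
# response_mask, so only response tokens receive the penalty.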
def compute_advantage(data: DataProto, adv_estimator: AdvantageEstimator, gamma: float = 1.0, lam: float = 1.0):
token_level_rewards = data.batch["token_level_rewards"]
response_mask = data.batch["response_mask"]
index = data.non_tensor_batch["uid"]
if adv_estimator == AdvantageEstimator.GAE:
values = data.batch["values"]
advantages, returns = core_algos.compute_gae_advantage_return(
token_level_rewards, values, response_mask, gamma, lam
)
elif adv_estimator == AdvantageEstimator.GRPO:
advantages, returns = core_algos.compute_grpo_outcome_advantage(token_level_rewards, response_mask, index)
elif adv_estimator == AdvantageEstimator.REINFORCE_PLUS_PLUS:
advantages, returns = core_algos.compute_reinforce_plus_plus_outcome_advantage(
token_level_rewards, response_mask, gamma
)
elif adv_estimator == AdvantageEstimator.REMAX:
reward_baselines = data.batch["reward_baselines"]
advantages, returns = core_algos.compute_remax_outcome_advantage(
token_level_rewards, reward_baselines, response_mask
)
elif adv_estimator == AdvantageEstimator.RLOO:
advantages, returns = core_algos.compute_rloo_outcome_advantage(token_level_rewards, response_mask, index)
else:
raise NotImplementedError
data.batch["advantages"] = advantages
data.batch["returns"] = returns
return data
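# Usage sketch (argument values assumed): the estimator comes from the algorithm config, e.g.
#   data = compute_advantage(data, adv_estimator=AdvantageEstimator.GRPO, gamma=1.0, lam=1.0)
# GAE additionally expects data.batch["values"] from the critic, and REMAX expects
# data.batch["reward_baselines"] produced during batch generation.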
class RayPPOTrainer:
"""
Note that this trainer runs on the driver process on a single CPU/GPU node.
"""
def __init__(
self,
config: PPOConfig,
tokenizer: PreTrainedTokenizer,
processor: Optional[ProcessorMixin],
train_dataloader: StatefulDataLoader,
val_dataloader: StatefulDataLoader,
role_worker_mapping: dict[Role, Type[Worker]],
resource_pool_manager: ResourcePoolManager,
ray_worker_group_cls: Type[RayWorkerGroup] = RayWorkerGroup,
reward_fn: Optional[FunctionRewardManager] = None,
val_reward_fn: Optional[FunctionRewardManager] = None,
):
self.tokenizer = tokenizer
self.processor = processor
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.config = config
self.reward_fn = reward_fn
self.val_reward_fn = val_reward_fn
self.val_reward_score = 0.0
self.best_val_reward_score = -1.0
self.best_global_step = None
self.hybrid_engine = config.worker.hybrid_engine
self.role_worker_mapping = role_worker_mapping
self.resource_pool_manager = resource_pool_manager
self.use_reward_model = Role.RewardModel in role_worker_mapping
self.ray_worker_group_cls = ray_worker_group_cls
# define KL control
if config.algorithm.disable_kl:
self.use_reference_policy = False
self.kl_ctrl = FixedKLController(init_kl_coef=0.0)
print("KL is disabled, no KL metrics will be logged. Please set `kl_coef=0` to log KL metrics.")
else:
self.use_reference_policy = True
self.kl_ctrl = get_kl_controller(config.algorithm)
if config.algorithm.adv_estimator == AdvantageEstimator.GAE:
self.use_critic = True
else:
self.use_critic = False
if config.algorithm.adv_estimator not in list(AdvantageEstimator):
raise NotImplementedError(f"Unknown advantage estimator: {config.algorithm.adv_estimator}.")
if config.data.rollout_batch_size % config.worker.actor.global_batch_size != 0:
raise ValueError("Rollout batch size must be divisible by actor global batch size.")
if (
config.data.rollout_batch_size * config.worker.rollout.n
) % config.worker.actor.micro_batch_size_per_device_for_experience != 0:
raise ValueError(
"Rollout batch size * rollout.n must be divisible by actor micro batch size for experience."
)
if self.use_critic:
if config.data.rollout_batch_size % config.worker.critic.global_batch_size != 0:
raise ValueError("Rollout batch size must be divisible by critic global batch size.")
if (
config.data.rollout_batch_size * config.worker.rollout.n
) % config.worker.critic.micro_batch_size_per_device_for_experience != 0:
raise ValueError(
"Rollout batch size * rollout.n must be divisible by critic micro batch size for experience."
)
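# Example of a consistent configuration (hypothetical numbers): rollout_batch_size=512, rollout.n=4,
# global_batch_size=128 and micro_batch_size_per_device_for_experience=16 pass the checks above,
# since 512 % 128 == 0 and (512 * 4) % 16 == 0.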
if (
config.algorithm.adv_estimator in (AdvantageEstimator.GRPO, AdvantageEstimator.RLOO)
and config.worker.rollout.n == 1
):
raise ValueError("GRPO and RLOO algorithm need `config.worker.rollout.n > 1`.")
if config.trainer.max_steps is not None:
self.training_steps = config.trainer.max_steps
else:
self.training_steps = len(train_dataloader) * config.trainer.total_epochs
config.worker.actor.optim.training_steps = self.training_steps
config.worker.critic.optim.training_steps = self.training_steps
print(f"Total training steps: {self.training_steps}")
def _maybe_log_val_generations(
self, inputs: List[str], outputs: List[str], labels: List[str], scores: List[float]
) -> None:
"""Log a table of validation samples"""
if self.config.trainer.val_generations_to_log <= 0:
return
# Create tuples of (input, output, label, score) and sort by input text
samples = list(zip(inputs, outputs, labels, scores))
samples.sort(key=lambda x: x[0]) # Sort by input text
# Use fixed random seed for deterministic shuffling
rng = np.random.RandomState(42)
rng.shuffle(samples)
samples = samples[: self.config.trainer.val_generations_to_log]
self.logger.log_generation(samples, self.global_step)
def _validate(self) -> Dict[str, Any]:
reward_tensor_lst = []
# Lists to collect samples for the table
sample_inputs, sample_outputs, sample_labels, sample_scores = [], [], [], []
reward_metrics_lst = defaultdict(list)
print("Start validation...")
self.actor_rollout_ref_wg.prepare_rollout_engine()
for batch_dict in self.val_dataloader:
test_batch = DataProto.from_single_dict(batch_dict)
# Store original inputs
input_ids = test_batch.batch["input_ids"]
input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids]
sample_inputs.extend(input_texts)
test_gen_batch = test_batch.pop(
batch_keys=["input_ids", "attention_mask", "position_ids"],
non_tensor_batch_keys=["raw_prompt_ids", "multi_modal_data"],
)
test_gen_batch.meta_info = self.config.worker.rollout.val_override_config
test_gen_batch.meta_info["min_pixels"] = self.config.data.min_pixels
test_gen_batch.meta_info["max_pixels"] = self.config.data.max_pixels
test_gen_batch, pad_size = pad_dataproto_to_divisor(test_gen_batch, self.actor_rollout_ref_wg.world_size)
test_output_gen_batch = self.actor_rollout_ref_wg.generate_sequences(test_gen_batch)
test_output_gen_batch = unpad_dataproto(test_output_gen_batch, pad_size=pad_size)
# Store generated outputs
output_ids = test_output_gen_batch.batch["responses"]
output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids]
sample_outputs.extend(output_texts)
sample_labels.extend(test_batch.non_tensor_batch["ground_truth"].tolist())
test_batch = test_batch.union(test_output_gen_batch)
# evaluate using reward_function
reward_tensor, reward_metrics = ray.get(self.val_reward_fn.compute_reward.remote(test_batch))
# Store scores
scores = reward_tensor.sum(-1).cpu().tolist()
sample_scores.extend(scores)
reward_tensor_lst.append(reward_tensor)
for key, value in reward_metrics.items():
reward_metrics_lst[key].extend(value)
self.actor_rollout_ref_wg.release_rollout_engine()
self._maybe_log_val_generations(sample_inputs, sample_outputs, sample_labels, sample_scores)
self.val_reward_score = torch.cat(reward_tensor_lst, dim=0).sum(-1).mean().item()
val_reward_metrics = {f"val/{key}_reward": value for key, value in reduce_metrics(reward_metrics_lst).items()}
print("Finish validation.")
return {"val/reward_score": self.val_reward_score, **val_reward_metrics}
def init_workers(self) -> None:
"""Init resource pool and worker group"""
self.resource_pool_manager.create_resource_pool()
self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
# create actor and rollout
if self.hybrid_engine:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRolloutRef)
actor_rollout_ref_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[Role.ActorRolloutRef], config=self.config.worker, role="actor_rollout_ref"
)
self.resource_pool_to_cls[resource_pool]["actor_rollout_ref"] = actor_rollout_ref_cls
else:
raise NotImplementedError
# create critic
if self.use_critic:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic)
critic_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[Role.Critic], config=self.config.worker, role="critic"
)
self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls
# create a reward model worker if a model-based reward is used
if self.use_reward_model:
# we create a RM here
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel)
rm_cls = RayClassWithInitArgs(
cls=self.role_worker_mapping[Role.RewardModel], config=self.config.worker, role="reward"
)
self.resource_pool_to_cls[resource_pool]["rm"] = rm_cls
# initialize WorkerGroup
# NOTE: if you want to use a different resource pool for each role, which can support different parallel sizes,
# you should not use `create_colocated_worker_cls`. Instead, pass a different resource pool to each worker group.
# See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
all_wg: Dict[str, FSDPWorker] = {}
self.wg_dicts = []
for resource_pool, class_dict in self.resource_pool_to_cls.items():
worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
wg_dict = self.ray_worker_group_cls(resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls)
spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
all_wg.update(spawn_wg)
# keep the reference of WorkerDict to support ray >= 2.31. Ref: https://github.com/ray-project/ray/pull/45699
self.wg_dicts.append(wg_dict)
if self.use_critic:
self.critic_wg = all_wg["critic"]
self.critic_wg.init_model()
if self.use_reward_model:
self.rm_wg = all_wg["rm"]
self.rm_wg.init_model()
# we should initialize the rollout workers last so that vLLM can get a better estimate of the available KV cache memory
self.actor_rollout_ref_wg = all_wg["actor_rollout_ref"]
self.actor_rollout_ref_wg.init_model()
def _save_checkpoint(self) -> None:
# path: {save_checkpoint_path}/global_step_{global_step}/{actor,critic}
if self.val_reward_score > self.best_val_reward_score:
self.best_val_reward_score = self.val_reward_score
self.best_global_step = self.global_step
remove_obsolete_ckpt(
self.config.trainer.save_checkpoint_path,
self.global_step,
self.best_global_step,
self.config.trainer.save_limit,
)
folder_path = os.path.join(self.config.trainer.save_checkpoint_path, f"global_step_{self.global_step}")
actor_path = os.path.join(folder_path, "actor")
self.actor_rollout_ref_wg.save_checkpoint(actor_path, save_model_only=self.config.trainer.save_model_only)
if self.use_critic:
critic_path = os.path.join(folder_path, "critic")
self.critic_wg.save_checkpoint(critic_path, save_model_only=self.config.trainer.save_model_only)
dataloader_path = os.path.join(folder_path, "dataloader.pt")
dataloader_state_dict = self.train_dataloader.state_dict()
torch.save(dataloader_state_dict, dataloader_path)
checkpointer_tracker_info = {
"best_global_step": self.best_global_step,
"best_val_reward_score": round(self.best_val_reward_score, 4),
"last_global_step": self.global_step,
"last_actor_path": os.path.abspath(actor_path),
}
checkpointer_tracker_path = os.path.join(self.config.trainer.save_checkpoint_path, CHECKPOINT_TRACKER)
with open(checkpointer_tracker_path, "w") as f:
json.dump(checkpointer_tracker_info, f, ensure_ascii=False, indent=2)
def _load_checkpoint(self) -> None:
if self.config.trainer.load_checkpoint_path is None:
return
if "global_step_" not in self.config.trainer.load_checkpoint_path.strip(os.path.sep).split(os.path.sep)[-1]:
raise ValueError("`load_checkpoint_path` should end with `global_step_*`.")
print(f"Load from checkpoint: {self.config.trainer.load_checkpoint_path}.")
self.global_step = int(self.config.trainer.load_checkpoint_path.strip(os.path.sep).split("global_step_")[-1])
actor_path = os.path.join(self.config.trainer.load_checkpoint_path, "actor")
self.actor_rollout_ref_wg.load_checkpoint(actor_path)
if self.use_critic:
critic_path = os.path.join(self.config.trainer.load_checkpoint_path, "critic")
self.critic_wg.load_checkpoint(critic_path)
dataloader_path = os.path.join(self.config.trainer.load_checkpoint_path, "dataloader.pt")
if os.path.exists(dataloader_path):
dataloader_state_dict = torch.load(dataloader_path, weights_only=False)
self.train_dataloader.load_state_dict(dataloader_state_dict)
else:
print(f"No dataloader state found at {dataloader_path}, will start from scratch.")
def _balance_batch(self, batch: DataProto, metrics: Dict[str, Any], logging_prefix: str = "global_seqlen") -> None:
"""Reorder the data on single controller such that each dp rank gets similar total tokens"""
attention_mask = batch.batch["attention_mask"]
batch_size = attention_mask.shape[0]
global_seqlen_lst = batch.batch["attention_mask"].view(batch_size, -1).sum(-1).tolist() # (train_batch_size,)
world_size = self.actor_rollout_ref_wg.world_size
global_partition_lst = get_seqlen_balanced_partitions(
global_seqlen_lst, k_partitions=world_size, equal_size=True
)
# reorder based on index. The data will be automatically partitioned equally by the dispatch function
global_idx = torch.tensor([j for partition in global_partition_lst for j in partition])
batch.reorder(global_idx)
global_balance_stats = log_seqlen_unbalance(
seqlen_list=global_seqlen_lst, partitions=global_partition_lst, prefix=logging_prefix
)
metrics.update(global_balance_stats)
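# Illustrative example (hypothetical token counts): with per-sample lengths [100, 900, 500, 500] and
# world_size=2, a balanced partition is [[100, 900], [500, 500]], so each dp rank processes ~1,000 tokens.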
def _make_batch_data(self, metrics: Dict[str, Any]) -> DataProto:
batch = None
while True:
try:
batch_dict = next(self.data_iterator)
except StopIteration:
self.data_iterator = iter(self.train_dataloader)
batch_dict = next(self.data_iterator)
meta_info = {"min_pixels": self.config.data.min_pixels, "max_pixels": self.config.data.max_pixels}
new_batch: DataProto = DataProto.from_single_dict(batch_dict, meta_info=meta_info)
# pop those keys for generation
gen_batch = new_batch.pop(
batch_keys=["input_ids", "attention_mask", "position_ids"],
non_tensor_batch_keys=["raw_prompt_ids", "multi_modal_data"],
meta_info_keys=["min_pixels", "max_pixels"],
)
# generate a batch
gen_batch_output = self.actor_rollout_ref_wg.generate_sequences(gen_batch)
if self.config.algorithm.adv_estimator == "remax":
gen_baseline_batch = deepcopy(gen_batch)
gen_baseline_batch.meta_info["temperature"] = 0
gen_baseline_batch.meta_info["n"] = 1
gen_baseline_output = self.actor_rollout_ref_wg.generate_sequences(gen_baseline_batch)
new_batch = new_batch.union(gen_baseline_output)
reward_baseline_tensor, _ = ray.get(self.reward_fn.compute_reward.remote(new_batch))
reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)
new_batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))
new_batch.batch["reward_baselines"] = reward_baseline_tensor
del gen_baseline_batch, gen_baseline_output
new_batch.non_tensor_batch["uid"] = np.array(
[str(uuid.uuid4()) for _ in range(len(new_batch.batch))], dtype=object
)
# repeat to align with repeated responses in rollout
new_batch = new_batch.repeat(repeat_times=self.config.worker.rollout.n, interleave=True)
new_batch = new_batch.union(gen_batch_output)
# filter group
# TODO: implement DAPO
batch = DataProto.concat([batch, new_batch]) if batch is not None else new_batch
if len(batch) < self.config.data.rollout_batch_size * self.config.worker.rollout.n:
continue
else:
return batch[: self.config.data.rollout_batch_size * self.config.worker.rollout.n]
def fit(self):
"""
The training loop of PPO.
The driver process only needs to call the compute functions of the worker groups through RPC to construct the PPO dataflow.
The lightweight advantage computation is done on the driver process.
"""
self.logger = Tracker(loggers=self.config.trainer.logger, config=self.config.to_dict())
self.global_step = 0
main_tqdm = tqdm(range(self.training_steps), desc="Running step", position=0)
val_metrics: Optional[Dict[str, Any]] = None
# load checkpoint before doing anything
self._load_checkpoint()
main_tqdm.update(self.global_step)
# perform validation before training
# currently, we only support validation using the reward_function.
if self.val_reward_fn is not None and self.config.trainer.val_before_train:
val_metrics = self._validate()
self.logger.log(data=val_metrics, step=self.global_step)
if self.config.trainer.val_only:
return
self.data_iterator = iter(self.train_dataloader)
while self.global_step < self.training_steps:
self.global_step += 1
metrics, timing_raw = {}, {}
with timer("step", timing_raw):
# make a batch of data
with timer("gen", timing_raw):
self.actor_rollout_ref_wg.prepare_rollout_engine()
batch = self._make_batch_data(metrics=metrics)
self.actor_rollout_ref_wg.release_rollout_engine()
# balance the number of valid tokens on each dp rank.
# NOTE: this breaks the order of data inside the batch.
# Please take care when implementing group-based advantage computation such as GRPO and RLOO.
self._balance_batch(batch, metrics=metrics)
# compute global valid tokens
batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
# compute reward
if "token_level_scores" not in batch.batch:
with timer("reward", timing_raw):
reward_ref = self.reward_fn.compute_reward.remote(batch)
# recompute old_log_probs
with timer("old", timing_raw):
old_log_probs = self.actor_rollout_ref_wg.compute_log_probs(batch)
batch = batch.union(old_log_probs)
# compute ref_log_probs
if self.use_reference_policy:
with timer("ref", timing_raw):
ref_log_probs = self.actor_rollout_ref_wg.compute_ref_log_probs(batch)
batch = batch.union(ref_log_probs)
# compute values
if self.use_critic:
with timer("values", timing_raw):
values = self.critic_wg.compute_values(batch)
batch = batch.union(values)
with timer("adv", timing_raw):
if "token_level_scores" not in batch.batch:
# collect the asynchronously computed token-level scores
reward_tensor, reward_metrics = ray.get(reward_ref)
batch.batch["token_level_scores"] = reward_tensor
reward_metrics = {f"reward/{k}": v for k, v in reduce_metrics(reward_metrics).items()}
metrics.update(reward_metrics)
# apply kl penalty if available
if not self.config.algorithm.use_kl_loss and self.use_reference_policy:
# apply kl penalty to reward
batch, kl_metrics = apply_kl_penalty(batch, self.kl_ctrl, self.config.algorithm.kl_penalty)
metrics.update(kl_metrics)
else:
batch.batch["token_level_rewards"] = batch.batch["token_level_scores"]
# compute advantages, executed on the driver process
batch = compute_advantage(
batch,
adv_estimator=self.config.algorithm.adv_estimator,
gamma=self.config.algorithm.gamma,
lam=self.config.algorithm.lam,
)
# update critic
if self.use_critic:
with timer("update_critic", timing_raw):
critic_output = self.critic_wg.update_critic(batch)
critic_metrics = reduce_metrics(critic_output.non_tensor_batch)
metrics.update(critic_metrics)
# update actor
if self.config.trainer.critic_warmup <= self.global_step:
with timer("update_actor", timing_raw):
actor_output = self.actor_rollout_ref_wg.update_actor(batch)
actor_metrics = reduce_metrics(actor_output.non_tensor_batch)
metrics.update(actor_metrics)
# validate
if (
self.val_reward_fn is not None
and self.config.trainer.val_freq > 0
and self.global_step % self.config.trainer.val_freq == 0
):
with timer("validation", timing_raw):
val_metrics = self._validate()
metrics.update(val_metrics)
if self.config.trainer.save_freq > 0 and self.global_step % self.config.trainer.save_freq == 0:
with timer("save_checkpoint", timing_raw):
self._save_checkpoint()
# collect metrics
num_gpus = self.resource_pool_manager.get_num_gpus()
metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, num_gpus=num_gpus))
self.logger.log(data=metrics, step=self.global_step)
main_tqdm.update()
# perform validation after training
if self.val_reward_fn is not None:
if (
val_metrics is None
or self.config.trainer.val_freq <= 0
or self.global_step % self.config.trainer.val_freq != 0
):
val_metrics = self._validate()
self.logger.log(data=val_metrics, step=self.global_step)
print(f"Final validation metrics: {convert_dict_to_str(val_metrics)}")
if self.config.trainer.save_freq <= 0 or self.global_step % self.config.trainer.save_freq != 0:
self._save_checkpoint()
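# End-to-end usage sketch (all names below are assumptions made by this comment, not definitions in this
# module): a driver script typically builds the tokenizer, dataloaders, reward managers, role/worker
# mapping and resource pools, then constructs the trainer, initializes workers, and starts training:
#   trainer = RayPPOTrainer(
#       config=config,
#       tokenizer=tokenizer,
#       processor=processor,
#       train_dataloader=train_dataloader,
#       val_dataloader=val_dataloader,
#       role_worker_mapping=role_worker_mapping,
#       resource_pool_manager=resource_pool_manager,
#       reward_fn=reward_fn,
#       val_reward_fn=val_reward_fn,
#   )
#   trainer.init_workers()
#   trainer.fit()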