
Commit b890bf5

support discrete profiling for mindspeed

1 parent 17c61be

4 files changed (+15 lines, -15 lines)

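This commit threads an explicit role string through every DistProfiler.annotate call in the touched workers, which is what a discrete profiling mode needs in order to emit one labeled capture per training stage instead of a single end-to-end trace. A minimal sketch of the idea follows; SimpleProfiler and its discrete flag are illustrative stand-ins, not verl's actual DistProfiler.

# Illustrative stand-in, not verl's DistProfiler: shows why a per-call
# `role` matters once a profiler runs in "discrete" mode, i.e. records
# each annotated stage as its own labeled slice.
import functools
import time


class SimpleProfiler:
    def __init__(self, discrete=True):
        self.discrete = discrete
        self.records = []  # list of (role, seconds) slices

    def annotate(self, color="white", role="anonymous"):
        # `color` is trace-UI metadata; `role` names the slice.
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                start = time.perf_counter()
                try:
                    return func(*args, **kwargs)
                finally:
                    if self.discrete:
                        self.records.append((role, time.perf_counter() - start))
            return wrapper
        return decorator


profiler = SimpleProfiler(discrete=True)


@profiler.annotate(color="red", role="actor_update")
def update_actor():
    time.sleep(0.01)  # stand-in for a real training step


update_actor()
print(profiler.records)  # e.g. [('actor_update', 0.0102...)]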

docs/ascend_tutorial/ascend_profiling_en.rst

Lines changed: 2 additions & 2 deletions
@@ -1,10 +1,10 @@
-Data collection based on FSDP backend on Ascend devices(en)
+Data collection based on FSDP or MindSpeed(Megatron) on Ascend devices(en)
 ==========================================================================================
 
 Last updated: 08/14/2025.
 
 This is a tutorial for data collection using the GRPO or DAPO algorithm
-based on FSDP on Ascend devices.
+based on FSDP or MindSpeed(Megatron) on Ascend devices.
 
 Configuration
 -------------

docs/ascend_tutorial/ascend_profiling_zh.rst

Lines changed: 3 additions & 3 deletions
@@ -1,11 +1,11 @@
-Data collection based on FSDP backend on Ascend devices(zh)
+Data collection based on FSDP or MindSpeed(Megatron) on Ascend devices(zh)
 ====================================
 
-在昇腾设备上基于FSDP后端进行数据采集
+在昇腾设备上基于FSDP或MindSpeed(Megatron)后端进行数据采集
 
 Last updated: 08/14/2025.
 
-这是一份在昇腾设备上基于FSDP后端使用GRPO或DAPO算法进行数据采集的教程
+这是一份在昇腾设备上基于FSDP或MindSpeed(Megatron)后端,使用GRPO或DAPO算法进行数据采集的教程
 
 配置
 ----

verl/workers/fsdp_workers.py

Lines changed: 3 additions & 3 deletions
@@ -1483,7 +1483,7 @@ def init_model(self):
         )
 
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
-    @DistProfiler.annotate(color="cyan")
+    @DistProfiler.annotate(color="cyan", role="compute_values")
     def compute_values(self, data: DataProto):
         if self._is_offload_param:
             load_fsdp_model_to_gpu(self.critic_module)
@@ -1503,7 +1503,7 @@ def compute_values(self, data: DataProto):
         return output
 
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
-    @DistProfiler.annotate(color="pink")
+    @DistProfiler.annotate(color="pink", role="update_critic")
     def update_critic(self, data: DataProto):
         if self._is_offload_param:
             load_fsdp_model_to_gpu(self.critic_module)
@@ -1867,7 +1867,7 @@ def _switch_chat_template(self, data: DataProto):
         return DataProto.from_dict(rm_inputs)
 
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="reward"))
-    @DistProfiler.annotate(color="brown")
+    @DistProfiler.annotate(color="brown", role="compute_rm_score")
     def compute_rm_score(self, data: DataProto):
         import itertools
 
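Conceptually, each role behaves like a named range in a trace, so a discrete view of the timeline can be sliced per stage. The self-contained sketch below shows the same pattern using only standard torch.profiler APIs; verl's DistProfiler wraps backend-specific tooling (e.g. for Ascend), so treat this as an analogy rather than its implementation.

# Analogy with plain torch.profiler: each role-style label becomes a
# named range that shows up as its own row in the profiler output.
import torch
from torch.profiler import ProfilerActivity, profile, record_function


def compute_values(batch):
    return batch * 2  # stand-in for the critic forward pass


with profile(activities=[ProfilerActivity.CPU]) as prof:
    with record_function("compute_values"):  # role-style label
        compute_values(torch.ones(8))
    with record_function("update_critic"):  # another labeled stage
        torch.ones(8).sum()

print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=5))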

verl/workers/megatron_workers.py

Lines changed: 7 additions & 7 deletions
@@ -631,7 +631,7 @@ async def trainer_mode(self):
 
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
     @GPUMemoryLogger(role="update_actor", logger=logger)
-    @DistProfiler.annotate(color="red")
+    @DistProfiler.annotate(color="red", role="actor_update")
     def update_actor(self, data: DataProto):
         assert self._is_actor
         if self._is_offload_param:
@@ -674,7 +674,7 @@ def update_actor(self, data: DataProto):
 
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="rollout"))
     @GPUMemoryLogger(role="generate_sequences", logger=logger)
-    @DistProfiler.annotate(color="red")
+    @DistProfiler.annotate(color="red", role="rollout_generate")
     def generate_sequences(self, prompts: DataProto):
         assert self._is_rollout
         prompts = prompts.to(get_device_name())
@@ -724,7 +724,7 @@ def generate_sequences(self, prompts: DataProto):
 
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
     @GPUMemoryLogger(role="compute_ref_log_prob", logger=logger)
-    @DistProfiler.annotate(color="olive")
+    @DistProfiler.annotate(color="olive", role="ref_compute_log_prob")
     def compute_ref_log_prob(self, data: DataProto):
         assert self._is_ref
         if self._ref_is_offload_param:
@@ -746,7 +746,7 @@ def compute_ref_log_prob(self, data: DataProto):
 
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
     @GPUMemoryLogger(role="compute_log_prob", logger=logger)
-    @DistProfiler.annotate(color="blue")
+    @DistProfiler.annotate(color="blue", role="actor_compute_log_prob")
     def compute_log_prob(self, data: DataProto):
         assert self._is_actor
         if self._is_offload_param:
@@ -1079,7 +1079,7 @@ def init_model(self):
         )
 
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
-    @DistProfiler.annotate(color="cyan")
+    @DistProfiler.annotate(color="cyan", role="compute_values")
     def compute_values(self, data: DataProto):
         micro_batch_size = self.config.ppo_micro_batch_size_per_gpu
         data.meta_info["micro_batch_size"] = micro_batch_size
@@ -1096,7 +1096,7 @@ def compute_values(self, data: DataProto):
         return output
 
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="critic"))
-    @DistProfiler.annotate(color="pink")
+    @DistProfiler.annotate(color="pink", role="update_critic")
     def update_critic(self, data: DataProto):
         data = data.to(get_device_id())
 
@@ -1313,7 +1313,7 @@ def init_model(self):
     # TODO: reward model use itself tokenizer instead of sft tokenizer
     # the input_ids, responses, attention_mask and position_ids may be different!
     @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="reward"))
-    @DistProfiler.annotate(color="brown")
+    @DistProfiler.annotate(color="brown", role="compute_rm_score")
     def compute_rm_score(self, data: DataProto):
         data.meta_info["micro_batch_size"] = self.config.micro_batch_size_per_gpu
         data.meta_info["max_token_len"] = self.config.forward_max_token_len_per_gpu
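Once every stage is tagged with a role, discrete captures can be post-processed per stage, for example to compare the same role between an FSDP run and a MindSpeed(Megatron) run. A small self-contained example over hypothetical (role, seconds) records:

# Hypothetical post-processing of discrete profiling slices: total the
# time spent per role so stages can be compared across runs/backends.
from collections import defaultdict


def summarize(records):
    totals = defaultdict(float)
    for role, seconds in records:
        totals[role] += seconds
    return dict(totals)


# Fabricated sample slices, purely for illustration.
records = [
    ("actor_update", 0.5),
    ("rollout_generate", 2.0),
    ("actor_update", 0.25),
]
print(summarize(records))  # {'actor_update': 0.75, 'rollout_generate': 2.0}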
