forked from verl-project/verl
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathe2e_ppo_grpo_trainer_trtllm.yml
More file actions
285 lines (270 loc) · 11.9 KB
/
e2e_ppo_grpo_trainer_trtllm.yml
File metadata and controls
285 lines (270 loc) · 11.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
# # Tests layout
# Each folder under tests/ corresponds to a test category for a sub-namespace in verl. For instance:
# - `tests/trainer` for testing functionality related to `verl/trainer`
# - `tests/models` for testing functionality related to `verl/models`
# - ...
# There are a few folders with `special_` prefix, created for special purposes:
# - `special_distributed`: unit tests that must run with multiple GPUs
# - `special_e2e`: end-to-end tests with training/generation scripts
# - `special_npu`: tests for NPUs
# - `special_sanity`: a suite of quick sanity tests
# - `special_standalone`: a set of tests that are designed to run in dedicated environments
# Accelerators for tests
# - By default tests are run with GPU available, except for the ones under `special_npu`, and any test script whose name ends with `on_cpu.py`.
# - Test scripts with the `on_cpu.py` name suffix are tested on CPU resources in a Linux environment.
# # Workflow layout
# All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs:
# 1. A list of always triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `pre-commit.yml`, `doc.yml`
# 2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml`
# 3. End-to-end tests: `e2e_*.yml`
# 4. Unit tests
# - `cpu_unit_tests.yml`, run pytest on all scripts with file name pattern `tests/**/test_*_on_cpu.py`
# - `gpu_unit_tests.yml`, run pytest on all test scripts whose file name does not have the `on_cpu.py` suffix.
# - Since cpu/gpu unit tests by default runs all tests under `tests`, please make sure tests are manually excluded in them when
# - new workflow yaml is added to `.github/workflows`
# - new tests are added to workflow mentioned in 2.
name: e2e_ppo_trainer_megatron_trtllm

on:
  # Trigger the workflow on push or pull request,
  # but only for the main branch.
  # For push, for now only anti-patterns are specified so it is more conservative
  # and achieves higher coverage.
  push:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # FSDP
      - "!verl/workers/**/*dp_*.py"
  pull_request:
    branches:
      - main
      - v0.*
    paths:
      - "**/*.py"
      # Other entrypoints
      - "!docker/**"
      # Docs
      - "!**/*.md"
      - "!docs/**"
      - "!examples/**"
      - "!tests/**"
      - "!verl/trainer/main_*.py"
      - "!verl/trainer/fsdp_sft_trainer.py"
      # Recipes
      - "!recipe/**"
      # FSDP
      - "!verl/workers/**/*dp_*.py"
      # Entrypoints
      - "verl/workers/rollout/trtllm_rollout/**"
      - "tests/workers/rollout/rollout_trtllm/**"
      - ".github/workflows/e2e_ppo_grpo_trainer_trtllm.yml"
      - "examples/data_preprocess/gsm8k.py"
      - "examples/data_preprocess/geo3k.py"
      - "examples/data_preprocess/dapo_multiturn_w_tool.py"
      - "examples/data_preprocess/aime2024_multiturn_w_tool.py"
      - "examples/grpo_trainer/run_qwen2-7b_math_trtllm.sh"
      - "examples/grpo_trainer/run_qwen2-7b_math_megatron_trtllm.sh"
      # Fix: the e2e_grpo_trainer_fsdp-vlm job runs this script, but changes
      # to it previously did not re-trigger this workflow.
      - "examples/grpo_trainer/run_qwen2_5_vl_3b_trtllm.sh"
      - "examples/grpo_trainer/run_qwen3-30b_dapo_megatron_fp8_trtllm.sh"
      # add back when ppo flow is ready
      # - "tests/special_e2e/run_ppo_trainer_megatron.sh"
      # - "verl/trainer/main_ppo.py"
      # - "verl/trainer/config/ppo_megatron_trainer.yaml"

# Cancel jobs on the same ref if a new one is triggered
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

# Declare permissions just read content.
permissions:
  contents: read

env:
  IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:trtllm1.3.0rc4"
  DYNAMIC_RUNNER_ENDPOINT: "https://sd10g3clalm04ug7alq90.apigateway-cn-beijing.volceapi.com/runner"
jobs:
  # Provision a dynamic Volcengine ML-platform GPU runner. The GPU jobs below
  # target it via `needs.setup.outputs.runner-label` and the `cleanup` job
  # destroys it via `mlp-task-id`.
  setup:
    # Skip on forks: runner provisioning relies on the upstream project's
    # endpoint/credentials.
    if: github.repository_owner == 'verl-project'
    runs-on: ubuntu-latest
    outputs:
      runner-label: ${{ steps.create-runner.outputs.runner-label }}
      mlp-task-id: ${{ steps.create-runner.outputs.mlp-task-id }}
    steps:
      - uses: actions/checkout@v4
      - id: create-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "create"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-image: "${{ env.IMAGE }}"
  # Unit tests for the TRT-LLM rollout worker (adapter, async server, utils).
  trtllm_unit_tests:
    needs: setup
    # Fall back to the static 'L20x8' runner label if the dynamic runner
    # output is empty (e.g. setup was skipped).
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 30 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install pytest-asyncio
          pip3 install -r requirements-test.txt
          pip3 install --no-deps -e .
      - name: Run TRTLLM unit tests
        run: |
          export TRTLLM_TEST_MODEL_PATH_ROOT="${HOME}/models"
          # Kill any leftover Ray cluster from a previous job on this runner.
          ray stop --force
          pytest -v -s \
            tests/workers/rollout/rollout_trtllm/test_adapter.py \
            tests/workers/rollout/rollout_trtllm/test_async_server.py \
            tests/workers/rollout/rollout_trtllm/test_trtllm_rollout_utils.py
  # One-step GRPO smoke test: FSDP trainer + TRT-LLM rollout on GSM8K.
  e2e_grpo_trainer_fsdp-qwen2:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 30 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -r requirements-test.txt
          pip3 install --no-deps -e .
      - name: Prepare GSM8K dataset
        run: |
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k --local_save_dir ${PWD}/data/gsm8k
      - name: Running GSM8K E2E training tests with FSDP on 8 L20 GPUs (Qwen)
        run: |
          ray stop --force
          # NOTE(review): DATADIR points at ${HOME}/data while the dataset is
          # written to ${PWD}/data and passed explicitly via data.train_files /
          # data.val_files below -- confirm the script still consumes DATADIR.
          # The model path overrides the script's default with a small
          # 0.5B checkpoint, presumably to keep CI fast -- confirm intended.
          DATADIR=${HOME}/data \
          bash examples/grpo_trainer/run_qwen2-7b_math_trtllm.sh 2 \
            trainer.total_training_steps=1 \
            data.train_files="['${PWD}/data/gsm8k/train.parquet']" \
            data.val_files="['${PWD}/data/gsm8k/test.parquet']" \
            trainer.logger='["console"]' \
            actor_rollout_ref.model.path="${HOME}/models/Qwen/Qwen2.5-0.5B-Instruct"
      - name: clean up
        run: |
          rm -rf checkpoints
  # One-step GRPO smoke test: Megatron trainer (3D parallelism, actor TP=2)
  # + TRT-LLM rollout on GSM8K.
  e2e_grpo_trainer_megatron-qwen2:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 30 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -r requirements-test.txt
          pip3 install --no-deps -e .
      - name: Prepare GSM8K dataset
        run: |
          python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k --local_save_dir ${PWD}/data/gsm8k
      - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen)
        run: |
          ray stop --force
          # NOTE(review): same DATADIR-vs-${PWD}/data question as the FSDP job
          # above -- data files are passed explicitly via overrides.
          DATADIR=${HOME}/data \
          ACTOR_TP=2 \
          bash examples/grpo_trainer/run_qwen2-7b_math_megatron_trtllm.sh 2 \
            trainer.total_training_steps=1 \
            data.train_files="['${PWD}/data/gsm8k/train.parquet']" \
            data.val_files="['${PWD}/data/gsm8k/test.parquet']" \
            trainer.logger='["console"]' \
            actor_rollout_ref.model.path="${HOME}/models/Qwen/Qwen2.5-0.5B-Instruct"
      - name: clean up
        run: |
          rm -rf checkpoints
  # Two independent one-step smoke scenarios run sequentially in one job:
  #   1) GEO3K VLM training with FSDP + TRT-LLM rollout
  #   2) DAPO multiturn training with FP8 TRT-LLM rollout (Megatron overrides)
  # Each scenario removes its checkpoints before the next begins.
  e2e_grpo_trainer_fsdp-vlm:
    needs: setup
    runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"]
    timeout-minutes: 30 # Increase this timeout value as needed
    env:
      HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
      HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
      NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          fetch-depth: 0
      - name: Install the current repository
        run: |
          pip3 install -r requirements-test.txt
          pip3 install --no-deps -e .
      - name: Prepare GEO3K dataset
        run: |
          python3 examples/data_preprocess/geo3k.py --local_dataset_path ${HOME}/models/hf_data/hiyouga/geometry3k --local_save_dir ${PWD}/data/geo3k
      - name: Running GEO3K E2E training tests with FSDP on 8 L20 GPUs (VLM)
        run: |
          ray stop --force
          # NOTE(review): the qwen2_5_vl_3b script is driven with a
          # Qwen3-VL-2B-Instruct checkpoint -- presumably an intentional
          # smaller/newer model swap for CI; confirm against the script.
          DATADIR=${HOME}/data \
          bash examples/grpo_trainer/run_qwen2_5_vl_3b_trtllm.sh 2 \
            trainer.total_training_steps=1 \
            data.train_files="['${PWD}/data/geo3k/train.parquet']" \
            data.val_files="['${PWD}/data/geo3k/test.parquet']" \
            trainer.logger='["console"]' \
            actor_rollout_ref.model.path="${HOME}/models/Qwen/Qwen3-VL-2B-Instruct"
      - name: clean up
        run: |
          rm -rf checkpoints
      - name: Prepare DAPO-Math-17k and AIME-2024 datasets (data_preprocess)
        run: |
          python3 examples/data_preprocess/dapo_multiturn_w_tool.py --local_save_dir ${PWD}/data/dapo-math-17k
          python3 examples/data_preprocess/aime2024_multiturn_w_tool.py --local_save_dir ${PWD}/data/aime-2024
      - name: Running DAPO E2E with FP8 TRT-LLM rollout (Qwen3-0.6B)
        run: |
          ray stop --force
          # Parallelism layout consumed by the 30b DAPO script; the model is
          # overridden to Qwen3-0.6B below -- presumably downsized for CI.
          export INFER_TP=2 ACTOR_TP=2 ACTOR_PP=2 ACTOR_VPP=2 ACTOR_EP=1 ACTOR_CP=2 REF_TP=2 REF_PP=2 REF_VPP=2 REF_EP=1 REF_CP=2 GEN_MOE_TP=null GEN_MOE_EP=null
          export NNODES=1 GPUS_PER_NODE=8 TRTLLM_MOE_BACKEND=CUTLASS
          export DATA_DIR=${PWD} DAPO_MATH_TRAIN=${PWD}/data/dapo-math-17k/train.parquet AIME_VAL=${PWD}/data/aime-2024/train.parquet MODEL_PATH=${HOME}/models/Qwen/Qwen3-0.6B
          # Sequence lengths / batch sizes are shrunk for a single fast step.
          bash examples/grpo_trainer/run_qwen3-30b_dapo_megatron_fp8_trtllm.sh \
            reward_model.reward_kwargs.overlong_buffer_cfg.len=258 \
            reward_model.reward_kwargs.max_resp_len=512 \
            data.max_prompt_length=512 \
            data.max_response_length=512 \
            data.train_batch_size=32 \
            actor_rollout_ref.rollout.n=4 \
            actor_rollout_ref.rollout.max_num_seqs=16 \
            actor_rollout_ref.rollout.max_num_batched_tokens=1024 \
            actor_rollout_ref.rollout.max_model_len=1024 \
            actor_rollout_ref.actor.megatron.override_transformer_config.moe_grouped_gemm=False \
            actor_rollout_ref.actor.megatron.override_transformer_config.moe_permute_fusion=False \
            trainer.total_training_steps=1 \
            trainer.logger='["console"]'
      - name: clean up
        run: |
          rm -rf checkpoints
  # Tear down the dynamic runner created by `setup`.
  cleanup:
    runs-on: ubuntu-latest
    needs: [setup, trtllm_unit_tests, e2e_grpo_trainer_fsdp-qwen2, e2e_grpo_trainer_megatron-qwen2, e2e_grpo_trainer_fsdp-vlm]
    # Run even when upstream jobs fail or are cancelled, so the paid runner
    # is always destroyed.
    if: always()
    steps:
      - id: destroy-runner
        uses: volcengine/vemlp-github-runner@v1
        with:
          mode: "destroy"
          faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
          mlp-task-id: "${{ needs.setup.outputs.mlp-task-id }}"