Skip to content

Commit 1c7ba89

Browse files
Merge branch 'volcengine:main' into main
2 parents 77fa0bc + 7df2afb commit 1c7ba89

File tree

167 files changed

+13342
-1932
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

167 files changed

+13342
-1932
lines changed
Lines changed: 149 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,149 @@
1+
# # Tests layout
2+
3+
# Each folder under tests/ corresponds to a test category for a sub-namespace in verl. For instance:
4+
# - `tests/trainer` for testing functionality related to `verl/trainer`
5+
# - `tests/models` for testing functionality related to `verl/models`
6+
# - ...
7+
8+
# There are a few folders with `special_` prefix, created for special purposes:
9+
# - `special_distributed`: unit tests that must run with multiple GPUs
10+
# - `special_e2e`: end-to-end tests with training/generation scripts
11+
# - `special_npu`: tests for NPUs
12+
# - `special_sanity`: a suite of quick sanity tests
13+
# - `special_standalone`: a set of tests that are designed to run in dedicated environments
14+
15+
# Accelerators for tests
16+
# - By default tests are run with GPU available, except for the ones under `special_npu`, and any test script whose name ends with `on_cpu.py`.
17+
# - Test scripts with the `on_cpu.py` name suffix are tested on CPU resources in a Linux environment.
18+
19+
# # Workflow layout
20+
21+
# All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs:
22+
# 1. A list of always triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `pre-commit.yml`, `doc.yml`
23+
# 2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml`
24+
# 3. End-to-end tests: `e2e_*.yml`
25+
# 4. Unit tests
26+
# - `cpu_unit_tests.yml`, run pytest on all scripts with file name pattern `tests/**/test_*_on_cpu.py`
27+
# - `gpu_unit_tests.yml`, run pytest on all test scripts whose file names do not end with the `on_cpu.py` suffix.
28+
# - Since cpu/gpu unit tests by default run all tests under `tests`, please make sure tests are manually excluded in them when
29+
# - new workflow yaml is added to `.github/workflows`
30+
# - new tests are added to workflow mentioned in 2.
31+
32+
33+
name: e2e_fully_async_policy
34+
35+
on:
36+
# Trigger the workflow on push or pull request,
37+
# but only for the main branch
38+
# For push, for now only anti-patterns are specified so it is more conservative
39+
# and achieves higher coverage.
40+
push:
41+
branches:
42+
- main
43+
- v0.*
44+
paths:
45+
- "**/*.py"
46+
- "!**/*.md"
47+
- "!**/*.sh"
48+
# Other entrypoints
49+
- "!examples/*trainer*"
50+
- "!tests/**"
51+
- "!verl/trainer/main_*.py"
52+
- "!verl/trainer/fsdp_sft_trainer.py"
53+
- "!recipe/**"
54+
- "recipe/fully_async_policy"
55+
pull_request:
56+
branches:
57+
- main
58+
- v0.*
59+
paths:
60+
- "**/*.py"
61+
- "!**/*.md"
62+
- "!**/*.sh"
63+
# Other entrypoints
64+
- "!examples/**"
65+
- "!tests/**"
66+
- "!verl/trainer/main_*.py"
67+
- "!verl/trainer/fsdp_sft_trainer.py"
68+
# Other recipes
69+
- "!recipe/**"
70+
# Home
71+
- "recipe/fully_async_policy"
72+
# Entrypoints
73+
- ".github/workflows/e2e_fully_async_policy.yml"
74+
- "examples/data_preprocess/gsm8k.py"
75+
- "tests/special_e2e/run_fully_async_policy.sh"
76+
77+
# Cancel jobs on the same ref if a new one is triggered
78+
concurrency:
79+
group: ${{ github.workflow }}-${{ github.ref }}
80+
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
81+
82+
# Declare permissions just read content.
83+
permissions:
84+
contents: read
85+
86+
env:
87+
IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:app-verl0.5-transformers4.55.4-vllm0.10.0-mcore0.13.0-te2.2"
88+
DYNAMIC_RUNNER_ENDPOINT: "https://sd10g3clalm04ug7alq90.apigateway-cn-beijing.volceapi.com/runner"
89+
TRANSFORMERS_VERSION: "4.56.2"
90+
91+
jobs:
92+
setup:
93+
if: github.repository_owner == 'volcengine'
94+
runs-on: ubuntu-latest
95+
outputs:
96+
runner-label: ${{ steps.create-runner.outputs.runner-label }}
97+
mlp-task-id: ${{ steps.create-runner.outputs.mlp-task-id }}
98+
steps:
99+
- uses: actions/checkout@v4
100+
- id: create-runner
101+
uses: volcengine/vemlp-github-runner@v1
102+
with:
103+
mode: "create"
104+
faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
105+
mlp-image: "${{ env.IMAGE }}"
106+
107+
# Test FSDP2 strategy
108+
e2e_fully_async_policy_fsdp2:
109+
needs: setup
110+
runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ]
111+
timeout-minutes: 10 # Increase timeout for async training
112+
env:
113+
HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
114+
HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
115+
NO_PROXY: "localhost,127.0.0.1,hf-mirror.com"
116+
HF_ENDPOINT: "https://hf-mirror.com"
117+
HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable
118+
ACTOR_STRATEGY: "fsdp2"
119+
steps:
120+
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
121+
with:
122+
fetch-depth: 0
123+
- name: Install the current repository
124+
run: |
125+
pip3 install --no-deps -e .[test,gpu]
126+
pip3 install transformers==$TRANSFORMERS_VERSION
127+
- name: Prepare GSM8K dataset
128+
run: |
129+
python3 examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/models/hf_data/gsm8k
130+
- name: Running the E2E test with fully_async_policy algorithm (FSDP2)
131+
run: |
132+
ray stop --force
133+
bash tests/special_e2e/run_fully_async_policy.sh
134+
135+
cleanup:
136+
runs-on: ubuntu-latest
137+
needs:
138+
[
139+
setup,
140+
e2e_fully_async_policy_fsdp2
141+
]
142+
if: always()
143+
steps:
144+
- id: destroy-runner
145+
uses: volcengine/vemlp-github-runner@v1
146+
with:
147+
mode: "destroy"
148+
faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
149+
mlp-task-id: "${{ needs.setup.outputs.mlp-task-id }}"

.github/workflows/gpu_unit_tests.yml

Lines changed: 43 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -70,19 +70,36 @@ concurrency:
7070
permissions:
7171
contents: read
7272

73+
env:
74+
IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:app-verl0.6-transformers4.56.1-sglang0.5.2-mcore0.13.0-te2.2"
75+
DYNAMIC_RUNNER_ENDPOINT: "https://sd10g3clalm04ug7alq90.apigateway-cn-beijing.volceapi.com/runner"
76+
7377
jobs:
78+
setup:
79+
if: github.repository_owner == 'volcengine'
80+
runs-on: ubuntu-latest
81+
outputs:
82+
runner-label: ${{ steps.create-runner.outputs.runner-label }}
83+
mlp-task-id: ${{ steps.create-runner.outputs.mlp-task-id }}
84+
steps:
85+
- uses: actions/checkout@v4
86+
- id: create-runner
87+
uses: volcengine/vemlp-github-runner@v1
88+
with:
89+
mode: "create"
90+
faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
91+
mlp-image: "${{ env.IMAGE }}"
92+
7493
gpu_unit_tests:
7594
if: github.repository_owner == 'volcengine'
76-
runs-on: [L20x8]
95+
needs: setup
96+
runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ]
7797
timeout-minutes: 60 # Increase this timeout value as needed
7898
env:
7999
HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
80100
HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
81101
NO_PROXY: "localhost,127.0.0.1"
82102
HF_HUB_ENABLE_HF_TRANSFER: 1
83-
container:
84-
image: verlai/verl:app-verl0.6-transformers4.56.1-sglang0.5.2-mcore0.13.0-te2.2
85-
options: --gpus all --shm-size=10g
86103
steps:
87104
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
88105
with:
@@ -93,11 +110,12 @@ jobs:
93110
pip3 install --no-deps -e .[test]
94111
pip3 install --upgrade "ray>=2.40.0"
95112
pip3 install cupy-cuda12x
96-
- name: Download Model to Use
97-
run: |
98-
huggingface-cli download Qwen/Qwen2.5-0.5B-Instruct
99-
huggingface-cli download Qwen/Qwen2.5-1.5B-Instruct
100-
export HF_HUB_OFFLINE=1
113+
pip3 install mlflow
114+
# - name: Download Model to Use
115+
# run: |
116+
# huggingface-cli download Qwen/Qwen2.5-0.5B-Instruct
117+
# huggingface-cli download Qwen/Qwen2.5-1.5B-Instruct
118+
# export HF_HUB_OFFLINE=1
101119
# Disable requests to avoid network errors
102120
- name: Run all GPU unit tests
103121
run: |
@@ -111,3 +129,19 @@ jobs:
111129
- name: Testing FSDP2 critic functionality
112130
run: |
113131
torchrun --standalone --nnodes=1 --nproc-per-node=2 tests/workers/critic/test_special_dp_critic.py
132+
133+
cleanup:
134+
runs-on: ubuntu-latest
135+
needs:
136+
[
137+
setup,
138+
gpu_unit_tests,
139+
]
140+
if: always()
141+
steps:
142+
- id: destroy-runner
143+
uses: volcengine/vemlp-github-runner@v1
144+
with:
145+
mode: "destroy"
146+
faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
147+
mlp-task-id: "${{ needs.setup.outputs.mlp-task-id }}"
Lines changed: 19 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
# - new tests are added to workflow mentioned in 2.
3131
# name: Check PR Title
3232

33-
name: reward_model
33+
name: reward_model_sglang
3434

3535
on:
3636
# Trigger the workflow on push or pull request,
@@ -46,24 +46,21 @@ on:
4646
paths:
4747
- "verl/**/*.py"
4848
# Entrypoints
49-
- ".github/workflows/reward_model.yml"
50-
- "tests/workers/reward_model/**"
51-
52-
# Declare permissions just read content.
53-
permissions:
54-
contents: read
49+
- ".github/workflows/reward_model_sglang.yml"
50+
- "tests/experimental/reward/**"
5551

5652
# Cancel jobs on the same ref if a new one is triggered
5753
concurrency:
5854
group: ${{ github.workflow }}-${{ github.ref }}
5955
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
6056

57+
# Declare permissions just read content.
58+
permissions:
59+
contents: read
6160

6261
env:
63-
IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:app-verl0.5-transformers4.55.4-sglang0.4.10.post2-mcore0.13.0-te2.2"
62+
IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:app-verl0.6-transformers4.56.1-sglang0.5.2-mcore0.13.0-te2.2"
6463
DYNAMIC_RUNNER_ENDPOINT: "https://sd10g3clalm04ug7alq90.apigateway-cn-beijing.volceapi.com/runner"
65-
TRANSFORMERS_VERSION: "4.56.2"
66-
6764

6865
jobs:
6966
setup:
@@ -81,10 +78,10 @@ jobs:
8178
faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}"
8279
mlp-image: "${{ env.IMAGE }}"
8380

84-
reward_model:
81+
reward_model_sglang:
8582
needs: setup
8683
runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ]
87-
timeout-minutes: 20 # Increase this timeout value as needed
84+
timeout-minutes: 30 # Increase this timeout value as needed
8885
env:
8986
HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
9087
HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
@@ -101,25 +98,26 @@ jobs:
10198
- name: Install the current repository
10299
run: |
103100
pip3 install -e .[test]
104-
# - name: Download model config files
105-
# run: |
106-
# hf download Skywork/Skywork-Reward-V2-Llama-3.2-1B --local-dir $HOME/models/Skywork/Skywork-Reward-V2-Llama-3.2-1B
107-
# hf download verl-team/GenRM-CI-Test-1.5B --local-dir $HOME/models/verl-team/GenRM-CI-Test-1.5B
108-
- name: Running discriminative reward model tests on 8 L20 GPUs
101+
pip3 install sglang-router==0.1.8
102+
- name: Prepare gsm8k dataset
103+
run: |
104+
ray stop --force
105+
python3 examples/data_preprocess/gsm8k.py --local_dir ${HOME}/data/gsm8k
106+
- name: Running sglang reward model tests on 8 L20 GPUs
109107
run: |
110108
unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY
111-
pytest -s -x tests/workers/reward_model/test_discriminative_reward_model.py
112-
- name: Running generative reward model tests on 8 L20 GPUs
109+
ROLLOUT_NAME=sglang pytest -s -x tests/experimental/reward/test_reward_model.py
110+
- name: Running sglang agent loop with reward manager tests on 8 L20 GPUs
113111
run: |
114112
unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY
115-
pytest -s -x tests/workers/reward_model/test_generative_reward_model.py
113+
ROLLOUT_NAME=sglang pytest -s -x tests/experimental/reward/test_agent_loop_reward_manager.py
116114
117115
cleanup:
118116
runs-on: ubuntu-latest
119117
needs:
120118
[
121119
setup,
122-
reward_model
120+
reward_model_sglang
123121
]
124122
if: always()
125123
steps:

0 commit comments

Comments
 (0)