[megatron] feat: use yaml to manage mbridge args #9540
Workflow file for this run
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| # # Tests layout | |
| # Each folder under tests/ corresponds to a test category for a sub-namespace in verl. For instance: | |
| # - `tests/trainer` for testing functionality related to `verl/trainer` | |
| # - `tests/models` for testing functionality related to `verl/models` | |
| # - ... | |
| # There are a few folders with `special_` prefix, created for special purposes: | |
| # - `special_distributed`: unit tests that must run with multiple GPUs | |
| # - `special_e2e`: end-to-end tests with training/generation scripts | |
| # - `special_npu`: tests for NPUs | |
| # - `special_sanity`: a suite of quick sanity tests | |
# -   `special_standalone`: a set of tests designed to run in dedicated environments
| # Accelerators for tests | |
| # - By default tests are run with GPU available, except for the ones under `special_npu`, and any test script whose name ends with `on_cpu.py`. | |
# - Test scripts whose names end with the `on_cpu.py` suffix are tested on CPU resources in a Linux environment.
| # # Workflow layout | |
| # All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs: | |
# 1. A list of always triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `pre-commit.yml`, `doc.yml`
| # 2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml` | |
| # 3. End-to-end tests: `e2e_*.yml` | |
| # 4. Unit tests | |
| # - `cpu_unit_tests.yml`, run pytest on all scripts with file name pattern `tests/**/test_*_on_cpu.py` | |
#   - `gpu_unit_tests.yml`, run pytest on all test scripts whose file names lack the `on_cpu.py` suffix.
| # - Since cpu/gpu unit tests by default runs all tests under `tests`, please make sure tests are manually excluded in them when | |
| # - new workflow yaml is added to `.github/workflows` | |
| # - new tests are added to workflow mentioned in 2. | |
# Workflow name shown in the GitHub Actions UI.
name: e2e_ascend
# Trigger the workflow on push or pull request,
# but only for the main branch
on:
  push:
    branches:
      - main
      - v0.*
  pull_request:
    branches:
      - main
    # Run only when files relevant to this Ascend NPU e2e suite change.
    paths:
      - ".github/workflows/e2e_ascend.yml"
      - "examples/data_preprocess/**"
      - "examples/grpo_trainer/**"
      - "examples/ppo_trainer/**"
      - "examples/sft/**"
      - "recipe/dapo/**"
      - "tests/special_npu/**"
      - "tests/special_sanity/check_device_api_usage.py"
      - "verl/**"
      - "pyproject.toml"
      - "requirements-npu.txt"
      - "setup.py"
# Cancel jobs on the same ref if a new one is triggered
# (in-progress runs are kept for main so its history stays complete).
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
# Least-privilege token: the jobs only check out the repository.
permissions:
  contents: read
jobs:
  # Non-RL scenarios: a PEFT SFT e2e run plus NPU profiling unit tests.
  non_rl_job:
    # Only run in the upstream repository, never in forks.
    if: github.repository_owner == 'volcengine'
    name: E2E Ascend testing for non-RL algorithm scenarios
    # Self-hosted aarch64 Ascend runner pool (2-NPU class).
    runs-on: linux-aarch64-a2-2
    timeout-minutes: 60
    container:
      # Prebuilt verl CI image (CANN 8.3.rc1, 910B, Ubuntu 22.04, Python 3.11).
      image: swr.ap-southeast-1.myhuaweicloud.com/base_image/ascend-ci/verl/verl:verl-8.3.rc1-910b-ubuntu22.04-py3.11-latest
      options: >-
        --shm-size 16g
    env:
      # Hugging Face mirror reachable from the CI network.
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0"  # This is more stable
    steps:
      # Point apt and pip at in-cluster caches to speed up and stabilize downloads.
      # NOTE(review): the sed expression below contains "[email protected]", which
      # looks like email-obfuscation residue from scraping this file; the original
      # apt-mirror rewrite pattern (host being substituted) needs to be restored
      # from the upstream workflow — confirm before relying on this line.
      - name: Config third-party dependency download cache
        run: |
          sed -Ei 's@(ports|archive)[email protected]:8081@g' /etc/apt/sources.list
          pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
          pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
      # Log toolkit version and NPU health for debugging failed runs.
      - name: Check npu and CANN info
        run: |
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
          npu-smi info
      - name: Check initial pip list from image
        run: |
          pip list
      - name: Checkout volcengine/verl repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          clean: true
      # Editable install so the checked-out sources are what gets tested.
      - name: Install the current repository
        run: |
          pip install -r requirements-npu.txt
          pip install -e .
      - name: Check final pip list
        run: |
          pip list
      - name: Preprocess gsm8k dataset
        run: |
          python examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/.cache/datasets/openai/gsm8k
      # Each training step stops any stale Ray cluster first and removes
      # checkpoints afterwards so later steps start from a clean state.
      - name: Running gsm8k e2e training tests with peft sft on ASCEND NPU
        run: |
          ray stop --force
          bash tests/special_npu/run_qwen2_5_05b_sft_peft_sp2.sh
          rm -rf $HOME/ckpts
      - name: Running NPU profiling unit tests
        run: |
          ray stop --force
          pytest -s -x tests/utils/test_special_mstx_profile.py
  # RL training scenarios for LLM (text-only) models: PPO, GRPO, DAPO on the
  # FSDP backend plus GRPO/DAPO on the MindSpeed (Megatron) backend.
  llm_rl_job:
    # Only run in the upstream repository, never in forks.
    if: github.repository_owner == 'volcengine'
    name: E2E Ascend testing for RL training scenarios of LLM models
    # Self-hosted aarch64 Ascend runner pool (8-NPU class).
    runs-on: linux-aarch64-a2-8
    timeout-minutes: 60
    container:
      # Prebuilt verl CI image (CANN 8.3.rc1, 910B, Ubuntu 22.04, Python 3.11).
      image: swr.ap-southeast-1.myhuaweicloud.com/base_image/ascend-ci/verl/verl:verl-8.3.rc1-910b-ubuntu22.04-py3.11-latest
      options: >-
        --shm-size 16g
    env:
      # Hugging Face mirror reachable from the CI network.
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0"  # This is more stable
    steps:
      # Point apt and pip at in-cluster caches to speed up and stabilize downloads.
      # NOTE(review): the sed expression below contains "[email protected]", which
      # looks like email-obfuscation residue from scraping this file; the original
      # apt-mirror rewrite pattern (host being substituted) needs to be restored
      # from the upstream workflow — confirm before relying on this line.
      - name: Config third-party dependency download cache
        run: |
          sed -Ei 's@(ports|archive)[email protected]:8081@g' /etc/apt/sources.list
          pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
          pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
      # Log toolkit version and NPU health for debugging failed runs.
      - name: Check npu and CANN info
        run: |
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
          npu-smi info
      - name: Check initial pip list from image
        run: |
          pip list
      - name: Checkout volcengine/verl repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          clean: true
      # Editable install so the checked-out sources are what gets tested.
      - name: Install the current repository
        run: |
          pip install -r requirements-npu.txt
          pip install -e .
      - name: Check final pip list
        run: |
          pip list
      - name: Preprocess gsm8k dataset
        run: |
          python examples/data_preprocess/gsm8k.py --local_dataset_path ${HOME}/.cache/datasets/openai/gsm8k
      # Each training step stops any stale Ray cluster first and removes
      # checkpoints afterwards so later steps start from a clean state.
      - name: Running gsm8k e2e training tests with PPO on ASCEND NPU (FSDP backend)
        run: |
          ray stop --force
          bash tests/special_npu/run_qwen3_06b_ppo.sh
          rm -rf $HOME/ckpts
      - name: Running gsm8k e2e training tests with GRPO on ASCEND NPU (FSDP backend)
        run: |
          ray stop --force
          bash tests/special_npu/run_qwen2_5_05b_grpo.sh
          rm -rf $HOME/ckpts
      - name: Running gsm8k e2e training tests with DAPO on ASCEND NPU (FSDP backend)
        run: |
          ray stop --force
          bash tests/special_npu/run_qwen2_5_05b_dapo.sh
          rm -rf $HOME/ckpts
      # MindSpeed backend needs the Megatron-LM checkout baked into the image
      # on PYTHONPATH; USE_DIST_CKPT presumably enables distributed-checkpoint
      # conversion (dist_ckpt output is removed afterwards) — see the scripts.
      - name: Running gsm8k e2e training tests with GRPO on ASCEND NPU (MindSpeed backend)
        run: |
          ray stop --force
          export PYTHONPATH=$PYTHONPATH:/Megatron-LM
          USE_DIST_CKPT=True bash tests/special_npu/run_qwen2_5_05b_grpo_mindspeed.sh
          rm -rf $HOME/dist_ckpt/qwen2_5_05b_grpo_mindspeed
          rm -rf $HOME/ckpts
      # MoE variant uses a dummy (randomly initialized, minimal-config) model
      # so a 30B-class architecture can be exercised on CI hardware.
      - name: Running gsm8k e2e training tests with DAPO on ASCEND NPU (MindSpeed backend, MoE Model)
        run: |
          ray stop --force
          export PYTHONPATH=$PYTHONPATH:/Megatron-LM
          USE_DIST_CKPT=True USE_DUMMY_MODEL=True DUMMY_MODEL_CONFIG_PATH=tests/special_e2e/ppo_trainer/expert_parallel/qwen3moe_minimal.json DUMMY_MODEL_PATH=$HOME/dist_ckpt/qwen3_30b_dapo_mindspeed bash tests/special_npu/run_qwen3_30b_dapo_mindspeed.sh
  # RL training scenario for VLM (vision-language) models: GRPO on geo3k.
  vlm_rl_job:
    # Only run in the upstream repository, never in forks.
    if: github.repository_owner == 'volcengine'
    name: E2E Ascend testing for RL training scenarios of VLM models
    # Self-hosted aarch64 Ascend runner pool (8-NPU class).
    runs-on: linux-aarch64-a2-8
    timeout-minutes: 60
    container:
      # Prebuilt verl CI image (CANN 8.3.rc1, 910B, Ubuntu 22.04, Python 3.11).
      image: swr.ap-southeast-1.myhuaweicloud.com/base_image/ascend-ci/verl/verl:verl-8.3.rc1-910b-ubuntu22.04-py3.11-latest
      options: >-
        --shm-size 16g
    env:
      # Hugging Face mirror reachable from the CI network.
      HF_ENDPOINT: "https://hf-mirror.com"
      HF_HUB_ENABLE_HF_TRANSFER: "0"  # This is more stable
    steps:
      # Point apt and pip at in-cluster caches to speed up and stabilize downloads.
      # NOTE(review): the sed expression below contains "[email protected]", which
      # looks like email-obfuscation residue from scraping this file; the original
      # apt-mirror rewrite pattern (host being substituted) needs to be restored
      # from the upstream workflow — confirm before relying on this line.
      - name: Config third-party dependency download cache
        run: |
          sed -Ei 's@(ports|archive)[email protected]:8081@g' /etc/apt/sources.list
          pip config set global.index-url http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple
          pip config set global.trusted-host cache-service.nginx-pypi-cache.svc.cluster.local
      # Log toolkit version and NPU health for debugging failed runs.
      - name: Check npu and CANN info
        run: |
          cat /usr/local/Ascend/ascend-toolkit/latest/"$(uname -i)"-linux/ascend_toolkit_install.info
          npu-smi info
      - name: Check initial pip list from image
        run: |
          pip list
      - name: Checkout volcengine/verl repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          clean: true
      # Editable install so the checked-out sources are what gets tested.
      - name: Install the current repository
        run: |
          pip install -r requirements-npu.txt
          pip install -e .
      - name: Check final pip list
        run: |
          pip list
      # geo3k: multimodal geometry dataset used for the VLM GRPO run.
      - name: Preprocess geo3k dataset
        run: |
          python examples/data_preprocess/geo3k.py --local_dataset_path ${HOME}/.cache/datasets/hiyouga/geometry3k
      # Stop any stale Ray cluster first; remove checkpoints afterwards so the
      # runner is left clean.
      - name: Running geo3k e2e training tests with GRPO on ASCEND NPU
        run: |
          ray stop --force
          bash tests/special_npu/run_qwen2_5_vl_3b_npu.sh
          rm -rf $HOME/ckpts