TorchFT 8 GPU Integration Test #732
```yaml
name: TorchFT 8 GPU Integration Test

on:
  push:
    branches: [ main ]
    tags:
      - ciflow/8gpu/*
    paths:
      - 'torchtitan/components/ft.py'
      - '.github/workflows/integration_test_8gpu_torchft.yaml'
  pull_request:
    paths:
      - 'torchtitan/components/ft.py'
      - '.github/workflows/integration_test_8gpu_torchft.yaml'
  schedule:
    # Runs every 6 hours.
    - cron: '0 */6 * * *'

concurrency:
  group: unit-test${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_number || github.ref }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash -l -eo pipefail {0}

permissions:
  id-token: write
  contents: read

jobs:
  # Step 1: dynamically compute the build matrix based on the trigger conditions.
  set-matrix:
    uses: ./.github/workflows/set-matrix.yaml

  # Step 2: use the dynamic matrix in the build-test job.
  build-test:
    needs: set-matrix
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    strategy:
      fail-fast: false
      matrix: ${{ fromJSON(needs.set-matrix.outputs.matrix) }}
    with:
      runner: ${{ matrix.runner }}
      gpu-arch-type: ${{ matrix.gpu-arch-type }}
      gpu-arch-version: ${{ matrix.gpu-arch-version }}
      docker-image: ${{ matrix.docker-image }}
      repository: pytorch/torchtitan
      upload-artifact: outputs
      timeout: 45
      script: |
        set -eux

        # The generic Linux job uses the base conda env, not the one set up by
        # the Docker image, so activate the most recently created env instead.
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        conda activate "${CONDA_ENV}"

        # Log the CUDA driver version for debugging.
        DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader | head -n 1 || true)
        echo "CUDA driver version: ${DRIVER_VERSION}"

        pip config --user set global.progress_bar off

        # Install torchft, plus nightly torch and torchao from the matrix index URL.
        python -m pip install torchft-nightly
        python -m pip install --force-reinstall --pre torch --index-url ${{ matrix.index-url }}
        USE_CPP=0 python -m pip install --pre torchao --index-url ${{ matrix.index-url }}

        sudo mkdir -p "$RUNNER_TEMP/artifacts-to-be-uploaded"
        sudo chown -R $(id -u):$(id -g) "$RUNNER_TEMP/artifacts-to-be-uploaded"

        # Start the torchft lighthouse (the quorum coordinator) in the background.
        echo "torchft_lighthouse"
        RUST_BACKTRACE=1 torchft_lighthouse --min_replicas 1 --quorum_tick_ms 100 --join_timeout_ms 10000 > /dev/null 2>&1 &

        # Run the fault-tolerance integration test on all 8 GPUs.
        # Known issue: CUDA failure 217, "peer access is not supported between
        # these two devices".
        echo "ft_integration_test"
        python -m tests.integration_tests.ft $RUNNER_TEMP/artifacts-to-be-uploaded --ngpu 8
        # pkill -9 torchft_lighthouse

        # Remove checkpoints so they are not included in the uploaded artifacts.
        rm -rf $RUNNER_TEMP/artifacts-to-be-uploaded/*/checkpoint
```
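Besides pushes to main, the path-filtered pull_request trigger, and the 6-hour schedule, the workflow fires when a tag matching ciflow/8gpu/* is pushed. A minimal sketch of triggering it by hand, assuming you have push rights on the repository; the "1234" below is a placeholder, not a real PR number:

```bash
# Hypothetical example: pushing a ciflow tag matches the on.push.tags filter
# and starts the workflow. Replace 1234 with the change you want tested.
git tag ciflow/8gpu/1234
git push origin ciflow/8gpu/1234
```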
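The build matrix comes from the repository-local reusable workflow .github/workflows/set-matrix.yaml, which is not shown here. The sketch below is a hypothetical illustration of the shape such a workflow_call workflow needs: build-test expects a single JSON output named matrix whose entries carry runner, gpu-arch-type, gpu-arch-version, docker-image, and index-url keys. All concrete values are made up for the example:

```yaml
# Hypothetical sketch only -- not the actual set-matrix.yaml.
name: Set Matrix

on:
  workflow_call:
    outputs:
      matrix:
        description: "JSON matrix consumed by build-test"
        value: ${{ jobs.set-matrix.outputs.matrix }}

jobs:
  set-matrix:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.gen.outputs.matrix }}
    steps:
      - id: gen
        run: |
          # The keys must match the matrix.* references in build-test.
          echo 'matrix={"include":[{"runner":"linux.g5.48xlarge.nvidia.gpu","gpu-arch-type":"cuda","gpu-arch-version":"12.6","docker-image":"torchtitan-ubuntu-20.04-clang12","index-url":"https://download.pytorch.org/whl/nightly/cu126"}]}' >> "$GITHUB_OUTPUT"
```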
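The script block can also be approximated locally on an 8-GPU machine. A rough sketch, assuming torchft (which provides the torchft_lighthouse binary) and torchtitan's test dependencies are already installed, and run from the torchtitan repository root; the output directory is arbitrary:

```bash
set -eux
OUT_DIR="$(mktemp -d)"

# Start the lighthouse in the background, mirroring the CI step.
RUST_BACKTRACE=1 torchft_lighthouse \
    --min_replicas 1 --quorum_tick_ms 100 --join_timeout_ms 10000 \
    > /dev/null 2>&1 &
LIGHTHOUSE_PID=$!

# Run the 8-GPU fault-tolerance integration test.
python -m tests.integration_tests.ft "$OUT_DIR" --ngpu 8

# Unlike CI, where job teardown reclaims everything, clean up explicitly.
kill "$LIGHTHOUSE_PID" || true
rm -rf "$OUT_DIR"
```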