8 GPU Model Tests #1924

name: 8 GPU Model Tests

on:
  push:
    branches: [ main ]
    tags:
      - ciflow/8gpu/*
    paths-ignore:
      - 'torchtitan/experiments/**'
  pull_request:
    branches: [ main ]
    paths-ignore:
      - 'torchtitan/experiments/**'
  schedule:
    # Runs every 6 hours
    - cron: '0 */6 * * *'
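
# When the ref is main (pushes and scheduled runs), the group includes the run number,
# so those runs are never cancelled; PR and ciflow tag runs are grouped by ref, so a
# newer run cancels an in-progress one.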
concurrency:
  group: unit-test${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_number || github.ref }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash -l -eo pipefail {0}

permissions:
  id-token: write
  contents: read

jobs:
  # Step 1: Dynamically compute the matrix based on conditions
  # (a hypothetical sketch of set-matrix.yaml follows after the workflow below)
  set-matrix:
    uses: ./.github/workflows/set-matrix.yaml

  # Step 2: Use the dynamic matrix in the build-test job
  build-test:
    needs: set-matrix
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    strategy:
      fail-fast: false
      matrix: ${{ fromJSON(needs.set-matrix.outputs.matrix) }}
    with:
      runner: ${{ matrix.runner }}
      gpu-arch-type: ${{ matrix.gpu-arch-type }}
      gpu-arch-version: ${{ matrix.gpu-arch-version }}
      docker-image: ${{ matrix.docker-image }}
      repository: pytorch/torchtitan
      upload-artifact: outputs
      timeout: 45
      script: |
        set -eux
        # The generic Linux job chooses to use base env, not the one set up by the image
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        conda activate "${CONDA_ENV}"
        # Log CUDA driver version for debugging.
        DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader | head -n 1 || true)
        echo "CUDA driver version: ${DRIVER_VERSION}"
        pip config --user set global.progress_bar off
        python -m pip install --force-reinstall --pre torch --index-url ${{ matrix.index-url }}
        USE_CPP=0 python -m pip install --pre torchao --index-url ${{ matrix.index-url }}
        sudo mkdir -p "$RUNNER_TEMP/artifacts-to-be-uploaded"
        sudo chown -R $(id -u):$(id -g) "$RUNNER_TEMP/artifacts-to-be-uploaded"
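
        # Run the 8-GPU model integration tests and the FLUX integration tests;
        # outputs are written into the artifact directory.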
        python -m tests.integration_tests.run_tests --gpu_arch_type ${{ matrix.gpu-arch-type }} --test_suite models $RUNNER_TEMP/artifacts-to-be-uploaded --ngpu 8
        python -m tests.integration_tests.flux $RUNNER_TEMP/artifacts-to-be-uploaded/flux --ngpu 8
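
        # Remove checkpoints and FLUX inference results so they are not included in the uploaded artifacts.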
        rm -rf $RUNNER_TEMP/artifacts-to-be-uploaded/*/checkpoint
        rm -rf $RUNNER_TEMP/artifacts-to-be-uploaded/flux/*/inference_results/
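
The set-matrix.yaml reusable workflow called by the set-matrix job above is not shown in this change. As a rough sketch of the pattern only, assuming it exposes a single matrix output that build-test consumes via fromJSON (the job, step, and placeholder values below are hypothetical; the field names mirror the matrix.* references above), it could look like this:

# Hypothetical sketch of .github/workflows/set-matrix.yaml (not the actual file).
name: Set matrix
on:
  workflow_call:
    outputs:
      matrix:
        description: "JSON matrix consumed by the build-test job"
        value: ${{ jobs.set-matrix.outputs.matrix }}
jobs:
  set-matrix:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.pick.outputs.matrix }}
    steps:
      - id: pick
        run: |
          # Keys match the matrix.* fields used by build-test; values are placeholders.
          echo 'matrix={"include":[{"runner":"<8-gpu-runner-label>","gpu-arch-type":"cuda","gpu-arch-version":"<cuda-version>","docker-image":"<docker-image>","index-url":"<nightly-wheel-index-url>"}]}' >> "$GITHUB_OUTPUT"

With this shape, fromJSON(needs.set-matrix.outputs.matrix) expands into one build-test job per entry in the include list, and each matrix.* field feeds the corresponding input of the reusable linux job.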