Enable sdpa backends for server export in export.py #157
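For context on the change this CI exercises: the PR enables the scaled dot product attention (SDPA) backends when exporting for the server. Below is a minimal sketch of what that can look like, assuming PyTorch's torch.nn.attention.sdpa_kernel context manager is used; the export_with_sdpa helper, its arguments, and the exact backend list are illustrative only, not the actual export.py code.

import torch
from torch.nn.attention import SDPBackend, sdpa_kernel

def export_with_sdpa(model, example_args):
    # Hypothetical helper: allow the math, efficient, and flash SDPA backends
    # while torch.export traces the model, instead of pinning a single backend.
    backends = [
        SDPBackend.MATH,
        SDPBackend.EFFICIENT_ATTENTION,
        SDPBackend.FLASH_ATTENTION,
    ]
    with sdpa_kernel(backends), torch.no_grad():
        # example_args is the tuple of example inputs torch.export traces with.
        return torch.export.export(model, example_args)

The aarch64 workflow below exercises the documented CPU flows: README, quantization, GGUF, advanced, and evaluation.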

name: Run the README instructions - with stories - on Linux aarch64

on:
  pull_request:
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  test-readme-cpu:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    permissions:
      id-token: write
      contents: read
    with:
      runner: linux.arm64.2xlarge
      docker-image: "pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main"
      gpu-arch-type: cpu-aarch64
      timeout: 60
      script: |
        echo "::group::Print machine info"
        uname -a
        echo "::endgroup::"

        TORCHCHAT_DEVICE=cpu .ci/scripts/run-docs readme

        echo "::group::Completion"
        echo "tests complete"
        echo "*******************************************"
        echo "::endgroup::"

  test-quantization-cpu:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    permissions:
      id-token: write
      contents: read
    with:
      runner: linux.arm64.2xlarge
      docker-image: "pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main"
      gpu-arch-type: cpu-aarch64
      timeout: 60
      script: |
        echo "::group::Print machine info"
        uname -a
        echo "::endgroup::"

        TORCHCHAT_DEVICE=cpu .ci/scripts/run-docs quantization

  test-gguf-cpu:
    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
    permissions:
      id-token: write
      contents: read
    with:
      runner: linux.arm64.2xlarge
      docker-image: "pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main"
      gpu-arch-type: cpu-aarch64
      timeout: 60
      script: |
        echo "::group::Print machine info"
        uname -a
        echo "::endgroup::"

        TORCHCHAT_DEVICE=cpu .ci/scripts/run-docs gguf

        echo "::group::Completion"
        echo "tests complete"
        echo "*******************************************"
        echo "::endgroup::"

  test-advanced-cpu:
    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
    permissions:
      id-token: write
      contents: read
    with:
      runner: linux.arm64.2xlarge
      docker-image: "pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main"
      gpu-arch-type: cpu-aarch64
      timeout: 60
      script: |
        echo "::group::Print machine info"
        uname -a
        echo "::endgroup::"

        TORCHCHAT_DEVICE=cpu .ci/scripts/run-docs advanced

        echo "::group::Completion"
        echo "tests complete"
        echo "*******************************************"
        echo "::endgroup::"

  test-evaluation-cpu:
    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
    permissions:
      id-token: write
      contents: read
    with:
      runner: linux.arm64.2xlarge
      docker-image: "pytorch/manylinux2_28_aarch64-builder:cpu-aarch64-main"
      gpu-arch-type: cpu-aarch64
      timeout: 60
      script: |
        echo "::group::Print machine info"
        uname -a
        echo "::endgroup::"

        TORCHCHAT_DEVICE=cpu .ci/scripts/run-docs evaluation

        echo "::group::Completion"
        echo "tests complete"
        echo "*******************************************"
        echo "::endgroup::"