Skip to content

Add jetson build on CI #3524

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 39 commits into from
Jun 3, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
39 commits
Select commit Hold shift + click to select a range
a790126
add jetpack build in ci
lanluo-nvidia May 19, 2025
d790c1e
test
lanluo-nvidia May 19, 2025
97599c2
update jetpack doc
lanluo-nvidia May 20, 2025
9c2424f
test
lanluo-nvidia May 20, 2025
6de93e1
test
lanluo-nvidia May 20, 2025
a218320
test
lanluo-nvidia May 20, 2025
0595203
test
lanluo-nvidia May 20, 2025
3aa272b
test
lanluo-nvidia May 20, 2025
f56e554
test
lanluo-nvidia May 20, 2025
c0b73ce
test
lanluo-nvidia May 20, 2025
f215f8f
test
lanluo-nvidia May 20, 2025
613d650
test
lanluo-nvidia May 20, 2025
899e61c
test
lanluo-nvidia May 20, 2025
8b2d82c
test
lanluo-nvidia May 20, 2025
c2f566c
test
lanluo-nvidia May 20, 2025
9039b0b
test
lanluo-nvidia May 20, 2025
4f332d9
test
lanluo-nvidia May 20, 2025
2f8f0d1
test
lanluo-nvidia May 20, 2025
a661587
test
lanluo-nvidia May 20, 2025
115d70f
test
lanluo-nvidia May 20, 2025
a0ddd4d
test
lanluo-nvidia May 21, 2025
40626a7
doc change
lanluo-nvidia May 21, 2025
2188577
address comments from Naren
lanluo-nvidia May 22, 2025
690aeb9
add pyproject.toml change to support conditional dependencies for tor…
lanluo-nvidia May 26, 2025
cd9354a
Merge branch 'main' into lluo/jetson_build
lanluo-nvidia May 26, 2025
33d583c
test
lanluo-nvidia May 26, 2025
26d821a
add +jp62 for the wheel for jetpack
lanluo-nvidia May 30, 2025
79feec6
push to pytorch nightly
lanluo-nvidia May 30, 2025
b0baba1
test
lanluo-nvidia May 30, 2025
63f974c
Merge branch 'main' into lluo/jetson_build
lanluo-nvidia Jun 1, 2025
61b3480
modify jetpack.rst file accordinging to comments
lanluo-nvidia Jun 1, 2025
558222a
test
lanluo-nvidia Jun 2, 2025
a66c0dd
change torch torchvision url to use the index
lanluo-nvidia Jun 2, 2025
99d9d3d
test
lanluo-nvidia Jun 2, 2025
4d607cf
jetson build do not upload to pytorch index
lanluo-nvidia Jun 2, 2025
29378ae
infra: Add an OOB build option for jetpack
narendasan Jun 3, 2025
d8318d8
simplify the jetson build
lanluo-nvidia Jun 3, 2025
40c3c39
test
lanluo-nvidia Jun 3, 2025
4995048
lint fixing
lanluo-nvidia Jun 3, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 45 additions & 8 deletions .github/scripts/filter-matrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,13 @@
# currently we don't support python 3.13t due to tensorrt does not support 3.13t
disabled_python_versions: List[str] = ["3.13t"]

# jetpack 6.2 only officially supports python 3.10 and cu126
jetpack_python_versions: List[str] = ["3.10"]
jetpack_cuda_versions: List[str] = ["cu126"]

jetpack_container_image: str = "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
sbsa_container_image: str = "quay.io/pypa/manylinux_2_34_aarch64"


def main(args: list[str]) -> None:
parser = argparse.ArgumentParser()
Expand All @@ -19,8 +26,23 @@ def main(args: list[str]) -> None:
default="",
)

options = parser.parse_args(args)
parser.add_argument(
"--jetpack",
help="is jetpack",
type=str,
choices=["true", "false"],
default="false",
)

parser.add_argument(
"--limit-pr-builds",
help="If it is a PR build",
type=str,
choices=["true", "false"],
default=os.getenv("LIMIT_PR_BUILDS", "false"),
)

options = parser.parse_args(args)
if options.matrix == "":
raise Exception("--matrix needs to be provided")

Expand All @@ -30,14 +52,29 @@ def main(args: list[str]) -> None:
for item in includes:
if item["python_version"] in disabled_python_versions:
continue
if item["gpu_arch_type"] == "cuda-aarch64":
# pytorch image:pytorch/manylinuxaarch64-builder:cuda12.8 comes with glibc2.28
# however, TensorRT requires glibc2.31 on aarch64 platform
# TODO: in future, if pytorch supports aarch64 with glibc2.31, we should switch to use the pytorch image
item["container_image"] = "quay.io/pypa/manylinux_2_34_aarch64"
filtered_includes.append(item)
if options.jetpack == "true":
if options.limit_pr_builds == "true":
# limit pr build, matrix passed in from test-infra is cu128, python 3.9, change to cu126, python 3.10
item["desired_cuda"] = "cu126"
item["python_version"] = "3.10"
item["container_image"] = jetpack_container_image
filtered_includes.append(item)
else:
if (
item["python_version"] in jetpack_python_versions
and item["desired_cuda"] in jetpack_cuda_versions
):
item["container_image"] = jetpack_container_image
filtered_includes.append(item)
else:
filtered_includes.append(item)
if item["gpu_arch_type"] == "cuda-aarch64":
# pytorch image:pytorch/manylinuxaarch64-builder:cuda12.8 comes with glibc2.28
# however, TensorRT requires glibc2.31 on aarch64 platform
# TODO: in future, if pytorch supports aarch64 with glibc2.31, we should switch to use the pytorch image
item["container_image"] = sbsa_container_image
filtered_includes.append(item)
else:
filtered_includes.append(item)
filtered_matrix_dict = {}
filtered_matrix_dict["include"] = filtered_includes
print(json.dumps(filtered_matrix_dict))
Expand Down
86 changes: 86 additions & 0 deletions .github/workflows/build-test-linux-aarch64-jetpack.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
# CI workflow: build (and smoke-test) Linux aarch64 wheels targeting NVIDIA
# JetPack 6.x. Delegates the actual wheel build to the reusable
# build_wheels_linux_aarch64.yml workflow with is-jetpack: true.
name: Build and test Linux aarch64 wheels for Jetpack

on:
  pull_request:
  push:
    branches:
      - main
      - nightly
      - release/*
    tags:
      # NOTE: Binary build pipelines should only get triggered on release candidate builds
      # Release candidate tags look like: v1.11.0-rc1
      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
  workflow_dispatch:

jobs:
  # Ask test-infra for the standard linux-aarch64 wheel build matrix.
  generate-matrix:
    uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main
    with:
      package-type: wheel
      os: linux-aarch64
      test-infra-repository: pytorch/test-infra
      test-infra-ref: main
      with-rocm: false
      with-cpu: false

  # Narrow the generated matrix to the JetPack-supported combinations via
  # .github/scripts/filter-matrix.py --jetpack true (cu126 / python 3.10,
  # l4t-jetpack container image).
  filter-matrix:
    needs: [generate-matrix]
    outputs:
      matrix: ${{ steps.filter.outputs.matrix }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - uses: actions/checkout@v4
        with:
          repository: pytorch/tensorrt
      - name: Filter matrix
        id: filter
        env:
          # PR builds are limited unless the 'ciflow/binaries/all' label is set.
          LIMIT_PR_BUILDS: ${{ github.event_name == 'pull_request' && !contains( github.event.pull_request.labels.*.name, 'ciflow/binaries/all') }}
        run: |
          set -eou pipefail
          echo "LIMIT_PR_BUILDS=${LIMIT_PR_BUILDS}"
          MATRIX_BLOB=${{ toJSON(needs.generate-matrix.outputs.matrix) }}
          MATRIX_BLOB="$(python3 .github/scripts/filter-matrix.py --matrix "${MATRIX_BLOB}" --jetpack true)"
          echo "${MATRIX_BLOB}"
          echo "matrix=${MATRIX_BLOB}" >> "${GITHUB_OUTPUT}"

  build:
    needs: filter-matrix
    permissions:
      id-token: write
      contents: read
    strategy:
      fail-fast: false
      matrix:
        include:
          - repository: pytorch/tensorrt
            pre-script: packaging/pre_build_script.sh
            env-var-script: packaging/env_vars.txt
            post-script: packaging/post_build_script.sh
            smoke-test-script: packaging/smoke_test_script.sh
            package-name: torch_tensorrt
    name: Build torch-tensorrt whl package
    uses: ./.github/workflows/build_wheels_linux_aarch64.yml
    with:
      repository: ${{ matrix.repository }}
      ref: ""
      test-infra-repository: pytorch/test-infra
      test-infra-ref: main
      build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
      pre-script: ${{ matrix.pre-script }}
      env-var-script: ${{ matrix.env-var-script }}
      post-script: ${{ matrix.post-script }}
      package-name: ${{ matrix.package-name }}
      smoke-test-script: ${{ matrix.smoke-test-script }}
      trigger-event: ${{ github.event_name }}
      architecture: "aarch64"
      is-jetpack: true

# NOTE(review): `inputs.repository` / `inputs.job-name` are empty for the
# pull_request/push triggers here (no workflow_call inputs) — the group key
# still disambiguates per-PR/ref; confirm this matches the sibling workflows.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ inputs.repository }}-${{ github.event_name == 'workflow_dispatch' }}-${{ inputs.job-name }}
  cancel-in-progress: true
8 changes: 5 additions & 3 deletions .github/workflows/build-test-linux-aarch64.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ jobs:
filter-matrix:
needs: [generate-matrix]
outputs:
matrix: ${{ steps.generate.outputs.matrix }}
matrix: ${{ steps.filter.outputs.matrix }}
runs-on: ubuntu-latest
steps:
- uses: actions/setup-python@v5
Expand All @@ -36,8 +36,10 @@ jobs:
- uses: actions/checkout@v4
with:
repository: pytorch/tensorrt
- name: Generate matrix
id: generate
- name: Filter matrix
id: filter
env:
LIMIT_PR_BUILDS: ${{ github.event_name == 'pull_request' && !contains( github.event.pull_request.labels.*.name, 'ciflow/binaries/all') }}
run: |
set -eou pipefail
MATRIX_BLOB=${{ toJSON(needs.generate-matrix.outputs.matrix) }}
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/build-test-linux-x86_64.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: Build and test Linux wheels
name: Build and test Linux x86_64 wheels

on:
pull_request:
Expand Down
29 changes: 23 additions & 6 deletions .github/workflows/build_wheels_linux_aarch64.yml
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,11 @@ on:
required: false
default: "python -m build --wheel"
type: string
is-jetpack:
description: Set to true if the build is for jetpack
required: false
default: false
type: boolean
pip-install-torch-extra-args:
# NOTE: Why does this exist?
# Well setuptools / python packaging doesn't actually allow you to specify dependencies
Expand Down Expand Up @@ -128,7 +133,7 @@ jobs:
UPLOAD_TO_BASE_BUCKET: ${{ matrix.upload_to_base_bucket }}
ARCH: ${{ inputs.architecture }}
BUILD_TARGET: ${{ inputs.build-target }}
name: build-${{ matrix.build_name }}
name: build-wheel-${{ matrix.python_version }}-${{ matrix.desired_cuda }}-${{ matrix.gpu_arch_type }}
runs-on: ${{ matrix.validation_runner }}
environment: ${{(inputs.trigger-event == 'schedule' || (inputs.trigger-event == 'push' && (startsWith(github.event.ref, 'refs/heads/nightly') || startsWith(github.event.ref, 'refs/tags/v')))) && 'pytorchbot-env' || ''}}
container:
Expand Down Expand Up @@ -170,6 +175,11 @@ jobs:
# when using Python version, less than the conda latest
###############################################################################
echo 'Installing conda-forge'
if [[ ${{ inputs.is-jetpack }} == true ]]; then
# jetpack base image is ubuntu 22.04, does not have curl installed
apt-get update
apt-get install -y curl git
fi
curl -L -o /mambaforge.sh https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-aarch64.sh
chmod +x /mambaforge.sh
/mambaforge.sh -b -p /opt/conda
Expand All @@ -195,12 +205,11 @@ jobs:
python-version: ${{ env.PYTHON_VERSION }}
cuda-version: ${{ env.CU_VERSION }}
arch: ${{ env.ARCH }}

- name: Combine Env Var and Build Env Files
if: ${{ inputs.env-var-script != '' }}
working-directory: ${{ inputs.repository }}
run: |
set -euxo pipefail
set -x
cat "${{ inputs.env-var-script }}" >> "${BUILD_ENV_FILE}"
- name: Add XPU Env Vars in Build Env File
if: ${{ matrix.gpu_arch_type == 'xpu' }}
Expand All @@ -211,6 +220,7 @@ jobs:
echo "source /opt/intel/oneapi/pti/latest/env/vars.sh"
} >> "${BUILD_ENV_FILE}"
- name: Install torch dependency
if: ${{ inputs.is-jetpack == false }}
run: |
set -euxo pipefail
# shellcheck disable=SC1090
Expand Down Expand Up @@ -241,12 +251,17 @@ jobs:
working-directory: ${{ inputs.repository }}
shell: bash -l {0}
run: |
set -euxo pipefail
#set -euxo pipefail
set -x
source "${BUILD_ENV_FILE}"
export PYTORCH_VERSION="$(${CONDA_RUN} pip show torch | grep ^Version: | sed 's/Version: *//' | sed 's/+.\+//')"
${CONDA_RUN} python setup.py clean
echo "Successfully ran `python setup.py clean`"
${CONDA_RUN} python setup.py bdist_wheel
if [[ ${{ inputs.is-jetpack }} == false ]]; then
${CONDA_RUN} python setup.py bdist_wheel
else
${CONDA_RUN} python setup.py bdist_wheel --jetpack --plat-name=linux_tegra_aarch64
fi
- name: Repair Manylinux_2_28 Wheel
shell: bash -l {0}
env:
Expand All @@ -272,6 +287,7 @@ jobs:
script: ${{ inputs.post-script }}
- name: Smoke Test
shell: bash -l {0}
if: ${{ inputs.is-jetpack == false }}
env:
PACKAGE_NAME: ${{ inputs.package-name }}
SMOKE_TEST_SCRIPT: ${{ inputs.smoke-test-script }}
Expand Down Expand Up @@ -316,7 +332,8 @@ jobs:
upload:
needs: build
uses: pytorch/test-infra/.github/workflows/_binary_upload.yml@main
if: always()
# only upload to pytorch index for non jetpack builds
if: ${{ inputs.is-jetpack == false }}
with:
repository: ${{ inputs.repository }}
ref: ${{ inputs.ref }}
Expand Down
9 changes: 9 additions & 0 deletions MODULE.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,15 @@ http_archive(
urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-win-shared-with-deps-latest.zip"],
)

# Pinned PyTorch 2.7.0 wheel (CPython 3.10, linux_aarch64) for JetPack 6 /
# cu126, fetched from the Jetson AI Lab index and unpacked as an http_archive
# so Bazel targets can link against its bundled libtorch (referenced as
# @torch_l4t//:libtorch by the ":jetpack" select branches in core/*/BUILD).
# A wheel is a zip archive, hence type = "zip"; strip_prefix drops the
# top-level "torch" directory inside the wheel.
http_archive(
name = "torch_l4t",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "torch",
type = "zip",
urls = ["https://pypi.jetson-ai-lab.dev/jp6/cu126/+f/6ef/f643c0a7acda9/torch-2.7.0-cp310-cp310-linux_aarch64.whl"],
# Integrity pin for the exact wheel above; must be updated together with urls.
sha256 = "6eff643c0a7acda92734cc798338f733ff35c7df1a4434576f5ff7c66fc97319"
)

# Download these tarballs manually from the NVIDIA website
# Either place them in the distdir directory in third_party and use the --distdir flag
# or modify the urls to "file:///<PATH TO TARBALL>/<TARBALL NAME>.tar.gz
Expand Down
1 change: 1 addition & 0 deletions core/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
1 change: 1 addition & 0 deletions core/conversion/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
1 change: 1 addition & 0 deletions core/conversion/conversionctx/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
3 changes: 3 additions & 0 deletions core/conversion/converters/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand All @@ -81,6 +82,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down Expand Up @@ -143,6 +145,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
1 change: 1 addition & 0 deletions core/conversion/evaluators/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
1 change: 1 addition & 0 deletions core/conversion/tensorcontainer/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
1 change: 1 addition & 0 deletions core/conversion/var/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
1 change: 1 addition & 0 deletions core/ir/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
1 change: 1 addition & 0 deletions core/lowering/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ cc_library(
}) + select({
":windows": ["@libtorch_win//:libtorch"],
":use_torch_whl": ["@torch_whl//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
1 change: 1 addition & 0 deletions core/lowering/passes/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ cc_library(
] + select({
":use_torch_whl": ["@torch_whl//:libtorch"],
":windows": ["@libtorch_win//:libtorch"],
":jetpack": ["@torch_l4t//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
Expand Down
Loading
Loading