
Commit ca8c48d

Author: pytorchbot
Commit message: 2025-03-01 nightly release (c8155f5)
1 parent 6cb4d59 · commit ca8c48d

213 files changed: +3289, -703 lines


.github/scripts/generate-release-matrix.py (+5, -5)

@@ -5,16 +5,16 @@
 import sys
 
 RELEASE_CUDA_VERSION = {
-    "wheel": ["cu124"],
-    "tarball": ["cu124"],
+    "wheel": ["cu128"],
+    "tarball": ["cu128"],
 }
 RELEASE_PYTHON_VERSION = {
-    "wheel": ["3.8", "3.9", "3.10", "3.11", "3.12"],
-    "tarball": ["3.10"],
+    "wheel": ["3.9", "3.10", "3.11", "3.12"],
+    "tarball": ["3.11"],
 }
 
 CXX11_TARBALL_CONTAINER_IMAGE = {
-    "cu124": "pytorch/libtorch-cxx11-builder:cuda12.4-main",
+    "cu128": "pytorch/libtorch-cxx11-builder:cuda12.8-main",
 }
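
The two dicts above drive the release build matrix: wheels are now built against cu128 for Python 3.9 through 3.12, while the single libtorch tarball build moves to Python 3.11. A hedged sketch of the cross-product these values imply (the actual matrix-generation code lives further down in the script and is not part of this diff):

import itertools

# Illustrative expansion only; the real script emits a richer matrix entry
# per combination, these names simply mirror the dicts shown in the diff.
RELEASE_CUDA_VERSION = {"wheel": ["cu128"], "tarball": ["cu128"]}
RELEASE_PYTHON_VERSION = {
    "wheel": ["3.9", "3.10", "3.11", "3.12"],
    "tarball": ["3.11"],
}

for package_type in RELEASE_CUDA_VERSION:
    for python, cuda in itertools.product(
        RELEASE_PYTHON_VERSION[package_type], RELEASE_CUDA_VERSION[package_type]
    ):
        print(package_type, python, cuda)
# wheel 3.9 cu128 ... wheel 3.12 cu128, then tarball 3.11 cu128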

.github/scripts/generate-tensorrt-test-matrix.py (+13, -21)

@@ -11,31 +11,23 @@
 # channel: nightly if the future tensorRT version test workflow is triggered from the main branch or your personal branch
 # channel: test if the future tensorRT version test workflow is triggered from the release branch(release/2.5 etc....)
 CUDA_VERSIONS_DICT = {
-    "nightly": ["cu126"],
-    "test": ["cu124", "cu126"],
-    "release": ["cu124", "cu126"],
+    "nightly": ["cu128"],
+    "test": ["cu118", "cu126", "cu128"],
+    "release": ["cu118", "cu126", "cu128"],
 }
 
 # please update the python version you want to test with the future tensorRT version here
 # channel: nightly if the future tensorRT version test workflow is triggered from the main branch or your personal branch
 # channel: test if the future tensorRT version test workflow is triggered from the release branch(release/2.5 etc....)
 PYTHON_VERSIONS_DICT = {
-    "nightly": ["3.9"],
+    "nightly": ["3.11"],
     "test": ["3.9", "3.10", "3.11", "3.12"],
     "release": ["3.9", "3.10", "3.11", "3.12"],
 }
 
 # please update the future tensorRT version you want to test here
 TENSORRT_VERSIONS_DICT = {
     "windows": {
-        "10.4.0": {
-            "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.4.0/zip/TensorRT-10.4.0.26.Windows.win10.cuda-12.6.zip",
-            "strip_prefix": "TensorRT-10.4.0.26",
-        },
-        "10.5.0": {
-            "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.5.0/zip/TensorRT-10.5.0.18.Windows.win10.cuda-12.6.zip",
-            "strip_prefix": "TensorRT-10.5.0.18",
-        },
         "10.6.0": {
             "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.6.0/zip/TensorRT-10.6.0.26.Windows.win10.cuda-12.6.zip",
             "strip_prefix": "TensorRT-10.6.0.26",
@@ -44,16 +36,12 @@
             "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/zip/TensorRT-10.7.0.23.Windows.win10.cuda-12.6.zip",
             "strip_prefix": "TensorRT-10.7.0.23",
         },
+        "10.8.0": {
+            "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/zip/TensorRT-10.8.0.43.Windows.win10.cuda-12.8.zip",
+            "strip_prefix": "TensorRT-10.8.0.43",
+        },
     },
     "linux": {
-        "10.4.0": {
-            "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.4.0/tars/TensorRT-10.4.0.26.Linux.x86_64-gnu.cuda-12.6.tar.gz",
-            "strip_prefix": "TensorRT-10.4.0.26",
-        },
-        "10.5.0": {
-            "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.5.0/tars/TensorRT-10.5.0.18.Linux.x86_64-gnu.cuda-12.6.tar.gz",
-            "strip_prefix": "TensorRT-10.5.0.18",
-        },
         "10.6.0": {
             "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.6.0/tars/TensorRT-10.6.0.26.Linux.x86_64-gnu.cuda-12.6.tar.gz",
             "strip_prefix": "TensorRT-10.6.0.26",
@@ -62,6 +50,10 @@
             "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz",
             "strip_prefix": "TensorRT-10.7.0.23",
         },
+        "10.8.0": {
+            "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/tars/TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar.gz",
+            "strip_prefix": "TensorRT-10.8.0.43",
+        },
     },
 }
 
@@ -87,7 +79,7 @@ def check_file_availability(url: str) -> bool:
     # calculate the next minor version
     minor = int(list(TENSORRT_VERSIONS_DICT["linux"].keys())[-1].split(".")[1]) + 1
     trt_version = f"{major}.{minor}.0"
-    for patch in range(patch_from, 50):
+    for patch in range(patch_from, 80):
         for cuda_minor in range(4, 11):
             trt_linux_release_url_candidate = f"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/{trt_version}/tars/TensorRT-{trt_version}.{patch}.Linux.x86_64-gnu.cuda-12.{cuda_minor}.tar.gz"
             if check_file_availability(trt_linux_release_url_candidate):
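
The last hunk widens the window used to auto-discover a future TensorRT release: the script builds candidate tarball URLs for the next minor version and probes each one with check_file_availability, now scanning patch numbers up to 80 instead of 50. The helper's body is not part of this commit; a minimal sketch of what such a check might look like, assuming a plain HTTP HEAD request, is:

import urllib.error
import urllib.request


def check_file_availability(url: str) -> bool:
    """Hypothetical sketch: report whether a download URL exists.

    The real helper is defined earlier in generate-tensorrt-test-matrix.py and
    is untouched by this commit; this version simply issues a HEAD request and
    treats any 2xx response as "available".
    """
    try:
        request = urllib.request.Request(url, method="HEAD")
        with urllib.request.urlopen(request, timeout=10) as response:
            return 200 <= response.status < 300
    except urllib.error.URLError:
        return False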

.github/scripts/generate_binary_build_matrix.py (+5, -11)

@@ -24,23 +24,16 @@
     "release": ["3.9", "3.10", "3.11", "3.12"],
 }
 CUDA_ARCHES_DICT = {
-    "nightly": ["11.8", "12.4", "12.6"],
-    "test": ["11.8", "12.1", "12.4"],
-    "release": ["11.8", "12.1", "12.4"],
+    "nightly": ["11.8", "12.6", "12.8"],
+    "test": ["11.8", "12.6", "12.8"],
+    "release": ["11.8", "12.6", "12.8"],
 }
 ROCM_ARCHES_DICT = {
     "nightly": ["6.1", "6.2"],
     "test": ["6.1", "6.2"],
     "release": ["6.1", "6.2"],
 }
 
-CUDA_CUDDN_VERSIONS = {
-    "11.8": {"cuda": "11.8.0", "cudnn": "9"},
-    "12.1": {"cuda": "12.1.1", "cudnn": "9"},
-    "12.4": {"cuda": "12.4.1", "cudnn": "9"},
-    "12.6": {"cuda": "12.6.2", "cudnn": "9"},
-}
-
 PACKAGE_TYPES = ["wheel", "conda", "libtorch"]
 PRE_CXX11_ABI = "pre-cxx11"
 CXX11_ABI = "cxx11-abi"
@@ -151,6 +144,7 @@ def initialize_globals(channel: str, build_python_only: bool) -> None:
         "12.1": "pytorch/manylinux2_28-builder:cuda12.1",
         "12.4": "pytorch/manylinux2_28-builder:cuda12.4",
         "12.6": "pytorch/manylinux2_28-builder:cuda12.6",
+        "12.8": "pytorch/manylinux2_28-builder:cuda12.8",
         **{
             gpu_arch: f"pytorch/manylinux2_28-builder:rocm{gpu_arch}"
             for gpu_arch in ROCM_ARCHES
@@ -278,7 +272,7 @@ def get_wheel_install_command(
         return f"{WHL_INSTALL_BASE} {PACKAGES_TO_INSTALL_WHL} --index-url {get_base_download_url_for_repo('whl', channel, gpu_arch_type, desired_cuda)}_pypi_pkg" # noqa: E501
     else:
         raise ValueError(
-            "Split build is not supported for this configuration. It is only supported for CUDA 11.8, 12.4, 12.6 on Linux nightly builds." # noqa: E501
+            "Split build is not supported for this configuration. It is only supported for CUDA 11.8, 12.4, 12.6, 12.8 on Linux nightly builds." # noqa: E501
         )
     if (
         channel == RELEASE
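
For context on the last hunk: get_wheel_install_command builds a pip command that points at a CUDA-specific wheel index via get_base_download_url_for_repo, which this commit does not modify. Assuming the standard download.pytorch.org layout, the nightly index for the newly added cu128 arch would follow a pattern like the sketch below (variable names are illustrative, not the script's):

# Illustrative only: the script prepends its own install command and package
# list (WHL_INSTALL_BASE, PACKAGES_TO_INSTALL_WHL) to an index URL like this.
channel = "nightly"
desired_cuda = "cu128"  # CUDA arch added in this commit
index_url = f"https://download.pytorch.org/whl/{channel}/{desired_cuda}"
print(index_url)  # https://download.pytorch.org/whl/nightly/cu128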

.github/workflows/build-test-linux.yml (+2, -1)

@@ -23,6 +23,7 @@ jobs:
       test-infra-ref: main
       with-rocm: false
       with-cpu: false
+      python-versions: '["3.11"]'
 
   filter-matrix:
     needs: [generate-matrix]
@@ -32,7 +33,7 @@
     steps:
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
       - uses: actions/checkout@v4
         with:
          repository: pytorch/tensorrt

.github/workflows/build-test-tensorrt-linux.yml (+2, -1)

@@ -20,6 +20,7 @@ jobs:
       test-infra-ref: main
       with-rocm: false
       with-cpu: false
+      python-versions: '["3.11"]'
 
   generate-tensorrt-matrix:
     needs: [generate-matrix]
@@ -29,7 +30,7 @@
     steps:
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
       - uses: actions/checkout@v4
         with:
          repository: pytorch/tensorrt

.github/workflows/build-test-tensorrt-windows.yml (+2, -1)

@@ -20,6 +20,7 @@ jobs:
       test-infra-ref: main
       with-rocm: false
       with-cpu: false
+      python-versions: '["3.11"]'
 
   generate-tensorrt-matrix:
     needs: [generate-matrix]
@@ -29,7 +30,7 @@
     steps:
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
       - uses: actions/checkout@v4
         with:
          repository: pytorch/tensorrt

.github/workflows/build-test-windows.yml (+1, -0)

@@ -23,6 +23,7 @@ jobs:
       test-infra-ref: main
       with-rocm: false
       with-cpu: false
+      python-versions: '["3.11"]'
 
   substitute-runner:
     needs: generate-matrix

.github/workflows/docgen.yml (+4, -4)

@@ -14,12 +14,12 @@ jobs:
     if: ${{ ! contains(github.actor, 'pytorchbot') }}
     environment: pytorchbot-env
     container:
-      image: docker.io/pytorch/manylinux2_28-builder:cuda12.6
+      image: docker.io/pytorch/manylinux2_28-builder:cuda12.8
       options: --gpus all
     env:
-      CUDA_HOME: /usr/local/cuda-12.6
-      VERSION_SUFFIX: cu126
-      CU_VERSION: cu126
+      CUDA_HOME: /usr/local/cuda-12.8
+      VERSION_SUFFIX: cu128
+      CU_VERSION: cu128
       CHANNEL: nightly
       CI_BUILD: 1
     steps:

.github/workflows/generate_binary_build_matrix.yml (+1, -1)

@@ -72,7 +72,7 @@ jobs:
     steps:
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
       - name: Checkout test-infra repository
         uses: actions/checkout@v4
         with:

.github/workflows/release-linux.yml (+3, -2)

@@ -24,6 +24,7 @@ jobs:
       test-infra-ref: main
       with-rocm: false
       with-cpu: false
+      python-versions: '["3.11"]'
 
   generate-release-tarball-matrix:
     needs: [generate-matrix]
@@ -33,7 +34,7 @@
     steps:
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
       - uses: actions/checkout@v4
         with:
          repository: pytorch/tensorrt
@@ -83,7 +84,7 @@
     steps:
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
       - uses: actions/checkout@v4
         with:
          repository: pytorch/tensorrt

.github/workflows/release-wheel-linux.yml (+1, -1)

@@ -241,7 +241,7 @@ jobs:
           name: ${{ env.ARTIFACT_NAME }}
           path: ${{ inputs.repository }}/release/wheel/
       - name: Upload pre-cxx11 tarball to GitHub
-        if: ${{ inputs.cxx11-tarball-release != 'true' && env.PYTHON_VERSION == '3.10' }}
+        if: ${{ inputs.cxx11-tarball-release != 'true' && env.PYTHON_VERSION == '3.11' }}
         continue-on-error: true
         uses: actions/upload-artifact@v4
         with:

.github/workflows/release-windows.yml (+2, -1)

@@ -24,6 +24,7 @@ jobs:
       test-infra-ref: main
       with-rocm: false
       with-cpu: false
+      python-versions: '["3.11"]'
 
   generate-release-matrix:
     needs: [generate-matrix]
@@ -33,7 +34,7 @@
     steps:
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
      - uses: actions/checkout@v4
         with:
          repository: pytorch/tensorrt

MODULE.bazel (+9, -9)

@@ -36,13 +36,13 @@ new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.
 new_local_repository(
     name = "cuda",
     build_file = "@//third_party/cuda:BUILD",
-    path = "/usr/local/cuda-12.6/",
+    path = "/usr/local/cuda-12.8/",
 )
 
 new_local_repository(
     name = "cuda_win",
     build_file = "@//third_party/cuda:BUILD",
-    path = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.6/",
+    path = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.8/",
 )
 
 http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
@@ -55,21 +55,21 @@ http_archive(
     name = "libtorch",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu126/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
+    urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
 )
 
 http_archive(
     name = "libtorch_pre_cxx11_abi",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu126/libtorch-shared-with-deps-latest.zip"],
+    urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-shared-with-deps-latest.zip"],
 )
 
 http_archive(
     name = "libtorch_win",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/nightly/cu126/libtorch-win-shared-with-deps-latest.zip"],
+    urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-win-shared-with-deps-latest.zip"],
 )
 
 # Download these tarballs manually from the NVIDIA website
@@ -79,18 +79,18 @@ http_archive(
 http_archive(
     name = "tensorrt",
     build_file = "@//third_party/tensorrt/archive:BUILD",
-    strip_prefix = "TensorRT-10.7.0.23",
+    strip_prefix = "TensorRT-10.8.0.43",
     urls = [
-        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz",
+        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/tars/TensorRT-10.8.0.43.Linux.x86_64-gnu.cuda-12.8.tar.gz",
     ],
 )
 
 http_archive(
     name = "tensorrt_win",
     build_file = "@//third_party/tensorrt/archive:BUILD",
-    strip_prefix = "TensorRT-10.7.0.23",
+    strip_prefix = "TensorRT-10.8.0.43",
     urls = [
-        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/zip/TensorRT-10.7.0.23.Windows.win10.cuda-12.6.zip",
+        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.8.0/zip/TensorRT-10.8.0.43.Windows.win10.cuda-12.8.zip",
     ],
 )

README.md (+4, -4)

@@ -7,7 +7,7 @@ Torch-TensorRT
 [![Documentation](https://img.shields.io/badge/docs-master-brightgreen)](https://nvidia.github.io/Torch-TensorRT/)
 [![pytorch](https://img.shields.io/badge/PyTorch-2.4-green)](https://www.python.org/downloads/release/python-31013/)
 [![cuda](https://img.shields.io/badge/CUDA-12.4-green)](https://developer.nvidia.com/cuda-downloads)
-[![trt](https://img.shields.io/badge/TensorRT-10.7.0-green)](https://github.com/nvidia/tensorrt-llm)
+[![trt](https://img.shields.io/badge/TensorRT-10.8.0-green)](https://github.com/nvidia/tensorrt-llm)
 [![license](https://img.shields.io/badge/license-BSD--3--Clause-blue)](./LICENSE)
 [![linux_tests](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux.yml/badge.svg)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux.yml)
 [![windows_tests](https://github.com/pytorch/TensorRT/actions/workflows/build-test-windows.yml/badge.svg)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-windows.yml)
@@ -117,9 +117,9 @@ auto results = trt_mod.forward({input_tensor});
 These are the following dependencies used to verify the testcases. Torch-TensorRT can work with other versions, but the tests are not guaranteed to pass.
 
 - Bazel 6.3.2
-- Libtorch 2.5.0.dev (latest nightly) (built with CUDA 12.4)
-- CUDA 12.4
-- TensorRT 10.7.0.23
+- Libtorch 2.7.0.dev (latest nightly) (built with CUDA 12.8)
+- CUDA 12.8
+- TensorRT 10.8.0.43
 
 ## Deprecation Policy
dev_dep_versions.yml (+2, -2)

@@ -1,2 +1,2 @@
-__cuda_version__: "12.6"
-__tensorrt_version__: "10.7.0.post1"
+__cuda_version__: "12.8"
+__tensorrt_version__: "10.8.0"

docker/Dockerfile (+4, -3)

@@ -1,15 +1,16 @@
 # syntax=docker/dockerfile:1
 
 # Base image starts with CUDA
-ARG BASE_IMG=nvidia/cuda:12.4.1-devel-ubuntu22.04
+#TODO: cuda version
+ARG BASE_IMG=nvidia/cuda:12.8.0-devel-ubuntu22.04
 FROM ${BASE_IMG} as base
-ENV BASE_IMG=nvidia/cuda:12.4.1-devel-ubuntu22.04
+ENV BASE_IMG=nvidia/cuda:12.8.0-devel-ubuntu22.04
 
 ARG TENSORRT_VERSION
 ENV TENSORRT_VERSION=${TENSORRT_VERSION}
 RUN test -n "$TENSORRT_VERSION" || (echo "No tensorrt version specified, please use --build-arg TENSORRT_VERSION=x.y to specify a version." && exit 1)
 
-ARG PYTHON_VERSION=3.10
+ARG PYTHON_VERSION=3.11
 ENV PYTHON_VERSION=${PYTHON_VERSION}
 
 ARG USE_PRE_CXX11_ABI
