
Commit ceb9aa5

Deprecate torchscript frontend (#3373)
1 parent 54e36db commit ceb9aa5

File tree

5 files changed (+33 -6):

.pre-commit-config.yaml
cpp/include/torch_tensorrt/macros.h
cpp/include/torch_tensorrt/ptq.h
py/torch_tensorrt/ts/_compiler.py
py/torch_tensorrt/ts/ptq.py

.pre-commit-config.yaml (+3 -3)
@@ -1,7 +1,7 @@
 exclude: ^.github/actions/assigner/dist
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v5.0.0
     hooks:
       - id: check-yaml
       - id: trailing-whitespace
@@ -32,7 +32,7 @@ repos:
     hooks:
       - id: validate-pyproject
   - repo: https://github.com/pycqa/isort
-    rev: 5.13.2
+    rev: 6.0.0
     hooks:
       - id: isort
         name: isort (python)
@@ -52,7 +52,7 @@ repos:
       - id: black
         exclude: ^examples/custom_converters/elu_converter/setup.py|^docs
   - repo: https://github.com/crate-ci/typos
-    rev: v1.22.9
+    rev: typos-dict-v0.12.4
     hooks:
       - id: typos
   - repo: https://github.com/astral-sh/uv-pre-commit

cpp/include/torch_tensorrt/macros.h (+3)
@@ -30,6 +30,9 @@
   STR(TORCH_TENSORRT_MAJOR_VERSION) \
   "." STR(TORCH_TENSORRT_MINOR_VERSION) "." STR(TORCH_TENSORRT_PATCH_VERSION)
 
+#define TORCH_TENSORRT_PTQ_DEPRECATION \
+  [[deprecated( \
+      "Int8 PTQ Calibrator has been deprecated by TensorRT, please plan on porting to a NVIDIA Model Optimizer Toolkit based workflow. See: https://pytorch.org/TensorRT/tutorials/_rendered_examples/dynamo/vgg16_ptq.html for more details")]]
 // Setup namespace aliases for ease of use
 namespace torch_tensorrt {
 namespace torchscript {}

cpp/include/torch_tensorrt/ptq.h (+3 -3)
@@ -308,9 +308,8 @@ class Int8CacheCalibrator : Algorithm {
  * @param use_cache: bool - use calibration cache
  * @return Int8Calibrator<Algorithm, DataLoader>
  */
-
 template <typename Algorithm = nvinfer1::IInt8EntropyCalibrator2, typename DataLoader>
-inline Int8Calibrator<Algorithm, DataLoader> make_int8_calibrator(
+TORCH_TENSORRT_PTQ_DEPRECATION inline Int8Calibrator<Algorithm, DataLoader> make_int8_calibrator(
     DataLoader dataloader,
     const std::string& cache_file_path,
     bool use_cache) {
@@ -344,7 +343,8 @@ inline Int8Calibrator<Algorithm, DataLoader> make_int8_calibrator(
  * @return Int8CacheCalibrator<Algorithm>
  */
 template <typename Algorithm = nvinfer1::IInt8EntropyCalibrator2>
-inline Int8CacheCalibrator<Algorithm> make_int8_cache_calibrator(const std::string& cache_file_path) {
+TORCH_TENSORRT_PTQ_DEPRECATION inline Int8CacheCalibrator<Algorithm> make_int8_cache_calibrator(
+    const std::string& cache_file_path) {
   return Int8CacheCalibrator<Algorithm>(cache_file_path);
 }

py/torch_tensorrt/ts/_compiler.py (+13)
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import warnings
 from typing import Any, List, Optional, Sequence, Set, Tuple
 
 import torch
@@ -102,6 +103,12 @@ def compile(
         torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT
     """
 
+    warnings.warn(
+        'The torchscript frontend for Torch-TensorRT has been deprecated, please plan on porting to the dynamo frontend (torch_tensorrt.compile(..., ir="dynamo"). Torchscript will continue to be a supported deployment format via post compilation torchscript tracing, see: https://pytorch.org/TensorRT/user_guide/saving_models.html for more details',
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
     input_list = list(inputs) if inputs is not None else []
     enabled_precisions_set = (
         enabled_precisions if enabled_precisions is not None else set()
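For reference, the warning added above points users at the dynamo frontend. A minimal migration sketch, assuming a toy module, example inputs, and a CUDA device; only ir="dynamo" and the post-compilation tracing step come from the deprecation message and the linked saving_models guide, the rest (model, shapes, file name) is illustrative:

import torch
import torch_tensorrt

# Toy module and example inputs (placeholders, not part of this commit)
model = torch.nn.Sequential(torch.nn.Linear(8, 4), torch.nn.ReLU()).eval().cuda()
inputs = [torch.randn(2, 8).cuda()]

# Deprecated path (now emits DeprecationWarning):
# trt_mod = torch_tensorrt.compile(model, ir="ts", inputs=inputs)

# Path suggested by the warning message:
trt_mod = torch_tensorrt.compile(model, ir="dynamo", inputs=inputs)

# TorchScript remains a supported deployment format via post-compilation tracing:
ts_mod = torch.jit.trace(trt_mod, tuple(inputs))
torch.jit.save(ts_mod, "trt_model.ts")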
@@ -240,6 +247,12 @@ def convert_method_to_trt_engine(
     Returns:
         bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs
     """
+    warnings.warn(
+        'The torchscript frontend for Torch-TensorRT has been deprecated, please plan on porting to the dynamo frontend (torch_tensorrt.convert_method_to_trt_engine(..., ir="dynamo"). Torchscript will continue to be a supported deployment format via post compilation torchscript tracing, see: https://pytorch.org/TensorRT/user_guide/saving_models.html for more details',
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
     input_list = list(inputs) if inputs is not None else []
     enabled_precisions_set = (
         enabled_precisions if enabled_precisions is not None else {torch.float}
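One caveat worth noting: by default Python only displays DeprecationWarning when the attributed call site is in __main__, so code that reaches these functions through another library may never see the new message. A small opt-in sketch using only the standard warnings module:

import warnings

# Surface the TorchScript-frontend deprecation warnings everywhere, not just in __main__
warnings.simplefilter("always", DeprecationWarning)

# Or escalate them to errors in CI to flush out remaining ts-frontend call sites
# warnings.simplefilter("error", DeprecationWarning)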

py/torch_tensorrt/ts/ptq.py (+11)
@@ -7,6 +7,7 @@
 from typing_extensions import Self
 
 import os
+import warnings
 from enum import Enum
 
 import torch
@@ -88,6 +89,11 @@ def __init__(self, **kwargs: Any):
         pass
 
     def __new__(cls, *args: Any, **kwargs: Any) -> Self:
+        warnings.warn(
+            "Int8 PTQ Calibrator has been deprecated by TensorRT, please plan on porting to a NVIDIA Model Optimizer Toolkit based workflow. See: https://pytorch.org/TensorRT/tutorials/_rendered_examples/dynamo/vgg16_ptq.html for more details",
+            DeprecationWarning,
+            stacklevel=2,
+        )
         dataloader = args[0]
         algo_type = kwargs.get("algo_type", CalibrationAlgo.ENTROPY_CALIBRATION_2)
         cache_file = kwargs.get("cache_file", None)
@@ -175,6 +181,11 @@ def __init__(self, **kwargs: Any):
         pass
 
     def __new__(cls, *args: Any, **kwargs: Any) -> Self:
+        warnings.warn(
+            "Int8 PTQ Calibrator has been deprecated by TensorRT, please plan on porting to a NVIDIA Model Optimizer Toolkit based workflow. See: https://pytorch.org/TensorRT/tutorials/_rendered_examples/dynamo/vgg16_ptq.html for more details",
+            DeprecationWarning,
+            stacklevel=2,
+        )
         cache_file = args[0]
         algo_type = kwargs.get("algo_type", CalibrationAlgo.ENTROPY_CALIBRATION_2)
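Both calibrator classes use the same pattern: warn inside __new__ with stacklevel=2 so the report points at the user's constructor call rather than at ptq.py. A standalone illustration of that pattern (the class and message below are stand-ins, not the library's calibrator):

import warnings


class _DeprecatedCalibratorDemo:
    """Stand-in class showing the warn-in-__new__ pattern used in this commit."""

    def __new__(cls, *args, **kwargs):
        warnings.warn(
            "_DeprecatedCalibratorDemo is deprecated",  # hypothetical message
            DeprecationWarning,
            stacklevel=2,  # attribute the warning to the caller's line, not this file
        )
        return super().__new__(cls)


if __name__ == "__main__":
    # Shown by default here because the attributed call site is __main__
    demo = _DeprecatedCalibratorDemo()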
