
Commit cd5fa5a

Including version in versions.txt
1 parent 054f562 commit cd5fa5a

File tree

3 files changed: +8 -3 lines


dev_dep_versions.yml

Lines changed: 1 addition & 0 deletions

@@ -1,2 +1,3 @@
 __cuda_version__: "12.8"
 __tensorrt_version__: "10.9.0"
+__tensorrt_llm_version__: "0.17.0.post1"
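
For context, the new pin is consumed the same way as the existing ones: setup.py (see below) loads this file with PyYAML. A minimal sketch of that lookup, assuming the file is read from the repository root:

import yaml

# Read the dependency pins used for the build (mirrors load_dep_info() in setup.py).
with open("dev_dep_versions.yml", "r") as stream:
    versions = yaml.safe_load(stream)

print(versions["__tensorrt_llm_version__"])  # -> "0.17.0.post1"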

py/torch_tensorrt/dynamo/utils.py

Lines changed: 3 additions & 3 deletions

@@ -20,6 +20,7 @@
 from torch_tensorrt._enums import Platform, dtype
 from torch_tensorrt._features import ENABLED_FEATURES
 from torch_tensorrt._Input import Input
+from torch_tensorrt._version import __tensorrt_llm_version__
 from torch_tensorrt.dynamo import _defaults
 from torch_tensorrt.dynamo._defaults import default_device
 from torch_tensorrt.dynamo._engine_cache import BaseEngineCache
@@ -821,9 +822,8 @@ def download_plugin_lib_path(py_version: str, platform: str) -> str:
     plugin_lib_path = None

     # Downloading TRT-LLM lib
-    # TODO: check how to fix the 0.18.0 hardcode below
     base_url = "https://pypi.nvidia.com/tensorrt-llm/"
-    file_name = f"tensorrt_llm-0.18.0-{py_version}-{py_version}-{platform}.whl"
+    file_name = f"tensorrt_llm-{__tensorrt_llm_version__}-{py_version}-{py_version}-{platform}.whl"
     download_url = base_url + file_name
     if not (os.path.exists(file_name)):
         try:
@@ -887,7 +887,7 @@ def load_tensorrt_llm() -> bool:
            return False
        else:
            # this is used as the default py version
-            py_version = "cp312"
+            py_version = "cp310"
            platform = Platform.current_platform()

            platform = str(platform).lower()
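
For illustration, with the new pin the download URL built in download_plugin_lib_path() would resolve roughly as below. The py_version and platform values here are examples only; at runtime py_version defaults to "cp310" (per the change above) and platform comes from Platform.current_platform(), so the exact tags depend on the environment.

# Hypothetical values for illustration; real values are resolved at runtime.
__tensorrt_llm_version__ = "0.17.0.post1"   # written into torch_tensorrt/_version.py by setup.py
py_version = "cp310"                        # default py tag after this change
platform = "linux_x86_64"                   # e.g. str(Platform.current_platform()).lower()

base_url = "https://pypi.nvidia.com/tensorrt-llm/"
file_name = f"tensorrt_llm-{__tensorrt_llm_version__}-{py_version}-{py_version}-{platform}.whl"
download_url = base_url + file_name
print(download_url)
# https://pypi.nvidia.com/tensorrt-llm/tensorrt_llm-0.17.0.post1-cp310-cp310-linux_x86_64.whl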

setup.py

Lines changed: 4 additions & 0 deletions

@@ -28,6 +28,7 @@
 __version__: str = "0.0.0"
 __cuda_version__: str = "0.0"
 __tensorrt_version__: str = "0.0"
+__tensorrt_llm_version__: str = "0.0"

 LEGACY_BASE_VERSION_SUFFIX_PATTERN = re.compile("a0$")

@@ -63,6 +64,7 @@ def get_base_version() -> str:
 def load_dep_info():
     global __cuda_version__
     global __tensorrt_version__
+    global __tensorrt_llm_version__
     with open("dev_dep_versions.yml", "r") as stream:
         versions = yaml.safe_load(stream)
         if (gpu_arch_version := os.environ.get("CU_VERSION")) is not None:
@@ -72,6 +74,7 @@ def load_dep_info():
         else:
             __cuda_version__ = versions["__cuda_version__"]
         __tensorrt_version__ = versions["__tensorrt_version__"]
+        __tensorrt_llm_version__ = versions["__tensorrt_llm_version__"]


 load_dep_info()
@@ -249,6 +252,7 @@ def gen_version_file():
         f.write('__version__ = "' + __version__ + '"\n')
         f.write('__cuda_version__ = "' + __cuda_version__ + '"\n')
         f.write('__tensorrt_version__ = "' + __tensorrt_version__ + '"\n')
+        f.write('__tensorrt_llm_version__ = "' + __tensorrt_llm_version__ + '"\n')


 def copy_libtorchtrt(multilinux=False, rt_only=False):
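
After this change, the torch_tensorrt/_version.py file emitted by gen_version_file() would look roughly like the sketch below, which is what makes the new import in py/torch_tensorrt/dynamo/utils.py resolve. The __version__ value shown is the placeholder default; the real value is computed at build time, and the other strings are the current pins from dev_dep_versions.yml.

# torch_tensorrt/_version.py, as written by gen_version_file() (illustrative)
__version__ = "0.0.0"
__cuda_version__ = "12.8"
__tensorrt_version__ = "10.9.0"
__tensorrt_llm_version__ = "0.17.0.post1"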
