
Commit 1ef0133

bump to trt10.7
1 parent 45407a6 commit 1ef0133

10 files changed: +45 -28 lines

.github/scripts/generate-tensorrt-test-matrix.py (+1 -1)

@@ -66,7 +66,7 @@
         "10.7.0": {
             "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz",
             "strip_prefix": "TensorRT-10.7.0.23",
-            "sha256": "27d0f7e9af657b9fa19bfe9d62376d4de64182ed6274bde04bf143f56d308ec9",
+            "sha256": "d7f16520457caaf97ad8a7e94d802f89d77aedf9f361a255f2c216e2a3a40a11",
         },
     },
 }
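The updated checksum can be spot-checked against the published tarball before it is trusted in the test matrix. Below is a minimal sketch, not part of the repository: the URL and expected digest are copied from the hunk above, and it assumes the download URL is directly fetchable from the environment running the check.

import hashlib
import urllib.request

# URL and expected digest copied from the updated matrix entry above.
URL = "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz"
EXPECTED = "d7f16520457caaf97ad8a7e94d802f89d77aedf9f361a255f2c216e2a3a40a11"

sha = hashlib.sha256()
with urllib.request.urlopen(URL) as resp:
    # Stream in 1 MiB chunks so the full archive is never held in memory.
    for chunk in iter(lambda: resp.read(1 << 20), b""):
        sha.update(chunk)

digest = sha.hexdigest()
print("computed:", digest)
assert digest == EXPECTED, "sha256 mismatch: matrix entry may be stale"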

README.md (+2 -2)

@@ -7,7 +7,7 @@ Torch-TensorRT
 [![Documentation](https://img.shields.io/badge/docs-master-brightgreen)](https://nvidia.github.io/Torch-TensorRT/)
 [![pytorch](https://img.shields.io/badge/PyTorch-2.4-green)](https://www.python.org/downloads/release/python-31013/)
 [![cuda](https://img.shields.io/badge/CUDA-12.4-green)](https://developer.nvidia.com/cuda-downloads)
-[![trt](https://img.shields.io/badge/TensorRT-10.3.0-green)](https://github.com/nvidia/tensorrt-llm)
+[![trt](https://img.shields.io/badge/TensorRT-10.7.0-green)](https://github.com/nvidia/tensorrt-llm)
 [![license](https://img.shields.io/badge/license-BSD--3--Clause-blue)](./LICENSE)
 [![linux_tests](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux.yml/badge.svg)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-linux.yml)
 [![windows_tests](https://github.com/pytorch/TensorRT/actions/workflows/build-test-windows.yml/badge.svg)](https://github.com/pytorch/TensorRT/actions/workflows/build-test-windows.yml)

@@ -119,7 +119,7 @@ These are the following dependencies used to verify the testcases. Torch-TensorR
 - Bazel 6.3.2
 - Libtorch 2.5.0.dev (latest nightly) (built with CUDA 12.4)
 - CUDA 12.4
-- TensorRT 10.6.0.26
+- TensorRT 10.7.0.23

 ## Deprecation Policy

dev_dep_versions.yml (+2 -2)

@@ -1,2 +1,2 @@
-__cuda_version__: "12.4"
-__tensorrt_version__: ">=10.3.0,<=10.6.0"
+__cuda_version__: "12.6"
+__tensorrt_version__: "10.7.0"
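Several other files in this commit repeat the same versions, so a quick consistency check against this file can catch drift. A minimal sketch, assuming PyYAML is installed and the script runs from the repository root (neither is implied by the commit itself):

import yaml  # PyYAML, assumed to be available in the dev environment

with open("dev_dep_versions.yml") as f:
    versions = yaml.safe_load(f)

# Expected values copied from the hunk above.
assert versions["__cuda_version__"] == "12.6"
assert versions["__tensorrt_version__"] == "10.7.0"
print("dev_dep_versions.yml pins:", versions)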

packaging/smoke_test_script.sh (+1 -1)

@@ -2,5 +2,5 @@
 # The issue was smoke test installs the built torch_tensorrt wheel file and checks `import torch_tensorrt; print(torch_tensorrt.__version__)`
 # Since tensorrt cannot be pip installable in CI, the smoke test will fail.
 # One way we tried to handle it is manually install tensorrt wheel while by extracting from the tarball.
-# However, the TensorRT-10.3.0.26/lib path doesn't seem to show up in LD_LIBRARY_PATH even if we explicitly set it.
+# However, the TensorRT-10.7.0.23/lib path doesn't seem to show up in LD_LIBRARY_PATH even if we explicitly set it.
 # TODO: Implement a custom smoke_test script to verify torch_tensorrt installation.
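To debug the LD_LIBRARY_PATH problem these comments describe, it can help to check, from inside the smoke-test environment, whether the extracted lib directory is actually on the path and whether the TensorRT runtime resolves. A minimal sketch; the /TensorRT-10.7.0.23/lib location is an assumption about where the tarball was unpacked, not something the script guarantees:

import ctypes
import os

# Assumed extraction location; adjust to wherever the tarball was unpacked.
TRT_LIB_DIR = "/TensorRT-10.7.0.23/lib"

ld_path = os.environ.get("LD_LIBRARY_PATH", "")
print("lib dir on LD_LIBRARY_PATH:", TRT_LIB_DIR in ld_path.split(":"))

try:
    # If the dynamic loader can find libnvinfer, `import torch_tensorrt` has a chance.
    ctypes.CDLL("libnvinfer.so.10")
    print("libnvinfer.so.10 loaded")
except OSError as err:
    print("failed to load libnvinfer:", err)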

py/ci/Dockerfile.ci (+7 -6)

@@ -3,13 +3,14 @@ FROM pytorch/manylinux2_28-builder:cuda12.6
 RUN yum install -y ninja-build

 # download TensorRT tarball
-RUN wget -q https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar.gz \
-    && gunzip TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar.gz \
-    && tar -xvf TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar \
-    && rm TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar
+RUN wget -q https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz \
+    && gunzip TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz \
+    && tar -xvf TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar \
+    && rm TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar

-ENV TENSORRT_DIR=/TensorRT-10.3.0.26
-ENV TENSORRT_VERSION=10.3.0
+ENV TENSORRT_DIR=/TensorRT-10.7.0.23
+ENV TENSORRT_VERSION=10.7.0
+ENV USE_CXX11_ABI=1

 RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 \
    && mv bazelisk-linux-amd64 /usr/bin/bazel \
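After the CI image is rebuilt, the new environment can be sanity-checked from inside a container. A minimal sketch to run with the image's Python interpreter; the expected values come from the ENV lines above, and the include/lib layout is the standard TensorRT tarball layout rather than anything this Dockerfile asserts:

import os

# Values expected from the ENV lines in the updated Dockerfile.
expected = {
    "TENSORRT_DIR": "/TensorRT-10.7.0.23",
    "TENSORRT_VERSION": "10.7.0",
    "USE_CXX11_ABI": "1",
}

for key, value in expected.items():
    actual = os.environ.get(key)
    status = "ok" if actual == value else f"MISMATCH (got {actual!r})"
    print(f"{key}={value}: {status}")

# The extracted tarball should provide headers and shared libraries.
trt_dir = expected["TENSORRT_DIR"]
print("include/ present:", os.path.isdir(os.path.join(trt_dir, "include")))
print("lib/ present:", os.path.isdir(os.path.join(trt_dir, "lib")))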

pyproject.toml (+10 -0)

@@ -8,7 +8,11 @@ requires = [
     "cffi>=1.15.1",
     "typing-extensions>=4.7.0",
     "future>=0.18.3",
+<<<<<<< HEAD
     "tensorrt-cu12>=10.6.0,<10.8.0",
+=======
+    "tensorrt-cu12==10.7.0",
+>>>>>>> 78af8f69c (bump to trt10.7)
     "torch>=2.6.0.dev,<2.7.0",
     "pybind11==2.6.2",
     "numpy",

@@ -55,9 +59,15 @@ keywords = [
 ]
 dependencies = [
     "torch>=2.6.0.dev,<2.7.0",
+<<<<<<< HEAD
     "tensorrt-cu12>=10.6.0,<10.8.0",
     "tensorrt-cu12-bindings>=10.6.0,<10.8.0",
     "tensorrt-cu12-libs>=10.6.0,<10.8.0",
+=======
+    "tensorrt-cu12==10.7.0",
+    "tensorrt-cu12-bindings==10.7.0",
+    "tensorrt-cu12-libs==10.7.0",
+>>>>>>> 78af8f69c (bump to trt10.7)
     "packaging>=23",
     "numpy",
     "typing-extensions>=4.7.0",

toolchains/ci_workspaces/MODULE.bazel.tmpl (+4 -6)

@@ -67,20 +67,18 @@ http_archive(
 http_archive(
     name = "tensorrt",
     build_file = "@//third_party/tensorrt/archive:BUILD",
-    sha256 = "33d3c2f3f4c84dc7991a4337a6fde9ed33f5c8e5c4f03ac2eb6b994a382b03a0",
-    strip_prefix = "TensorRT-10.6.0.26",
+    strip_prefix = "TensorRT-10.7.0.23",
     urls = [
-        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.6.0/tars/TensorRT-10.6.0.26.Linux.x86_64-gnu.cuda-12.6.tar.gz",
+        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz",
     ],
 )

 http_archive(
     name = "tensorrt_win",
     build_file = "@//third_party/tensorrt/archive:BUILD",
-    sha256 = "6c6d92c108a1b3368423e8f69f08d31269830f1e4c9da43b37ba34a176797254",
-    strip_prefix = "TensorRT-10.6.0.26",
+    strip_prefix = "TensorRT-10.7.0.23",
     urls = [
-        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.6.0/zip/TensorRT-10.6.0.26.Windows.win10.cuda-12.6.zip",
+        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/zip/TensorRT-10.7.0.23.Windows.win10.cuda-12.6.zip",
     ],
 )

toolchains/legacy/WORKSPACE.win.release.tmpl (+1 -1)

@@ -63,7 +63,7 @@ http_archive(

 new_local_repository(
     name = "tensorrt_win",
-    path = "C:/TensorRT-10.3.0.26",
+    path = "C:/TensorRT-10.7.0.23",
     build_file = "@//third_party/tensorrt/local:BUILD"
 )

toolchains/legacy/WORKSPACE.x86_64.release.rhel.tmpl (+3 -3)

@@ -71,10 +71,10 @@ http_archive(
 http_archive(
     name = "tensorrt",
     build_file = "@//third_party/tensorrt/archive:BUILD",
-    sha256 = "adff1cd5abe5d87013806172351e58fd024e5bf0fc61d49ef4b84cd38ed99081",
-    strip_prefix = "TensorRT-10.3.0.26",
+    sha256 = "d7f16520457caaf97ad8a7e94d802f89d77aedf9f361a255f2c216e2a3a40a11",
+    strip_prefix = "TensorRT-10.7.0.23",
     urls = [
-        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar.gz",
+        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.7.0/tars/TensorRT-10.7.0.23.Linux.x86_64-gnu.cuda-12.6.tar.gz",
     ],
 )

uv.lock (+14 -6)

Some generated files are not rendered by default.
