Skip to content

Commit db67cb9

Browse files
authored
chore: Remove CUDNN dependencies (#2804)
1 parent 4142d3f commit db67cb9

32 files changed

+34
-422
lines changed

.github/workflows/docker_builder.yml

+2-4
Original file line numberDiff line numberDiff line change
@@ -44,18 +44,16 @@ jobs:
4444
username: ${{ github.actor }}
4545
password: ${{ secrets.GITHUB_TOKEN }}
4646

47-
# Automatically detect TensorRT and cuDNN default versions for Torch-TRT build
47+
# Automatically detect TensorRT default versions for Torch-TRT build
4848
- name: Build Docker image
4949
env:
5050
DOCKER_TAG: ${{ env.DOCKER_REGISTRY }}/${{ steps.fix_slashes.outputs.container_name }}
5151
run: |
5252
python3 -m pip install pyyaml
5353
TRT_VERSION=$(python3 -c "import versions; versions.tensorrt_version()")
5454
echo "TRT VERSION = ${TRT_VERSION}"
55-
CUDNN_VERSION=$(python3 -c "import versions; versions.cudnn_version()")
56-
echo "CUDNN VERSION = ${CUDNN_VERSION}"
5755
58-
DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=$TRT_VERSION --build-arg CUDNN_VERSION=$CUDNN_VERSION -f docker/Dockerfile --tag $DOCKER_TAG .
56+
DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=$TRT_VERSION -f docker/Dockerfile --tag $DOCKER_TAG .
5957
6058
- name: Push Docker image
6159
env:

README.md

+5-21
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ Torch-TensorRT is distributed in the ready-to-run NVIDIA [NGC PyTorch Container]
1919

2020
## Building a docker container for Torch-TensorRT
2121

22-
We provide a `Dockerfile` in `docker/` directory. It expects a PyTorch NGC container as a base but can easily be modified to build on top of any container that provides, PyTorch, CUDA, cuDNN and TensorRT. The dependency libraries in the container can be found in the <a href="https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html">release notes</a>.
22+
We provide a `Dockerfile` in the `docker/` directory. It expects a PyTorch NGC container as a base but can easily be modified to build on top of any container that provides PyTorch, CUDA, and TensorRT. The dependency libraries in the container can be found in the <a href="https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html">release notes</a>.
2323

2424
Please follow this instruction to build a Docker container.
2525

@@ -152,14 +152,13 @@ bash ./compile.sh
152152
You need to start by having CUDA installed on the system, LibTorch will automatically be pulled for you by bazel,
153153
then you have two options.
154154

155-
#### 1. Building using cuDNN & TensorRT tarball distributions
155+
#### 1. Building using TensorRT tarball distributions
156156

157157
> This is recommended so as to build Torch-TensorRT hermetically and ensures any bugs are not caused by version issues
158158
159159
> Make sure when running Torch-TensorRT that these versions of the libraries are prioritized in your `$LD_LIBRARY_PATH`
160160
161-
1. You need to download the tarball distributions of TensorRT and cuDNN from the NVIDIA website.
162-
- https://developer.nvidia.com/cudnn
161+
1. You need to download the tarball distribution of TensorRT from the NVIDIA website.
163162
- https://developer.nvidia.com/tensorrt
164163
2. Place these files in a directory (the directories `third_party/dist_dir/[x86_64-linux-gnu | aarch64-linux-gnu]` exist for this purpose)
165164
3. Compile using:
@@ -168,25 +167,16 @@ then you have two options.
168167
bazel build //:libtorchtrt --compilation_mode opt --distdir third_party/dist_dir/[x86_64-linux-gnu | aarch64-linux-gnu]
169168
```
170169

171-
#### 2. Building using locally installed cuDNN & TensorRT
170+
#### 2. Building using locally installed TensorRT
172171

173172
> If you find bugs and you compiled using this method please disclose you used this method in the issue
174173
> (an `ldd` dump would be nice too)
175174
176-
1. Install TensorRT, CUDA and cuDNN on the system before starting to compile.
175+
1. Install TensorRT and CUDA on the system before starting to compile.
177176
2. In `WORKSPACE` comment out
178177

179178
```py
180179
# Downloaded distributions to use with --distdir
181-
http_archive(
182-
name = "cudnn",
183-
urls = ["<URL>",],
184-
185-
build_file = "@//third_party/cudnn/archive:BUILD",
186-
sha256 = "<TAR SHA256>",
187-
strip_prefix = "cuda"
188-
)
189-
190180
http_archive(
191181
name = "tensorrt",
192182
urls = ["<URL>",],
@@ -201,12 +191,6 @@ and uncomment
201191

202192
```py
203193
# Locally installed dependencies
204-
new_local_repository(
205-
name = "cudnn",
206-
path = "/usr/",
207-
build_file = "@//third_party/cudnn/local:BUILD"
208-
)
209-
210194
new_local_repository(
211195
name = "tensorrt",
212196
path = "/usr/",

cmake/Modules/FindcuDNN.cmake

-243
This file was deleted.

cmake/dependencies.cmake

-2
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,9 @@ endif()
77

88
# If the custom finders are needed at this point, there are good chances that they will be needed when consuming the library as well
99
install(FILES "${CMAKE_SOURCE_DIR}/cmake/Modules/FindTensorRT.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/torchtrt/Modules")
10-
install(FILES "${CMAKE_SOURCE_DIR}/cmake/Modules/FindcuDNN.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/torchtrt/Modules")
1110

1211
# CUDA
1312
find_package(CUDAToolkit REQUIRED)
14-
find_package(cuDNN REQUIRED) # Headers are needed somewhere
1513

1614
# libtorch
1715
find_package(Torch REQUIRED)

core/plugins/CMakeLists.txt

-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@ target_link_libraries(${lib_name}
2323
TensorRT::nvinfer_plugin
2424
torch
2525
core_util
26-
cuDNN::cuDNN
2726
PRIVATE
2827
Threads::Threads
2928
)

dev_dep_versions.yml

-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
11
__version__: "2.4.0.dev0"
22
__cuda_version__: "12.1"
3-
__cudnn_version__: "8.9"
43
__tensorrt_version__: "10.0.1"

docker/Dockerfile

+1-5
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,6 @@ ENV BASE_IMG=nvidia/cuda:12.1.1-devel-ubuntu22.04
88
ARG TENSORRT_VERSION
99
ENV TENSORRT_VERSION=${TENSORRT_VERSION}
1010
RUN test -n "$TENSORRT_VERSION" || (echo "No tensorrt version specified, please use --build-arg TENSORRT_VERSION=x.y to specify a version." && exit 1)
11-
ARG CUDNN_VERSION
12-
ENV CUDNN_VERSION=${CUDNN_VERSION}
13-
RUN test -n "$CUDNN_VERSION" || (echo "No cudnn version specified, please use --build-arg CUDNN_VERSION=x.y to specify a version." && exit 1)
1411

1512
ARG PYTHON_VERSION=3.10
1613
ENV PYTHON_VERSION=${PYTHON_VERSION}
RUN wget -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-instal
3532
RUN pyenv install -v ${PYTHON_VERSION}
3633
RUN pyenv global ${PYTHON_VERSION}
3734

38-
# Install CUDNN + TensorRT + dependencies
35+
# Install TensorRT + dependencies
3936
RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin
4037
RUN mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600
4138
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/7fa2af80.pub
4239
RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
4340
RUN apt-get update
44-
RUN apt-get install -y libcudnn8=${CUDNN_VERSION}* libcudnn8-dev=${CUDNN_VERSION}*
4541

4642
RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
4743
RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"

0 commit comments

Comments
 (0)