
Commit 19a11c2

Merge pull request #2432 from gs-olive/segfault_fix
2 parents: c2e48c8 + 23bb893

File tree: 7 files changed (+15 -22 lines)

.circleci/config.yml (+6 -6)
@@ -109,7 +109,7 @@ commands:
       sudo docker run --rm --runtime=nvidia --gpus all nvidia/cuda:11.6.2-base-ubuntu20.04 nvidia-smi

   install-cudnn:
-    description: "Install CUDNN 8.8.1"
+    description: "Install CUDNN 8.9.5"
     parameters:
       os:
         type: string
@@ -119,7 +119,7 @@ commands:
         default: "x86_64"
       cudnn-version:
         type: string
-        default: "8.8.1.3"
+        default: "8.9.5.30"
       cuda-version:
         type: string
         default: "cuda12.0"
@@ -198,7 +198,7 @@ commands:
         default: "cuda12.0"
       cudnn-version:
         type: string
-        default: "8.8.1.3"
+        default: "8.9.5.30"
       trt-version-short:
         type: string
         default: "8.6.1"
@@ -246,7 +246,7 @@ commands:
         default: "8.6.1"
       cudnn-version-long:
         type: string
-        default: "8.8.1.3"
+        default: "8.9.5.30"
     steps:
       - run:
           name: Set up python environment
@@ -1460,7 +1460,7 @@ parameters:
    default: "https://download.pytorch.org/whl/nightly/cu121"
  cudnn-version:
    type: string
-    default: "8.8.1.3"
+    default: "8.9.5.30"
  trt-version-short:
    type: string
    default: "8.6.1"
@@ -1483,7 +1483,7 @@ parameters:
    default: "https://download.pytorch.org/whl/cu117"
  cudnn-version-legacy:
    type: string
-    default: "8.8.1.3"
+    default: "8.9.5.30"
  trt-version-short-legacy:
    type: string
    default: "8.6.1"

README.md (+1 -1)
@@ -118,7 +118,7 @@ These are the following dependencies used to verify the testcases. Torch-TensorRT
 - Bazel 5.2.0
 - Libtorch 2.2.0.dev (latest nightly) (built with CUDA 12.1)
 - CUDA 12.1
-- cuDNN 8.8.1
+- cuDNN 8.9.5
 - TensorRT 8.6.1

 ## Prebuilt Binaries and Wheel files

WORKSPACE (+3 -3)
@@ -71,10 +71,10 @@ http_archive(
 http_archive(
     name = "cudnn",
     build_file = "@//third_party/cudnn/archive:BUILD",
-    sha256 = "79d77a769c7e7175abc7b5c2ed5c494148c0618a864138722c887f95c623777c",
-    strip_prefix = "cudnn-linux-x86_64-8.8.1.3_cuda12-archive",
+    sha256 = "2a2eb89a2ab51071151c6082f1e816c702167a711a9372f9f73a7b5c4b06e01a",
+    strip_prefix = "cudnn-linux-x86_64-8.9.5.30_cuda12-archive",
     urls = [
-        "https://developer.nvidia.com/downloads/compute/cudnn/secure/8.8.1/local_installers/12.0/cudnn-linux-x86_64-8.8.1.3_cuda12-archive.tar.xz",
+        "https://developer.nvidia.com/downloads/compute/cudnn/secure/8.9.5/local_installers/12.x/cudnn-linux-x86_64-8.9.5.30_cuda12-archive.tar.xz",
     ],
 )
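
Bazel validates the downloaded tarball against the `sha256` pinned above; the same check can be reproduced by hand for a manually fetched archive. A minimal sketch (the local archive path is a hypothetical example, not something this commit adds):

```python
# Compute the SHA-256 of a locally downloaded cuDNN archive and compare it
# against the checksum pinned in WORKSPACE.
import hashlib
from pathlib import Path

EXPECTED = "2a2eb89a2ab51071151c6082f1e816c702167a711a9372f9f73a7b5c4b06e01a"


def sha256_of(path: Path) -> str:
    digest = hashlib.sha256()
    with path.open("rb") as f:
        # Read in 1 MiB chunks to keep memory use flat for large archives.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()


if __name__ == "__main__":
    archive = Path("cudnn-linux-x86_64-8.9.5.30_cuda12-archive.tar.xz")  # hypothetical local path
    actual = sha256_of(archive)
    print("OK" if actual == EXPECTED else f"Mismatch: {actual}")
```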

dev_dep_versions.yml (+1 -1)
@@ -1,4 +1,4 @@
 __version__: "2.2.0.dev0"
 __cuda_version__: "12.1"
-__cudnn_version__: "8.8"
+__cudnn_version__: "8.9"
 __tensorrt_version__: "8.6"
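
These pins are plain YAML, so downstream tooling can read them directly. A minimal sketch assuming PyYAML is installed (how the build scripts actually consume this file is not shown in this commit):

```python
# Read the dependency pins from dev_dep_versions.yml.
import yaml  # PyYAML

with open("dev_dep_versions.yml") as f:
    versions = yaml.safe_load(f)

# After this change, versions["__cudnn_version__"] == "8.9"
print(versions["__cuda_version__"], versions["__cudnn_version__"], versions["__tensorrt_version__"])
```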

docker/README.md (+2 -2)
@@ -17,14 +17,14 @@ Note: By default the container uses the `pre-cxx11-abi` version of Torch + Torch-TensorRT

 ### Instructions

-- The example below uses CUDNN 8.8 and TensorRT 8.6
+- The example below uses CUDNN 8.9 and TensorRT 8.6
 - See <a href="https://github.com/pytorch/TensorRT#dependencies">dependencies</a> for a list of current default dependencies.

 > From root of Torch-TensorRT repo

 Build:
 ```
-DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 --build-arg CUDNN_VERSION=8.8 -f docker/Dockerfile -t torch_tensorrt:latest .
+DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 --build-arg CUDNN_VERSION=8.9 -f docker/Dockerfile -t torch_tensorrt:latest .
 ```

 Run:

tools/perf/perf_run.py (+2 -5)
@@ -7,14 +7,14 @@
 import time
 import timeit
 import warnings
+from functools import wraps

 import numpy as np
 import pandas as pd
 import tensorrt as trt

 # Importing supported Backends
 import torch
-import torch.backends.cudnn as cudnn
 from utils import (
     BENCHMARK_MODELS,
     parse_backends,
@@ -30,6 +30,7 @@


 def run_with_try_except(func):
+    @wraps(func)
     def wrapper_func(*args, **kwargs):
         try:
             return func(*args, **kwargs)
@@ -527,7 +528,6 @@ def recordStats(backend, timings, precision, batch_size=1, compile_time_s=None):
     )
     args = arg_parser.parse_args()

-    cudnn.benchmark = True
     # Create random input tensor of certain size
     torch.manual_seed(12345)
     model_name = "Model"
@@ -542,9 +542,6 @@ def recordStats(backend, timings, precision, batch_size=1, compile_time_s=None):
     if os.path.exists(model_name):
         print("Loading user provided torchscript model: ", model_name)
         model = torch.jit.load(model_name).cuda().eval()
-    elif model_name in BENCHMARK_MODELS:
-        print("Loading torchscript model from BENCHMARK_MODELS for: ", model_name)
-        model = BENCHMARK_MODELS[model_name]["model"].eval().cuda()

     # Load PyTorch Model, if provided
     if len(model_name_torch) > 0 and os.path.exists(model_name_torch):
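
The `@wraps(func)` addition makes the decorated benchmark functions keep their original `__name__` and docstring instead of reporting `wrapper_func`. Below is a standalone sketch of the pattern; the exception handling and the stub function are illustrative only, not the exact bodies used in `perf_run.py`.

```python
# Decorator pattern used by run_with_try_except: run the wrapped benchmark,
# catch failures, and preserve the original function's metadata via functools.wraps.
from functools import wraps


def run_with_try_except(func):
    @wraps(func)  # keeps func.__name__, __doc__, etc. on the wrapper
    def wrapper_func(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exc:  # illustrative handling only
            print(f"{func.__name__} failed: {exc}")
            return None

    return wrapper_func


@run_with_try_except
def run_example_backend():
    """Hypothetical benchmark stub."""
    return "ok"


print(run_example_backend.__name__)  # "run_example_backend", thanks to @wraps
```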

tools/perf/utils.py (-4)
@@ -1,12 +1,8 @@
-from typing import Optional, Sequence, Union
-
 import custom_models as cm
 import timm
 import torch
 import torchvision.models as models

-import torch_tensorrt
-
 BENCHMARK_MODEL_NAMES = {
     "vgg16",
     "alexnet",

0 commit comments
