
Commit 71c4dcb

feat!: Updating versions of CUDA, cuDNN, TensorRT and PyTorch
BREAKING CHANGE: PyTorch version has been bumped to 1.8.0
Default CUDA version is CUDA 11.1
TensorRT version is TensorRT 7.2.3.4
cuDNN version is now cuDNN 8.1

Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>
1 parent 6bb9fbf commit 71c4dcb

14 files changed: +75 -75 lines changed
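
Since this commit pins a new toolchain end to end (PyTorch 1.8.0, CUDA 11.1, cuDNN 8.1, TensorRT 7.2.3.4), a quick way to sanity-check a local environment against the bumped versions is to ask PyTorch what it was built against. This is only a minimal sketch, assuming a CUDA-enabled PyTorch install; the expected values come from the commit message above.

import torch

# Versions this PyTorch build was compiled against. After this commit the
# expectation is torch 1.8.0 built with CUDA 11.1 and cuDNN 8.1.x.
print("torch :", torch.__version__)               # e.g. "1.8.0+cu111"
print("cuda  :", torch.version.cuda)              # e.g. "11.1"
print("cudnn :", torch.backends.cudnn.version())  # e.g. 8101 for cuDNN 8.1.1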

Diff for: README.md

+5-5
@@ -73,11 +73,11 @@ torch.jit.save(trt_ts_module, "trt_torchscript_module.ts")
 ### Dependencies
 These are the following dependencies used to verify the testcases. TRTorch can work with other versions, but the tests are not guaranteed to pass.
 
-- Bazel 3.7.0
-- Libtorch 1.7.x (built with CUDA 11.0)
-- CUDA 11.0 (10.2 on Jetson)
-- cuDNN 8
-- TensorRT 7.2
+- Bazel 4.0.0
+- Libtorch 1.8.0 (built with CUDA 11.1)
+- CUDA 11.1 (10.2 on Jetson)
+- cuDNN 8.1
+- TensorRT 7.2.3
 
 ## Prebuilt Binaries and Wheel files

Diff for: WORKSPACE

+10-10
@@ -35,7 +35,7 @@ git_repository(
 # CUDA should be installed on the system locally
 new_local_repository(
     name = "cuda",
-    path = "/usr/local/cuda-11.0/",
+    path = "/usr/local/cuda-11.1/",
     build_file = "@//third_party/cuda:BUILD",
 )
 
@@ -53,16 +53,16 @@ http_archive(
     name = "libtorch",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    sha256 = "117f6dd65b7267839197397edd0b10fd2900b0f291e3e54b0b800caefc31bcb6",
-    urls = ["https://download.pytorch.org/libtorch/cu110/libtorch-cxx11-abi-shared-with-deps-1.7.1%2Bcu110.zip"],
+    sha256 = "62a2c06761c32576b30f5884240cf675b937945d929e4b13cc776de8d9c2236c",
+    urls = ["https://download.pytorch.org/libtorch/cu111/libtorch-cxx11-abi-shared-with-deps-1.8.0%2Bcu111.zip"],
 )
 
 http_archive(
     name = "libtorch_pre_cxx11_abi",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    sha256 = "c77f926afd55d7e860ec9c7abc992c25be77c89771c3ec6fcc13ea42f07d46df",
-    urls = ["https://download.pytorch.org/libtorch/cu110/libtorch-shared-with-deps-1.7.1%2Bcu110.zip"],
+    sha256 = "1c8b0c0883dd17f5ce952d42ec5f7f0cc7ceb370307535cee26a66c10419f1f6",
+    urls = ["https://download.pytorch.org/libtorch/cu111/libtorch-shared-with-deps-1.8.0%2Bcu111.zip"],
 )
 
 # Download these tarballs manually from the NVIDIA website
@@ -71,18 +71,18 @@ http_archive(
 
 http_archive(
     name = "cudnn",
-    urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.0.5/11.0_20201106/cudnn-11.0-linux-x64-v8.0.5.39.tgz",],
+    urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.1.1.33/11.2_20210301/cudnn-11.2-linux-x64-v8.1.1.33.tgz",],
     build_file = "@//third_party/cudnn/archive:BUILD",
-    sha256 = "4e16ee7895deb4a8b1c194b812ba49586ef7d26902051401d3717511898a9b73",
+    sha256 = "98a8784e92862f20018d20c281b30d4a0cd951f93694f6433ccf4ae9c502ba6a",
     strip_prefix = "cuda"
 )
 
 http_archive(
     name = "tensorrt",
-    urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.2.2/tars/TensorRT-7.2.2.3.Ubuntu-18.04.x86_64-gnu.cuda-11.0.cudnn8.0.tar.gz",],
+    urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.2.3/tars/TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz",],
     build_file = "@//third_party/tensorrt/archive:BUILD",
-    strip_prefix = "TensorRT-7.2.2.3",
-    sha256 = "b5c325e38e1d92ce1ce92ca8b54ede9c224bf128c9a53eb0b9022f1ee4313ee0"
+    strip_prefix = "TensorRT-7.2.3.4",
+    sha256 = "d3a1f478e304b48878604fac70ce7920fece71f9cac62f925c9c59c197f5d087"
 )
 
 ####################################################################################
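
The cuDNN and TensorRT archives above must be downloaded manually from the NVIDIA site, as the WORKSPACE comment notes, and each http_archive rule pins its tarball with a sha256. A small sketch for recomputing such a checksum locally before comparing it against the value in the rule; the file path is only a placeholder for wherever the tarball was saved.

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so a multi-GB tarball is never loaded at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Compare the printed value against the sha256 attribute pinned in the WORKSPACE rule.
print(sha256_of("TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz"))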

Diff for: core/util/trt_util.cpp

+1-1
@@ -295,7 +295,7 @@ nvinfer1::DataType toTRTDataType(at::ScalarType t) {
 }
 
 c10::optional<nvinfer1::DataType> toTRTDataType(caffe2::TypeMeta dtype) {
-  if (auto t = c10::tryTypeMetaToScalarType(dtype)) {
+  if (auto t = c10::optTypeMetaToScalarType(dtype)) {
     return toTRTDataType(t.value());
   } else {
     return {};

Diff for: cpp/api/include/trtorch/trtorch.h

+1-1
@@ -23,7 +23,7 @@ struct Module;
 } // namespace torch
 
 namespace c10 {
-enum class DeviceType : int16_t;
+enum class DeviceType : int8_t;
 enum class ScalarType : int8_t;
 template <class>
 class ArrayRef;

Diff for: py/requirements.txt

+1-1
@@ -1 +1 @@
-torch==1.7.1
+torch==1.8.0

Diff for: py/setup.py

+1-1
@@ -204,7 +204,7 @@ def run(self):
     long_description=long_description,
     ext_modules=ext_modules,
     install_requires=[
-        'torch>=1.7.0,<1.8.0',
+        'torch>=1.8.0+cu111,<1.9.0',
     ],
     setup_requires=[],
     cmdclass={
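
The new install_requires range keeps pip from resolving a torch build outside the 1.8.x line. A minimal sketch of checking whether a locally installed torch falls inside that window, assuming the packaging module is available; the +cu111 local build tag is stripped before the comparison, since the sketch only verifies the base version range.

import torch
from packaging.requirements import Requirement
from packaging.version import Version

# The constraint from install_requires, written here without the +cu111 local tag.
req = Requirement("torch>=1.8.0,<1.9.0")
installed = Version(torch.__version__.split("+")[0])
print(installed, "satisfies", req, "->", installed in req.specifier)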

Diff for: tests/core/conversion/converters/converter_test.bzl

+1-1
@@ -12,5 +12,5 @@ def converter_test(name, visibility=None):
            ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
            "//conditions:default": ["@libtorch//:libtorch"],
        }),
-       timeout="short"
+       timeout="moderate"
    )

Diff for: tests/core/conversion/converters/test_activation.cpp

+20-20
@@ -66,28 +66,28 @@ TEST(Converters, ATenTanhConvertsCorrectly) {
 
 // TODO: Seems like the IR parser is not handling negative numbers well, need to
 // follow up with the PyTorch Team
-// TEST(Converters, ATenHardTanhConvertsCorrectly) {
-//   const auto graph = R"IR(
-//     graph(%0 : Tensor):
-//       %1 : float = prim::Constant[value=-1.0]()
-//       %2 : float = prim::Constant[value=1.0]()
-//       %3 : Tensor = aten::hardtanh(%0, %1, %2)
-//       return (%3))IR";
+TEST(Converters, ATenHardTanhConvertsCorrectly) {
+  const auto graph = R"IR(
+    graph(%0 : Tensor):
+      %1 : float = prim::Constant[value=-1.0]()
+      %2 : float = prim::Constant[value=1.0]()
+      %3 : Tensor = aten::hardtanh(%0, %1, %2)
+      return (%3))IR";
 
-//   auto g = std::make_shared<torch::jit::Graph>();
-//   torch::jit::script::parseIR(graph, &*g);
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, &*g);
 
-//   auto in = at::randint(-5, 5, {5}, {at::kCUDA});
-//   auto params = trtorch::core::conversion::get_named_params(g->inputs(),
-//   {}); auto jit_results = trtorch::tests::util::RunGraph(g, params, {in});
+  auto in = at::randint(-5, 5, {5}, {at::kCUDA});
+  auto params = trtorch::core::conversion::get_named_params(g->inputs(), {});
+  auto jit_results = trtorch::tests::util::RunGraph(g, params, {in});
 
-//   in = at::clone(in);
-//   params = trtorch::core::conversion::get_named_params(g->inputs(), {});
-//   auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {in});
+  in = at::clone(in);
+  params = trtorch::core::conversion::get_named_params(g->inputs(), {});
+  auto trt_results = trtorch::tests::util::RunGraphEngine(g, params, {in});
 
-//   ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0],
-//   trt_results[0], 2e-6));
-// }
+  ASSERT_TRUE(trtorch::tests::util::almostEqual(jit_results[0],
+      trt_results[0], 2e-6));
+}
 
 TEST(Converters, ATenHardTanhCustomRangeConvertsCorrectly) {
   const auto graph = R"IR(
 
@@ -114,7 +114,7 @@ TEST(Converters, ATenHardTanhCustomRangeConvertsCorrectly) {
 TEST(Converters, ATenPReLUConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(1:1)):
+          %1 : Float(1, strides=[1])):
      %3 : Tensor = aten::prelu(%0, %1)
      return (%3))IR";
 
@@ -137,7 +137,7 @@ TEST(Converters, ATenPReLUConvertsCorrectly) {
 TEST(Converters, ATenPReLUMultiChannelConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(10:1)):
+          %1 : Float(10, strides=[1])):
      %3 : Tensor = aten::prelu(%0, %1)
      return (%3))IR";

Diff for: tests/core/conversion/converters/test_batch_norm.cpp

+4-4
@@ -7,10 +7,10 @@
 TEST(Converters, ATenBatchNormConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1: Float(5:1),
-          %2: Float(5:1),
-          %3: Float(5:1),
-          %4: Float(5:1)):
+          %1: Float(5, strides=[1]),
+          %2: Float(5, strides=[1]),
+          %3: Float(5, strides=[1]),
+          %4: Float(5, strides=[1])):
      %5 : bool = prim::Constant[value=0]()
      %6 : float = prim::Constant[value=1.0000000000000001e-05]()
      %7 : float = prim::Constant[value=0.10000000000000001]()

Diff for: tests/core/conversion/converters/test_conv_deconv.cpp

+25-25
@@ -39,8 +39,8 @@ void conv_test_helper(std::string graph_ir) {
 TEST(Converters, ATenConvolutionConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(8:45, 3:15, 5:5, 5:1),
-          %2 : Float(8:1)):
+          %1 : Float(8, 3, 5, 5, strides=[45, 15, 5, 1]),
+          %2 : Float(8)):
      %3 : int = prim::Constant[value=1]()
      %4 : int = prim::Constant[value=0]()
      %5 : int = prim::Constant[value=1]()
 
@@ -81,7 +81,7 @@ TEST(Converters, ATenConvolutionConvertsCorrectly) {
 TEST(Converters, ATenConvolutionNoBiasConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(4:9, 1:9, 3:3, 3:1)):
+          %1 : Float(4, 1, 3, 3, strides=[9, 9, 3, 1])):
      %2 : None = prim::Constant()
      %3 : int = prim::Constant[value=1]()
      %4 : int = prim::Constant[value=0]()
 
@@ -119,8 +119,8 @@ TEST(Converters, ATenConvolutionNoBiasConvertsCorrectly) {
 TEST(Converters, ATenConvolutionWithStrideConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(4:27, 3:9, 3:3, 3:1),
-          %2 : Float(4:1)):
+          %1 : Float(4, 3, 3, 3, strides=[27, 9, 3, 1]),
+          %2 : Float(4)):
      %3 : int = prim::Constant[value=3]()
      %4 : int = prim::Constant[value=0]()
      %5 : int = prim::Constant[value=1]()
 
@@ -162,8 +162,8 @@ TEST(Converters, ATenConvolutionWithStrideConvertsCorrectly) {
 TEST(Converters, ATenConvolutionWithPaddingConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(4:48, 3:16, 4:4, 4:1),
-          %2 : Float(4:1)):
+          %1 : Float(4, 3, 4, 4, strides=[48, 16, 4, 1]),
+          %2 : Float(4)):
      %3 : int = prim::Constant[value=1]()
      %4 : int = prim::Constant[value=2]()
      %5 : int = prim::Constant[value=1]()
 
@@ -205,8 +205,8 @@ TEST(Converters, ATenConvolutionWithPaddingConvertsCorrectly) {
 TEST(Converters, ATenConvolution3dConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1),
-          %2 : Float(32:1)):
+          %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1]),
+          %2 : Float(32)):
      %sv : int = prim::Constant[value=1]()
      %s : int[] = prim::ListConstruct(%sv, %sv, %sv)
      %pv : int = prim::Constant[value=0]()
 
@@ -247,7 +247,7 @@ TEST(Converters, ATenConvolution3dConvertsCorrectly) {
 TEST(Converters, ATenConvolution3dNoBiasConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1)):
+          %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1])):
      %bias : None = prim::Constant()
      %sv : int = prim::Constant[value=1]()
      %s : int[] = prim::ListConstruct(%sv, %sv, %sv)
 
@@ -285,8 +285,8 @@ TEST(Converters, ATenConvolution3dNoBiasConvertsCorrectly) {
 TEST(Converters, ATenConvolution3dWithPaddingConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1),
-          %2 : Float(32:1)):
+          %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1]),
+          %2 : Float(32)):
      %sv : int = prim::Constant[value=1]()
      %s : int[] = prim::ListConstruct(%sv, %sv, %sv)
      %pv : int = prim::Constant[value=1]()
 
@@ -327,8 +327,8 @@ TEST(Converters, ATenConvolution3dWithPaddingConvertsCorrectly) {
 TEST(Converters, ATenConvolution3dWithStrideDilationConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(32:81, 3:27, 3:9, 3:3, 3:1),
-          %2 : Float(32:1)):
+          %1 : Float(32, 3, 3, 3, 3, strides=[81, 27, 9, 3, 1]),
+          %2 : Float(32)):
      %sv : int = prim::Constant[value=2]()
      %s : int[] = prim::ListConstruct(%sv, %sv, %sv)
      %pv : int = prim::Constant[value=1]()
 
@@ -369,8 +369,8 @@ TEST(Converters, ATenConvolution3dWithStrideDilationConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(8:27, 3:9, 3:3, 3:1),
-          %2 : Float(8:1)):
+          %1 : Float(8, 3, 3, 3, strides=[27, 9, 3, 1]),
+          %2 : Float(8)):
      %3 : int = prim::Constant[value=1]()
      %4 : int = prim::Constant[value=0]()
      %5 : int = prim::Constant[value=1]()
 
@@ -411,7 +411,7 @@ TEST(Converters, ATenConvTransposeConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeNoBiasConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(4:9, 1:9, 3:3, 3:1)):
+          %1 : Float(4, 1, 3, 3, strides=[9, 9, 3, 1])):
      %2 : None = prim::Constant()
      %3 : int = prim::Constant[value=1]()
      %4 : int = prim::Constant[value=0]()
 
@@ -449,8 +449,8 @@ TEST(Converters, ATenConvTransposeNoBiasConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeWithStrideConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(4:27, 3:9, 3:3, 3:1),
-          %2 : Float(4:1)):
+          %1 : Float(4, 3, 3, 3, strides=[27, 9, 3, 1]),
+          %2 : Float(4)):
      %3 : int = prim::Constant[value=3]()
      %4 : int = prim::Constant[value=0]()
      %5 : int = prim::Constant[value=1]()
 
@@ -492,8 +492,8 @@ TEST(Converters, ATenConvTransposeWithStrideConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeWithPaddingConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(4:48, 3:16, 4:4, 4:1),
-          %2 : Float(4:1)):
+          %1 : Float(4, 3, 4, 4, strides=[48, 16, 4, 1]),
+          %2 : Float(4)):
      %3 : int = prim::Constant[value=1]()
      %4 : int = prim::Constant[value=2]()
      %5 : int = prim::Constant[value=1]()
 
@@ -535,8 +535,8 @@ TEST(Converters, ATenConvTransposeWithPaddingConvertsCorrectly) {
 TEST(Converters, ATenConvolutionWithGroupConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(8:48, 1:16, 2:4, 2:1),
-          %2 : Float(8:1)):
+          %1 : Float(8, 1, 2, 2, strides=[48, 16, 4, 1]),
+          %2 : Float(8)):
      %3 : int = prim::Constant[value=1]()
      %4 : int = prim::Constant[value=2]()
      %5 : int = prim::Constant[value=1]()
 
@@ -578,8 +578,8 @@ TEST(Converters, ATenConvolutionWithGroupConvertsCorrectly) {
 TEST(Converters, ATenConvTransposeWithGroupConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(8:56, 4:16, 3:3, 3:1),
-          %2 : Float(16:1)):
+          %1 : Float(8, 4, 3, 3, strides=[56, 16, 3, 1]),
+          %2 : Float(16)):
      %3 : int = prim::Constant[value=1]()
      %4 : int = prim::Constant[value=1]()
      %5 : int = prim::Constant[value=1]()

Diff for: tests/core/conversion/converters/test_linear.cpp

+3-3
@@ -7,7 +7,7 @@
 TEST(Converters, ATenLinearNoBiasConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(3:2, 2:1)):
+          %1 : Float(3, 2, strides=[2, 1])):
      %2 : None = prim::Constant()
      %3 : Tensor = aten::linear(%0, %1, %2)
      return (%3))IR";
 
@@ -33,8 +33,8 @@ TEST(Converters, ATenLinearNoBiasConvertsCorrectly) {
 TEST(Converters, ATenLinearBiasConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(2:3, 3:1),
-          %2 : Float(2:1)):
+          %1 : Float(2, 3, strides=[3, 1]),
+          %2 : Float(2)):
      %3 : Tensor = aten::linear(%0, %1, %2)
      return (%3))IR";

Diff for: tests/core/conversion/converters/test_select.cpp

+1-1
@@ -88,7 +88,7 @@ TEST(Converters, ATenNarrowStartScalarConvertsCorrectly) {
 
 TEST(Converters, ATenEmbeddingConvertsCorrectly) {
   const auto graph = R"IR(
-    graph(%1 : Tensor, %emb_weight : Float(10:3, 3:1)):
+    graph(%1 : Tensor, %emb_weight : Float(10, 3, strides=[3, 1])):
      %2 : bool = prim::Constant[value=0]()
      %3 : int = prim::Constant[value=-1]()
      %5 : Tensor = aten::embedding(%emb_weight, %1, %3, %2, %2)

Diff for: tests/core/conversion/converters/test_stack.cpp

+1-1
@@ -31,7 +31,7 @@ TEST(Converters, ATenStackPureTensorConvertsCorrectly) {
 TEST(Converters, ATenStackDiffTensorConvertsCorrectly) {
   const auto graph = R"IR(
     graph(%0 : Tensor,
-          %1 : Float(4:16, 4:4, 4:1)):
+          %1 : Float(4, 4, 4, strides=[16, 4, 1])):
      %2 : Tensor[] = prim::ListConstruct(%0, %1)
      %3 : int = prim::Constant[value=1]()
      %4 : Tensor = aten::stack(%2, %3)

Diff for: tests/py/requirements.txt

+1-1
@@ -1 +1 @@
-torchvision==0.8.2
+torchvision==0.9.0
