Commit e8514f2

Merge pull request #2574 from pytorch/staging_rel_2_2
cherry-pick: Port most changes from `main`
2 parents b6dd22b + db12a8e

90 files changed: +4242 -702 lines


.github/scripts/install-torch-tensorrt.sh

+1 -1

@@ -2,7 +2,7 @@
 set -eou pipefail
 # Source conda so it's available to the script environment
 source ${BUILD_ENV_FILE}
-${CONDA_RUN} ${PIP_INSTALL_TORCH} torch==2.1.2 torchvision==0.16.2 pyyaml
+${CONDA_RUN} ${PIP_INSTALL_TORCH} torch==2.2.0 torchvision==0.17.0 pyyaml
 export TRT_VERSION=$(${CONDA_RUN} python -c "import versions; versions.tensorrt_version()")
 ${CONDA_RUN} python -m pip install /opt/torch-tensorrt-builds/torch_tensorrt*+${CU_VERSION}*.whl tensorrt~=${TRT_VERSION} tensorrt-bindings~=${TRT_VERSION} --extra-index-url=https://pypi.ngc.nvidia.com

.github/workflows/build-test.yml

+21 -16

@@ -15,12 +15,12 @@ on:
 
 jobs:
   generate-matrix:
-    uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@release/2.1
+    uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@release/2.2
     with:
       package-type: wheel
      os: linux
-      test-infra-repository: pytorch/test-infra
-      test-infra-ref: release/2.1
+      test-infra-repository: gs-olive/test-infra
+      test-infra-ref: release/2.2
       channel: test
       with-rocm: false
       with-cpu: false
@@ -38,12 +38,12 @@ jobs:
           smoke-test-script: ""
           package-name: torch_tensorrt
     name: Build torch-tensorrt whl package
-    uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@release/2.1
+    uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@release/2.2
     with:
       repository: ${{ matrix.repository }}
       ref: ""
-      test-infra-repository: pytorch/test-infra
-      test-infra-ref: release/2.1
+      test-infra-repository: gs-olive/test-infra
+      test-infra-ref: release/2.2
       build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
       pre-script: ${{ matrix.pre-script }}
       env-var-script: ${{ matrix.env-var-script }}
@@ -57,6 +57,7 @@ jobs:
 
   tests-py-torchscript-fe:
     name: Test torchscript frontend [Python]
+    if: success() || failure()
     needs: [generate-matrix, build]
     strategy:
       fail-fast: false
@@ -70,8 +71,8 @@
       job-name: tests-py-torchscript-fe
       repository: "pytorch/tensorrt"
       ref: ""
-      test-infra-repository: pytorch/test-infra
-      test-infra-ref: release/2.1
+      test-infra-repository: gs-olive/test-infra
+      test-infra-ref: release/2.2
       build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
       pre-script: ${{ matrix.pre-script }}
       script: |
@@ -92,6 +93,7 @@
 
   tests-py-dynamo-converters:
     name: Test dynamo converters [Python]
+    if: success() || failure()
     needs: [generate-matrix, build]
     strategy:
       fail-fast: false
@@ -105,8 +107,8 @@
       job-name: tests-py-dynamo-converters
       repository: "pytorch/tensorrt"
       ref: ""
-      test-infra-repository: pytorch/test-infra
-      test-infra-ref: release/2.1
+      test-infra-repository: gs-olive/test-infra
+      test-infra-ref: release/2.2
       build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
       pre-script: ${{ matrix.pre-script }}
       script: |
@@ -119,6 +121,7 @@
 
   tests-py-dynamo-fe:
     name: Test dynamo frontend [Python]
+    if: success() || failure()
     needs: [generate-matrix, build]
     strategy:
       fail-fast: false
@@ -132,8 +135,8 @@
       job-name: tests-py-dynamo-fe
       repository: "pytorch/tensorrt"
       ref: ""
-      test-infra-repository: pytorch/test-infra
-      test-infra-ref: release/2.1
+      test-infra-repository: gs-olive/test-infra
+      test-infra-ref: release/2.2
       build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
       pre-script: ${{ matrix.pre-script }}
       script: |
@@ -148,6 +151,7 @@
 
   tests-py-torch-compile-be:
     name: Test torch compile backend [Python]
+    if: success() || failure()
     needs: [generate-matrix, build]
     strategy:
       fail-fast: false
@@ -161,8 +165,8 @@
       job-name: tests-py-torch-compile-be
       repository: "pytorch/tensorrt"
       ref: ""
-      test-infra-repository: pytorch/test-infra
-      test-infra-ref: release/2.1
+      test-infra-repository: gs-olive/test-infra
+      test-infra-ref: release/2.2
       build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
       pre-script: ${{ matrix.pre-script }}
       script: |
@@ -176,6 +180,7 @@
 
   tests-py-dynamo-core:
     name: Test dynamo core [Python]
+    if: success() || failure()
     needs: [generate-matrix, build]
     strategy:
       fail-fast: false
@@ -189,8 +194,8 @@
       job-name: tests-py-dynamo-core
       repository: "pytorch/tensorrt"
       ref: ""
-      test-infra-repository: pytorch/test-infra
-      test-infra-ref: release/2.1
+      test-infra-repository: gs-olive/test-infra
+      test-infra-ref: release/2.2
       build-matrix: ${{ needs.generate-matrix.outputs.matrix }}
       pre-script: ${{ matrix.pre-script }}
       script: |

.github/workflows/linux-test.yml

+1 -1

@@ -21,7 +21,7 @@ on:
         type: string
       test-infra-repository:
         description: "Test infra repository to use"
-        default: "pytorch/test-infra"
+        default: "gs-olive/test-infra"
         type: string
      test-infra-ref:
         description: "Test infra reference to use"

README.md

+1 -1

@@ -116,7 +116,7 @@ torch.jit.save(trt_ts_module, "trt_torchscript_module.ts") # save the TRT embedd
 These are the following dependencies used to verify the testcases. Torch-TensorRT can work with other versions, but the tests are not guaranteed to pass.
 
 - Bazel 6.2.1
-- Libtorch 2.1.1
+- Libtorch 2.2.0
 - CUDA 12.1
 - cuDNN 8.9.5
 - TensorRT 8.6.1

WORKSPACE

+2 -2

@@ -54,14 +54,14 @@ http_archive(
     name = "libtorch",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.1%2Bcu121.zip"],
+    urls = ["https://download.pytorch.org/libtorch/test/cu121/libtorch-cxx11-abi-shared-with-deps-2.2.0%2Bcu121.zip"],
 )
 
 http_archive(
     name = "libtorch_pre_cxx11_abi",
     build_file = "@//third_party/libtorch:BUILD",
     strip_prefix = "libtorch",
-    urls = ["https://download.pytorch.org/libtorch/cu121/libtorch-shared-with-deps-2.1.1%2Bcu121.zip"],
+    urls = ["https://download.pytorch.org/libtorch/test/cu121/libtorch-shared-with-deps-2.2.0%2Bcu121.zip"],
 )
 
 # Download these tarballs manually from the NVIDIA website

core/conversion/conversionctx/ConversionCtx.cpp

+1 -1

@@ -164,7 +164,7 @@ void ConversionCtx::RecordNewITensor(const torch::jit::Value* value, nvinfer1::I
 
 std::string ConversionCtx::SerializeEngine() {
 #if NV_TENSORRT_MAJOR > 7
-  auto serialized_network = builder->buildSerializedNetwork(*net, *cfg);
+  auto serialized_network = make_trt(builder->buildSerializedNetwork(*net, *cfg));
   if (!serialized_network) {
     TORCHTRT_THROW_ERROR("Building serialized network failed in TensorRT");
   }
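The SerializeEngine change above wraps the raw pointer returned by buildSerializedNetwork so the serialized network is freed once it has been copied out, instead of leaking. The make_trt helper itself is not part of this diff; the following is a minimal, hypothetical sketch of the ownership pattern it presumably implements (a unique_ptr factory with a TensorRT-aware deleter), not the actual Torch-TensorRT definition:

#include <memory>

// Deleter that frees a TensorRT object when ownership ends. TensorRT 8+
// supports plain `delete` on interface objects; earlier releases required
// calling obj->destroy() instead.
struct TRTDeleter {
  template <typename T>
  void operator()(T* obj) const {
    delete obj;
  }
};

template <typename T>
using TRTPtr = std::unique_ptr<T, TRTDeleter>;

// Take ownership of a raw pointer returned by a TensorRT factory call, e.g.
//   auto serialized = make_trt(builder->buildSerializedNetwork(*net, *cfg));
// so the nvinfer1::IHostMemory* is released even on early return or throw.
template <typename T>
TRTPtr<T> make_trt(T* ptr) {
  return TRTPtr<T>(ptr);
}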

core/conversion/converters/impl/conv_deconv.cpp

+86 -61

@@ -10,6 +10,74 @@ namespace converters {
 namespace impl {
 namespace {
 
+void add_output_padding(nvinfer1::Dims& padding, nvinfer1::Dims& out_padding, bool& has_output_padding) {
+  int nbSpatialDims = out_padding.nbDims;
+  // When there is out_padding, if padding is larger than out_padding, just adjust padding Or reduce out_padding as
+  // minimum as possible.
+  for (int i = 0; i < nbSpatialDims; ++i) {
+    if (padding.d[i] - out_padding.d[i] >= 0) {
+      padding.d[i] -= out_padding.d[i];
+      out_padding.d[i] = 0;
+    } else {
+      // Reduce out_padding as possible.
+      out_padding.d[i] -= padding.d[i];
+      padding.d[i] = 0;
+      has_output_padding = true;
+    }
+  }
+}
+
+nvinfer1::ILayer* add_bias_layer(
+    ConversionCtx* ctx,
+    nvinfer1::ITensor* input_tensor,
+    nvinfer1::Dims& input_dims,
+    nvinfer1::Dims& output_padding,
+    Weights& bias) {
+  nvinfer1::ITensor* input_shape = ctx->net->addShape(*input_tensor)->getOutput(0);
+  // Add padding layer
+  nvinfer1::ITensor* start;
+  nvinfer1::ITensor* totalPadding;
+  auto in_nbDims = input_dims.nbDims;
+  std::vector<int32_t> startVec(in_nbDims, 0);
+  std::vector<int32_t> totalPaddingVec(in_nbDims, 0);
+  int32_t diff = in_nbDims - output_padding.nbDims;
+  for (int32_t i = diff; i < in_nbDims; i++) {
+    int32_t idx = i - diff;
+    startVec[i] = 0; // Don't need begin padding, only post padding
+    totalPaddingVec[i] = output_padding.d[idx];
+  }
+  start = tensor_to_const(ctx, torch::tensor(startVec, torch::kInt32));
+  totalPadding = tensor_to_const(ctx, torch::tensor(totalPaddingVec, torch::kInt32));
+
+  const auto size =
+      ctx->net->addElementWise(*input_shape, *totalPadding, nvinfer1::ElementWiseOperation::kSUM)->getOutput(0);
+
+  nvinfer1::Dims stride;
+  stride.nbDims = in_nbDims;
+  for (int64_t i = 0; i < in_nbDims; i++) {
+    stride.d[i] = 1;
+  }
+  const auto& dummy = stride;
+  auto* sliceLayer = ctx->net->addSlice(*input_tensor, dummy, dummy, stride);
+  sliceLayer->setInput(1, *start);
+  sliceLayer->setInput(2, *size);
+  sliceLayer->setMode(nvinfer1::SliceMode::kFILL);
+  nvinfer1::ITensor* slice_output = sliceLayer->getOutput(0);
+
+  nvinfer1::Dims constantDims;
+  constantDims.nbDims = in_nbDims;
+  for (int64_t i = 0; i < in_nbDims; i++) {
+    constantDims.d[i] = 1;
+  }
+  constantDims.d[diff - 1] =
+      bias.shape.d[0]; // Set C dimension to bias dim and other dimensions to 1 to enable broadcast
+  auto const_layer = ctx->net->addConstant(constantDims, bias.data);
+  auto bias_layer =
+      ctx->net->addElementWise(*slice_output, *const_layer->getOutput(0), nvinfer1::ElementWiseOperation::kSUM);
+
+  return bias_layer;
+}
+
 bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args) {
   // Input to conv/deconv
   auto in = args[0].ITensor();
@@ -76,16 +144,29 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
 
   nvinfer1::ILayer* layer = nullptr;
   if (transposed) {
-    nvinfer1::IDeconvolutionLayer* deconvLayer =
-        ctx->net->addDeconvolutionNd(*in, kernel_dims.d[0], filter_dim, kernel_weights, bias.data);
+    // Fix padding based on output_padding provided
+    nvinfer1::Dims begPadding = padding;
+    bool hasOutputPadding = false;
+    add_output_padding(padding, out_padding, hasOutputPadding);
+
+    nvinfer1::IDeconvolutionLayer* deconvLayer = ctx->net->addDeconvolutionNd(
+        *in, kernel_dims.d[0], filter_dim, kernel_weights, hasOutputPadding ? nvinfer1::Weights{} : bias.data);
     deconvLayer->setStrideNd(stride);
     deconvLayer->setDilationNd(dilation);
     deconvLayer->setNbGroups(groups);
-    deconvLayer->setPaddingNd(padding);
+    deconvLayer->setPrePadding(begPadding);
+    deconvLayer->setPostPadding(padding);
+
     // Set deconv kernel weights
     deconvLayer->setInput(1, *kernel);
     TORCHTRT_CHECK(deconvLayer, "Unable to create deconv layer with non-const weights from node: " << *n);
     layer = deconvLayer;
+    if (hasOutputPadding) {
+      LOG_DEBUG("Padding output deconvolution tensor with:" << out_padding);
+      nvinfer1::ITensor* tensorPtr = deconvLayer->getOutput(0);
+      auto dims = in->getDimensions();
+      layer = add_bias_layer(ctx, tensorPtr, dims, out_padding, bias);
+    }
   } else {
     nvinfer1::IConvolutionLayer* convLayer =
         ctx->net->addConvolutionNd(*in, kernel_dims.d[0], filter_dim, kernel_weights, bias.data);
@@ -155,20 +236,7 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
   // https://github.com/onnx/onnx-tensorrt/blob/c3cfcbc8248c6bd007e6630af2085df5e4834b42/builtin_op_importers.cpp#L734
   nvinfer1::Dims begPadding = padding;
   bool hasOutputPadding = false;
-  int nbSpatialDims = out_padding.nbDims;
-  // When there is out_padding, if padding is larger than out_padding, just adjust padding Or reduce out_padding as
-  // minimum as possible.
-  for (int i = 0; i < nbSpatialDims; ++i) {
-    if (padding.d[i] - out_padding.d[i] >= 0) {
-      padding.d[i] -= out_padding.d[i];
-      out_padding.d[i] = 0;
-    } else {
-      // Reduce out_padding as possible.
-      out_padding.d[i] -= padding.d[i];
-      padding.d[i] = 0;
-      hasOutputPadding = true;
-    }
-  }
+  add_output_padding(padding, out_padding, hasOutputPadding);
 
   // shape of deconvolution's weight: [in, out/groups, ...]
   // If there is still output padding, remove the bias. Bias will be added below.
@@ -190,51 +258,8 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
 #endif
   if (hasOutputPadding) {
     LOG_DEBUG("Padding output deconvolution tensor with:" << out_padding);
-
-    // Add padding layer
-    nvinfer1::ITensor* start;
-    nvinfer1::ITensor* totalPadding;
-    auto in_nbDims = orig_dims.nbDims;
-    std::vector<int32_t> startVec(in_nbDims, 0);
-    std::vector<int32_t> totalPaddingVec(in_nbDims, 0);
-    int32_t diff = in_nbDims - out_padding.nbDims;
-    for (int32_t i = diff; i < in_nbDims; i++) {
-      int32_t idx = i - diff;
-      startVec[i] = 0; // Don't need begin padding, only post padding
-      totalPaddingVec[i] = out_padding.d[idx];
-    }
-    start = tensor_to_const(ctx, torch::tensor(startVec, torch::kInt32));
-    totalPadding = tensor_to_const(ctx, torch::tensor(totalPaddingVec, torch::kInt32));
-
     nvinfer1::ITensor* tensorPtr = deconv->getOutput(0);
-    nvinfer1::ITensor* deconvOutShape = ctx->net->addShape(*tensorPtr)->getOutput(0);
-    const auto size =
-        ctx->net->addElementWise(*deconvOutShape, *totalPadding, nvinfer1::ElementWiseOperation::kSUM)->getOutput(0);
-
-    nvinfer1::Dims stride;
-    stride.nbDims = in_nbDims;
-    for (int64_t i = 0; i < in_nbDims; i++) {
-      stride.d[i] = 1;
-    }
-    const auto& dummy = stride;
-    auto* sliceLayer = ctx->net->addSlice(*tensorPtr, dummy, dummy, stride);
-    sliceLayer->setInput(1, *start);
-    sliceLayer->setInput(2, *size);
-    sliceLayer->setMode(nvinfer1::SliceMode::kFILL);
-    tensorPtr = sliceLayer->getOutput(0);
-
-    nvinfer1::Dims constantDims;
-    constantDims.nbDims = in_nbDims;
-    for (int64_t i = 0; i < in_nbDims; i++) {
-      constantDims.d[i] = 1;
-    }
-    constantDims.d[diff - 1] =
-        bias.shape.d[0]; // Set C dimension to bias dim and other dimensions to 1 to enable broadcast
-    auto const_layer = ctx->net->addConstant(constantDims, bias.data);
-    auto add_bias_layer =
-        ctx->net->addElementWise(*tensorPtr, *const_layer->getOutput(0), nvinfer1::ElementWiseOperation::kSUM);
-
-    new_layer = add_bias_layer;
+    new_layer = add_bias_layer(ctx, tensorPtr, orig_dims, out_padding, bias);
   } else {
     new_layer = deconv;
   }
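The conv_deconv refactor above lifts two previously inline blocks into shared helpers: add_output_padding folds output_padding into the deconvolution padding where possible, and add_bias_layer re-applies the bias after an explicit fill-mode slice whenever a remainder survives (the built-in bias is dropped in that case, per the comment in the diff). Below is a minimal standalone sketch of the padding arithmetic, using plain arrays instead of nvinfer1::Dims and a hypothetical adjust_padding name, purely to illustrate the logic:

#include <iostream>

// Fold output_padding into padding where possible; whatever cannot be folded
// stays in out_padding and must be applied as explicit post-padding later.
void adjust_padding(int* padding, int* out_padding, int ndims, bool& has_output_padding) {
  for (int i = 0; i < ndims; ++i) {
    if (padding[i] >= out_padding[i]) {
      padding[i] -= out_padding[i]; // absorb out_padding into the deconv padding
      out_padding[i] = 0;
    } else {
      out_padding[i] -= padding[i]; // keep only the irreducible remainder
      padding[i] = 0;
      has_output_padding = true; // bias must be re-added after explicit padding
    }
  }
}

int main() {
  int padding[2] = {1, 0};
  int out_padding[2] = {1, 1};
  bool has_output_padding = false;
  adjust_padding(padding, out_padding, 2, has_output_padding);
  // padding -> {0, 0}, out_padding -> {0, 1}, has_output_padding -> true
  std::cout << padding[1] << " " << out_padding[1] << " " << has_output_padding << "\n";
}

Running it prints "0 1 1": dimension 0 folds completely into the padding, while dimension 1 keeps one element of post-padding, which is the case where the converter routes the output through add_bias_layer.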
