Commit 51b09d4

fix(aten::expand): Fix compiler warnings in the expand converter
Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>
1 parent f05a550 commit 51b09d4
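
For context, the warnings addressed here are the usual mixed-signedness / mixed-width comparison warnings (for example -Wsign-compare on GCC and Clang) that fire when an int loop index is compared against a bound of a different type, such as the size_t returned by std::vector::size(); widening the indices to int64_t also matches the int64_t arithmetic (offset, dim, size) already used inside the first loop. Below is a minimal, stand-alone sketch of the pattern and the two fixes the commit uses; the names are illustrative only, not taken from the repository.

    // Hypothetical, stand-alone reproduction of the warning pattern; the names are
    // illustrative and not taken from the repository.
    #include <cstdint>
    #include <vector>

    void sketch(const std::vector<int64_t>& repeats, int64_t nbDims) {
      // Before: an int index compared against the size_t result of .size() trips
      // -Wsign-compare ("comparison of integer expressions of different signedness").
      for (int i = 0; i < repeats.size(); i++) {
        (void)repeats[i];
      }

      // Fix 1: give the index the same type as its bound.
      for (size_t i = 0; i < repeats.size(); i++) {
        (void)repeats[i];
      }

      // Fix 2: cast the unsigned quantity to a signed type at the comparison,
      // as the diff does for repeats.size() vs. input_dims.nbDims.
      bool enough_repeats = static_cast<int64_t>(repeats.size()) >= nbDims;
      (void)enough_repeats;
    }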

File tree: 1 file changed, +10 -10 lines

Diff for: core/conversion/converters/impl/expand.cpp

@@ -22,7 +22,7 @@ bool add_expand(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor
       "Number of dimensions of the desired expansion must be greater than or equal to the number of input dimensions");
 
   // Validate the expansion. Eg: an input of [3, 1] can be expanded to [1, 3, 4] but not [3, 4, 1]
-  for (int i = expandedDims.nbDims - 1; i >= 0; --i) {
+  for (int64_t i = expandedDims.nbDims - 1; i >= 0; --i) {
     int64_t offset = expandedDims.nbDims - 1 - i;
     int64_t dim = input_dims.nbDims - 1 - offset;
     int64_t size = (dim >= 0) ? input_dims.d[dim] : 1;
@@ -41,10 +41,10 @@ bool add_expand(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor
   if (num_expand_dims > 0) {
     nvinfer1::Dims reshape_dims;
     reshape_dims.nbDims = expandedDims.nbDims;
-    for (int i = 0; i < num_expand_dims; i++) {
+    for (int64_t i = 0; i < num_expand_dims; i++) {
       reshape_dims.d[i] = 1;
     }
-    for (int i = 0; i < input_dims.nbDims; i++) {
+    for (int64_t i = 0; i < input_dims.nbDims; i++) {
      reshape_dims.d[num_expand_dims + i] = input_dims.d[i];
     }
     // Add a reshape layer to expand dims
@@ -60,7 +60,7 @@ bool add_expand(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor
 
   // Set the stride of non singleton dimension to 1
   std::vector<int64_t> strides_vec(expandedDims.nbDims, 0);
-  for (int i = 0; i < expandedDims.nbDims; i++) {
+  for (int64_t i = 0; i < expandedDims.nbDims; i++) {
     strides_vec[i] = (in->getDimensions().d[i] != 1);
   }
 
@@ -104,16 +104,16 @@ auto expand_registrations TRTORCH_UNUSED =
                     auto input_dims = in->getDimensions();
                     auto repeats = args[1].unwrapToIntList().vec();
                     TRTORCH_CHECK(
-                        repeats.size() >= input_dims.nbDims,
+                        static_cast<int64_t>(repeats.size()) >= input_dims.nbDims,
                         "Number of repeat dimensions cannot be smaller than number of input dimensions");
                     auto num_expand_dims = repeats.size() - input_dims.nbDims;
                     if (num_expand_dims > 0) {
                       nvinfer1::Dims reshape_dims;
                       reshape_dims.nbDims = repeats.size();
-                      for (int i = 0; i < num_expand_dims; i++) {
+                      for (size_t i = 0; i < num_expand_dims; i++) {
                         reshape_dims.d[i] = 1;
                       }
-                      for (int i = 0; i < input_dims.nbDims; i++) {
+                      for (int64_t i = 0; i < input_dims.nbDims; i++) {
                         reshape_dims.d[num_expand_dims + i] = input_dims.d[i];
                       }
                       // Add a reshape layer to expand dims
@@ -127,9 +127,9 @@ auto expand_registrations TRTORCH_UNUSED =
 
                     // Concat across all repeat axes.
                     // TODO: Implementation might not be performant. Explore other strategies to improve performance.
-                    for (int i = repeats.size() - 1; i >= 0; --i) {
+                    for (int64_t i = repeats.size() - 1; i >= 0; --i) {
                       std::vector<nvinfer1::ITensor*> tensors_vec;
-                      for (int j = 0; j < repeats[i]; j++) {
+                      for (int64_t j = 0; j < repeats[i]; j++) {
                         tensors_vec.push_back(in);
                       }
                       auto concat_layer = ctx->net->addConcatenation(tensors_vec.data(), tensors_vec.size());
@@ -139,7 +139,7 @@ auto expand_registrations TRTORCH_UNUSED =
 
                     auto out = ctx->AssociateValueAndTensor(n->outputs()[0], in);
 
-                    LOG_DEBUG("Repeat layer output tensor shape: " << in->getDimensions());
+                    LOG_DEBUG("Repeat layer output tensor shape: " << out->getDimensions());
 
                     return true;
                   }});
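
As a side note on the last two hunks: the aten::repeat path builds its result by walking the repeat axes from last to first and, for each axis, collecting repeats[i] copies of the current tensor into tensors_vec and joining them with addConcatenation. Below is a minimal, hypothetical sketch of that concatenate-to-repeat idea on a flat std::vector; the converter itself operates on nvinfer1::ITensor inside the TensorRT network, so this is only an illustration of the loop structure.

    // Hypothetical illustration only: repeat a 1-D sequence r times by concatenating
    // r copies, mirroring the tensors_vec / addConcatenation pattern in the converter.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    std::vector<int64_t> repeat_by_concat(const std::vector<int64_t>& in, int64_t r) {
      std::vector<int64_t> out;
      out.reserve(in.size() * static_cast<size_t>(r));
      for (int64_t j = 0; j < r; j++) { // same loop shape as the inner repeat loop in the diff
        out.insert(out.end(), in.begin(), in.end());
      }
      return out;
    }

    int main() {
      for (auto v : repeat_by_concat({1, 2, 3}, 2)) {
        std::cout << v << " "; // prints: 1 2 3 1 2 3
      }
      std::cout << "\n";
      return 0;
    }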
