@@ -22,7 +22,7 @@ bool add_expand(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor
       "Number of dimensions of the desired expansion must be greater than or equal to the number of input dimensions");
 
   // Validate the expansion. Eg: an input of [3, 1] can be expanded to [1, 3, 4] but not [3, 4, 1]
-  for (int i = expandedDims.nbDims - 1; i >= 0; --i) {
+  for (int64_t i = expandedDims.nbDims - 1; i >= 0; --i) {
     int64_t offset = expandedDims.nbDims - 1 - i;
     int64_t dim = input_dims.nbDims - 1 - offset;
     int64_t size = (dim >= 0) ? input_dims.d[dim] : 1;
@@ -41,10 +41,10 @@ bool add_expand(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor
   if (num_expand_dims > 0) {
     nvinfer1::Dims reshape_dims;
     reshape_dims.nbDims = expandedDims.nbDims;
-    for (int i = 0; i < num_expand_dims; i++) {
+    for (int64_t i = 0; i < num_expand_dims; i++) {
       reshape_dims.d[i] = 1;
     }
-    for (int i = 0; i < input_dims.nbDims; i++) {
+    for (int64_t i = 0; i < input_dims.nbDims; i++) {
       reshape_dims.d[num_expand_dims + i] = input_dims.d[i];
     }
     // Add a reshape layer to expand dims
@@ -60,7 +60,7 @@ bool add_expand(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor
 
   // Set the stride of non singleton dimension to 1
   std::vector<int64_t> strides_vec(expandedDims.nbDims, 0);
-  for (int i = 0; i < expandedDims.nbDims; i++) {
+  for (int64_t i = 0; i < expandedDims.nbDims; i++) {
    strides_vec[i] = (in->getDimensions().d[i] != 1);
   }
 
@@ -104,16 +104,16 @@ auto expand_registrations TRTORCH_UNUSED =
                auto input_dims = in->getDimensions();
                auto repeats = args[1].unwrapToIntList().vec();
                TRTORCH_CHECK(
-                   repeats.size() >= input_dims.nbDims,
+                   static_cast<int64_t>(repeats.size()) >= input_dims.nbDims,
                    "Number of repeat dimensions cannot be smaller than number of input dimensions");
                auto num_expand_dims = repeats.size() - input_dims.nbDims;
                if (num_expand_dims > 0) {
                  nvinfer1::Dims reshape_dims;
                  reshape_dims.nbDims = repeats.size();
-                 for (int i = 0; i < num_expand_dims; i++) {
+                 for (size_t i = 0; i < num_expand_dims; i++) {
                    reshape_dims.d[i] = 1;
                  }
-                 for (int i = 0; i < input_dims.nbDims; i++) {
+                 for (int64_t i = 0; i < input_dims.nbDims; i++) {
                    reshape_dims.d[num_expand_dims + i] = input_dims.d[i];
                  }
                  // Add a reshape layer to expand dims
@@ -127,9 +127,9 @@ auto expand_registrations TRTORCH_UNUSED =
 
                // Concat across all repeat axes.
                // TODO: Implementation might not be performant. Explore other strategies to improve performance.
-               for (int i = repeats.size() - 1; i >= 0; --i) {
+               for (int64_t i = repeats.size() - 1; i >= 0; --i) {
                  std::vector<nvinfer1::ITensor*> tensors_vec;
-                 for (int j = 0; j < repeats[i]; j++) {
+                 for (int64_t j = 0; j < repeats[i]; j++) {
                    tensors_vec.push_back(in);
                  }
                  auto concat_layer = ctx->net->addConcatenation(tensors_vec.data(), tensors_vec.size());
@@ -139,7 +139,7 @@ auto expand_registrations TRTORCH_UNUSED =
 
                auto out = ctx->AssociateValueAndTensor(n->outputs()[0], in);
 
-               LOG_DEBUG("Repeat layer output tensor shape: " << in->getDimensions());
+               LOG_DEBUG("Repeat layer output tensor shape: " << out->getDimensions());
 
                return true;
              }});
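
Note on the TRTORCH_CHECK change in the aten::repeat converter: repeats.size() returns an unsigned size_t, while nvinfer1::Dims::nbDims is a signed int, so comparing them directly triggers -Wsign-compare and implicitly converts the signed operand to unsigned. Below is a minimal standalone sketch, not part of the patch, showing how that conversion flips the comparison for negative values (the -1 is a hypothetical stand-in for an invalid dim count):

// sign_compare_sketch.cpp -- standalone illustration, not part of expand.cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int64_t> repeats{2, 2};
  int32_t nbDims = -1; // hypothetical invalid value; Dims::nbDims is signed

  // In `repeats.size() >= nbDims`, nbDims would be implicitly converted to
  // size_t: -1 becomes SIZE_MAX, so the comparison result flips.
  bool unsigned_cmp = repeats.size() >= static_cast<size_t>(nbDims); // false: 2 >= SIZE_MAX
  bool signed_cmp = static_cast<int64_t>(repeats.size()) >= nbDims;  // true:  2 >= -1

  std::printf("unsigned: %d, signed: %d\n", unsigned_cmp, signed_cmp);
  return 0;
}

Casting repeats.size() to int64_t, as the patch does, keeps the comparison in signed arithmetic and silences the warning without changing behavior for valid inputs.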