@@ -282,6 +282,116 @@ auto expand_registrations TORCHTRT_UNUSED =
               auto out = ctx->AssociateValueAndTensor(n->outputs()[0], in);

               LOG_DEBUG("Repeat layer output tensor shape: " << out->getDimensions());
+               return true;
+             }})
+        .pattern(
+            {"aten::repeat_interleave.self_int(Tensor self, int repeats, int? dim=None, *, int? output_size=None) -> (Tensor)",
+             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+               auto self = args[0].ITensorOrFreeze(ctx);
+               auto repeats = args[1].unwrapToScalar().to<int>();
+
+               auto input_shape = self->getDimensions();
+
+               int dim;
+               if (args[2].IValue()->isNone()) {
+                 dim = 0;
+
+                 // Flatten self tensor
+                 int size;
+                 if (ctx->input_is_dynamic) {
+                   // Set size to -1 if input is dynamic
+                   size = -1;
+                 } else {
+                   size = 1;
+                   for (int i = 0; i < input_shape.nbDims; i++) {
+                     size *= input_shape.d[i];
+                   }
+                 }
+                 auto flatten = ctx->net->addShuffle(*self);
+                 TORCHTRT_CHECK(flatten, "Unable to create shuffle layer from node: " << *n);
+                 flatten->setReshapeDimensions(util::toDims(std::vector<int64_t>({size})));
+                 self = flatten->getOutput(0);
+                 input_shape = self->getDimensions();
+               } else {
+                 dim = args[2].unwrapToScalar().to<int>();
+               }
+
+               if (ctx->input_is_dynamic) {
+                 int dynamic_dims = 0;
+                 for (int idx = 0; idx < input_shape.nbDims; idx++) {
+                   if (input_shape.d[idx] == -1) {
+                     dynamic_dims++;
+                   }
+                 }
+
+                 if (dynamic_dims > 1) {
+                   TORCHTRT_THROW_ERROR(
+                       "Repeat_interleave is currently not supported when target shape contains more than one dynamic dimension");
+                 }
+               }
+
+               // Insert singleton dimension after desired repeat dimension
+               std::vector<int64_t> repeat_shape_vec;
+               for (int j = 0; j < input_shape.nbDims; j++) {
+                 repeat_shape_vec.push_back(input_shape.d[j]);
+                 if (j == dim) {
+                   repeat_shape_vec.push_back(1);
+                 }
+               }
+               auto expand = ctx->net->addShuffle(*self);
+               TORCHTRT_CHECK(expand, "Unable to create shuffle layer from node: " << *n);
+               auto repeat_shape_dims = util::toDims(repeat_shape_vec);
+               expand->setReshapeDimensions(repeat_shape_dims);
+
+               // Expand on newly created singleton dimension
+               repeat_shape_dims.d[dim + 1] = repeats;
+               std::vector<int64_t> start_vec(repeat_shape_dims.nbDims, 0);
+               auto start_dims = util::toDims(start_vec);
+
+               std::vector<int64_t> strides_vec(repeat_shape_dims.nbDims, 1);
+               strides_vec[dim + 1] = 0;
+               auto strides_dims = util::toDims(strides_vec);
+
+               auto slice = ctx->net->addSlice(*expand->getOutput(0), start_dims, repeat_shape_dims, strides_dims);
+
+               if (ctx->input_is_dynamic) {
+                 auto start_tensor = tensor_to_const(ctx, torch::tensor(start_vec, torch::kInt32));
+
+                 auto expand_output_shape = ctx->net->addShape(*expand->getOutput(0))->getOutput(0);
+                 std::vector<int64_t> repeat_const_vec(repeat_shape_dims.nbDims, 1);
+                 repeat_const_vec[dim + 1] = repeats;
+                 auto repeat_const = tensor_to_const(ctx, torch::tensor(repeat_const_vec, torch::kInt32));
+                 auto repeat_shape_tensor =
+                     ctx->net
+                         ->addElementWise(*expand_output_shape, *repeat_const, nvinfer1::ElementWiseOperation::kPROD)
+                         ->getOutput(0);
+
+                 auto strides_tensor = tensor_to_const(ctx, torch::tensor(strides_vec, torch::kInt32));
+                 slice->setInput(1, *start_tensor);
+                 slice->setInput(2, *repeat_shape_tensor);
+                 slice->setInput(3, *strides_tensor);
+               }
+
+               // Collapse repeated dimension back into desired dimension
+               std::vector<int64_t> collapse_shape_vec;
+               for (int k = 0; k < repeat_shape_dims.nbDims; k++) {
+                 if (k == dim) {
+                   int64_t collapse_dim = repeat_shape_dims.d[k] * repeat_shape_dims.d[++k];
+                   // Set dim size to -1 if repeat is being done on dynamic dim
+                   collapse_dim = std::max(collapse_dim, (int64_t)-1);
+                   collapse_shape_vec.push_back(collapse_dim);
+                 } else {
+                   collapse_shape_vec.push_back(repeat_shape_dims.d[k]);
+                 }
+               }
+               auto collapse = ctx->net->addShuffle(*slice->getOutput(0));
+               TORCHTRT_CHECK(collapse, "Unable to create shuffle layer from node: " << *n);
+               collapse->setReshapeDimensions(util::toDims(collapse_shape_vec));
+
+               collapse->setName(util::node_info(n).c_str());
+               auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], collapse->getOutput(0));
+               LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+
               return true;
             }});
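For reference, the strategy the converter implements (insert a singleton after `dim`, broadcast it to `repeats`, then fold it back into `dim`; flatten first when `dim=None`) mirrors what `aten::repeat_interleave` computes for a scalar repeat count. Below is a minimal libtorch sketch, not part of the PR and with illustrative names, that checks this equivalence for both paths:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  auto x = torch::arange(6).reshape({2, 3});
  const int64_t repeats = 2;
  const int64_t dim = 1;

  // Reference results from the op itself.
  auto expected = torch::repeat_interleave(x, repeats, dim);
  auto expected_flat = torch::repeat_interleave(x, repeats);  // dim=None path

  // Converter strategy for an explicit dim: singleton after `dim`,
  // broadcast to `repeats`, collapse back into `dim`.
  auto expanded_shape = x.sizes().vec();
  expanded_shape.insert(expanded_shape.begin() + dim + 1, repeats);
  auto collapsed_shape = x.sizes().vec();
  collapsed_shape[dim] *= repeats;
  auto manual = x.unsqueeze(dim + 1).expand(expanded_shape).reshape(collapsed_shape);

  // Converter strategy for dim=None: flatten first, then treat dim as 0.
  auto manual_flat = x.reshape({-1}).unsqueeze(1).expand({x.numel(), repeats}).reshape({-1});

  std::cout << std::boolalpha << torch::equal(expected, manual) << " "
            << torch::equal(expected_flat, manual_flat) << std::endl;  // true true
  return 0;
}
```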
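The broadcast step in the converter leans on TensorRT's `ISliceLayer` accepting a stride of 0 (`strides_vec[dim + 1] = 0`), which re-reads the singleton dimension `repeats` times instead of advancing through memory. A plain C++ sketch of that indexing trick, with made-up sizes and no TensorRT dependency:

```cpp
#include <cstdio>

int main() {
  const int a = 2, r = 2, b = 3;
  // A (2, 1, 3) input, i.e. the tensor after the singleton is inserted.
  int in[a][1][b] = {{{0, 1, 2}}, {{3, 4, 5}}};

  // Reading with stride 0 on the middle axis: output index (i, j, k) maps to
  // input index (i, j * 0, k), so the singleton slice is emitted r times,
  // yielding a broadcast (2, 2, 3) result.
  for (int i = 0; i < a; i++)
    for (int j = 0; j < r; j++)
      for (int k = 0; k < b; k++)
        std::printf("%d%c", in[i][j * 0][k], k == b - 1 ? '\n' : ' ');
  return 0;
}
```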