diff --git a/examples/fx/quantized_resnet_test.py b/examples/fx/quantized_resnet_test.py index c25691b95b..c725c27aad 100644 --- a/examples/fx/quantized_resnet_test.py +++ b/examples/fx/quantized_resnet_test.py @@ -49,7 +49,7 @@ def build_int8_trt(rn18): # uncomment to check per channel quant works weight=torch.quantization.default_per_channel_weight_observer, ) - prepared = prepare_fx(rn18, {"": qconfig}) + prepared = prepare_fx(rn18, {"": qconfig}, data) for _ in range(10): prepared(data) quantized_rn18 = convert_to_reference(prepared) diff --git a/py/torch_tensorrt/fx/lower.py b/py/torch_tensorrt/fx/lower.py index 763ffdc653..9f8ec7865c 100644 --- a/py/torch_tensorrt/fx/lower.py +++ b/py/torch_tensorrt/fx/lower.py @@ -104,10 +104,13 @@ def __call__(self, mod, input, split_name) -> TRTInterpreterResult: ), self.lower_setting.opt_profile_replica, ) - if self.lower_setting.explicit_batch_dimension and self.lower_setting.dynamic_batch + if self.lower_setting.explicit_batch_dimension + and self.lower_setting.dynamic_batch else InputTensorSpec.from_tensors(input) ) ) + logger.info(f"{split_name=} {input_specs_val=}") + # Prepare algorithm selector and timing_cache for TRTInterpreter algo_selector = None if self.lower_setting.algo_selector: diff --git a/py/torch_tensorrt/fx/lower_setting.py b/py/torch_tensorrt/fx/lower_setting.py index 6695c8ff85..50a0b5f32a 100644 --- a/py/torch_tensorrt/fx/lower_setting.py +++ b/py/torch_tensorrt/fx/lower_setting.py @@ -64,11 +64,7 @@ class LowerSetting(LowerSettingBasic): cache file is provided. cuda_graph_batch_size (int): Cuda graph batch size, default to be -1. preset_lowerer (str): when specified, use a preset logic to build the - instance of Lowerer. Refer to - `caffe2.torch.fb.model_transform.fx2trt.presets.LowererPresetsManager` on - how presets are applied. Refer to - `caffe2.torch.fb.model_transform.fx2trt.presets.ESUHMLowererPreset` on how - to add a preset. + instance of Lowerer. opt_profile_replica (int): the number of opt profile set for TensorRT engine, this field is only used by explicit batch dim with dynamic shape mode. dynamic_batch: enable the dynamic shape in TRT with dim=-1 for the 1st dimension. diff --git a/py/torch_tensorrt/fx/passes/pass_utils.py b/py/torch_tensorrt/fx/passes/pass_utils.py index 5f9867fe8c..2018433599 100644 --- a/py/torch_tensorrt/fx/passes/pass_utils.py +++ b/py/torch_tensorrt/fx/passes/pass_utils.py @@ -63,14 +63,17 @@ def pass_with_validation( y = y.cpu() accuracy_check = torch.allclose(x, y, **kwargs) if not accuracy_check: + _LOGGER.error( + f"Pass {pass_} failed correctness check, get original model output as {x} and processed model output as {y} for output {kk}." + ) if suppress_accuracy_check_failure: _LOGGER.error( - f"pass {pass_} failed correctness check due to output {kk}, escape current pass." + f"Pass {pass_} failed correctness check due to output {kk}." 
) return processed_module else: raise RuntimeError( - f"pass {pass_} failed correctness check due to output {kk}" + f"Pass {pass_} failed correctness check due to output {kk}" ) return processed_module diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_dequantize.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_dequantize.py index 9d9a8e4c66..7f32b749c5 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_dequantize.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_dequantize.py @@ -45,6 +45,24 @@ def forward(self, x): TestModule(), input_specs, expected_ops={acc_ops.dequantize} ) + def test_dequantize_with_dynamic_shape_four_dimensions(self): + class TestModule(nn.Module): + def forward(self, x): + x = torch.quantize_per_tensor(x, 1, 0, torch.quint8) + return x.dequantize() + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.dequantize} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_einsum.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_einsum.py index cd28becdca..efc2c97a92 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_einsum.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_einsum.py @@ -3,7 +3,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestConverter(AccTestCase): @@ -30,6 +30,37 @@ def forward(self, x, y): test_implicit_batch_dim=False, ) + @parameterized.expand( + [ + ("4d_dim", "bcwd,bcdh->bcwh", (2, 3, 4, 5), (2, 3, 5, 6)), + ("4d_dim_ext", "bcxd,bcyd->bcxy", (2, 3, 4, 5), (2, 3, 6, 5)), + # TRT does not support ellipsis or diagonal operations + ] + ) + def test_einsum_with_dynamic_shape_four_dimensions( + self, _, equation, x_size, y_size + ): + class Einsum(nn.Module): + def forward(self, x, y): + return torch.einsum(equation, x, y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 3, 3), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 3, 3), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + Einsum(), input_specs, expected_ops={acc_ops.einsum} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_elu.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_elu.py index b42df203c1..1482654cfd 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_elu.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_elu.py @@ -30,6 +30,23 @@ def forward(self, x): TestModule(), input_specs, expected_ops={acc_ops.elu} ) + def test_elu_with_dynamic_shape_four_dimensions(self): + class TestModule(nn.Module): + def forward(self, x): + return nn.functional.elu(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 5), (3, 3, 3, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.elu} + ) + if __name__ == "__main__": run_tests() diff --git 
a/py/torch_tensorrt/fx/test/converters/acc_op/test_embedding.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_embedding.py index f75620801c..19a867d78d 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_embedding.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_embedding.py @@ -5,7 +5,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import param, parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec @unittest.skip( @@ -62,6 +62,46 @@ def forward(self, indices, weights): test_explicit_batch_dim=True, ) + def test_embedding_with_dynamic_shape_four_dimensions( + self, + test_name, + indices_tensor, + weights_tensor, + padding_idx=None, + max_norm=None, + norm_type=2.0, + scale_grad_by_freq=False, + sparse=False, + ): + class TestEmbedding(torch.nn.Module): + def forward(self, indices, weights): + return torch.nn.functional.embedding( + input=indices, + weight=weights, + padding_idx=padding_idx, + max_norm=max_norm, + norm_type=norm_type, + scale_grad_by_freq=scale_grad_by_freq, + sparse=sparse, + ) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + TestEmbedding(), input_specs, expected_ops={acc_ops.embedding} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_eq.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_eq.py index f74a70e614..257375c7ca 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_eq.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_eq.py @@ -2,7 +2,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestEqConverter(AccTestCase): @@ -184,6 +184,28 @@ def forward(self, x, y): ) +class TestEqOperatorSimpleConverterWithDynamicShape(AccTestCase): + def test_eq(self): + class Eq(torch.nn.Module): + def forward(self, x, y): + return x == y + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape(Eq(), input_specs, expected_ops={acc_ops.eq}) + + class TestEqOperatorConstantConverter(AccTestCase): @parameterized.expand( [ @@ -243,5 +265,25 @@ def forward(self, x): ) +class TestConstInputConverterWithDynamicShape(AccTestCase): + def test_eq(self): + class Eq(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return x.shape[0] == 4 + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape(Eq(), input_specs, expected_ops={acc_ops.eq}) + + if __name__ == "__main__": run_tests() diff --git 
a/py/torch_tensorrt/fx/test/converters/acc_op/test_gelu.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_gelu.py index 0f0e069841..7fdd5da3c7 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_gelu.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_gelu.py @@ -35,6 +35,23 @@ def forward(self, x): TestModule(), input_specs, expected_ops={acc_ops.gelu} ) + def test_gelu_with_dynamic_shape_four_dimensions(self): + class TestModule(nn.Module): + def forward(self, x): + return nn.functional.gelu(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.gelu} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_getitem.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_getitem.py index 484d8d5622..88279996c5 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_getitem.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_getitem.py @@ -148,6 +148,52 @@ def forward(self, x): Getitem(idx), input_specs, expected_ops={acc_ops.getitem} ) + # Testing with following parameters results into Error: + # AssertionError: We don't support slicing tensor on dynamic shape. + + """ + ("ellipsis", (slice(None, None, None), ..., slice(0, -3, 2))), + ( + "slice_end_none", + (slice(None, None, None), slice(None, None, None), slice(1, None, 1)), + ), + ( + "slice_step_none", + (slice(None, None, None), slice(None, None, None), slice(0, 3, None)), + ), + """ + + @parameterized.expand( + [ + ("slice_batch_dim", slice(None, None, None)), + ( + "slice_all_none", + (slice(None, None, None), slice(None, None, None)), + ), + ] + ) + def test_getitem_with_dynamic_shape_four_dimensions(self, _, idx): + class Getitem(nn.Module): + def __init__(self, idx): + super().__init__() + self.idx = idx + + def forward(self, x): + x = x + x + return x[self.idx] + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (3, 3, 3, 3), (5, 5, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + Getitem(idx), input_specs, expected_ops={acc_ops.getitem} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_gt.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_gt.py index 0e8be9c311..4dc725e9f7 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_gt.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_gt.py @@ -2,7 +2,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestGtConverter(AccTestCase): @@ -169,6 +169,30 @@ def forward(self, x, y): ) +class TestEqOperatorSimpleConverterWithDynamicShape(AccTestCase): + def test_eq( + self, + ): + class Eq(torch.nn.Module): + def forward(self, x, y): + return x > y + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape(Eq(), input_specs, 
expected_ops={acc_ops.gt}) + + class TestEqOperatorConstantConverter(AccTestCase): @parameterized.expand( [ @@ -228,5 +252,25 @@ def forward(self, x): ) +class TestConstInputConverterWithDynamicShape(AccTestCase): + def test_gt(self): + class Gt(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return x.shape[0] > 4 + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (3, 3, 3, 3), (5, 5, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape(Gt(), input_specs, expected_ops={acc_ops.gt}) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_hard_sigmoid.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_hard_sigmoid.py index b5d27db5cd..ad0c9bd0fe 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_hard_sigmoid.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_hard_sigmoid.py @@ -38,6 +38,22 @@ def forward(self, x): Hardsigmoid(), input_specs, expected_ops={acc_ops.hardsigmoid} ) + def test_hardsigmoid_with_dynamic_shape_four_dimensions(self): + class Hardsigmoid(nn.Module): + def forward(self, x): + return torch.nn.functional.hardsigmoid(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + self.run_test_with_dynamic_shape( + Hardsigmoid(), input_specs, expected_ops={acc_ops.hardsigmoid} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_hardtanh.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_hardtanh.py index f4d6ec01d4..c1d2ed650e 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_hardtanh.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_hardtanh.py @@ -3,7 +3,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestHardtanhConverter(AccTestCase): @@ -25,5 +25,33 @@ def forward(self, x): self.run_test(Hardtanh(), inputs, expected_ops={acc_ops.hardtanh}) +class TestHardtanhConverterWithDynamicShape(AccTestCase): + @parameterized.expand( + [ + (-2.0, 6), + (0, 1), + (0.5, 7), + ] + ) + def test_hardtanh(self, test_min_value, test_max_value): + class Hardtanh(nn.Module): + def forward(self, x): + return nn.functional.hardtanh( + x, min_val=test_min_value, max_val=test_max_value + ) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + Hardtanh(), input_specs, expected_ops={acc_ops.hardtanh} + ) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_isinf.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_isinf.py index e75ec16c35..d8ec10a71b 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_isinf.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_isinf.py @@ -4,7 +4,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec 
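Note (editorial, not part of the diff): the dynamic-shape tests added throughout this change all follow the same pattern. Every dynamic dimension is marked with -1 in shape, and each entry in shape_ranges is a (min, opt, max) shape triple that seeds the TensorRT optimization profile. A minimal, self-contained sketch of that pattern, with an illustrative module and sizes:

    import torch
    import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
    from torch.testing._internal.common_utils import run_tests
    from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec


    class TestReluConverterWithDynamicShapeSketch(AccTestCase):
        def test_relu(self):
            class TestModule(torch.nn.Module):
                def forward(self, x):
                    return torch.nn.functional.relu(x)

            input_specs = [
                InputTensorSpec(
                    # -1 marks a dimension as dynamic
                    shape=(-1, -1, -1, -1),
                    dtype=torch.float32,
                    # one (min, opt, max) shape triple per TRT optimization profile
                    shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))],
                ),
            ]

            self.run_test_with_dynamic_shape(
                TestModule(), input_specs, expected_ops={acc_ops.relu}
            )


    if __name__ == "__main__":
        run_tests()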
@unittest.skip("Implementation is commented out due to accuracy issue T113156424") @@ -41,6 +41,23 @@ def forward(self, x): Test(), inputs, expected_ops={acc_ops.isinf}, test_implicit_batch_dim=False ) + def test_isinf_large_with_dynamic_shape_four_dimensions(self): + class Test(torch.nn.Module): + def forward(self, x): + return torch.isinf(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + Test(), input_specs, expected_ops={acc_ops.isinf} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_leaky_relu.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_leaky_relu.py index 5cc1ad4294..0df494baac 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_leaky_relu.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_leaky_relu.py @@ -30,6 +30,23 @@ def forward(self, x): TestModule(), input_specs, expected_ops={acc_ops.leaky_relu} ) + def test_leaky_relu_with_dynamic_shape_four_dimensions(self): + class TestModule(nn.Module): + def forward(self, x): + return nn.functional.leaky_relu(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.leaky_relu} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_linear.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_linear.py index 4841e77bf4..c33b5136d6 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_linear.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_linear.py @@ -52,6 +52,9 @@ def forward(self, x): expected_ops={acc_ops.linear}, ) + # Testing with (-1, -1, 512) results into following error: + # AssertionError: Currently we only support one dynmaic dim for linear and it can't be the last dim. 
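For reference (editorial, not part of the diff): given the assertion quoted above, a linear test that stays within the converter's limits would keep exactly one dynamic dimension and leave the last dimension static. A sketch of such a spec follows; the layer and sizes are illustrative and untested:

    import torch
    import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
    from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec


    class TestLinearConverterWithDynamicBatchSketch(AccTestCase):
        def test_linear_dynamic_batch(self):
            class TestModule(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.linear = torch.nn.Linear(512, 256)

                def forward(self, x):
                    return self.linear(x)

            input_specs = [
                InputTensorSpec(
                    # only the first (batch) dimension is dynamic; the last
                    # dimension stays static, as required by the assertion above
                    shape=(-1, 3, 512),
                    dtype=torch.float32,
                    shape_ranges=[((1, 3, 512), (2, 3, 512), (4, 3, 512))],
                ),
            ]

            self.run_test_with_dynamic_shape(
                TestModule(), input_specs, expected_ops={acc_ops.linear}
            )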
+ if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_and.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_and.py index dac1a5da1a..9ca6e176a5 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_and.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_and.py @@ -2,7 +2,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestAndMethodSimpleConverter(AccTestCase): @@ -58,6 +58,30 @@ def forward(self, x, y): ) +class TestAndMethodSimpleConverterWithDynamicShape(AccTestCase): + def test_and(self): + class And(torch.nn.Module): + def forward(self, x, y): + return x.logical_and(y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + And(), input_specs, expected_ops={acc_ops.logical_and} + ) + + class TestAndFunctionSimpleConverter(AccTestCase): @parameterized.expand( [ @@ -178,5 +202,29 @@ def forward(self, x): ) +class TestAndFunctionSimpleConverterWithDynamicShape(AccTestCase): + def test_and(self): + class And(torch.nn.Module): + def forward(self, x, y): + return torch.logical_and(x, y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.bool, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.bool, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + And(), input_specs, expected_ops={acc_ops.logical_and} + ) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_or.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_or.py index aaf5879fa8..7dba20b214 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_or.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_or.py @@ -2,7 +2,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestLogicalOrMethodSimpleConverter(AccTestCase): @@ -45,6 +45,30 @@ def forward(self, x, y): ) +class TestLogicalOrMethodSimpleConverterWithDynamicShape(AccTestCase): + def test_logical_or(self): + class LogicalOr(torch.nn.Module): + def forward(self, x, y): + return x.logical_or(y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + LogicalOr(), input_specs, expected_ops={acc_ops.logical_or} + ) + + class TestLogicalOrFunctionSimpleConverter(AccTestCase): @parameterized.expand( [ @@ -85,6 +109,30 @@ def forward(self, x, y): ) +class 
TestLogicalOrFunctionSimpleConverterWithDynamicShape(AccTestCase): + def test_logical_or(self): + class LogicalOr(torch.nn.Module): + def forward(self, x, y): + return torch.logical_or(x, y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + LogicalOr(), input_specs, expected_ops={acc_ops.logical_or} + ) + + class TestLogicalOrOperatorSimpleConverter(AccTestCase): @parameterized.expand( [ @@ -125,5 +173,29 @@ def forward(self, x, y): ) +class TestLogicalOrOperatorSimpleConverterWithDynamicShape(AccTestCase): + def test_logical_or(self): + class LogicalOr(torch.nn.Module): + def forward(self, x, y): + return x | y + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.bool, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.bool, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + LogicalOr(), input_specs, expected_ops={acc_ops.logical_or} + ) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_xor.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_xor.py index d2c459cf84..54b0490a57 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_xor.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_logical_xor.py @@ -2,7 +2,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestLogicalXorMethodSimpleConverter(AccTestCase): @@ -45,6 +45,30 @@ def forward(self, x, y): ) +class TestLogicalXorMethodSimpleConverterWithDynamicShape(AccTestCase): + def test_logical_xor(self): + class LogicalXor(torch.nn.Module): + def forward(self, x, y): + return x.logical_xor(y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + LogicalXor(), input_specs, expected_ops={acc_ops.logical_xor} + ) + + class TestLogicalXorFunctionSimpleConverter(AccTestCase): @parameterized.expand( [ @@ -85,6 +109,30 @@ def forward(self, x, y): ) +class TestLogicalXorFunctionSimpleConverterWithDynamicShape(AccTestCase): + def test_logical_xor(self): + class LogicalXor(torch.nn.Module): + def forward(self, x, y): + return torch.logical_xor(x, y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + LogicalXor(), input_specs, expected_ops={acc_ops.logical_xor} + ) + + class TestLogicalXorOperatorSimpleConverter(AccTestCase): @parameterized.expand( [ @@ -125,5 +173,29 @@ def forward(self, x, y): ) +class 
TestLogicalXorOperatorSimpleConverterWithDynamicShape(AccTestCase): + def test_logical_xor(self): + class LogicalXor(torch.nn.Module): + def forward(self, x, y): + return x ^ y + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.bool, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.bool, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + LogicalXor(), input_specs, expected_ops={acc_ops.logical_xor} + ) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_lt.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_lt.py index c2edffc3ec..7184e80656 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_lt.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_lt.py @@ -2,7 +2,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestLtConverter(AccTestCase): @@ -169,6 +169,28 @@ def forward(self, x, y): ) +class TestEqOperatorSimpleConverterWithDynamicShape(AccTestCase): + def test_eq(self): + class Eq(torch.nn.Module): + def forward(self, x, y): + return x < y + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape(Eq(), input_specs, expected_ops={acc_ops.lt}) + + class TestEqOperatorConstantConverter(AccTestCase): @parameterized.expand( [ @@ -228,5 +250,25 @@ def forward(self, x): ) +class TestConstInputConverterWithDynamicShape(AccTestCase): + def test_lt(self): + class Lt(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return x.shape[0] < 4 + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ) + ] + + self.run_test_with_dynamic_shape(Lt(), input_specs, expected_ops={acc_ops.lt}) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_max.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_max.py index 711939a6c1..746e61cb30 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_max.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_max.py @@ -2,7 +2,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestMaxConverter(AccTestCase): @@ -78,5 +78,54 @@ def forward(self, input, other): self.run_test(MaxMethod(), inputs, expected_ops={acc_ops.maximum}) +class TestMaxConverterWithDynamicShape(AccTestCase): + def test_max_full_reduce( + self, + ): + class MaxFullReduce(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.max(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 
5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + MaxFullReduce(), input_specs, expected_ops={acc_ops.max_full_reduce} + ) + + def test_max_method(self): + class MaxMethod(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, input, other): + return input.max(other) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + MaxMethod(), input_specs, expected_ops={acc_ops.maximum} + ) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_maximum.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_maximum.py index ad9a355063..3924173911 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_maximum.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_maximum.py @@ -1,7 +1,7 @@ import torch import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestMaximumConverter(AccTestCase): @@ -17,6 +17,30 @@ def forward(self, x, y): self.run_test(Maximum(), inputs, expected_ops={acc_ops.maximum}) +class TestMaximumConverterWithDynamicShape(AccTestCase): + def test_maximum(self): + class Maximum(torch.nn.Module): + def forward(self, x, y): + return torch.maximum(x, y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + Maximum(), input_specs, expected_ops={acc_ops.maximum} + ) + + class TestMaximumMethodConverter(AccTestCase): def test_maximum(self): class Maximum(torch.nn.Module): @@ -30,5 +54,29 @@ def forward(self, x, y): self.run_test(Maximum(), inputs, expected_ops={acc_ops.maximum}) +class TestMaximumMethodConverterWithDynamicShape(AccTestCase): + def test_maximum(self): + class Maximum(torch.nn.Module): + def forward(self, x, y): + return x.maximum(y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + Maximum(), input_specs, expected_ops={acc_ops.maximum} + ) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_maxpool.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_maxpool.py index 8d54b43184..024452e8e5 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_maxpool.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_maxpool.py @@ -259,6 +259,121 @@ def forward(self, x): inputs = [torch.randn(1, 3, 32, 32, 32)] self.run_test(TestModule(), inputs, expected_ops={acc_ops.max_pool3d}) + @parameterized.expand( + [ + ("default", 1), + param("stride", 2, stride=()), + ] + ) + def test_stride_none_max_pool1d_with_dynamic_shape( + self, + test_name, + kernel_size, + 
stride=None, + padding=0, + dilation=1, + ceil_mode=False, + ): + class TestModule(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.nn.functional.max_pool1d( + x, + kernel_size, + stride=stride, + padding=padding, + ceil_mode=ceil_mode, + dilation=dilation, + ) + + # shape is not set to (-1, -1, -1) as reshape dimension with + # more than one -1 wildcard is not allowed while adding unsqueeze layer + input_specs = [ + InputTensorSpec( + shape=(1, 1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1), (1, 1, 4), (1, 1, 4))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), + input_specs, + expected_ops={acc_ops.max_pool1d}, + ) + + @parameterized.expand( + [ + ("default", 1), + param("stride", 2, stride=()), + ] + ) + def test_stride_none_max_pool2d_with_dynamic_shape( + self, + test_name, + kernel_size, + stride=None, + padding=0, + ceil_mode=False, + ): + class TestModule(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.nn.functional.max_pool2d( + x, kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode + ) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 4, 4), (2, 4, 4, 4))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.max_pool2d} + ) + + @parameterized.expand( + [ + ("default", 1), + param("stride", 2, stride=()), + ] + ) + def test_stride_none_max_pool3d_with_dynamic_shape( + self, + test_name, + kernel_size, + stride=None, + padding=0, + ceil_mode=False, + ): + class TestModule(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.nn.functional.max_pool3d( + x, kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode + ) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1, 1), (1, 2, 4, 4, 4), (2, 4, 4, 4, 4))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.max_pool3d} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_min.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_min.py index cac8d5778c..c88c08cfb1 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_min.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_min.py @@ -2,7 +2,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestMinConverter(AccTestCase): @@ -78,5 +78,54 @@ def forward(self, input, other): self.run_test(MinMethod(), inputs, expected_ops={acc_ops.minimum}) +class TestMinConverterWithDynamicShape(AccTestCase): + def test_min_full_reduce( + self, + ): + class MinFullReduce(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return torch.min(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + MinFullReduce(), input_specs, expected_ops={acc_ops.min_full_reduce} + ) + + def test_min_method(self): + class MinMethod(torch.nn.Module): + def __init__(self): + 
super().__init__() + + def forward(self, input, other): + return input.min(other) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + MinMethod(), input_specs, expected_ops={acc_ops.minimum} + ) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_minimum.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_minimum.py index 1737fd766b..e0bd2ee94f 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_minimum.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_minimum.py @@ -1,7 +1,7 @@ import torch import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestMinimumConverter(AccTestCase): @@ -30,5 +30,53 @@ def forward(self, x, y): self.run_test(Minimum(), inputs, expected_ops={acc_ops.minimum}) +class TestMinimumConverterWithDynamicShape(AccTestCase): + def test_minimum(self): + class Minimum(torch.nn.Module): + def forward(self, x, y): + return torch.minimum(x, y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + Minimum(), input_specs, expected_ops={acc_ops.minimum} + ) + + +class TestMinimumMethodConverterWithDynamicShape(AccTestCase): + def test_minimum(self): + class Minimum(torch.nn.Module): + def forward(self, x, y): + return x.minimum(y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + Minimum(), input_specs, expected_ops={acc_ops.minimum} + ) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_narrow.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_narrow.py index 6c212e4911..15243cb259 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_narrow.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_narrow.py @@ -27,5 +27,33 @@ def forward(self, x): ) +# Testing with (-1, -1, -1 , -1) results in following error: +# AssertionError: Can't chunk on dynamic shape dimension! 
+""" +class TestNarrowConverterWithDynamicShape(AccTestCase): + @parameterized.expand( + [ + ("positive_dim", 1, 0, 1), + ("negative_dim", -1, 1, 2), + ] + ) + def test_narrow(self, _, dim, start, length): + class Narrow(nn.Module): + def forward(self, x): + return x.narrow(dim, start, length) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + Narrow(), input_specs, expected_ops={acc_ops.slice_tensor} + ) +""" + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_ne.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_ne.py index a65ef8f724..0e0e8f70d9 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_ne.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_ne.py @@ -2,7 +2,7 @@ import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops from parameterized import parameterized from torch.testing._internal.common_utils import run_tests -from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase +from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec class TestNeFunctionConverter(AccTestCase): @@ -55,6 +55,28 @@ def forward(self, x, y): ) +class TestNeFunctionConverterWithDynamicShape(AccTestCase): + def test_ne(self): + class Ne(torch.nn.Module): + def forward(self, x, y): + return torch.ne(x, y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape(Ne(), input_specs, expected_ops={acc_ops.ne}) + + class TestNeMethodConverter(AccTestCase): @parameterized.expand( [ @@ -105,6 +127,28 @@ def forward(self, x, y): ) +class TestNeMethodConverterWithDynamicShape(AccTestCase): + def test_ne(self): + class Ne(torch.nn.Module): + def forward(self, x, y): + return x.ne(y) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape(Ne(), input_specs, expected_ops={acc_ops.ne}) + + class TestNeOperatorConverter(AccTestCase): @parameterized.expand( [ @@ -155,6 +199,28 @@ def forward(self, x, y): ) +class TestNeOperatorConverterWithDynamicShape(AccTestCase): + def test_ne(self): + class Ne(torch.nn.Module): + def forward(self, x, y): + return x != y + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape(Ne(), input_specs, expected_ops={acc_ops.ne}) + + class TestNeOperatorConstantConverter(AccTestCase): @parameterized.expand( [ @@ -214,5 +280,25 @@ def forward(self, x): ) +class TestConstInputConverterWithDynamicShape(AccTestCase): + def test_ne(self): + class Ne(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return x.shape[0] != 4 + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + 
dtype=torch.float32, + shape_ranges=[((1, 1, 5, 5), (2, 3, 5, 5), (2, 3, 5, 5))], + ), + ] + + self.run_test_with_dynamic_shape(Ne(), input_specs, expected_ops={acc_ops.ne}) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_new_ones.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_new_ones.py index f39357998b..206d088a55 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_new_ones.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_new_ones.py @@ -46,5 +46,58 @@ def forward(self, x): ) +class TestNewOnesConverterWithDynamicShape(AccTestCase): + def test_newone(self): + class TestModule(nn.Module): + def forward(self, x): + return x.new_ones((3, 5), dtype=torch.float16) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.new_ones} + ) + + def test_newone_no_dtype(self): + class TestModule(nn.Module): + def forward(self, x): + return x.new_ones((3, 5)) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.new_ones} + ) + + def test_newone_device(self): + class TestModule(nn.Module): + def forward(self, x): + return x.new_ones((3, 5), device="cuda") + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.new_ones} + ) + + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_numel.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_numel.py index d074852448..37ad14731d 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_numel.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_numel.py @@ -15,5 +15,27 @@ def forward(self, x): self.run_test(Numel(), inputs, expected_ops={acc_ops.numel}) +# Testing with (-1, -1, -1 , -1) results in following error: +# RuntimeError: numel does not support dynamic shapes. 
+""" +class TestNumelConverterWithDynamicShape(AccTestCase): + def test_numel(self): + class Numel(nn.Module): + def forward(self, x): + return torch.numel(x) * x + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], + ), + ] + + self.run_test_with_dynamic_shape( + Numel(), input_specs, expected_ops={acc_ops.numel} + ) +""" + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_permute.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_permute.py index 9916da6953..4e85248b8c 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_permute.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_permute.py @@ -65,6 +65,23 @@ def forward(self, x): Permute(), input_specs, expected_ops={acc_ops.permute} ) + def test_permute_with_dynamic_shape_four_dimensions(self): + class Permute(nn.Module): + def forward(self, x): + return x.permute(1, 2, 3, 0) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 5), (1, 2, 3, 5), (3, 3, 3, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + Permute(), input_specs, expected_ops={acc_ops.permute} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_quantize_per_tensor.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_quantize_per_tensor.py index 2f4758837d..c7b050c4ac 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_quantize_per_tensor.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_quantize_per_tensor.py @@ -43,6 +43,23 @@ def forward(self, x): TestModule(), input_specs, expected_ops={acc_ops.quantize_per_tensor} ) + def test_quantize_per_tensor_with_dynamic_shape_four_dimensions(self): + class TestModule(nn.Module): + def forward(self, x): + return torch.quantize_per_tensor(x, 1, 0, torch.quint8) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.quantize_per_tensor} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_relu.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_relu.py index 2d89d5026b..0ef2558ca0 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_relu.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_relu.py @@ -30,6 +30,23 @@ def forward(self, x): TestModule(), input_specs, expected_ops={acc_ops.relu} ) + def test_relu_with_dynamic_shape_four_dimensions(self): + class TestModule(nn.Module): + def forward(self, x): + return nn.functional.relu(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 5), (1, 2, 3, 5), (3, 3, 3, 5))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.relu} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_reshape.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_reshape.py index 86e12c18c3..4776ed7a95 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_reshape.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_reshape.py @@ -50,6 +50,33 @@ def forward(self, x): TestModule(target_shape), input_specs, expected_ops={acc_ops.reshape} ) + @parameterized.expand( + [ + ((-1, 
2),), + ((1, 2, -1),), + ] + ) + def test_reshape_with_dynamic_shape_with_four_dimensions(self, target_shape): + class TestModule(torch.nn.Module): + def __init__(self, target_shape): + super().__init__() + self.target_shape = target_shape + + def forward(self, x): + return torch.reshape(x, self.target_shape) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(target_shape), input_specs, expected_ops={acc_ops.reshape} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_selu.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_selu.py index b4c7e3868f..955ddc82f7 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_selu.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_selu.py @@ -30,6 +30,23 @@ def forward(self, x): TestModule(), input_specs, expected_ops={acc_ops.selu} ) + def test_selu_with_dynamic_shape_four_dimensions(self): + class TestModule(nn.Module): + def forward(self, x): + return nn.functional.selu(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.selu} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_silu.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_silu.py index dd5d3b5b0d..38d8f5b645 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_silu.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_silu.py @@ -30,6 +30,23 @@ def forward(self, x): Silu(), input_specs, expected_ops={acc_ops.sigmoid, acc_ops.mul} ) + def test_silu_with_dynamic_shape_four_dimensions(self): + class Silu(nn.Module): + def forward(self, x): + return torch.nn.functional.silu(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + Silu(), input_specs, expected_ops={acc_ops.sigmoid, acc_ops.mul} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_size.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_size.py index 3c3881d7ce..f7e55b12f6 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_size.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_size.py @@ -48,6 +48,24 @@ def forward(self, x): Size(), input_specs, expected_ops={acc_ops.size} ) + def test_size_dynamic_shape_four_dimensions(self): + class Size(nn.Module): + def forward(self, x): + bs = x.size(0) + return x.view(bs, -1) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 12, 32, 3), (3, 12, 32, 3), (100, 12, 32, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + Size(), input_specs, expected_ops={acc_ops.size} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_softmax.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_softmax.py index 0aea850546..eca8a01607 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_softmax.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_softmax.py @@ -43,6 +43,23 @@ def forward(self, x): Softmax(), input_specs, expected_ops={acc_ops.softmax} ) + def 
test_softmax_with_dynamic_shape_four_dimensions(self): + class Softmax(nn.Module): + def forward(self, x): + return nn.functional.softmax(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + Softmax(), input_specs, expected_ops={acc_ops.softmax} + ) + def test_softmax_with_implicit_batch_dim0_fail(self): class Softmax(nn.Module): def __init__(self): diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_softsign.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_softsign.py index 8ad72002ab..5f1b907bac 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_softsign.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_softsign.py @@ -30,6 +30,23 @@ def forward(self, x): TestModule(), input_specs, expected_ops={acc_ops.softsign} ) + def test_softsign_with_dynamic_shape_four_dimensions(self): + class TestModule(nn.Module): + def forward(self, x): + return nn.functional.softsign(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 1), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + TestModule(), input_specs, expected_ops={acc_ops.softsign} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/test/converters/acc_op/test_tanh.py b/py/torch_tensorrt/fx/test/converters/acc_op/test_tanh.py index 9715e1c210..5b4ce2903a 100644 --- a/py/torch_tensorrt/fx/test/converters/acc_op/test_tanh.py +++ b/py/torch_tensorrt/fx/test/converters/acc_op/test_tanh.py @@ -30,6 +30,23 @@ def forward(self, x): Tanh(), input_specs, expected_ops={acc_ops.tanh} ) + def test_tanh_with_dynamic_shape_four_dimensions(self): + class Tanh(nn.Module): + def forward(self, x): + return torch.tanh(x) + + input_specs = [ + InputTensorSpec( + shape=(-1, -1, -1, -1), + dtype=torch.float32, + shape_ranges=[((1, 1, 1, 3), (1, 2, 3, 3), (3, 3, 3, 3))], + ), + ] + + self.run_test_with_dynamic_shape( + Tanh(), input_specs, expected_ops={acc_ops.tanh} + ) + if __name__ == "__main__": run_tests() diff --git a/py/torch_tensorrt/fx/tracer/acc_tracer/acc_utils.py b/py/torch_tensorrt/fx/tracer/acc_tracer/acc_utils.py index fd00598e18..b4856e116f 100644 --- a/py/torch_tensorrt/fx/tracer/acc_tracer/acc_utils.py +++ b/py/torch_tensorrt/fx/tracer/acc_tracer/acc_utils.py @@ -10,7 +10,6 @@ from torch.fx.immutable_collections import immutable_list from torch.fx.node import _get_qualified_name from torch.fx.passes import graph_drawer -from torch.fx.passes.graph_manipulation import serialize_module from torch.fx.passes.shape_prop import TensorMetadata @@ -77,13 +76,6 @@ def is_acc_op_with_kwarg( return kwarg in inspect.signature(inspect.unwrap(target)).parameters -def serialize_module_json_to_file(fx_module: GraphModule, fname: str): - weights: Dict = {} - serialized_json = json.dumps(serialize_module(fx_module, weights), indent=2) - with open(fname, "w") as ofile: - ofile.write(serialized_json) - - def build_raw_tensor_meta( shape=None, dtype=None,
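A closing note on the lower.py change near the top of this diff (editorial, not part of the diff): the new logger.info(f"{split_name=} {input_specs_val=}") call uses the f-string '=' debug specifier (Python 3.8+), which renders both the expression text and its repr. A small standalone illustration, with hypothetical values standing in for the real split name and input specs:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # hypothetical values; the real ones come from the splitter and InputTensorSpec
    split_name = "_run_on_acc_0"
    input_specs_val = [(-1, -1, -1, -1)]

    # log message reads: split_name='_run_on_acc_0' input_specs_val=[(-1, -1, -1, -1)]
    logger.info(f"{split_name=} {input_specs_val=}")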