diff --git a/tests/py/dynamo/conversion/harness.py b/tests/py/dynamo/conversion/harness.py index 0d77f3f712..be13f7d2c1 100644 --- a/tests/py/dynamo/conversion/harness.py +++ b/tests/py/dynamo/conversion/harness.py @@ -4,8 +4,6 @@ from typing import Callable, List, Optional, Set, Tuple import torch -import torch_tensorrt.fx.tracer.dispatch_tracer.aten_tracer as aten_tracer -from torch.fx.passes.infra.pass_base import PassResult from torch.testing._internal.common_utils import TestCase from torch_tensorrt import Input from torch_tensorrt.dynamo._settings import CompilationSettings @@ -14,19 +12,6 @@ from torch_tensorrt.dynamo.conversion import TRTInterpreter from torch_tensorrt.dynamo.lowering import apply_lowering_passes from torch_tensorrt.dynamo.runtime import PythonTorchTensorRTModule -from torch_tensorrt.fx.passes.lower_basic_pass_aten import ( - compose_bmm, - compose_chunk, - compose_getitem_slice, - remove_ops, - replace_aten_op_with_indices, - replace_aten_reshape_alias_with_replace, - replace_builtin_ops, - replace_native_layernorm_with_layernorm, - replace_transpose_mm_op_with_linear, - run_const_fold, -) -from torch_tensorrt.fx.passes.pass_utils import chain_passes _LOGGER: logging.Logger = logging.getLogger(__name__) @@ -62,8 +47,6 @@ def run_test( self, mod, inputs, - expected_ops, - unexpected_ops, interpreter, rtol, atol, @@ -76,10 +59,6 @@ def run_test( cuda_inputs.append(i.cuda()) mod.eval() - if len(expected_ops): - self.assert_has_op(mod, expected_ops) - if unexpected_ops: - self.assert_unexpected_op(mod, unexpected_ops) start = time.perf_counter() interpreter_result = interpreter.run(precision=precision) sec = time.perf_counter() - start @@ -215,75 +194,44 @@ def generate_graph( self, mod: torch.nn.Module, original_inputs: List[torch.Tensor], - expected_ops: Set[Callable], - unexpected_ops: Optional[Set[Callable]] = None, - customized_passes: List[Callable] = None, - disable_passes: bool = False, + use_dynamo_tracer: bool, + enable_passes: bool, ): - # Torchdynamo+aot proxytensor tracer - # Below are common passes - passes_list = [ - compose_bmm, - compose_chunk, - compose_getitem_slice, - replace_aten_reshape_alias_with_replace, - replace_aten_op_with_indices, - replace_transpose_mm_op_with_linear, # after compose_bmm - replace_native_layernorm_with_layernorm, - remove_ops, - replace_builtin_ops, # after replace_native_layernorm_with_layernorm - ] - # Combine with customized passes specific to any model - if customized_passes: - passes_list.extend(customized_passes) - - if disable_passes: - passes_list = [] - - fx_module, _ = aten_tracer.trace(mod, original_inputs) - for passes in passes_list: - pr: PassResult = passes(fx_module) - fx_module = pr.graph_module - fx_module(*original_inputs) - - fx_module = run_const_fold(fx_module) + if use_dynamo_tracer: + fx_module = torch._dynamo.export( + mod, + *original_inputs, + aten_graph=True, + assume_static_by_default=True, + tracing_mode="real", + ).graph_module + else: + fx_module = torch.fx.symbolic_trace(mod) + if enable_passes: + fx_module = apply_lowering_passes(fx_module, original_inputs) _LOGGER.info(f"FX graph= {fx_module.graph}") - - if len(expected_ops): - self.assert_has_op(fx_module, expected_ops) - if unexpected_ops: - self.assert_unexpected_op(fx_module, unexpected_ops) - return fx_module def run_test( self, mod, inputs, - expected_ops, - unexpected_ops=None, - apply_passes=None, rtol=1e-03, atol=1e-03, precision=torch.float, check_dtype=True, - disable_passes=False, output_dtypes=None, + 
use_dynamo_tracer=False, + enable_passes=False, ): mod.eval() mod = self.generate_graph( mod, inputs, - expected_ops, - unexpected_ops, - None, - disable_passes=disable_passes, + use_dynamo_tracer=use_dynamo_tracer, + enable_passes=enable_passes, ) - if apply_passes is not None: - pass_tracer = chain_passes(*apply_passes) - mod = pass_tracer(mod, inputs) - # Previous instance of the interpreter auto-casted 64-bit inputs # We replicate this behavior here compilation_settings = CompilationSettings(truncate_long_and_double=True) @@ -297,8 +245,6 @@ def run_test( super().run_test( mod, inputs, - expected_ops, - unexpected_ops, interp, rtol, atol, @@ -310,22 +256,19 @@ def run_test_with_dynamic_shape( self, mod, input_specs, - expected_ops, - unexpected_ops=None, rtol=1e-03, atol=1e-03, - disable_passes=False, output_dtypes=None, + use_dynamo_tracer=False, + enable_passes=False, ): mod.eval() inputs = [spec.example_tensor("opt_shape") for spec in input_specs] mod = self.generate_graph( mod, inputs, - expected_ops, - unexpected_ops, - None, - disable_passes=disable_passes, + use_dynamo_tracer=use_dynamo_tracer, + enable_passes=enable_passes, ) # Previous instance of the interpreter auto-casted 64-bit inputs @@ -341,6 +284,4 @@ def run_test_with_dynamic_shape( # Since the lowering is based on optimal shape. We need to test with # different shape(for ex. max shape) for testing dynamic shape inputs_max = [spec.example_tensor("max_shape") for spec in input_specs] - super().run_test( - mod, inputs_max, expected_ops, unexpected_ops, interp, rtol, atol - ) + super().run_test(mod, inputs_max, interp, rtol, atol) diff --git a/tests/py/dynamo/conversion/test_abs_aten.py b/tests/py/dynamo/conversion/test_abs_aten.py index eb11730625..13beeb3bfa 100644 --- a/tests/py/dynamo/conversion/test_abs_aten.py +++ b/tests/py/dynamo/conversion/test_abs_aten.py @@ -18,13 +18,12 @@ class TestAbsConverter(DispatchTestCase): def test_abs_float(self, input_shape, dtype): class abs(nn.Module): def forward(self, input): - return torch.abs(input) + return torch.ops.aten.abs.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( abs(), inputs, - expected_ops={torch.ops.aten.abs.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_abs_int(self, input_shape, dtype, low, high): class abs(nn.Module): def forward(self, input): - return torch.abs(input) + return torch.ops.aten.abs.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( abs(), inputs, - expected_ops={torch.ops.aten.abs.default}, output_dtypes=[torch.int], ) diff --git a/tests/py/dynamo/conversion/test_acos_aten.py b/tests/py/dynamo/conversion/test_acos_aten.py index dc26e93ac8..503cc54f39 100644 --- a/tests/py/dynamo/conversion/test_acos_aten.py +++ b/tests/py/dynamo/conversion/test_acos_aten.py @@ -18,13 +18,12 @@ class TestAcosConverter(DispatchTestCase): def test_acos_float(self, input_shape, dtype): class acos(nn.Module): def forward(self, input): - return torch.acos(input) + return torch.ops.aten.acos.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( acos(), inputs, - expected_ops={torch.ops.aten.acos.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_acos_int(self, input_shape, dtype, low, high): class acos(nn.Module): def forward(self, input): - return torch.acos(input) + return torch.ops.aten.acos.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( acos(), 
inputs, - expected_ops={torch.ops.aten.acos.default}, ) diff --git a/tests/py/dynamo/conversion/test_acosh_aten.py b/tests/py/dynamo/conversion/test_acosh_aten.py index fc544b242d..d127bdd240 100644 --- a/tests/py/dynamo/conversion/test_acosh_aten.py +++ b/tests/py/dynamo/conversion/test_acosh_aten.py @@ -18,13 +18,12 @@ class TestAcoshConverter(DispatchTestCase): def test_acosh_float(self, input_shape, dtype): class acosh(nn.Module): def forward(self, input): - return torch.acosh(input) + return torch.ops.aten.acosh.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( acosh(), inputs, - expected_ops={torch.ops.aten.acosh.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_acosh_int(self, input_shape, dtype, low, high): class acosh(nn.Module): def forward(self, input): - return torch.acosh(input) + return torch.ops.aten.acosh.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( acosh(), inputs, - expected_ops={torch.ops.aten.acosh.default}, ) diff --git a/tests/py/dynamo/conversion/test_adaptive_avgpool_aten.py b/tests/py/dynamo/conversion/test_adaptive_avgpool_aten.py index e31657d76b..e19e1b6187 100644 --- a/tests/py/dynamo/conversion/test_adaptive_avgpool_aten.py +++ b/tests/py/dynamo/conversion/test_adaptive_avgpool_aten.py @@ -7,27 +7,11 @@ class TestAdaptiveAvgPoolConverter(DispatchTestCase): - def test_adaptive_avgpool_mean(self): - class TestModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.pool = torch.nn.AdaptiveAvgPool2d((1, 1)) - - def forward(self, x): - return self.pool(x) - - inputs = [torch.randn(1, 3, 256, 256)] - self.run_test( - TestModule(), - inputs, - expected_ops={torch.ops.aten.mean.dim}, - ) - @parameterized.expand( [ ((64, 64),), ((128, 64),), - (64,), + # (64,), This case existed in the previous code, but it is not valid PyTorch. ] ) def test_adaptive_avgpool( @@ -46,7 +30,7 @@ def forward(self, x): self.run_test( TestModule(), inputs, - expected_ops={torch.ops.aten._adaptive_avg_pool2d.default}, + use_dynamo_tracer=True, ) def test_adaptive_avgpool_with_dynamic_shape(self): @@ -66,9 +50,7 @@ def forward(self, x): ), ] self.run_test_with_dynamic_shape( - TestModule(), - input_specs, - expected_ops={torch.ops.aten._adaptive_avg_pool2d.default}, + TestModule(), input_specs, use_dynamo_tracer=True ) @parameterized.expand( @@ -94,7 +76,7 @@ def forward(self, x): self.run_test( TestModule(), inputs, - expected_ops={torch.ops.aten._adaptive_avg_pool3d.default}, + use_dynamo_tracer=True, ) def test_adaptive_avgpool3d_with_dynamic_shape(self): @@ -118,7 +100,7 @@ def forward(self, x): self.run_test_with_dynamic_shape( TestModule(), input_specs, - expected_ops={torch.ops.aten._adaptive_avg_pool3d.default}, + use_dynamo_tracer=True, ) # Testing with shape(-1, -1, -1, -1) results into error: "AdaptiveAvgPool2d and AdaptiveAvgPool3d currently doesn't support dynamic shapes for last two dims."
diff --git a/tests/py/dynamo/conversion/test_add_aten.py b/tests/py/dynamo/conversion/test_add_aten.py index b9fec820c6..9cee27e91d 100644 --- a/tests/py/dynamo/conversion/test_add_aten.py +++ b/tests/py/dynamo/conversion/test_add_aten.py @@ -17,13 +17,12 @@ class TestAddConverter(DispatchTestCase): def test_add_tensor(self, _, shape): class add(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.add(lhs_val, rhs_val) + return torch.ops.aten.add.Tensor(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( add(), inputs, - expected_ops={torch.ops.aten.add.Tensor}, ) @parameterized.expand( @@ -35,13 +34,12 @@ def forward(self, lhs_val, rhs_val): def test_add_tensor_alpha(self, _, shape, alpha): class add(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.add(lhs_val, rhs_val, alpha=alpha) + return torch.ops.aten.add.Tensor(lhs_val, rhs_val, alpha=alpha) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( add(), inputs, - expected_ops={torch.ops.aten.add.Tensor}, ) @parameterized.expand( @@ -53,13 +51,12 @@ def forward(self, lhs_val, rhs_val): def test_add_scalar(self, _, shape, scalar): class add(nn.Module): def forward(self, lhs_val): - return torch.add(lhs_val, scalar) + return torch.ops.aten.add.Tensor(lhs_val, scalar) inputs = [torch.randn(shape)] self.run_test( add(), inputs, - expected_ops={torch.ops.aten.add.Tensor}, ) @parameterized.expand( @@ -71,13 +68,12 @@ def forward(self, lhs_val): def test_add_scalar_alpha(self, _, shape, scalar, alpha): class add(nn.Module): def forward(self, lhs_val): - return torch.add(lhs_val, scalar, alpha=alpha) + return torch.ops.aten.add.Tensor(lhs_val, scalar, alpha=alpha) inputs = [torch.randn(shape)] self.run_test( add(), inputs, - expected_ops={torch.ops.aten.add.Tensor}, ) diff --git a/tests/py/dynamo/conversion/test_amax_aten.py b/tests/py/dynamo/conversion/test_amax_aten.py index 70aa9842ae..9ac95dfdd0 100644 --- a/tests/py/dynamo/conversion/test_amax_aten.py +++ b/tests/py/dynamo/conversion/test_amax_aten.py @@ -19,13 +19,12 @@ class TestAmaxConverter(DispatchTestCase): def test_amax_dim_int_default(self, input_shape, dim, keep_dims): class Amax(nn.Module): def forward(self, x): - return torch.amax(x, dim=dim, keepdim=keep_dims) + return torch.ops.aten.amax.default(x, dim, keep_dims) inputs = [torch.randn(*input_shape)] self.run_test( Amax(), inputs, - expected_ops={torch.ops.aten.amax.default}, ) @parameterized.expand( @@ -39,13 +38,12 @@ def forward(self, x): def test_amax_dim_tuple_default(self, input_shape, dim, keep_dims): class Amax(nn.Module): def forward(self, x): - return torch.amax(x, dim=dim, keepdim=keep_dims) + return torch.ops.aten.amax.default(x, dim, keep_dims) inputs = [torch.randn(*input_shape)] self.run_test( Amax(), inputs, - expected_ops={torch.ops.aten.amax.default}, ) @parameterized.expand( @@ -60,13 +58,12 @@ def forward(self, x): def test_amax_dim_int_int(self, input_shape, dim, keep_dims, dtype, low, high): class Amax(nn.Module): def forward(self, x): - return torch.amax(x, dim=dim, keepdim=keep_dims) + return torch.ops.aten.amax.default(x, dim, keep_dims) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( Amax(), inputs, - expected_ops={torch.ops.aten.amax.default}, check_dtype=False, ) @@ -82,13 +79,12 @@ def forward(self, x): def test_amax_dim_tuple_int(self, input_shape, dim, keep_dims, dtype, low, high): class Amax(nn.Module): def forward(self, x): - return torch.amax(x, dim=dim, keepdim=keep_dims) + return 
torch.ops.aten.amax.default(x, dim, keep_dims) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( Amax(), inputs, - expected_ops={torch.ops.aten.amax.default}, check_dtype=False, ) diff --git a/tests/py/dynamo/conversion/test_asin_aten.py b/tests/py/dynamo/conversion/test_asin_aten.py index 0b0626fc94..c77452b370 100644 --- a/tests/py/dynamo/conversion/test_asin_aten.py +++ b/tests/py/dynamo/conversion/test_asin_aten.py @@ -18,13 +18,12 @@ class TestAsinConverter(DispatchTestCase): def test_asin_float(self, input_shape, dtype): class asin(nn.Module): def forward(self, input): - return torch.asin(input) + return torch.ops.aten.asin.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( asin(), inputs, - expected_ops={torch.ops.aten.asin.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_asin_int(self, input_shape, dtype, low, high): class asin(nn.Module): def forward(self, input): - return torch.asin(input) + return torch.ops.aten.asin.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( asin(), inputs, - expected_ops={torch.ops.aten.asin.default}, ) diff --git a/tests/py/dynamo/conversion/test_asinh_aten.py b/tests/py/dynamo/conversion/test_asinh_aten.py index 6fe45ed077..eb78084daa 100644 --- a/tests/py/dynamo/conversion/test_asinh_aten.py +++ b/tests/py/dynamo/conversion/test_asinh_aten.py @@ -18,13 +18,12 @@ class TestAsinhConverter(DispatchTestCase): def test_asinh_float(self, input_shape, dtype): class asinh(nn.Module): def forward(self, input): - return torch.asinh(input) + return torch.ops.aten.asinh.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( asinh(), inputs, - expected_ops={torch.ops.aten.asinh.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_asinh_int(self, input_shape, dtype, low, high): class asinh(nn.Module): def forward(self, input): - return torch.asinh(input) + return torch.ops.aten.asinh.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( asinh(), inputs, - expected_ops={torch.ops.aten.asinh.default}, ) diff --git a/tests/py/dynamo/conversion/test_atan_aten.py b/tests/py/dynamo/conversion/test_atan_aten.py index 54fe808a70..ee9e83b495 100644 --- a/tests/py/dynamo/conversion/test_atan_aten.py +++ b/tests/py/dynamo/conversion/test_atan_aten.py @@ -18,13 +18,12 @@ class TestAtanConverter(DispatchTestCase): def test_atan_float(self, input_shape, dtype): class atan(nn.Module): def forward(self, input): - return torch.atan(input) + return torch.ops.aten.atan.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( atan(), inputs, - expected_ops={torch.ops.aten.atan.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_atan_int(self, input_shape, dtype, low, high): class atan(nn.Module): def forward(self, input): - return torch.atan(input) + return torch.ops.aten.atan.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( atan(), inputs, - expected_ops={torch.ops.aten.atan.default}, ) diff --git a/tests/py/dynamo/conversion/test_atanh_aten.py b/tests/py/dynamo/conversion/test_atanh_aten.py index 2a5fae8847..500acb30fb 100644 --- a/tests/py/dynamo/conversion/test_atanh_aten.py +++ b/tests/py/dynamo/conversion/test_atanh_aten.py @@ -18,13 +18,12 @@ class TestAtanhConverter(DispatchTestCase): def test_atanh_float(self, input_shape, dtype): class 
atanh(nn.Module): def forward(self, input): - return torch.atanh(input) + return torch.ops.aten.atanh.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( atanh(), inputs, - expected_ops={torch.ops.aten.atanh.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_atanh_int(self, input_shape, dtype, low, high): class atanh(nn.Module): def forward(self, input): - return torch.atanh(input) + return torch.ops.aten.atanh.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( atanh(), inputs, - expected_ops={torch.ops.aten.atanh.default}, ) diff --git a/tests/py/dynamo/conversion/test_batchnorm_aten.py b/tests/py/dynamo/conversion/test_batchnorm_aten.py index 5729827deb..cb946bcc40 100644 --- a/tests/py/dynamo/conversion/test_batchnorm_aten.py +++ b/tests/py/dynamo/conversion/test_batchnorm_aten.py @@ -1,3 +1,5 @@ +import unittest + import torch from torch.testing._internal.common_utils import run_tests from torch_tensorrt import Input @@ -6,6 +8,7 @@ class TestBatchNormConverter(DispatchTestCase): + @unittest.skip("Pending ongoing work on batchnorm converter in Dynamo") def test_batchnorm(self): class TestModule(torch.nn.Module): def __init__(self): @@ -18,6 +21,7 @@ def forward(self, x): inputs = [torch.randn(1, 3, 224, 224)] self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.batch_norm}) + @unittest.skip("Pending ongoing work on batchnorm converter in Dynamo") def test_batchnorm1d_with_dynamic_shape(self): class TestModule(torch.nn.Module): def __init__(self): @@ -39,6 +43,7 @@ def forward(self, x): TestModule(), input_specs, expected_ops={torch.ops.aten.batch_norm} ) + @unittest.skip("Pending ongoing work on batchnorm converter in Dynamo") def test_batchnorm_with_dynamic_shape(self): class TestModule(torch.nn.Module): def __init__(self): diff --git a/tests/py/dynamo/conversion/test_binary_ops_aten.py b/tests/py/dynamo/conversion/test_binary_ops_aten.py index 90e220d63d..331fab591d 100644 --- a/tests/py/dynamo/conversion/test_binary_ops_aten.py +++ b/tests/py/dynamo/conversion/test_binary_ops_aten.py @@ -12,57 +12,39 @@ NEED_TEST_BOTH_CONSTANTS_CASE = True elementwise_ops = [ - ((lambda x, y: x + y), torch.ops.aten.add.Tensor, NEED_TEST_BOTH_CONSTANTS_CASE), + ((lambda x, y: torch.ops.aten.add.Tensor(x, y)), NEED_TEST_BOTH_CONSTANTS_CASE), + ((lambda x, y: torch.ops.aten.sub.Tensor(x, y)), NEED_TEST_BOTH_CONSTANTS_CASE), + ((lambda x, y: torch.ops.aten.div.Tensor(x, y)), NEED_TEST_BOTH_CONSTANTS_CASE), ( - (lambda x, y: torch.add(x, y)), - torch.ops.aten.add.Tensor, + (lambda x, y: torch.ops.aten.floor_divide.default(x, y)), NEED_TEST_BOTH_CONSTANTS_CASE, ), - ((lambda x, y: x.add(y)), torch.ops.aten.add.Tensor, NEED_TEST_BOTH_CONSTANTS_CASE), - ((lambda x, y: x - y), torch.ops.aten.sub.Tensor, NEED_TEST_BOTH_CONSTANTS_CASE), - ((lambda x, y: torch.sub(x, y)), torch.ops.aten.sub.Tensor, False), - ((lambda x, y: x.sub(y)), torch.ops.aten.sub.Tensor, False), - ((lambda x, y: x / y), torch.ops.aten.div.Tensor, NEED_TEST_BOTH_CONSTANTS_CASE), ( - (lambda x, y: x // y), - torch.ops.aten.floor_divide.default, - NEED_TEST_BOTH_CONSTANTS_CASE, - ), - ( - (lambda x, y: torch.div(x, y, rounding_mode="trunc")), - torch.ops.aten.div.Tensor_mode, + (lambda x, y: torch.ops.aten.div.Tensor_mode(x, y, rounding_mode="trunc")), not NEED_TEST_BOTH_CONSTANTS_CASE, ), ( - (lambda x, y: torch.div(x, y, rounding_mode="floor")), - torch.ops.aten.div.Tensor_mode, + (lambda x, y: 
torch.ops.aten.div.Tensor_mode(x, y, rounding_mode="floor")), NEED_TEST_BOTH_CONSTANTS_CASE, ), ( - (lambda x, y: torch.div(x, y)), - torch.ops.aten.div.Tensor, - NEED_TEST_BOTH_CONSTANTS_CASE, - ), - ( - (lambda x, y: torch.fmod(x, y)), torch.ops.aten.fmod.Tensor, not NEED_TEST_BOTH_CONSTANTS_CASE, ), ## torch.floor_divide rounds result toward zero, rather than -Inf. ## https://github.com/pytorch/pytorch/issues/43874 ( - (lambda x, y: torch.floor_divide(x, y)), - torch.ops.aten.floor_divide.default, + (lambda x, y: torch.ops.aten.floor_divide.default(x, y)), not NEED_TEST_BOTH_CONSTANTS_CASE, ), - ((lambda x, y: x * y), torch.ops.aten.mul.Tensor, NEED_TEST_BOTH_CONSTANTS_CASE), - (torch.pow, torch.ops.aten.pow.Tensor_Tensor, not NEED_TEST_BOTH_CONSTANTS_CASE), + ((lambda x, y: torch.ops.aten.mul.Tensor(x, y)), NEED_TEST_BOTH_CONSTANTS_CASE), + (torch.ops.aten.pow.Tensor_Tensor, not NEED_TEST_BOTH_CONSTANTS_CASE), ] class TestBinaryOpConverters(DispatchTestCase): - @parameterized.expand([(op[1].__name__, op[0], op[1]) for op in elementwise_ops]) - def test_elementwise_ops(self, name, orig_op: Callable, expected_op): + @parameterized.expand([(op[0].__name__, op[0]) for op in elementwise_ops]) + def test_elementwise_ops(self, name, orig_op: Callable): class TestModule(nn.Module): def __init__(self, orig_op): super().__init__() @@ -74,13 +56,11 @@ def forward(self, x): m = TestModule(orig_op) # Avoid dividing by 0. inputs = [torch.rand(1, 1) + 1] - self.run_test(m, inputs, expected_ops={expected_op}) + self.run_test(m, inputs) - @parameterized.expand([(op[1].__name__, op[0], op[1]) for op in elementwise_ops]) + @parameterized.expand([(op[0].__name__, op[0]) for op in elementwise_ops]) @unittest.skip("Pending reimplementation of all binary converters in Dynamo") - def test_elementwise_ops_mismatched_dtypes( - self, name, orig_op: Callable, expected_op - ): + def test_elementwise_ops_mismatched_dtypes(self, name, orig_op: Callable): class TestModule(nn.Module): def __init__(self, orig_op): super().__init__() @@ -95,12 +75,16 @@ def forward(self, x, y): 2 * torch.rand(1, 1, dtype=torch.float) + 1, torch.randint(1, 3, (1, 1), dtype=torch.int), ] - self.run_test(m, inputs, expected_ops={expected_op}) + self.run_test(m, inputs) - @parameterized.expand([(op[1].__name__, op[0], op[1]) for op in elementwise_ops]) - def test_elementwise_ops_with_one_constant( - self, name, orig_op: Callable, expected_op - ): + @parameterized.expand( + [ + (op[0].__name__, op[0]) + for op in elementwise_ops + if op[0].__name__ not in ["pow.Tensor_Tensor", "fmod.Tensor"] + ] + ) + def test_elementwise_ops_with_one_constant(self, name, orig_op: Callable): class TestModule(nn.Module): def __init__(self, orig_op): super().__init__() @@ -113,14 +97,10 @@ def forward(self, x): m = TestModule(orig_op) inputs = [torch.randn(2, 2)] - self.run_test(m, inputs, expected_ops={expected_op}) + self.run_test(m, inputs) - @parameterized.expand( - [(op[1].__name__, op[0], op[1]) for op in elementwise_ops if op[2]] - ) - def test_elementwise_op_with_both_constants( - self, name, orig_op: Callable, expected_op - ): + @parameterized.expand([(op[0].__name__, op[0]) for op in elementwise_ops if op[1]]) + def test_elementwise_op_with_both_constants(self, name, orig_op: Callable): class TestModule(nn.Module): def __init__(self, orig_op): super().__init__() @@ -134,10 +114,10 @@ def forward(self, x): m = TestModule(orig_op) inputs = [torch.randn(2, 2)] - self.run_test(m, inputs, expected_ops={expected_op}) + self.run_test(m, inputs) - 
@parameterized.expand([((lambda x, y: x / y), torch.ops.aten.div.Tensor)]) - def test_elementwise_op_div_with_two_ints(self, orig_op: Callable, expected_op): + @parameterized.expand([((lambda x, y: torch.ops.aten.div.Tensor(x, y)))]) + def test_elementwise_op_div_with_two_ints(self, orig_op: Callable): class TestModule(nn.Module): def __init__(self, orig_op): super().__init__() @@ -148,12 +128,10 @@ def forward(self, x): m = TestModule(orig_op) inputs = [torch.randint(1, 10, (5,), dtype=torch.int32)] - self.run_test(m, inputs, expected_ops={expected_op}) + self.run_test(m, inputs) - @parameterized.expand([((lambda x, y: x / y), torch.ops.aten.div.Tensor)]) - def test_elementwise_op_div_with_one_int_one_constant( - self, orig_op: Callable, expected_op - ): + @parameterized.expand([(lambda x, y: torch.ops.aten.div.Tensor(x, y))]) + def test_elementwise_op_div_with_one_int_one_constant(self, orig_op: Callable): class TestModule(nn.Module): def __init__(self, orig_op): super().__init__() @@ -169,37 +147,35 @@ def forward(self, x): m = TestModule(orig_op) inputs = [torch.randint(1, 10, (5,), dtype=torch.int32)] - self.run_test(m, inputs, expected_ops={expected_op}) + self.run_test(m, inputs) # Dynamic shape test @parameterized.expand( [ ( - f"no_broadcast_{op[1].__name__}", + f"no_broadcast_{op[0].__name__}", (-1, -1), ((1, 1), (2, 2), (3, 3)), (-1, -1), ((1, 1), (2, 2), (3, 3)), op[0], - op[1], ) for op in elementwise_ops ] + [ ( - f"broadcast_{op[1].__name__}", + f"broadcast_{op[0].__name__}", (-1, -1, -1), ((1, 1, 1), (2, 2, 2), (3, 3, 3)), (-1, -1), ((1, 1), (2, 2), (3, 3)), op[0], - op[1], ) for op in elementwise_ops ] ) def test_elementwise_op_with_dynamic_shape( - self, _, x_shape, x_shape_ranges, y_shape, y_shape_ranges, orig_op, expected_op + self, _, x_shape, x_shape_ranges, y_shape, y_shape_ranges, orig_op ): class Op(nn.Module): def forward(self, x, y): @@ -217,29 +193,25 @@ def forward(self, x, y): shape_ranges=[y_shape_ranges], ), ] - self.run_test_with_dynamic_shape(Op(), input_specs, expected_ops={expected_op}) + self.run_test_with_dynamic_shape(Op(), input_specs) @parameterized.expand( [ ( - f"no_broadcast_{op[1].__name__}", + f"no_broadcast_{op[0].__name__}", op[0], - op[1], ) for op in elementwise_ops ] + [ ( - f"broadcast_{op[1].__name__}", + f"broadcast_{op[0].__name__}", op[0], - op[1], ) for op in elementwise_ops ] ) - def test_elementwise_op_with_dynamic_shape_four_dimensions( - self, _, orig_op, expected_op - ): + def test_elementwise_op_with_dynamic_shape_four_dimensions(self, _, orig_op): class Op(nn.Module): def forward(self, x, y): return orig_op(x, y) @@ -256,7 +228,7 @@ def forward(self, x, y): shape_ranges=[((1, 1, 1, 1), (3, 3, 3, 3), (5, 5, 5, 5))], ), ] - self.run_test_with_dynamic_shape(Op(), input_specs, expected_ops={expected_op}) + self.run_test_with_dynamic_shape(Op(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_bmm.py b/tests/py/dynamo/conversion/test_bmm.py index 391bd0bf89..f494e128f9 100644 --- a/tests/py/dynamo/conversion/test_bmm.py +++ b/tests/py/dynamo/conversion/test_bmm.py @@ -16,19 +16,14 @@ class TestBmmConverter(DispatchTestCase): ) def test_bmm(self, _, input_shape, mat2_shape): class BMM(nn.Module): - def __init__(self): - super().__init__() - def forward(self, input, mat2): - return torch.bmm(input, mat2) + return torch.ops.aten.bmm.default(input, mat2) inputs = [torch.randn(*input_shape), torch.randn(*mat2_shape)] self.run_test( BMM(), inputs, - disable_passes=True, - 
expected_ops={torch.ops.aten.bmm.default}, ) diff --git a/tests/py/dynamo/conversion/test_casts.py b/tests/py/dynamo/conversion/test_casts.py index 50d94713c5..f17eb7b1d4 100644 --- a/tests/py/dynamo/conversion/test_casts.py +++ b/tests/py/dynamo/conversion/test_casts.py @@ -10,42 +10,27 @@ class TestCloneConverter(DispatchTestCase): def test_clone_contiguous(self): class Clone(nn.Module): def forward(self, x): - y = torch.clone(x, memory_format=torch.contiguous_format) + y = torch.ops.aten.clone.default( + x, memory_format=torch.contiguous_format + ) return y + 1 inputs = [torch.randn((1, 3, 10))] self.run_test( Clone(), inputs, - expected_ops={torch.ops.aten.clone.default}, - disable_passes=True, ) def test_clone_regular(self): class Clone(nn.Module): def forward(self, x): - y = torch.clone(x) + y = torch.ops.aten.clone.default(x) return y + 1 inputs = [torch.randn((8, 2, 10))] self.run_test( Clone(), inputs, - expected_ops={torch.ops.aten.clone.default}, - disable_passes=True, - ) - - def test_clone_direct(self): - class Clone(nn.Module): - def forward(self, x): - return x.clone() - - inputs = [torch.randn((8, 2, 10))] - self.run_test( - Clone(), - inputs, - expected_ops={torch.ops.aten.clone.default}, - disable_passes=True, ) @@ -53,37 +38,33 @@ class TestToCopyConverter(DispatchTestCase): def test_to_copy_half(self): class ToCopyHalf(nn.Module): def forward(self, x): - y = x.to(dtype=torch.half) + y = torch.ops.aten._to_copy.default(x, dtype=torch.half) return y inputs = [torch.rand((1, 3, 10))] self.run_test( ToCopyHalf(), inputs, - expected_ops={torch.ops.aten._to_copy.default}, precision=torch.half, - disable_passes=True, ) def test_to_copy_float(self): class ToCopyFloat(nn.Module): def forward(self, x): - y = x.to(dtype=torch.float) + y = torch.ops.aten._to_copy.default(x, dtype=torch.float) return y inputs = [torch.rand((1, 3, 10)).half()] self.run_test( ToCopyFloat(), inputs, - expected_ops={torch.ops.aten._to_copy.default}, precision=torch.float, - disable_passes=True, ) def test_to_copy_unsupported(self): class ToCopy64Bit(nn.Module): def forward(self, x): - y = x.to(dtype=torch.int64) + y = torch.ops.aten._to_copy.default(x, dtype=torch.int64) return y inputs = [torch.randn((1, 3, 10)).int()] @@ -92,24 +73,8 @@ def forward(self, x): self.run_test( ToCopy64Bit(), inputs, - expected_ops={torch.ops.aten._to_copy.default}, - disable_passes=True, ) - def test_to_copy_direct(self): - class ToCopyFloat(nn.Module): - def forward(self, x): - return x.to(dtype=torch.float, copy=True) - - inputs = [torch.rand((1, 3, 10)).float()] - self.run_test( - ToCopyFloat(), - inputs, - expected_ops={torch.ops.aten._to_copy.default}, - precision=torch.float, - disable_passes=True, - ) - if __name__ == "__main__": run_tests() diff --git a/tests/py/dynamo/conversion/test_cat_aten.py b/tests/py/dynamo/conversion/test_cat_aten.py index 32e50bffbd..a8d8bae42f 100644 --- a/tests/py/dynamo/conversion/test_cat_aten.py +++ b/tests/py/dynamo/conversion/test_cat_aten.py @@ -17,13 +17,12 @@ class TestCatConverter(DispatchTestCase): def test_cat(self, _, dim): class Cat(nn.Module): def forward(self, x, y, z): - return torch.cat((x, y, z), dim) + return torch.ops.aten.cat.default((x, y, z), dim) inputs = [torch.randn(1, 2, 3), torch.randn(1, 1, 3), torch.randn(1, 3, 3)] self.run_test( Cat(), inputs, - expected_ops={torch.ops.aten.cat.default}, ) @parameterized.expand( @@ -35,7 +34,7 @@ def forward(self, x, y, z): def test_cat_dynamic_shape(self, _, dim): class Cat(nn.Module): def forward(self, x, y): - return 
torch.cat((x, y), dim) + return torch.ops.aten.cat.default((x, y), dim) input_specs = [ Input( @@ -52,25 +51,23 @@ def forward(self, x, y): self.run_test_with_dynamic_shape( Cat(), input_specs, - expected_ops={torch.ops.aten.cat.default}, ) def test_cat_no_dim(self): class Cat(nn.Module): def forward(self, x, y, z): - return torch.cat((x, y, z)) + return torch.ops.aten.cat.default((x, y, z)) inputs = [torch.randn(2, 1, 3), torch.randn(1, 1, 3), torch.randn(3, 1, 3)] self.run_test( Cat(), inputs, - expected_ops={torch.ops.aten.cat.default}, ) def test_cat_dynamic_shape_no_dim(self): class Cat(nn.Module): def forward(self, x, y): - return torch.cat((x, y)) + return torch.ops.aten.cat.default((x, y)) input_specs = [ Input( @@ -87,7 +84,6 @@ def forward(self, x, y): self.run_test_with_dynamic_shape( Cat(), input_specs, - expected_ops={torch.ops.aten.cat.default}, ) diff --git a/tests/py/dynamo/conversion/test_ceil_aten.py b/tests/py/dynamo/conversion/test_ceil_aten.py index 951579475e..321a3a45d7 100644 --- a/tests/py/dynamo/conversion/test_ceil_aten.py +++ b/tests/py/dynamo/conversion/test_ceil_aten.py @@ -18,13 +18,12 @@ class TestCeilConverter(DispatchTestCase): def test_ceil_float(self, input_shape, dtype): class ceil(nn.Module): def forward(self, input): - return torch.ceil(input) + return torch.ops.aten.ceil.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( ceil(), inputs, - expected_ops={torch.ops.aten.ceil.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_ceil_int(self, input_shape, dtype, low, high): class ceil(nn.Module): def forward(self, input): - return torch.ceil(input) + return torch.ops.aten.ceil.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( ceil(), inputs, - expected_ops={torch.ops.aten.ceil.default}, check_dtype=False, ) diff --git a/tests/py/dynamo/conversion/test_clamp_aten.py b/tests/py/dynamo/conversion/test_clamp_aten.py index 00b5ba339a..fcee7bfa3c 100644 --- a/tests/py/dynamo/conversion/test_clamp_aten.py +++ b/tests/py/dynamo/conversion/test_clamp_aten.py @@ -24,10 +24,10 @@ def test_clamp( ): class TestModule(torch.nn.Module): def forward(self, x): - return torch.clamp(x, min, max) + return torch.ops.aten.clamp.default(x, min, max) inputs = [torch.randn(3, 4)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.clamp.default}) + self.run_test(TestModule(), inputs) @parameterized.expand( [ @@ -45,12 +45,12 @@ def test_clamp_with_dynamic_shape_four_dimensions( ): class TestModule(torch.nn.Module): def forward(self, x): - return torch.clamp(x, min, max) + return torch.ops.aten.clamp.default(x, min, max) class TestScalarModule(torch.nn.Module): def forward(self, x): - y = torch.mean(x) - return torch.clamp(y, min, max) + y = torch.ops.aten.mean.default(x) + return torch.ops.aten.clamp.default(y, min, max) input_specs = [ Input( @@ -60,12 +60,8 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.clamp.default} - ) - self.run_test_with_dynamic_shape( - TestScalarModule(), input_specs, expected_ops={torch.ops.aten.clamp.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) + self.run_test_with_dynamic_shape(TestScalarModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_clip_aten.py b/tests/py/dynamo/conversion/test_clip_aten.py index 01e885bc38..a3819fb4dd 100644 --- a/tests/py/dynamo/conversion/test_clip_aten.py 
+++ b/tests/py/dynamo/conversion/test_clip_aten.py @@ -1,9 +1,10 @@ import torch -from .harness import DispatchTestCase from parameterized import param, parameterized from torch.testing._internal.common_utils import run_tests from torch_tensorrt import Input +from .harness import DispatchTestCase + class TestClipConverter(DispatchTestCase): @parameterized.expand( @@ -18,10 +19,10 @@ class TestClipConverter(DispatchTestCase): def test_clip(self, test_name, min=None, max=None): class TestModule(torch.nn.Module): def forward(self, x): - return torch.clip(x, min, max) + return torch.ops.aten.clamp.default(x, min, max) inputs = [torch.randn(3, 4)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.clamp.default}) + self.run_test(TestModule(), inputs) @parameterized.expand( [ @@ -36,12 +37,12 @@ def test_clip_with_dynamic_shape_four_dimensions( ): class TestModule(torch.nn.Module): def forward(self, x): - return torch.clip(x, min, max) + return torch.ops.aten.clamp.default(x, min, max) class TestScalarModule(torch.nn.Module): def forward(self, x): - y = torch.mean(x) - return torch.clip(y, min, max) + y = torch.ops.aten.mean.default(x) + return torch.ops.aten.clamp.default(y, min, max) input_specs = [ Input( @@ -51,12 +52,8 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.clamp.default} - ) - self.run_test_with_dynamic_shape( - TestScalarModule(), input_specs, expected_ops={torch.ops.aten.clamp.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) + self.run_test_with_dynamic_shape(TestScalarModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_convolution_aten.py b/tests/py/dynamo/conversion/test_convolution_aten.py index 62e9e9a411..7d69c871a9 100644 --- a/tests/py/dynamo/conversion/test_convolution_aten.py +++ b/tests/py/dynamo/conversion/test_convolution_aten.py @@ -1,6 +1,7 @@ import torch from parameterized import param, parameterized from torch.testing._internal.common_utils import run_tests + from torch_tensorrt import Input from .harness import DispatchTestCase @@ -41,7 +42,7 @@ def forward(self, x): self.run_test( TestModule(), inputs, - expected_ops={torch.ops.aten.convolution.default}, + use_dynamo_tracer=True, ) def test_conv1d_with_dynamic_shape( @@ -72,7 +73,9 @@ def forward(self, x): ] self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.convolution.default} + TestModule(), + input_specs, + use_dynamo_tracer=True, ) @parameterized.expand( @@ -114,7 +117,9 @@ def forward(self, x): inputs = [torch.randn(1, 3, 32, 32)] self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.convolution.default} + TestModule(), + inputs, + use_dynamo_tracer=True, ) # Testing with (-1, -1, -1, -1) results into Error: @@ -137,7 +142,9 @@ def forward(self, x): ), ] self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.convolution.default} + TestModule(), + input_specs, + use_dynamo_tracer=True, ) @parameterized.expand( @@ -173,7 +180,9 @@ def forward(self, x): inputs = [torch.randn(1, 3, 32, 32, 32)] self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.convolution.default} + TestModule(), + inputs, + use_dynamo_tracer=True, ) # Testing with (-1, -1, -1, -1, -1) results into Error: @@ -196,7 +205,9 @@ def forward(self, x): ), ] self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.convolution.default} + 
TestModule(), + input_specs, + use_dynamo_tracer=True, ) diff --git a/tests/py/dynamo/conversion/test_cos_aten.py b/tests/py/dynamo/conversion/test_cos_aten.py index 7bbfad3673..505f303219 100644 --- a/tests/py/dynamo/conversion/test_cos_aten.py +++ b/tests/py/dynamo/conversion/test_cos_aten.py @@ -18,13 +18,12 @@ class TestCosConverter(DispatchTestCase): def test_cos_float(self, input_shape, dtype): class cos(nn.Module): def forward(self, input): - return torch.cos(input) + return torch.ops.aten.cos.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( cos(), inputs, - expected_ops={torch.ops.aten.cos.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_cos_int(self, input_shape, dtype, low, high): class cos(nn.Module): def forward(self, input): - return torch.cos(input) + return torch.ops.aten.cos.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( cos(), inputs, - expected_ops={torch.ops.aten.cos.default}, ) diff --git a/tests/py/dynamo/conversion/test_cosh_aten.py b/tests/py/dynamo/conversion/test_cosh_aten.py index 02c472bb1f..1175613796 100644 --- a/tests/py/dynamo/conversion/test_cosh_aten.py +++ b/tests/py/dynamo/conversion/test_cosh_aten.py @@ -18,13 +18,12 @@ class TestCoshConverter(DispatchTestCase): def test_cosh_float(self, input_shape, dtype): class cosh(nn.Module): def forward(self, input): - return torch.cosh(input) + return torch.ops.aten.cosh.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( cosh(), inputs, - expected_ops={torch.ops.aten.cosh.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_cosh_int(self, input_shape, dtype, low, high): class cosh(nn.Module): def forward(self, input): - return torch.cosh(input) + return torch.ops.aten.cosh.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( cosh(), inputs, - expected_ops={torch.ops.aten.cosh.default}, ) diff --git a/tests/py/dynamo/conversion/test_deconvolution_aten.py b/tests/py/dynamo/conversion/test_deconvolution_aten.py index 939a7ea9c0..6024b6946e 100644 --- a/tests/py/dynamo/conversion/test_deconvolution_aten.py +++ b/tests/py/dynamo/conversion/test_deconvolution_aten.py @@ -1,6 +1,7 @@ import torch from parameterized import param, parameterized from torch.testing._internal.common_utils import run_tests + from torch_tensorrt import Input from .harness import DispatchTestCase @@ -48,7 +49,7 @@ def forward(self, x): self.run_test( TestModule(), inputs, - expected_ops={torch.ops.aten.convolution.default}, + use_dynamo_tracer=True, ) def test_deconv1d_with_dynamic_shape( @@ -86,7 +87,9 @@ def forward(self, x): ] self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.convolution.default} + TestModule(), + input_specs, + use_dynamo_tracer=True, ) @parameterized.expand( @@ -128,7 +131,9 @@ def forward(self, x): inputs = [torch.randn(1, 3, 32, 32)] self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.convolution.default} + TestModule(), + inputs, + use_dynamo_tracer=True, ) # Testing with (-1, -1, -1, -1) results into Error: @@ -151,7 +156,9 @@ def forward(self, x): ), ] self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.convolution.default} + TestModule(), + input_specs, + use_dynamo_tracer=True, ) @parameterized.expand( @@ -193,7 +200,9 @@ def forward(self, x): inputs = [torch.randn(1, 3, 32, 32, 32)] self.run_test( - 
TestModule(), inputs, expected_ops={torch.ops.aten.convolution.default} + TestModule(), + inputs, + use_dynamo_tracer=True, ) # Testing with (-1, -1, -1, -1, -1) results into Error: @@ -216,7 +225,9 @@ def forward(self, x): ), ] self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.convolution.default} + TestModule(), + input_specs, + use_dynamo_tracer=True, ) diff --git a/tests/py/dynamo/conversion/test_div_aten.py b/tests/py/dynamo/conversion/test_div_aten.py index 49a13ea3a6..2facb52289 100644 --- a/tests/py/dynamo/conversion/test_div_aten.py +++ b/tests/py/dynamo/conversion/test_div_aten.py @@ -17,13 +17,12 @@ class TestDivConverter(DispatchTestCase): def test_div_tensor(self, _, shape): class div(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.div(lhs_val, rhs_val) + return torch.ops.aten.div.Tensor(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( div(), inputs, - expected_ops={torch.ops.aten.div.Tensor}, ) @parameterized.expand( @@ -36,13 +35,14 @@ def forward(self, lhs_val, rhs_val): def test_div_tensor_rounding_mode(self, _, shape, rounding_mode): class div(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.div(lhs_val, rhs_val, rounding_mode=rounding_mode) + return torch.ops.aten.div.Tensor_mode( + lhs_val, rhs_val, rounding_mode=rounding_mode + ) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( div(), inputs, - expected_ops={torch.ops.aten.div.Tensor_mode}, ) @parameterized.expand( @@ -54,13 +54,12 @@ def forward(self, lhs_val, rhs_val): def test_div_tensor(self, _, shape, scalar): class div(nn.Module): def forward(self, lhs_val): - return torch.div(lhs_val, scalar) + return torch.ops.aten.div.Tensor(lhs_val, scalar) inputs = [torch.randn(shape)] self.run_test( div(), inputs, - expected_ops={torch.ops.aten.div.Tensor}, ) @parameterized.expand( @@ -73,13 +72,14 @@ def forward(self, lhs_val): def test_div_tensor_rounding_mode(self, _, shape, scalar, rounding_mode): class div(nn.Module): def forward(self, lhs_val): - return torch.div(lhs_val, scalar, rounding_mode=rounding_mode) + return torch.ops.aten.div.Tensor_mode( + lhs_val, scalar, rounding_mode=rounding_mode + ) inputs = [torch.randn(shape)] self.run_test( div(), inputs, - expected_ops={torch.ops.aten.div.Tensor_mode}, ) diff --git a/tests/py/dynamo/conversion/test_elu_aten.py b/tests/py/dynamo/conversion/test_elu_aten.py index a4a10f9da2..efcb2ba4ba 100644 --- a/tests/py/dynamo/conversion/test_elu_aten.py +++ b/tests/py/dynamo/conversion/test_elu_aten.py @@ -10,15 +10,15 @@ class TestELUConverter(DispatchTestCase): def test_elu(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.elu(x) + return torch.ops.aten.elu.default(x) inputs = [torch.randn(1, 10)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.elu.default}) + self.run_test(TestModule(), inputs) def test_elu_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.elu(x) + return torch.ops.aten.elu.default(x) input_specs = [ Input( @@ -27,14 +27,12 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.elu.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_elu_with_dynamic_shape_four_dimensions(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.elu(x) + return 
torch.ops.aten.elu.default(x) input_specs = [ Input( @@ -44,9 +42,7 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.elu.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_embedding_aten.py b/tests/py/dynamo/conversion/test_embedding_aten.py index 1573989492..0ce4c5b49b 100644 --- a/tests/py/dynamo/conversion/test_embedding_aten.py +++ b/tests/py/dynamo/conversion/test_embedding_aten.py @@ -12,18 +12,20 @@ class TestEmbeddingConverter(DispatchTestCase): [ param( test_name="1d_indices", - indices_tensor=torch.tensor([3, 1, 2]), - weights_tensor=torch.randn(5, 10), + indices_tensor=torch.tensor([3, 1, 2], dtype=torch.int32), + weights_tensor=torch.randn((5, 10), dtype=torch.float32), ), param( test_name="2d_indices", - indices_tensor=torch.tensor([[3, 1, 2], [4, 1, 3]]), - weights_tensor=torch.randn(5, 10), + indices_tensor=torch.tensor([[3, 1, 2], [4, 1, 3]], dtype=torch.int32), + weights_tensor=torch.randn((5, 10), dtype=torch.float32), ), param( test_name="3d_indices", - indices_tensor=torch.tensor([[[0, 1], [2, 3]], [[3, 4], [4, 0]]]), - weights_tensor=torch.randn(5, 10), + indices_tensor=torch.tensor( + [[[0, 1], [2, 3]], [[3, 4], [4, 0]]], dtype=torch.int32 + ), + weights_tensor=torch.randn((5, 10), dtype=torch.float32), ), ] ) @@ -32,54 +34,49 @@ def test_embedding( test_name, indices_tensor, weights_tensor, - padding_idx=None, + padding_idx=-1, max_norm=None, norm_type=2.0, - scale_grad_by_freq=False, - sparse=False, + scale_grad_by_freq=None, + sparse=None, ): class TestEmbedding(torch.nn.Module): def forward(self, indices, weights): - return torch.nn.functional.embedding( - input=indices, - weight=weights, - padding_idx=padding_idx, - max_norm=max_norm, - norm_type=norm_type, - scale_grad_by_freq=scale_grad_by_freq, - sparse=sparse, + return torch.ops.aten.embedding.default( + weights, + indices, + padding_idx, + scale_grad_by_freq, + sparse, ) self.run_test( TestEmbedding(), - inputs=[indices_tensor.int(), weights_tensor.float()], - expected_ops={torch.ops.aten.embedding.default}, + inputs=[indices_tensor, weights_tensor], ) def test_embedding_with_dynamic_shape_four_dimensions( self, - padding_idx=None, + padding_idx=-1, max_norm=None, norm_type=2.0, - scale_grad_by_freq=False, - sparse=False, + scale_grad_by_freq=None, + sparse=None, ): class TestEmbedding(torch.nn.Module): def forward(self, input, weights): - return torch.nn.functional.embedding( - input=input, - weight=weights, - padding_idx=padding_idx, - max_norm=max_norm, - norm_type=norm_type, - scale_grad_by_freq=scale_grad_by_freq, - sparse=sparse, + return torch.ops.aten.embedding.default( + weights, + input, + padding_idx, + scale_grad_by_freq, + sparse, ) input_specs = [ Input( shape=(-1, -1, -1, -1), - dtype=torch.int, + dtype=torch.int32, shape_ranges=[((1, 1, 1, 1), (2, 3, 4, 5), (2, 3, 10, 10))], ), Input( @@ -92,7 +89,6 @@ def forward(self, input, weights): self.run_test_with_dynamic_shape( TestEmbedding(), input_specs, - expected_ops={torch.ops.aten.embedding.default}, ) diff --git a/tests/py/dynamo/conversion/test_equal_aten.py b/tests/py/dynamo/conversion/test_equal_aten.py index edc2259487..7761b31410 100644 --- a/tests/py/dynamo/conversion/test_equal_aten.py +++ b/tests/py/dynamo/conversion/test_equal_aten.py @@ -17,13 +17,12 @@ class TestEqualConverter(DispatchTestCase): def test_equal_tensor(self, _, shape): class equal(nn.Module): def 
forward(self, lhs_val, rhs_val): - return lhs_val == rhs_val + return torch.ops.aten.eq.Tensor(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( equal(), inputs, - expected_ops={torch.ops.aten.eq.Tensor}, output_dtypes=[torch.bool], ) @@ -36,13 +35,12 @@ def forward(self, lhs_val, rhs_val): def test_equal_tensor_scalar(self, _, shape, scalar): class equal(nn.Module): def forward(self, lhs_val): - return lhs_val == torch.tensor(scalar) + return torch.ops.aten.eq.Tensor(lhs_val, torch.tensor(scalar)) inputs = [torch.randn(shape)] self.run_test( equal(), inputs, - expected_ops={torch.ops.aten.eq.Tensor}, output_dtypes=[torch.bool], ) @@ -55,13 +53,12 @@ def forward(self, lhs_val): def test_equal_scalar(self, _, shape, scalar): class equal(nn.Module): def forward(self, lhs_val): - return lhs_val == scalar + return torch.ops.aten.eq.Scalar(lhs_val, scalar) inputs = [torch.randn(shape)] self.run_test( equal(), inputs, - expected_ops={torch.ops.aten.eq.Scalar}, output_dtypes=[torch.bool], ) diff --git a/tests/py/dynamo/conversion/test_erf_aten.py b/tests/py/dynamo/conversion/test_erf_aten.py index e50deeb5bb..3f52e436b4 100644 --- a/tests/py/dynamo/conversion/test_erf_aten.py +++ b/tests/py/dynamo/conversion/test_erf_aten.py @@ -19,14 +19,13 @@ class TestErfConverter(DispatchTestCase): def test_erf_float(self, _, x, type): class erf(nn.Module): def forward(self, input): - return torch.erf(input) + return torch.ops.aten.erf.default(input) inputs = [torch.randn(x, dtype=type)] self.run_test( erf(), inputs, precision=type, - expected_ops={torch.ops.aten.erf.default}, ) @parameterized.expand( @@ -38,13 +37,12 @@ def forward(self, input): def test_erf_int(self, _, x, type, min, max): class erf(nn.Module): def forward(self, input): - return torch.erf(input) + return torch.ops.aten.erf.default(input) inputs = [torch.randint(min, max, x, dtype=type)] self.run_test( erf(), inputs, - expected_ops={torch.ops.aten.erf.default}, ) diff --git a/tests/py/dynamo/conversion/test_evaluators.py b/tests/py/dynamo/conversion/test_evaluators.py index b7a0ce7ac5..6302f7a0ac 100644 --- a/tests/py/dynamo/conversion/test_evaluators.py +++ b/tests/py/dynamo/conversion/test_evaluators.py @@ -29,8 +29,6 @@ def forward(self, x): self.run_test( GetItem(), inputs, - expected_ops={operator.getitem}, - disable_passes=True, ) diff --git a/tests/py/dynamo/conversion/test_exp_aten.py b/tests/py/dynamo/conversion/test_exp_aten.py index 4a333cbbb7..ac1c5dfbcb 100644 --- a/tests/py/dynamo/conversion/test_exp_aten.py +++ b/tests/py/dynamo/conversion/test_exp_aten.py @@ -18,13 +18,12 @@ class TestExpConverter(DispatchTestCase): def test_exp_float(self, input_shape, dtype): class exp(nn.Module): def forward(self, input): - return torch.exp(input) + return torch.ops.aten.exp.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( exp(), inputs, - expected_ops={torch.ops.aten.exp.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_exp_int(self, input_shape, dtype, low, high): class exp(nn.Module): def forward(self, input): - return torch.exp(input) + return torch.ops.aten.exp.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( exp(), inputs, - expected_ops={torch.ops.aten.exp.default}, ) diff --git a/tests/py/dynamo/conversion/test_expand_aten.py b/tests/py/dynamo/conversion/test_expand_aten.py index ca76c99f48..0d35700139 100644 ---
a/tests/py/dynamo/conversion/test_expand_aten.py +++ b/tests/py/dynamo/conversion/test_expand_aten.py @@ -19,13 +19,12 @@ class TestExpandConverter(DispatchTestCase): def test_expand(self, _, sizes, init_size): class Expand(nn.Module): def forward(self, x): - return x.expand(*sizes) + return torch.ops.aten.expand.default(x, sizes) inputs = [torch.randn(*init_size)] self.run_test( Expand(), inputs, - expected_ops={torch.ops.aten.expand.default}, ) diff --git a/tests/py/dynamo/conversion/test_floor_aten.py b/tests/py/dynamo/conversion/test_floor_aten.py index 397e40391d..7b3e535590 100644 --- a/tests/py/dynamo/conversion/test_floor_aten.py +++ b/tests/py/dynamo/conversion/test_floor_aten.py @@ -18,13 +18,12 @@ class TestFloorConverter(DispatchTestCase): def test_floor_float(self, input_shape, dtype): class floor(nn.Module): def forward(self, input): - return torch.floor(input) + return torch.ops.aten.floor.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( floor(), inputs, - expected_ops={torch.ops.aten.floor.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_floor_int(self, input_shape, dtype, low, high): class floor(nn.Module): def forward(self, input): - return torch.floor(input) + return torch.ops.aten.floor.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( floor(), inputs, - expected_ops={torch.ops.aten.floor.default}, check_dtype=False, ) diff --git a/tests/py/dynamo/conversion/test_floor_div_aten.py b/tests/py/dynamo/conversion/test_floor_div_aten.py index 329e8bca8a..1b7d0425f3 100644 --- a/tests/py/dynamo/conversion/test_floor_div_aten.py +++ b/tests/py/dynamo/conversion/test_floor_div_aten.py @@ -17,13 +17,12 @@ class TestFloorDivConverter(DispatchTestCase): def test_floor_div_default(self, _, shape): class floor_div(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.floor_divide(lhs_val, rhs_val) + return torch.ops.aten.floor_divide.default(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( floor_div(), inputs, - expected_ops={torch.ops.aten.floor_divide.default}, ) @parameterized.expand( @@ -35,13 +34,14 @@ def forward(self, lhs_val, rhs_val): def test_floor_div_tensor_scalar(self, _, shape, scalar): class floor_div(nn.Module): def forward(self, lhs_val): - return torch.floor_divide(lhs_val, torch.tensor(scalar)) + return torch.ops.aten.floor_divide.default( + lhs_val, torch.tensor(scalar) + ) inputs = [torch.randn(shape)] self.run_test( floor_div(), inputs, - expected_ops={torch.ops.aten.floor_divide.default}, ) @parameterized.expand( @@ -53,13 +53,12 @@ def forward(self, lhs_val): def test_floor_div_scalar(self, _, shape, scalar): class floor_div(nn.Module): def forward(self, lhs_val): - return torch.floor_divide(lhs_val, scalar) + return torch.ops.aten.floor_divide.default(lhs_val, scalar) inputs = [torch.randn(shape)] self.run_test( floor_div(), inputs, - expected_ops={torch.ops.aten.floor_divide.default}, ) diff --git a/tests/py/dynamo/conversion/test_gelu_aten.py b/tests/py/dynamo/conversion/test_gelu_aten.py index e6f234f299..df0a0eca5f 100644 --- a/tests/py/dynamo/conversion/test_gelu_aten.py +++ b/tests/py/dynamo/conversion/test_gelu_aten.py @@ -12,15 +12,15 @@ class TestGeLUConverter(DispatchTestCase): def test_gelu(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.gelu(x) + return torch.ops.aten.gelu.default(x) inputs = [torch.randn(1, 10)] - self.run_test(TestModule(), inputs, 
expected_ops={torch.ops.aten.gelu.default}) + self.run_test(TestModule(), inputs) def test_gelu_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.gelu(x) + return torch.ops.aten.gelu.default(x) input_specs = [ Input( @@ -29,14 +29,12 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.gelu.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_gelu_with_dynamic_shape_four_dimensions(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.gelu(x) + return torch.ops.aten.gelu.default(x) input_specs = [ Input( @@ -46,9 +44,7 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.gelu.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_greater_aten.py b/tests/py/dynamo/conversion/test_greater_aten.py index d677c1583f..230fff23d8 100644 --- a/tests/py/dynamo/conversion/test_greater_aten.py +++ b/tests/py/dynamo/conversion/test_greater_aten.py @@ -17,13 +17,12 @@ class TestGreaterConverter(DispatchTestCase): def test_greater_tensor(self, _, shape): class greater(nn.Module): def forward(self, lhs_val, rhs_val): - return lhs_val > rhs_val + return torch.ops.aten.gt.Tensor(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( greater(), inputs, - expected_ops={torch.ops.aten.gt.Tensor}, output_dtypes=[torch.bool], ) @@ -36,13 +35,12 @@ def forward(self, lhs_val, rhs_val): def test_greater_tensor_scalar(self, _, shape, scalar): class greater(nn.Module): def forward(self, lhs_val): - return lhs_val > torch.tensor(scalar) + return torch.ops.aten.gt.Tensor(lhs_val, torch.tensor(scalar)) inputs = [torch.randn(shape)] self.run_test( greater(), inputs, - expected_ops={torch.ops.aten.gt.Tensor}, output_dtypes=[torch.bool], ) @@ -55,13 +53,12 @@ def forward(self, lhs_val): def test_greater_scalar(self, _, shape, scalar): class greater(nn.Module): def forward(self, lhs_val): - return lhs_val > scalar + return torch.ops.aten.gt.Scalar(lhs_val, scalar) inputs = [torch.randn(shape)] self.run_test( greater(), inputs, - expected_ops={torch.ops.aten.gt.Scalar}, output_dtypes=[torch.bool], ) diff --git a/tests/py/dynamo/conversion/test_hard_sigmoid_aten.py b/tests/py/dynamo/conversion/test_hard_sigmoid_aten.py index 2e1f5ddd5b..20014b9347 100644 --- a/tests/py/dynamo/conversion/test_hard_sigmoid_aten.py +++ b/tests/py/dynamo/conversion/test_hard_sigmoid_aten.py @@ -1,25 +1,24 @@ import torch import torch.nn as nn -from .harness import DispatchTestCase from torch.testing._internal.common_utils import run_tests from torch_tensorrt import Input +from .harness import DispatchTestCase + class TestHardSigmoidConverter(DispatchTestCase): def test_hardsigmoid(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.hardsigmoid(x) + return torch.ops.aten.hardsigmoid.default(x) inputs = [torch.randn(1, 10)] - self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.hardsigmoid.default} - ) + self.run_test(TestModule(), inputs) def test_hardsigmoid_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.hardsigmoid(x) + return torch.ops.aten.hardsigmoid.default(x) input_specs = [ Input( @@ -28,14 +27,12 @@ def forward(self, x): 
shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.hardsigmoid.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_hardsigmoid_with_dynamic_shape_four_dimensions(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.hardsigmoid(x) + return torch.ops.aten.hardsigmoid.default(x) input_specs = [ Input( @@ -45,20 +42,17 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.hardsigmoid.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_hardsigmoid_fp16(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.hardsigmoid(x) + return torch.ops.aten.hardsigmoid.default(x) inputs = [torch.randn(1, 10)] self.run_test( TestModule(), inputs, - expected_ops={torch.ops.aten.hardsigmoid.default}, precision=torch.half, check_dtype=False, ) diff --git a/tests/py/dynamo/conversion/test_hardtanh_aten.py b/tests/py/dynamo/conversion/test_hardtanh_aten.py index d58a6880cb..1c8cae2d53 100644 --- a/tests/py/dynamo/conversion/test_hardtanh_aten.py +++ b/tests/py/dynamo/conversion/test_hardtanh_aten.py @@ -10,17 +10,15 @@ class TestHardTanHConverter(DispatchTestCase): def test_hardtanh(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.hardtanh(x) + return torch.ops.aten.hardtanh.default(x, -1.0, 1.0) inputs = [torch.randn(1, 10)] - self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.hardtanh.default} - ) + self.run_test(TestModule(), inputs) def test_hardtanh_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.hardtanh(x) + return torch.ops.aten.hardtanh.default(x, -1.0, 1.0) input_specs = [ Input( @@ -29,14 +27,12 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.hardtanh.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_hardtanh_with_dynamic_shape_four_dimensions(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.hardtanh(x) + return torch.ops.aten.hardtanh.default(x, -1.0, 1.0) input_specs = [ Input( @@ -46,9 +42,7 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.hardtanh.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_index_aten.py b/tests/py/dynamo/conversion/test_index_aten.py index 828da7c5e9..393eb53c63 100644 --- a/tests/py/dynamo/conversion/test_index_aten.py +++ b/tests/py/dynamo/conversion/test_index_aten.py @@ -25,7 +25,6 @@ def forward(self, x): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.index.Tensor}, ) def test_index_zero_index_three_dim(self): @@ -43,7 +42,6 @@ def forward(self, x): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.index.Tensor}, ) def test_index_zero_index_one_index_two_three_dim(self): @@ -62,7 +60,6 @@ def forward(self, x): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.index.Tensor}, ) def test_index_zero_index_one_four_dim(self): @@ -81,7 +78,6 @@ def forward(self, x): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.index.Tensor}, ) def 
test_index_zero_index_one_four_dim_SD(self): @@ -104,7 +100,6 @@ def forward(self, x): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.index.Tensor}, ) def test_index_one_SD_unsqueeze_four_dim(self): @@ -125,7 +120,6 @@ def forward(self, x): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.index.Tensor}, ) def test_index_zero_index_one_index_two_SD_unsqueeze_four_dim_broadcast(self): @@ -148,7 +142,6 @@ def forward(self, x): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.index.Tensor}, ) def test_index_zero_index_one_index_four_dim_non_continuous(self): @@ -167,7 +160,6 @@ def forward(self, x): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.index.Tensor}, ) diff --git a/tests/py/dynamo/conversion/test_isinf_aten.py b/tests/py/dynamo/conversion/test_isinf_aten.py index b41294ca61..78695dbe21 100644 --- a/tests/py/dynamo/conversion/test_isinf_aten.py +++ b/tests/py/dynamo/conversion/test_isinf_aten.py @@ -29,13 +29,12 @@ class TestIsInfConverter(DispatchTestCase): def test_isinf_float(self, data): class isinf(nn.Module): def forward(self, input): - return torch.isinf(input) + return torch.ops.aten.isinf.default(input) inputs = [data] self.run_test( isinf(), inputs, - expected_ops={torch.ops.aten.isinf.default}, output_dtypes=[torch.bool], ) @@ -49,13 +48,12 @@ def forward(self, input): def test_isinf_int(self, input_shape, dtype, low, high): class isinf(nn.Module): def forward(self, input): - return torch.isinf(input) + return torch.ops.aten.isinf.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( isinf(), inputs, - expected_ops={torch.ops.aten.isinf.default}, output_dtypes=[torch.bool], ) diff --git a/tests/py/dynamo/conversion/test_layer_norm_aten.py b/tests/py/dynamo/conversion/test_layer_norm_aten.py index 8498877061..0cc374e307 100644 --- a/tests/py/dynamo/conversion/test_layer_norm_aten.py +++ b/tests/py/dynamo/conversion/test_layer_norm_aten.py @@ -1,3 +1,5 @@ +import unittest + import torch from torch.testing._internal.common_utils import run_tests from torch_tensorrt import Input @@ -6,6 +8,7 @@ class TestLayerNormConverter(DispatchTestCase): + @unittest.skip("Pending ongoing work on layernorm converter in Dynamo") def test_layer_norm(self): class TestModule(torch.nn.Module): def __init__(self): @@ -20,6 +23,7 @@ def forward(self, x): TestModule(), inputs, expected_ops={torch.ops.aten.layer_norm.default} ) + @unittest.skip("Pending ongoing work on layernorm converter in Dynamo") def test_layernorm_with_dynamic_shape(self): class TestModule(torch.nn.Module): def __init__(self): diff --git a/tests/py/dynamo/conversion/test_leaky_relu_aten.py b/tests/py/dynamo/conversion/test_leaky_relu_aten.py index 577b70a4af..346f1e92ec 100644 --- a/tests/py/dynamo/conversion/test_leaky_relu_aten.py +++ b/tests/py/dynamo/conversion/test_leaky_relu_aten.py @@ -10,17 +10,15 @@ class TestLeakyReLUConverter(DispatchTestCase): def test_leaky_relu(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.leaky_relu(x, negative_slope=0.05) + return torch.ops.aten.leaky_relu.default(x, 0.05) inputs = [torch.randn(1, 10)] - self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.leaky_relu.default} - ) + self.run_test(TestModule(), inputs) def test_leaky_relu_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.leaky_relu(x, negative_slope=0.05) + return torch.ops.aten.leaky_relu.default(x, 0.05) input_specs = [ 
Input( @@ -29,14 +27,12 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.leaky_relu.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_leaky_relu_with_dynamic_shape_four_dimensions(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.leaky_relu(x, negative_slope=0.05) + return torch.ops.aten.leaky_relu.default(x, 0.05) input_specs = [ Input( @@ -46,9 +42,7 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.leaky_relu.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_less_aten.py b/tests/py/dynamo/conversion/test_less_aten.py index 35efb38791..28ca2cb514 100644 --- a/tests/py/dynamo/conversion/test_less_aten.py +++ b/tests/py/dynamo/conversion/test_less_aten.py @@ -17,13 +17,12 @@ class TestLessConverter(DispatchTestCase): def test_less_tensor(self, _, shape): class less(nn.Module): def forward(self, lhs_val, rhs_val): - return lhs_val < rhs_val + return torch.ops.aten.lt.Tensor(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( less(), inputs, - expected_ops={torch.ops.aten.lt.Tensor}, output_dtypes=[torch.bool], ) @@ -36,13 +35,12 @@ def forward(self, lhs_val, rhs_val): def test_less_tensor_scalar(self, _, shape, scalar): class less(nn.Module): def forward(self, lhs_val): - return lhs_val < torch.tensor(scalar) + return torch.ops.aten.lt.Tensor(lhs_val, torch.tensor(scalar)) inputs = [torch.randn(shape)] self.run_test( less(), inputs, - expected_ops={torch.ops.aten.lt.Tensor}, output_dtypes=[torch.bool], ) @@ -55,13 +53,12 @@ def forward(self, lhs_val): def test_less_scalar(self, _, shape, scalar): class less(nn.Module): def forward(self, lhs_val): - return lhs_val < scalar + return torch.ops.aten.lt.Scalar(lhs_val, scalar) inputs = [torch.randn(shape)] self.run_test( less(), inputs, - expected_ops={torch.ops.aten.lt.Scalar}, output_dtypes=[torch.bool], ) diff --git a/tests/py/dynamo/conversion/test_linear_aten.py b/tests/py/dynamo/conversion/test_linear_aten.py index 37fed4a305..63b324c78f 100644 --- a/tests/py/dynamo/conversion/test_linear_aten.py +++ b/tests/py/dynamo/conversion/test_linear_aten.py @@ -8,20 +8,20 @@ class TestLinearConverter(DispatchTestCase): @parameterized.expand( [ - ("default", [1, 512], True, torch.ops.aten.linear), - ("matrix", [5, 512], True, torch.ops.aten.linear), - ("no_bias", [1, 512], False, torch.ops.aten.linear), + ("default", [1, 512], True, torch.ops.aten.linear.default), + ("matrix", [5, 512], True, torch.ops.aten.linear.default), + ("no_bias", [1, 512], False, torch.ops.aten.linear.default), ( "multi_dim_matrix", [4, 5, 512], True, - torch.ops.aten.linear, + torch.ops.aten.linear.default, ), ( "multi_dim_matrix", [4, 5, 512], False, - torch.ops.aten.linear, + torch.ops.aten.linear.default, ), ] ) @@ -29,13 +29,17 @@ def test_linear(self, test_name, shape, bias, op): class TestModule(torch.nn.Module): def __init__(self): super().__init__() - self.linear = torch.nn.Linear(512, 256, bias) + self.weight = torch.randn((256, 512)) + if bias: + self.bias = torch.randn((256)) + else: + self.bias = None def forward(self, x): - return self.linear(x) + return torch.ops.aten.linear.default(x, self.weight, self.bias) inputs = [torch.randn(shape)] - self.run_test(TestModule(), inputs, 
expected_ops={op}) + self.run_test(TestModule(), inputs) # linear will be decomposed to P531484488 and view(reshape) can not handle reshape pattern # like (2, 3, n)->(6, n) in implicit mode which is similar to dynamic shape test below. diff --git a/tests/py/dynamo/conversion/test_log_aten.py b/tests/py/dynamo/conversion/test_log_aten.py index 3c0b007799..662b7ab99d 100644 --- a/tests/py/dynamo/conversion/test_log_aten.py +++ b/tests/py/dynamo/conversion/test_log_aten.py @@ -18,13 +18,12 @@ class TestLogConverter(DispatchTestCase): def test_log_float(self, input_shape, dtype): class log(nn.Module): def forward(self, input): - return torch.log(input) + return torch.ops.aten.log.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( log(), inputs, - expected_ops={torch.ops.aten.log.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_log_int(self, input_shape, dtype, low, high): class log(nn.Module): def forward(self, input): - return torch.log(input) + return torch.ops.aten.log.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( log(), inputs, - expected_ops={torch.ops.aten.log.default}, ) diff --git a/tests/py/dynamo/conversion/test_logical_and_aten.py b/tests/py/dynamo/conversion/test_logical_and_aten.py index b9c1f383ba..ce57fccc1f 100644 --- a/tests/py/dynamo/conversion/test_logical_and_aten.py +++ b/tests/py/dynamo/conversion/test_logical_and_aten.py @@ -17,13 +17,12 @@ class TestLogicalAndConverter(DispatchTestCase): def test_logical_and(self, _, shape): class logical_and(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.logical_and(lhs_val, rhs_val) + return torch.ops.aten.logical_and.default(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( logical_and(), inputs, - expected_ops={torch.ops.aten.logical_and.default}, ) diff --git a/tests/py/dynamo/conversion/test_logical_not_aten.py b/tests/py/dynamo/conversion/test_logical_not_aten.py index cf269fe0e5..a36a8dbf72 100644 --- a/tests/py/dynamo/conversion/test_logical_not_aten.py +++ b/tests/py/dynamo/conversion/test_logical_not_aten.py @@ -16,13 +16,12 @@ class TestLogicalNotConverter(DispatchTestCase): def test_logical_not_bool(self, data): class logical_not(nn.Module): def forward(self, input): - return torch.logical_not(input) + return torch.ops.aten.logical_not.default(input) inputs = [data] self.run_test( logical_not(), inputs, - expected_ops={torch.ops.aten.logical_not.default}, output_dtypes=[torch.bool], ) @@ -36,13 +35,12 @@ def forward(self, input): def test_logical_not_int(self, input_shape, dtype, low, high): class logical_not(nn.Module): def forward(self, input): - return torch.logical_not(input) + return torch.ops.aten.logical_not.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( logical_not(), inputs, - expected_ops={torch.ops.aten.logical_not.default}, output_dtypes=[torch.bool], ) @@ -56,13 +54,12 @@ def forward(self, input): def test_logical_not_float(self, input_shape, dtype): class logical_not(nn.Module): def forward(self, input): - return torch.logical_not(input) + return torch.ops.aten.logical_not.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( logical_not(), inputs, - expected_ops={torch.ops.aten.logical_not.default}, output_dtypes=[torch.bool], ) diff --git a/tests/py/dynamo/conversion/test_logical_or_aten.py b/tests/py/dynamo/conversion/test_logical_or_aten.py index df8e577932..63772fe360 100644 
--- a/tests/py/dynamo/conversion/test_logical_or_aten.py +++ b/tests/py/dynamo/conversion/test_logical_or_aten.py @@ -17,13 +17,12 @@ class TestLogicalOrConverter(DispatchTestCase): def test_logical_or(self, _, shape): class logical_or(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.logical_or(lhs_val, rhs_val) + return torch.ops.aten.logical_or.default(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( logical_or(), inputs, - expected_ops={torch.ops.aten.logical_or.default}, ) diff --git a/tests/py/dynamo/conversion/test_logical_xor_aten.py b/tests/py/dynamo/conversion/test_logical_xor_aten.py index c31a31541d..880ba77f53 100644 --- a/tests/py/dynamo/conversion/test_logical_xor_aten.py +++ b/tests/py/dynamo/conversion/test_logical_xor_aten.py @@ -17,13 +17,12 @@ class TestLogicalXorConverter(DispatchTestCase): def test_logical_xor(self, _, shape): class logical_xor(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.logical_xor(lhs_val, rhs_val) + return torch.ops.aten.logical_xor.default(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( logical_xor(), inputs, - expected_ops={torch.ops.aten.logical_xor.default}, ) diff --git a/tests/py/dynamo/conversion/test_matmul_aten.py b/tests/py/dynamo/conversion/test_matmul_aten.py index 816686c4ec..bb54348bcf 100644 --- a/tests/py/dynamo/conversion/test_matmul_aten.py +++ b/tests/py/dynamo/conversion/test_matmul_aten.py @@ -53,14 +53,13 @@ def __init__(self): self.other = nn.Parameter(torch.randn(*other_shape)) def forward(self, input): - return torch.matmul(input, self.other) + return torch.ops.aten.mm.default(input, self.other) inputs = [torch.randn(*input_shape)] self.run_test( MatMul(), inputs, - expected_ops={torch.ops.aten.mm.default}, ) @parameterized.expand( @@ -94,14 +93,13 @@ def __init__(self): self.other = nn.Parameter(torch.randn(*other_shape)) def forward(self, input): - return torch.matmul(input, self.other) + return torch.ops.aten.mv.default(input, self.other) inputs = [torch.randn(*input_shape)] self.run_test( MatMul(), inputs, - expected_ops={torch.ops.aten.mv.default}, ) @parameterized.expand( @@ -118,14 +116,13 @@ def forward(self, input): def test_matmul(self, _, input_shape, other_shape): class MatMul(nn.Module): def forward(self, input, other): - return torch.matmul(input, other) + return torch.ops.aten.mm.default(input, other) inputs = [torch.randn(*input_shape), torch.randn(*other_shape)] self.run_test( MatMul(), inputs, - expected_ops={torch.ops.aten.mm.default}, ) # FIXME: dynamic shape is giving bmm diff --git a/tests/py/dynamo/conversion/test_max_aten.py b/tests/py/dynamo/conversion/test_max_aten.py index 2be1d9c74b..d2247f61dd 100644 --- a/tests/py/dynamo/conversion/test_max_aten.py +++ b/tests/py/dynamo/conversion/test_max_aten.py @@ -17,13 +17,12 @@ class TestMaxConverter(DispatchTestCase): def test_max(self, _, shape): class max(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.max(lhs_val, rhs_val) + return torch.ops.aten.maximum.default(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( max(), inputs, - expected_ops={torch.ops.aten.maximum.default}, ) diff --git a/tests/py/dynamo/conversion/test_mean_aten.py b/tests/py/dynamo/conversion/test_mean_aten.py index c341dfe382..638dc9c62f 100644 --- a/tests/py/dynamo/conversion/test_mean_aten.py +++ b/tests/py/dynamo/conversion/test_mean_aten.py @@ -10,15 +10,15 @@ class
TestMeanDimConverter(DispatchTestCase): def test_mean_dim_keepdims(self): class TestModule(nn.Module): def forward(self, x): - return torch.mean(x, dim=[0, 1], keepdim=True) + return torch.ops.aten.mean.dim(x, [0, 1], True) inputs = [torch.randn(1, 10)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.mean.dim}) + self.run_test(TestModule(), inputs) def test_mean_dim_keepdims_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return torch.mean(x, dim=[0, 1, 2], keepdim=True) + return torch.ops.aten.mean.dim(x, [0, 1, 2], True) input_specs = [ Input( @@ -27,22 +27,20 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.mean.dim} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_mean_dim_keepdims_false(self): class TestModule(nn.Module): def forward(self, x): - return torch.mean(x, dim=0, keepdim=False) + return torch.ops.aten.mean.dim(x, 0, False) inputs = [torch.randn(3, 5, 7)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.mean.dim}) + self.run_test(TestModule(), inputs) def test_mean_dim_keepdims_false_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return torch.mean(x, dim=-1, keepdim=False) + return torch.ops.aten.mean.dim(x, -1, False) input_specs = [ Input( @@ -51,24 +49,22 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.mean.dim} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) class TestMeanConverter(DispatchTestCase): def test_mean(self): class TestModule(nn.Module): def forward(self, x): - return torch.mean(x) + return torch.ops.aten.mean.default(x) inputs = [torch.randn(3, 8, 5, 7, 1)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.mean.default}) + self.run_test(TestModule(), inputs) def test_mean_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return torch.mean(x) + return torch.ops.aten.mean.default(x) input_specs = [ Input( @@ -77,9 +73,7 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 5, 8), (3, 10, 10))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.mean.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_min_aten.py b/tests/py/dynamo/conversion/test_min_aten.py index 35d0d7163f..49853cb111 100644 --- a/tests/py/dynamo/conversion/test_min_aten.py +++ b/tests/py/dynamo/conversion/test_min_aten.py @@ -17,13 +17,12 @@ class TestMinConverter(DispatchTestCase): def test_min(self, _, shape): class min(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.min(lhs_val, rhs_val) + return torch.ops.aten.minimum.default(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( min(), inputs, - expected_ops={torch.ops.aten.minimum.default}, ) diff --git a/tests/py/dynamo/conversion/test_mul_aten.py b/tests/py/dynamo/conversion/test_mul_aten.py index fecd1e06f4..30845800c0 100644 --- a/tests/py/dynamo/conversion/test_mul_aten.py +++ b/tests/py/dynamo/conversion/test_mul_aten.py @@ -17,13 +17,12 @@ class TestMulConverter(DispatchTestCase): def test_mul_tensor(self, _, shape): class mul(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.mul(lhs_val, rhs_val) + 
return torch.ops.aten.mul.Tensor(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( mul(), inputs, - expected_ops={torch.ops.aten.mul.Tensor}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, lhs_val, rhs_val): def test_mul_scalar(self, _, shape, scalar): class mul(nn.Module): def forward(self, lhs_val): - return torch.mul(lhs_val, scalar) + return torch.ops.aten.mul.Tensor(lhs_val, scalar) inputs = [torch.randn(shape)] self.run_test( mul(), inputs, - expected_ops={torch.ops.aten.mul.Tensor}, ) diff --git a/tests/py/dynamo/conversion/test_neg_aten.py b/tests/py/dynamo/conversion/test_neg_aten.py index bcb95b4172..c49fc32c23 100644 --- a/tests/py/dynamo/conversion/test_neg_aten.py +++ b/tests/py/dynamo/conversion/test_neg_aten.py @@ -19,14 +19,13 @@ class TestNegConverter(DispatchTestCase): def test_neg_float(self, _, x, type): class neg(nn.Module): def forward(self, input): - return torch.neg(input) + return torch.ops.aten.neg.default(input) inputs = [torch.randn(x, dtype=type)] self.run_test( neg(), inputs, precision=type, - expected_ops={torch.ops.aten.neg.default}, ) @parameterized.expand( @@ -38,13 +37,12 @@ def forward(self, input): def test_neg_int(self, _, x, type, min, max): class neg(nn.Module): def forward(self, input): - return torch.neg(input) + return torch.ops.aten.neg.default(input) inputs = [torch.randint(min, max, x, dtype=type)] self.run_test( neg(), inputs, - expected_ops={torch.ops.aten.neg.default}, check_dtype=False, ) diff --git a/tests/py/dynamo/conversion/test_permutation_aten.py b/tests/py/dynamo/conversion/test_permutation_aten.py index 04c1ab1092..5bcdba3fd9 100644 --- a/tests/py/dynamo/conversion/test_permutation_aten.py +++ b/tests/py/dynamo/conversion/test_permutation_aten.py @@ -17,10 +17,10 @@ class TestPermuteConverter(DispatchTestCase): def test_permute_list(self, _, permutation): class Permute(nn.Module): def forward(self, x): - return x.permute(permutation) + return torch.ops.aten.permute.default(x, permutation) inputs = [torch.randn(1, 3, 2)] - self.run_test(Permute(), inputs, expected_ops={torch.ops.aten.permute.default}) + self.run_test(Permute(), inputs) @parameterized.expand( [ @@ -31,15 +31,15 @@ def forward(self, x): def test_permute(self, _, permutation): class Permute(nn.Module): def forward(self, x): - return x.permute(*permutation) + return torch.ops.aten.permute.default(x, permutation) inputs = [torch.randn(1, 3, 2)] - self.run_test(Permute(), inputs, expected_ops={torch.ops.aten.permute.default}) + self.run_test(Permute(), inputs) def test_permute_with_dynamic_shape(self): class Permute(nn.Module): def forward(self, x): - return x.permute(1, 2, 0) + return torch.ops.aten.permute.default(x, (1, 2, 0)) input_specs = [ Input( @@ -48,14 +48,12 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - Permute(), input_specs, expected_ops={torch.ops.aten.permute.default} - ) + self.run_test_with_dynamic_shape(Permute(), input_specs) def test_permute_with_dynamic_shape_four_dimensions(self): class Permute(nn.Module): def forward(self, x): - return x.permute(1, 2, 3, 0) + return torch.ops.aten.permute.default(x, (1, 2, 3, 0)) input_specs = [ Input( @@ -65,9 +63,7 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - Permute(), input_specs, expected_ops={torch.ops.aten.permute.default} - ) + self.run_test_with_dynamic_shape(Permute(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_pool_aten.py 
b/tests/py/dynamo/conversion/test_pool_aten.py index 4bd6e8ba25..93f2094184 100644 --- a/tests/py/dynamo/conversion/test_pool_aten.py +++ b/tests/py/dynamo/conversion/test_pool_aten.py @@ -39,7 +39,7 @@ def forward(self, x): self.run_test( TestModule(), inputs, - expected_ops={torch.ops.aten.avg_pool2d.default}, + use_dynamo_tracer=True, ) @parameterized.expand( @@ -77,9 +77,7 @@ def forward(self, x): return self.pool(x) inputs = [torch.randn(1, 3, 32, 32)] - self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.avg_pool2d.default} - ) + self.run_test(TestModule(), inputs, use_dynamo_tracer=True) @parameterized.expand( [ @@ -116,9 +114,7 @@ def forward(self, x): return self.pool(x) inputs = [torch.randn(1, 3, 32, 32, 32)] - self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.avg_pool3d.default} - ) + self.run_test(TestModule(), inputs, use_dynamo_tracer=True) @parameterized.expand( [ @@ -153,7 +149,8 @@ def forward(self, x): self.run_test( TestModule(), inputs, - expected_ops={torch.ops.aten.max_pool2d}, + use_dynamo_tracer=True, + enable_passes=True, ) @parameterized.expand( @@ -191,7 +188,7 @@ def forward(self, x): return self.pool(x) inputs = [torch.randn(1, 3, 32, 32)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.max_pool2d}) + self.run_test(TestModule(), inputs, use_dynamo_tracer=True, enable_passes=True) @parameterized.expand( [ @@ -228,7 +225,7 @@ def forward(self, x): return self.pool(x) inputs = [torch.randn(1, 3, 32, 32, 32)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.max_pool3d}) + self.run_test(TestModule(), inputs, use_dynamo_tracer=True, enable_passes=True) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_pow_aten.py b/tests/py/dynamo/conversion/test_pow_aten.py index 29dd74eb07..5aeae49d62 100644 --- a/tests/py/dynamo/conversion/test_pow_aten.py +++ b/tests/py/dynamo/conversion/test_pow_aten.py @@ -17,13 +17,12 @@ class TestPowConverter(DispatchTestCase): def test_pow_tensor_tensor(self, _, shape): class pow(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.pow(lhs_val, rhs_val) + return torch.ops.aten.pow.Tensor_Tensor(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( pow(), inputs, - expected_ops={torch.ops.aten.pow.Tensor_Tensor}, ) @parameterized.expand( @@ -35,13 +34,12 @@ def forward(self, lhs_val, rhs_val): def test_pow_scalar(self, _, shape, scalar): class pow(nn.Module): def forward(self, rhs_val): - return torch.pow(scalar, rhs_val) + return torch.ops.aten.pow.Scalar(scalar, rhs_val) inputs = [torch.randn(shape)] self.run_test( pow(), inputs, - expected_ops={torch.ops.aten.pow.Scalar}, ) @parameterized.expand( @@ -53,13 +51,12 @@ def forward(self, rhs_val): def test_pow_tensor_scalar(self, _, shape, scalar): class pow(nn.Module): def forward(self, lhs_val): - return torch.pow(lhs_val, scalar) + return torch.ops.aten.pow.Tensor_Scalar(lhs_val, scalar) inputs = [torch.randn(shape)] self.run_test( pow(), inputs, - expected_ops={torch.ops.aten.pow.Tensor_Scalar}, ) diff --git a/tests/py/dynamo/conversion/test_recip_aten.py b/tests/py/dynamo/conversion/test_recip_aten.py index e7fae73da2..c34fcb2f08 100644 --- a/tests/py/dynamo/conversion/test_recip_aten.py +++ b/tests/py/dynamo/conversion/test_recip_aten.py @@ -18,13 +18,12 @@ class TestRecipConverter(DispatchTestCase): def test_recip_float(self, input_shape, dtype): class recip(nn.Module): def forward(self, input): - return torch.reciprocal(input) + return 
torch.ops.aten.reciprocal.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( recip(), inputs, - expected_ops={torch.ops.aten.reciprocal.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_recip_int(self, input_shape, dtype, low, high): class recip(nn.Module): def forward(self, input): - return torch.reciprocal(input) + return torch.ops.aten.reciprocal.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( recip(), inputs, - expected_ops={torch.ops.aten.reciprocal.default}, ) diff --git a/tests/py/dynamo/conversion/test_relu_aten.py b/tests/py/dynamo/conversion/test_relu_aten.py index 4d70a95fd7..ca36a88599 100644 --- a/tests/py/dynamo/conversion/test_relu_aten.py +++ b/tests/py/dynamo/conversion/test_relu_aten.py @@ -10,15 +10,15 @@ class TestReLUConverter(DispatchTestCase): def test_relu(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.relu(x) + return torch.ops.aten.relu.default(x) inputs = [torch.randn(1, 10)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.relu.default}) + self.run_test(TestModule(), inputs) def test_relu_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.relu(x) + return torch.ops.aten.relu.default(x) input_specs = [ Input( @@ -27,14 +27,12 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.relu.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_relu_with_dynamic_shape_four_dimensions(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.relu(x) + return torch.ops.aten.relu.default(x) input_specs = [ Input( @@ -45,7 +43,8 @@ def forward(self, x): ] self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.relu.default} + TestModule(), + input_specs, ) diff --git a/tests/py/dynamo/conversion/test_reshape_aten.py b/tests/py/dynamo/conversion/test_reshape_aten.py index a4e2186999..6be138303d 100644 --- a/tests/py/dynamo/conversion/test_reshape_aten.py +++ b/tests/py/dynamo/conversion/test_reshape_aten.py @@ -27,13 +27,12 @@ def __init__(self, target_shape): self.target_shape = target_shape def forward(self, x): - return torch.reshape(x, self.target_shape) + return torch.ops.aten.view.default(x, self.target_shape) inputs = [torch.randn(1, 2, 10)] self.run_test( TestModule(target_shape), inputs, - expected_ops={torch.ops.aten.view.default}, ) @parameterized.expand( @@ -54,7 +53,7 @@ def __init__(self, target_shape): self.target_shape = target_shape def forward(self, x): - return torch.reshape(x, self.target_shape) + return torch.ops.aten.view.default(x, self.target_shape) input_specs = [ Input( @@ -66,37 +65,6 @@ def forward(self, x): self.run_test_with_dynamic_shape( TestModule(target_shape), input_specs, - expected_ops={torch.ops.aten.view.default}, - ) - - @unittest.skipIf( - trt.__version__ < "8.5", - "Shape tensor supported well in TensorRT 8.5 and later", - ) - def test_reshape_with_dynamic_shape_size(self): - class TestModule(torch.nn.Module): - def forward(self, x, y): - shape_y = y.shape - t = shape_y[1] - return torch.reshape(x, [-1, t, 3]) - - input_specs = [ - Input( - shape=(-1, 5, 6), - dtype=torch.float32, - shape_ranges=[((1, 5, 6), (3, 5, 6), (3, 5, 6))], - ), - Input( - shape=(-1, 5), - dtype=torch.float32, - shape_ranges=[((1, 5), (3, 5), (3, 
5))], - ), - ] - - self.run_test_with_dynamic_shape( - TestModule(), - input_specs, - expected_ops={torch.ops.aten.view.default}, ) diff --git a/tests/py/dynamo/conversion/test_round_aten.py b/tests/py/dynamo/conversion/test_round_aten.py index 2db3a04bb6..248d3922a5 100644 --- a/tests/py/dynamo/conversion/test_round_aten.py +++ b/tests/py/dynamo/conversion/test_round_aten.py @@ -18,13 +18,12 @@ class TestRoundConverter(DispatchTestCase): def test_round_float(self, input_shape, dtype): class round(nn.Module): def forward(self, input): - return torch.round(input) + return torch.ops.aten.round.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( round(), inputs, - expected_ops={torch.ops.aten.round.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_round_int(self, input_shape, dtype, low, high): class round(nn.Module): def forward(self, input): - return torch.round(input) + return torch.ops.aten.round.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( round(), inputs, - expected_ops={torch.ops.aten.round.default}, check_dtype=False, ) diff --git a/tests/py/dynamo/conversion/test_rsqrt_aten.py b/tests/py/dynamo/conversion/test_rsqrt_aten.py index 441b21cda3..6dbd425f60 100644 --- a/tests/py/dynamo/conversion/test_rsqrt_aten.py +++ b/tests/py/dynamo/conversion/test_rsqrt_aten.py @@ -17,13 +17,12 @@ class TestRSqrtConverter(DispatchTestCase): def test_rsqrt(self, _, x, alpha): class rsqrt(nn.Module): def forward(self, input): - return torch.rsqrt(input) + return torch.ops.aten.rsqrt.default(input) inputs = [torch.randn(x) + 1] self.run_test( rsqrt(), inputs, - expected_ops={torch.ops.aten.rsqrt.default}, ) diff --git a/tests/py/dynamo/conversion/test_select_aten.py b/tests/py/dynamo/conversion/test_select_aten.py index c708b36c58..4a9f0666a9 100644 --- a/tests/py/dynamo/conversion/test_select_aten.py +++ b/tests/py/dynamo/conversion/test_select_aten.py @@ -18,13 +18,12 @@ def __init__(self): super().__init__() def forward(self, input): - return torch.select(input, dim, index) + return torch.ops.aten.select.int(input, dim, index) input = [torch.randn(1, 2)] self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.select.int}, ) @@ -40,13 +39,12 @@ def __init__(self): super().__init__() def forward(self, input): - return torch.select(input, dim, index) + return torch.ops.aten.select.int(input, dim, index) input = [torch.randn(4, 4, 4, 4)] self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.select.int}, ) @@ -62,7 +60,7 @@ def __init__(self): super().__init__() def forward(self, input): - return torch.select(input, dim, index) + return torch.ops.aten.select.int(input, dim, index) input_spec = [ Input( @@ -71,9 +69,7 @@ def forward(self, input): shape_ranges=[((1, 3, 3), (3, 3, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_spec, expected_ops={torch.ops.aten.select.int} - ) + self.run_test_with_dynamic_shape(TestModule(), input_spec) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_selu_aten.py b/tests/py/dynamo/conversion/test_selu_aten.py deleted file mode 100644 index 6b1938c366..0000000000 --- a/tests/py/dynamo/conversion/test_selu_aten.py +++ /dev/null @@ -1,55 +0,0 @@ -import torch -import torch.nn as nn -from torch.testing._internal.common_utils import run_tests -from torch_tensorrt import Input - -from .harness import DispatchTestCase - - -class TestSeLUConverter(DispatchTestCase): - def test_selu(self): - 
class TestModule(nn.Module): - def forward(self, x): - return nn.functional.selu(x) - - inputs = [torch.randn(1, 10)] - - # Here, selu re-uses elu op - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.elu.default}) - - def test_selu_with_dynamic_shape(self): - class TestModule(nn.Module): - def forward(self, x): - return nn.functional.selu(x) - - input_specs = [ - Input( - shape=(-1, -1, -1), - dtype=torch.float32, - shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], - ), - ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.elu.default} - ) - - def test_selu_with_dynamic_shape_four_dimensions(self): - class TestModule(nn.Module): - def forward(self, x): - return nn.functional.selu(x) - - input_specs = [ - Input( - shape=(-1, -1, -1, -1), - dtype=torch.float32, - shape_ranges=[((1, 1, 1, 5), (1, 2, 3, 5), (3, 3, 3, 5))], - ), - ] - - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.elu.default} - ) - - -if __name__ == "__main__": - run_tests() diff --git a/tests/py/dynamo/conversion/test_sigmoid_aten.py b/tests/py/dynamo/conversion/test_sigmoid_aten.py index cf0b579428..b8cb27574e 100644 --- a/tests/py/dynamo/conversion/test_sigmoid_aten.py +++ b/tests/py/dynamo/conversion/test_sigmoid_aten.py @@ -10,17 +10,15 @@ class TestSigmoidConverter(DispatchTestCase): def test_sigmoid(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.sigmoid(x) + return torch.ops.aten.sigmoid.default(x) inputs = [torch.randn(1, 10)] - self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.sigmoid.default} - ) + self.run_test(TestModule(), inputs) def test_sigmoid_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.sigmoid(x) + return torch.ops.aten.sigmoid.default(x) input_specs = [ Input( @@ -29,14 +27,12 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.sigmoid.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_sigmoid_with_dynamic_shape_four_dimensions(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.sigmoid(x) + return torch.ops.aten.sigmoid.default(x) input_specs = [ Input( @@ -46,20 +42,17 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.sigmoid.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_sigmoid_fp16(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.sigmoid(x) + return torch.ops.aten.sigmoid.default(x) inputs = [torch.randn(1, 10)] self.run_test( TestModule(), inputs, - expected_ops={torch.ops.aten.sigmoid.default}, precision=torch.half, check_dtype=False, ) diff --git a/tests/py/dynamo/conversion/test_sign_aten.py b/tests/py/dynamo/conversion/test_sign_aten.py index 549e363d68..578d8b4040 100644 --- a/tests/py/dynamo/conversion/test_sign_aten.py +++ b/tests/py/dynamo/conversion/test_sign_aten.py @@ -18,13 +18,12 @@ class TestSignConverter(DispatchTestCase): def test_sign_float(self, input_shape, dtype): class sign(nn.Module): def forward(self, input): - return torch.sign(input) + return torch.ops.aten.sign.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( sign(), inputs, - expected_ops={torch.ops.aten.sign.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ 
def forward(self, input): def test_sign_int(self, input_shape, dtype, low, high): class sign(nn.Module): def forward(self, input): - return torch.sign(input) + return torch.ops.aten.sign.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( sign(), inputs, - expected_ops={torch.ops.aten.sign.default}, check_dtype=False, ) diff --git a/tests/py/dynamo/conversion/test_sin_aten.py b/tests/py/dynamo/conversion/test_sin_aten.py index 77660d73d9..70a3dc0315 100644 --- a/tests/py/dynamo/conversion/test_sin_aten.py +++ b/tests/py/dynamo/conversion/test_sin_aten.py @@ -18,13 +18,12 @@ class TestSinConverter(DispatchTestCase): def test_sin_float(self, input_shape, dtype): class sin(nn.Module): def forward(self, input): - return torch.sin(input) + return torch.ops.aten.sin.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( sin(), inputs, - expected_ops={torch.ops.aten.sin.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_sin_int(self, input_shape, dtype, low, high): class sin(nn.Module): def forward(self, input): - return torch.sin(input) + return torch.ops.aten.sin.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( sin(), inputs, - expected_ops={torch.ops.aten.sin.default}, ) diff --git a/tests/py/dynamo/conversion/test_sinh_aten.py b/tests/py/dynamo/conversion/test_sinh_aten.py index b482f5baf5..d17ab3b467 100644 --- a/tests/py/dynamo/conversion/test_sinh_aten.py +++ b/tests/py/dynamo/conversion/test_sinh_aten.py @@ -18,13 +18,12 @@ class TestSinhConverter(DispatchTestCase): def test_sinh_float(self, input_shape, dtype): class sinh(nn.Module): def forward(self, input): - return torch.sinh(input) + return torch.ops.aten.sinh.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( sinh(), inputs, - expected_ops={torch.ops.aten.sinh.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_sinh_int(self, input_shape, dtype, low, high): class sinh(nn.Module): def forward(self, input): - return torch.sinh(input) + return torch.ops.aten.sinh.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( sinh(), inputs, - expected_ops={torch.ops.aten.sinh.default}, ) diff --git a/tests/py/dynamo/conversion/test_slice_aten.py b/tests/py/dynamo/conversion/test_slice_aten.py index ad9275751c..60492aac62 100644 --- a/tests/py/dynamo/conversion/test_slice_aten.py +++ b/tests/py/dynamo/conversion/test_slice_aten.py @@ -25,7 +25,6 @@ def forward(self, input): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.slice.Tensor}, ) @@ -49,7 +48,6 @@ def forward(self, input): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.slice.Tensor}, ) @@ -79,7 +77,6 @@ def forward(self, input): self.run_test_with_dynamic_shape( TestModule(), input_specs, - expected_ops={torch.ops.aten.slice.Tensor}, ) diff --git a/tests/py/dynamo/conversion/test_softmax_aten.py b/tests/py/dynamo/conversion/test_softmax_aten.py index 84af9a92d7..8df9ab96f6 100644 --- a/tests/py/dynamo/conversion/test_softmax_aten.py +++ b/tests/py/dynamo/conversion/test_softmax_aten.py @@ -8,26 +8,16 @@ class TestSoftMaxConverter(DispatchTestCase): def test_softmax(self): class TestModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.softmax = torch.nn.Softmax(1) - def forward(self, x): - return self.softmax(x) + return torch.ops.aten._softmax.default(x, 1, False) inputs = 
[torch.randn(1, 3, 224, 224)] - self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten._softmax.default} - ) + self.run_test(TestModule(), inputs) def test_softmax_with_dynamic_shape(self): class TestModule(torch.nn.Module): - def __init__(self): - super().__init__() - self.softmax = torch.nn.Softmax(2) - def forward(self, x): - return self.softmax(x) + return torch.ops.aten._softmax.default(x, 2, False) input_specs = [ Input( @@ -37,9 +27,7 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten._softmax.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_softplus_aten.py b/tests/py/dynamo/conversion/test_softplus_aten.py index 41c7804ed7..29f3cd6d53 100644 --- a/tests/py/dynamo/conversion/test_softplus_aten.py +++ b/tests/py/dynamo/conversion/test_softplus_aten.py @@ -1,25 +1,24 @@ import torch import torch.nn as nn -from .harness import DispatchTestCase from torch.testing._internal.common_utils import run_tests from torch_tensorrt import Input +from .harness import DispatchTestCase + class TestSoftplusConverter(DispatchTestCase): def test_softplus(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.softplus(x) + return torch.ops.aten.softplus.default(x) inputs = [torch.randn(1, 10)] - self.run_test( - TestModule(), inputs, expected_ops={torch.ops.aten.softplus.default} - ) + self.run_test(TestModule(), inputs) def test_softplus_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.softplus(x) + return torch.ops.aten.softplus.default(x) input_specs = [ Input( @@ -28,14 +27,12 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.softplus.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_softplus_with_dynamic_shape_four_dimensions(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.softplus(x) + return torch.ops.aten.softplus.default(x) input_specs = [ Input( @@ -45,9 +42,7 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.softplus.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_split_aten.py b/tests/py/dynamo/conversion/test_split_aten.py index ffd8e145b9..142f9b337c 100644 --- a/tests/py/dynamo/conversion/test_split_aten.py +++ b/tests/py/dynamo/conversion/test_split_aten.py @@ -1,10 +1,11 @@ import torch -from .harness import DispatchTestCase from parameterized import parameterized from torch.testing._internal.common_utils import run_tests from torch_tensorrt import Input from torch_tensorrt.dynamo.conversion import UnsupportedOperatorException +from .harness import DispatchTestCase + # FIXME: check about implicit and explicit batch class TestSplitConverterNoDim(DispatchTestCase): @@ -19,15 +20,13 @@ def __init__(self): super().__init__() def forward(self, input): - out = torch.split(input, split_size_or_tensor) + out = torch.ops.aten.split.Tensor(input, split_size_or_tensor) return out input = [torch.randn(10).reshape(5, 2)] self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.split.Tensor}, - disable_passes=True, ) @parameterized.expand( @@ -41,15 +40,15 @@ def 
__init__(self): super().__init__() def forward(self, input): - out = torch.split(input, split_size_or_tensor) + out = torch.ops.aten.split_with_sizes.default( + input, split_size_or_tensor + ) return out input = [torch.randn(10).reshape(5, 2)] self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.split_with_sizes.default}, - disable_passes=True, ) @parameterized.expand( @@ -63,15 +62,13 @@ def __init__(self): super().__init__() def forward(self, input): - out = torch.split(input, split_size_or_tensor, dim) + out = torch.ops.aten.split.Tensor(input, split_size_or_tensor, dim) return out input = [torch.randn(10).reshape(5, 2)] self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.split.Tensor}, - disable_passes=True, ) @parameterized.expand( @@ -85,15 +82,15 @@ def __init__(self): super().__init__() def forward(self, input): - out = torch.split(input, split_size_or_tensor, dim) + out = torch.ops.aten.split_with_sizes.default( + input, split_size_or_tensor, dim + ) return out input = [torch.randn(10).reshape(5, 2)] self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.split_with_sizes.default}, - disable_passes=True, ) @parameterized.expand( @@ -107,7 +104,9 @@ def __init__(self): super().__init__() def forward(self, input): - out = torch.split(input, split_size_or_tensor, dim) + out = torch.ops.aten.split_with_sizes.default( + input, split_size_or_tensor, dim + ) return out input = [torch.randn(15).reshape(5, 3)] @@ -115,8 +114,6 @@ def forward(self, input): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.split_with_sizes.default}, - disable_passes=True, ) @parameterized.expand( @@ -130,7 +127,7 @@ def __init__(self): super().__init__() def forward(self, input): - out = torch.split(input, split_size_or_tensor, dim) + out = torch.ops.aten.split.Tensor(input, split_size_or_tensor, dim) return out input_specs = [ @@ -143,8 +140,6 @@ def forward(self, input): self.run_test_with_dynamic_shape( TestModule(), input_specs, - expected_ops={torch.ops.aten.split.Tensor}, - disable_passes=True, ) @parameterized.expand( @@ -166,8 +161,6 @@ def forward(self, input): self.run_test( TestModule(), input, - expected_ops={torch.ops.aten.split.Tensor}, - disable_passes=True, ) diff --git a/tests/py/dynamo/conversion/test_sqrt_aten.py b/tests/py/dynamo/conversion/test_sqrt_aten.py index 7b70a87954..0862d13247 100644 --- a/tests/py/dynamo/conversion/test_sqrt_aten.py +++ b/tests/py/dynamo/conversion/test_sqrt_aten.py @@ -18,13 +18,12 @@ class TestSqrtConverter(DispatchTestCase): def test_sqrt_float(self, input_shape, dtype): class sqrt(nn.Module): def forward(self, input): - return torch.sqrt(input) + return torch.ops.aten.sqrt.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( sqrt(), inputs, - expected_ops={torch.ops.aten.sqrt.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_sqrt_int(self, input_shape, dtype, low, high): class sqrt(nn.Module): def forward(self, input): - return torch.sqrt(input) + return torch.ops.aten.sqrt.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( sqrt(), inputs, - expected_ops={torch.ops.aten.sqrt.default}, ) diff --git a/tests/py/dynamo/conversion/test_squeeze_aten.py b/tests/py/dynamo/conversion/test_squeeze_aten.py index f8a67b6a32..88483072ae 100644 --- a/tests/py/dynamo/conversion/test_squeeze_aten.py +++ b/tests/py/dynamo/conversion/test_squeeze_aten.py @@ -12,25 +12,34 @@ class 
TestSqueezeConverter(DispatchTestCase): [ ("2d_dim", (0), (2, 1)), ("3d_one_dim", (0), (2, 2, 1)), + ] + ) + def test_squeeze_single_dim(self, _, dim, init_size): + class Squeeze(nn.Module): + def forward(self, x): + return torch.ops.aten.squeeze.dim(x, dim) + + inputs = [torch.randn(*init_size)] + self.run_test( + Squeeze(), + inputs, + ) + + @parameterized.expand( + [ ("3d_two_dim", (0, 1), (2, 1, 1)), ("4d_dim", (0, 1, 2), (2, 2, 1, 1)), ] ) - def test_squeeze(self, _, dim, init_size): + def test_squeeze_multi_dims(self, _, dim, init_size): class Squeeze(nn.Module): def forward(self, x): - return torch.squeeze(x, dim) + return torch.ops.aten.squeeze.dims(x, dim) inputs = [torch.randn(*init_size)] - expected_op = {} - if isinstance(dim, int) == 1: - expected_op = {torch.ops.aten.squeeze.dim} - else: - expected_op = {torch.ops.aten.squeeze.dims} self.run_test( Squeeze(), inputs, - expected_ops=expected_op, ) @@ -39,18 +48,13 @@ class TestSqueezeConverter(DispatchTestCase): [ ("2d_dim", (1), (-1, 1), [((1, 1), (1, 1), (3, 1))]), ("3d_one_dim", (1), (-1, 2, 1), [((1, 2, 1), (1, 2, 1), (3, 2, 1))]), - # ("3d_two_dim", (0, 1), (-1, -1, 1), [((1, 3, 1, 1), (1, 3, 1, 1))]), ] ) def test_squeeze(self, _, dim, init_size, shape_range): class Squeeze(nn.Module): def forward(self, x): - return torch.squeeze(x, dim) + return torch.ops.aten.squeeze.dim(x, dim) - if isinstance(dim, int) == 1: - expected_op = {torch.ops.aten.squeeze.dim} - else: - expected_op = {torch.ops.aten.squeeze.dims} input_specs = [ Input( shape=init_size, @@ -61,7 +65,6 @@ def forward(self, x): self.run_test_with_dynamic_shape( Squeeze(), input_specs, - expected_ops=expected_op, ) diff --git a/tests/py/dynamo/conversion/test_sub_aten.py b/tests/py/dynamo/conversion/test_sub_aten.py index 1ad7e340e3..fa4d8b5b80 100644 --- a/tests/py/dynamo/conversion/test_sub_aten.py +++ b/tests/py/dynamo/conversion/test_sub_aten.py @@ -17,13 +17,12 @@ class TestSubConverter(DispatchTestCase): def test_sub_tensor(self, _, shape): class sub(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.sub(lhs_val, rhs_val) + return torch.ops.aten.sub.Tensor(lhs_val, rhs_val) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( sub(), inputs, - expected_ops={torch.ops.aten.sub.Tensor}, ) @parameterized.expand( @@ -35,13 +34,12 @@ def forward(self, lhs_val, rhs_val): def test_sub_tensor_alpha(self, _, shape, alpha): class sub(nn.Module): def forward(self, lhs_val, rhs_val): - return torch.sub(lhs_val, rhs_val, alpha=alpha) + return torch.ops.aten.sub.Tensor(lhs_val, rhs_val, alpha=alpha) inputs = [torch.randn(shape), torch.randn(shape)] self.run_test( sub(), inputs, - expected_ops={torch.ops.aten.sub.Tensor}, ) @parameterized.expand( @@ -53,13 +51,12 @@ def forward(self, lhs_val, rhs_val): def test_sub_scalar(self, _, shape, scalar): class sub(nn.Module): def forward(self, lhs_val): - return torch.sub(lhs_val, scalar) + return torch.ops.aten.sub.Tensor(lhs_val, scalar) inputs = [torch.randn(shape)] self.run_test( sub(), inputs, - expected_ops={torch.ops.aten.sub.Tensor}, ) @parameterized.expand( @@ -71,13 +68,12 @@ def forward(self, lhs_val): def test_sub_scalar_alpha(self, _, shape, scalar, alpha): class sub(nn.Module): def forward(self, lhs_val): - return torch.sub(lhs_val, scalar, alpha=alpha) + return torch.ops.aten.sub.Tensor(lhs_val, scalar, alpha=alpha) inputs = [torch.randn(shape)] self.run_test( sub(), inputs, - expected_ops={torch.ops.aten.sub.Tensor}, ) diff --git a/tests/py/dynamo/conversion/test_sum_aten.py 
b/tests/py/dynamo/conversion/test_sum_aten.py index e69300f283..b279bed43e 100644 --- a/tests/py/dynamo/conversion/test_sum_aten.py +++ b/tests/py/dynamo/conversion/test_sum_aten.py @@ -18,13 +18,12 @@ class TestSumConverter(DispatchTestCase): def test_sum_dim_int_default(self, input_shape): class Sum(nn.Module): def forward(self, x): - return torch.sum(x) + return torch.ops.aten.sum.default(x) inputs = [torch.randn(*input_shape)] self.run_test( Sum(), inputs, - expected_ops={torch.ops.aten.sum.default}, ) @parameterized.expand( @@ -40,13 +39,12 @@ def forward(self, x): def test_sum_dim_int(self, input_shape, dim, keep_dims): class Sum(nn.Module): def forward(self, x): - return torch.sum(x, dim=dim, keepdim=keep_dims) + return torch.ops.aten.sum.dim_IntList(x, dim, keep_dims) inputs = [torch.randn(*input_shape)] self.run_test( Sum(), inputs, - expected_ops={torch.ops.aten.sum.dim_IntList}, ) @parameterized.expand( @@ -61,13 +59,12 @@ def forward(self, x): def test_sum_dim_tuple(self, input_shape, dim, keep_dims): class Sum(nn.Module): def forward(self, x): - return torch.sum(x, dim=dim, keepdim=keep_dims) + return torch.ops.aten.sum.dim_IntList(x, dim, keep_dims) inputs = [torch.randn(*input_shape)] self.run_test( Sum(), inputs, - expected_ops={torch.ops.aten.sum.dim_IntList}, ) @parameterized.expand( @@ -81,13 +78,12 @@ def forward(self, x): def test_sum_dim_int_int(self, input_shape, dim, keep_dims, dtype, low, high): class Sum(nn.Module): def forward(self, x): - return torch.sum(x, dim=dim, keepdim=keep_dims) + return torch.ops.aten.sum.dim_IntList(x, dim, keep_dims) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( Sum(), inputs, - expected_ops={torch.ops.aten.sum.dim_IntList}, check_dtype=False, ) @@ -102,13 +98,12 @@ def forward(self, x): def test_sum_dim_tuple_int(self, input_shape, dim, keep_dims, dtype, low, high): class Sum(nn.Module): def forward(self, x): - return torch.sum(x, dim=dim, keepdim=keep_dims) + return torch.ops.aten.sum.dim_IntList(x, dim, keep_dims) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( Sum(), inputs, - expected_ops={torch.ops.aten.sum.dim_IntList}, check_dtype=False, ) diff --git a/tests/py/dynamo/conversion/test_tan_aten.py b/tests/py/dynamo/conversion/test_tan_aten.py index 8aa664cc7a..137025dbc6 100644 --- a/tests/py/dynamo/conversion/test_tan_aten.py +++ b/tests/py/dynamo/conversion/test_tan_aten.py @@ -18,13 +18,12 @@ class TestTanConverter(DispatchTestCase): def test_tan_float(self, input_shape, dtype): class tan(nn.Module): def forward(self, input): - return torch.tan(input) + return torch.ops.aten.tan.default(input) inputs = [torch.randn(input_shape, dtype=dtype)] self.run_test( tan(), inputs, - expected_ops={torch.ops.aten.tan.default}, ) @parameterized.expand( @@ -37,13 +36,12 @@ def forward(self, input): def test_tan_int(self, input_shape, dtype, low, high): class tan(nn.Module): def forward(self, input): - return torch.tan(input) + return torch.ops.aten.tan.default(input) inputs = [torch.randint(low, high, input_shape, dtype=dtype)] self.run_test( tan(), inputs, - expected_ops={torch.ops.aten.tan.default}, ) diff --git a/tests/py/dynamo/conversion/test_tanh_aten.py b/tests/py/dynamo/conversion/test_tanh_aten.py index 87b647abc2..10757528d0 100644 --- a/tests/py/dynamo/conversion/test_tanh_aten.py +++ b/tests/py/dynamo/conversion/test_tanh_aten.py @@ -10,15 +10,15 @@ class TestTanhConverter(DispatchTestCase): def test_tanh(self): class TestModule(nn.Module): def forward(self, x): - return 
nn.functional.tanh(x) + return torch.ops.aten.tanh.default(x) inputs = [torch.randn(1, 10)] - self.run_test(TestModule(), inputs, expected_ops={torch.ops.aten.tanh.default}) + self.run_test(TestModule(), inputs) def test_tanh_with_dynamic_shape(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.tanh(x) + return torch.ops.aten.tanh.default(x) input_specs = [ Input( @@ -27,14 +27,12 @@ def forward(self, x): shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))], ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.tanh.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) def test_tanh_with_dynamic_shape_four_dimensions(self): class TestModule(nn.Module): def forward(self, x): - return nn.functional.tanh(x) + return torch.ops.aten.tanh.default(x) input_specs = [ Input( @@ -44,9 +42,7 @@ def forward(self, x): ), ] - self.run_test_with_dynamic_shape( - TestModule(), input_specs, expected_ops={torch.ops.aten.tanh.default} - ) + self.run_test_with_dynamic_shape(TestModule(), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_unsqueeze_aten.py b/tests/py/dynamo/conversion/test_unsqueeze_aten.py index 87d06496af..e448c4f925 100644 --- a/tests/py/dynamo/conversion/test_unsqueeze_aten.py +++ b/tests/py/dynamo/conversion/test_unsqueeze_aten.py @@ -22,12 +22,10 @@ def __init__(self, dim): self.dim = dim def forward(self, x): - return torch.unsqueeze(x, self.dim) + return torch.ops.aten.unsqueeze.default(x, self.dim) inputs = [torch.randn(1, 2, 3)] - self.run_test( - Unsqueeze(dim), inputs, expected_ops={torch.ops.aten.unsqueeze.default} - ) + self.run_test(Unsqueeze(dim), inputs) # Testing with more than one dynamic dims results in following error: # AssertionError: Currently we don't support unsqueeze with more than one dynamic dims. @@ -45,7 +43,7 @@ def __init__(self, dim): self.dim = dim def forward(self, x): - return torch.unsqueeze(x, self.dim) + return torch.ops.aten.unsqueeze.default(x, self.dim) input_specs = [ Input( @@ -54,9 +52,7 @@ def forward(self, x): shape_ranges=[((1, 2, 3), (2, 2, 3), (3, 2, 3))], ), ] - self.run_test_with_dynamic_shape( - Unsqueeze(dim), input_specs, expected_ops={torch.ops.aten.unsqueeze.default} - ) + self.run_test_with_dynamic_shape(Unsqueeze(dim), input_specs) if __name__ == "__main__": diff --git a/tests/py/dynamo/conversion/test_where_aten.py b/tests/py/dynamo/conversion/test_where_aten.py index 6e9466b96f..2a4bf108da 100644 --- a/tests/py/dynamo/conversion/test_where_aten.py +++ b/tests/py/dynamo/conversion/test_where_aten.py @@ -19,7 +19,7 @@ class TestWhereConverter(DispatchTestCase): def test_(self, _, x_size, y_size): class Where(nn.Module): def forward(self, condition, x, y): - return torch.where(condition, x, y) + return torch.ops.aten.where.self(condition, x, y) inputX = torch.randn(*x_size) inputOther = torch.randn(*y_size) @@ -27,13 +27,12 @@ def forward(self, condition, x, y): self.run_test( Where(), (condition, inputX, inputOther), - expected_ops={torch.ops.aten.where.self}, ) def test_0D_input(self): class Where(nn.Module): def forward(self, condition, x, y): - return torch.where(condition, x, y) + return torch.ops.aten.where.self(condition, x, y) inputX = torch.randn((5, 6, 7, 1, 3)) inputOther = torch.tensor(8.0, dtype=torch.float) @@ -41,7 +40,6 @@ def forward(self, condition, x, y): self.run_test( Where(), (condition, inputX, inputOther), - expected_ops={torch.ops.aten.where.self}, )
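Usage note (illustrative sketch only, not part of the patch): under the updated DispatchTestCase API shown in the hunks above, a new converter test calls the torch.ops.aten overload directly in forward() and invokes run_test() without expected_ops; use_dynamo_tracer=True / enable_passes=True are only needed when the target aten op appears after dynamo export and lowering, as in the pooling tests. The clamp op and converter below are assumed for illustration and are not taken from the patch.

import torch
import torch.nn as nn
from torch.testing._internal.common_utils import run_tests

from .harness import DispatchTestCase


class TestClampConverter(DispatchTestCase):
    def test_clamp(self):
        class TestModule(nn.Module):
            def forward(self, x):
                # Call the aten overload directly; no dynamo tracer or
                # lowering pass is needed to reach this op.
                return torch.ops.aten.clamp.default(x, -0.5, 0.5)

        inputs = [torch.randn(1, 10)]
        # run_test() no longer takes expected_ops; it traces the module,
        # builds a TRT engine, and compares outputs against PyTorch.
        self.run_test(TestModule(), inputs)


if __name__ == "__main__":
    run_tests()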