diff --git a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py index d9b49fafbe..3878efc6af 100644 --- a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py +++ b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py @@ -24,7 +24,7 @@ def args_bounds_check( return args[i] if len(args) > i else replacement -@dynamo_tensorrt_converter(torch.ops.aten.batch_norm) +@dynamo_tensorrt_converter(torch.ops.aten.batch_norm) # type: ignore[misc] def aten_ops_batch_norm( network: TRTNetwork, target: Target, @@ -48,9 +48,9 @@ def aten_ops_batch_norm( ) -@dynamo_tensorrt_converter(torch.ops.aten.div.default) -@dynamo_tensorrt_converter(torch.ops.aten.div.Tensor_mode) -@dynamo_tensorrt_converter(torch.ops.aten.div.Tensor) +@dynamo_tensorrt_converter(torch.ops.aten.div.default) # type: ignore[misc] +@dynamo_tensorrt_converter(torch.ops.aten.div.Tensor_mode) # type: ignore[misc] +@dynamo_tensorrt_converter(torch.ops.aten.div.Tensor) # type: ignore[misc] def aten_ops_div( network: TRTNetwork, target: Target, @@ -119,7 +119,7 @@ def embedding_param_validator(embedding_node: Node) -> bool: @dynamo_tensorrt_converter( torch.ops.aten.embedding.default, capability_validator=embedding_param_validator -) +) # type: ignore[misc] def aten_ops_embedding( network: TRTNetwork, target: Target, @@ -140,8 +140,8 @@ def aten_ops_embedding( ) -@dynamo_tensorrt_converter(torch.ops.aten.fmod.Scalar) -@dynamo_tensorrt_converter(torch.ops.aten.fmod.Tensor) +@dynamo_tensorrt_converter(torch.ops.aten.fmod.Scalar) # type: ignore[misc] +@dynamo_tensorrt_converter(torch.ops.aten.fmod.Tensor) # type: ignore[misc] def aten_ops_fmod( network: TRTNetwork, target: Target, @@ -152,7 +152,7 @@ def aten_ops_fmod( return impl.elementwise.fmod(network, target, SourceIR.ATEN, name, args[0], args[1]) -@dynamo_tensorrt_converter(torch.ops.aten.gelu.default) +@dynamo_tensorrt_converter(torch.ops.aten.gelu.default) # type: ignore[misc] def aten_ops_gelu( network: TRTNetwork, target: Target, @@ -169,8 +169,8 @@ def aten_ops_gelu( ) -@dynamo_tensorrt_converter(torch.ops.aten.matmul) -@dynamo_tensorrt_converter(torch.ops.aten.mm.default) +@dynamo_tensorrt_converter(torch.ops.aten.matmul) # type: ignore[misc] +@dynamo_tensorrt_converter(torch.ops.aten.mm.default) # type: ignore[misc] def aten_ops_matmul( network: TRTNetwork, target: Target, @@ -183,7 +183,7 @@ def aten_ops_matmul( ) -@dynamo_tensorrt_converter(torch.ops.aten.layer_norm.default) +@dynamo_tensorrt_converter(torch.ops.aten.layer_norm.default) # type: ignore[misc] def aten_ops_layernorm( network: TRTNetwork, target: Target, @@ -204,7 +204,7 @@ def aten_ops_layernorm( ) -@dynamo_tensorrt_converter(torch.ops.aten.relu.default) +@dynamo_tensorrt_converter(torch.ops.aten.relu.default) # type: ignore[misc] def aten_ops_relu( network: TRTNetwork, target: Target, @@ -221,7 +221,7 @@ def aten_ops_relu( ) -@dynamo_tensorrt_converter(torch.ops.aten.rsqrt.default) +@dynamo_tensorrt_converter(torch.ops.aten.rsqrt.default) # type: ignore[misc] def aten_ops_rsqrt( network: TRTNetwork, target: Target, @@ -238,8 +238,8 @@ def aten_ops_rsqrt( ) -@dynamo_tensorrt_converter(torch.ops.aten.squeeze.dim) -@dynamo_tensorrt_converter(torch.ops.aten.squeeze.dims) +@dynamo_tensorrt_converter(torch.ops.aten.squeeze.dim) # type: ignore[misc] +@dynamo_tensorrt_converter(torch.ops.aten.squeeze.dims) # type: ignore[misc] def aten_ops_squeeze( network: TRTNetwork, target: Target, @@ -250,7 +250,7 @@ def aten_ops_squeeze( return 
impl.squeeze.squeeze(network, target, SourceIR.ATEN, name, args[0], args[1]) -@dynamo_tensorrt_converter(torch.ops.aten.unsqueeze.default) +@dynamo_tensorrt_converter(torch.ops.aten.unsqueeze.default) # type: ignore[misc] def aten_ops_unsqueeze( network: TRTNetwork, target: Target, @@ -263,7 +263,7 @@ def aten_ops_unsqueeze( ) -@dynamo_tensorrt_converter(torch.ops.aten._softmax.default) +@dynamo_tensorrt_converter(torch.ops.aten._softmax.default) # type: ignore[misc] def aten_ops_softmax( network: TRTNetwork, target: Target, @@ -276,7 +276,7 @@ def aten_ops_softmax( ) -@dynamo_tensorrt_converter(torch.ops.aten.where.self) +@dynamo_tensorrt_converter(torch.ops.aten.where.self) # type: ignore[misc] def aten_ops_where( network: TRTNetwork, target: Target, @@ -295,7 +295,7 @@ def aten_ops_where( ) -@dynamo_tensorrt_converter(torch.ops.aten.clamp.default) +@dynamo_tensorrt_converter(torch.ops.aten.clamp.default) # type: ignore[misc] def aten_ops_clamp( network: TRTNetwork, target: Target, @@ -314,7 +314,7 @@ def aten_ops_clamp( ) -@dynamo_tensorrt_converter(torch.ops.aten.select.int) +@dynamo_tensorrt_converter(torch.ops.aten.select.int) # type: ignore[misc] def aten_ops_select( network: TRTNetwork, target: Target, @@ -327,7 +327,7 @@ def aten_ops_select( ) -@dynamo_tensorrt_converter(torch.ops.aten.slice.Tensor) +@dynamo_tensorrt_converter(torch.ops.aten.slice.Tensor) # type: ignore[misc] def aten_ops_slice( network: TRTNetwork, target: Target, @@ -348,7 +348,7 @@ def aten_ops_slice( ) -@dynamo_tensorrt_converter(torch.ops.aten.permute.default) +@dynamo_tensorrt_converter(torch.ops.aten.permute.default) # type: ignore[misc] def aten_ops_permute( network: TRTNetwork, target: Target, @@ -387,7 +387,7 @@ def to_copy_dtype_validator(to_copy_node: Node) -> bool: @dynamo_tensorrt_converter( torch.ops.aten._to_copy.default, capability_validator=to_copy_dtype_validator -) +) # type: ignore[misc] def aten_ops_to_copy_dtype( network: TRTNetwork, target: Target, @@ -405,7 +405,7 @@ def aten_ops_to_copy_dtype( ) -@dynamo_tensorrt_converter(torch.ops.aten.clone.default) +@dynamo_tensorrt_converter(torch.ops.aten.clone.default) # type: ignore[misc] def aten_ops_clone( network: TRTNetwork, target: Target, @@ -438,3 +438,377 @@ def aten_ops_expand( args[0], args[1], ) + + +@dynamo_tensorrt_converter(torch.ops.aten.exp.default) # type: ignore[misc] +def aten_ops_exp( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.exp( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.log.default) # type: ignore[misc] +def aten_ops_log( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.log( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.sqrt.default) # type: ignore[misc] +def aten_ops_sqrt( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.sqrt( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.reciprocal.default) # type: ignore[misc] +def aten_ops_recip( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, 
+) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.recip( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.abs.default) # type: ignore[misc] +def aten_ops_abs( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.abs( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.sin.default) # type: ignore[misc] +def aten_ops_sin( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.sin( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.cos.default) # type: ignore[misc] +def aten_ops_cos( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.cos( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.tan.default) # type: ignore[misc] +def aten_ops_tan( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.tan( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.sinh.default) # type: ignore[misc] +def aten_ops_sinh( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.sinh( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.cosh.default) # type: ignore[misc] +def aten_ops_cosh( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.cosh( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.asin.default) # type: ignore[misc] +def aten_ops_asin( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.asin( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.acos.default) # type: ignore[misc] +def aten_ops_acos( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.acos( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.atan.default) # type: ignore[misc] +def aten_ops_atan( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.atan( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.asinh.default) # type: ignore[misc] +def aten_ops_asinh( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.asinh( + 
network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.acosh.default) # type: ignore[misc] +def aten_ops_acosh( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.acosh( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.atanh.default) # type: ignore[misc] +def aten_ops_atanh( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.atanh( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.ceil.default) # type: ignore[misc] +def aten_ops_ceil( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.ceil( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.floor.default) # type: ignore[misc] +def aten_ops_floor( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.floor( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.logical_not.default) # type: ignore[misc] +def aten_ops_logical_not( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.logical_not( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.sign.default) # type: ignore[misc] +def aten_ops_sign( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.sign( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.round.default) # type: ignore[misc] +def aten_ops_round( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.round( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) + + +@dynamo_tensorrt_converter(torch.ops.aten.isinf.default) # type: ignore[misc] +def aten_ops_isinf( + network: TRTNetwork, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +) -> Union[TRTTensor, Sequence[TRTTensor]]: + return impl.unary.isinf( + network, + target, + SourceIR.ATEN, + name, + args[0], + ) diff --git a/py/torch_tensorrt/dynamo/conversion/impl/elementwise/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/elementwise/ops.py index 0ae27e0933..3470328e44 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/elementwise/ops.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/elementwise/ops.py @@ -1,6 +1,7 @@ from typing import Optional import numpy as np +import tensorrt as trt from torch.fx.node import Target from torch_tensorrt.dynamo._SourceIR import SourceIR from torch_tensorrt.dynamo.conversion.impl.elementwise.base import ( @@ -16,8 +17,6 @@ from torch_tensorrt.fx.types import TRTNetwork, TRTTensor from torch_tensorrt.fx.utils import 
Frameworks, unified_dtype_converter -import tensorrt as trt - def trunc_div( network: TRTNetwork, diff --git a/py/torch_tensorrt/dynamo/conversion/impl/unary/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/unary/ops.py index 22376deedd..4a6380c964 100644 --- a/py/torch_tensorrt/dynamo/conversion/impl/unary/ops.py +++ b/py/torch_tensorrt/dynamo/conversion/impl/unary/ops.py @@ -1,17 +1,14 @@ from typing import Optional +import tensorrt as trt from torch.fx.node import Target from torch_tensorrt.dynamo._SourceIR import SourceIR -from torch_tensorrt.dynamo.conversion.impl.elementwise.base import ( - convert_binary_elementwise, -) +from torch_tensorrt.dynamo.conversion.converter_utils import cast_trt_tensor from torch_tensorrt.dynamo.conversion.impl.unary.base import convert_unary from torch_tensorrt.fx.types import TRTNetwork, TRTTensor -import tensorrt as trt - -def sign( +def exp( network: TRTNetwork, target: Target, source_ir: Optional[SourceIR], @@ -19,13 +16,6 @@ def sign( input_val: TRTTensor, ) -> TRTTensor: """ - Sign is calculated as below: - x = input - sign = (exp(x) // exp(abs(x))) * 2 - 1 - For positive number and 0, (exp(x) // exp(abs(x))) yield 1; for negative number, (exp(x) // exp(abs(x))) yield 0. - With multiply 2, the value become 2(for pos and 0) and 0(for neg). - Finally minus 1, the value become 1(for pos and 0) and -1(for neg). - Args: network (TRTNetwork): TensorRT network object. target (Target): fx node target. @@ -34,59 +24,363 @@ def sign( input_val (TRTTensor): The input tensor. Returns: - A TensorRT tensor represent the result of sign operator. + TRTTensor: A TensorRT tensor represent the result of exp operator. """ - input_exp_output = convert_unary( - network, - target, - source_ir, - f"{name}_prod_exp", - trt.UnaryOperation.EXP, - input_val, - ) - input_abs_output = convert_unary( - network, - target, - source_ir, - f"{name}_prod_abs", - trt.UnaryOperation.ABS, - input_val, - ) - input_abs_exp_output = convert_unary( - network, - target, - source_ir, - f"{name}_prod_abs_exp", - trt.UnaryOperation.EXP, - input_abs_output, - ) - - floor_div_output = convert_binary_elementwise( - network, - target, - source_ir, - f"{name}_exp_floor_div", - trt.ElementWiseOperation.FLOOR_DIV, - input_exp_output, - input_abs_exp_output, - ) - - double_floor_div_output = convert_binary_elementwise( - network, - target, - source_ir, - f"{name}_floor_div*2", - trt.ElementWiseOperation.PROD, - floor_div_output, - 2, - ) - - return convert_binary_elementwise( - network, - target, - source_ir, - f"{name}_sign", - trt.ElementWiseOperation.SUB, - double_floor_div_output, - 1, + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.EXP, input_val + ) + + +def log( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.LOG, input_val + ) + + +def sqrt( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or 
input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.SQRT, input_val + ) + + +def recip( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.RECIP, input_val + ) + + +def abs( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.ABS, input_val + ) + + +def sin( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.SIN, input_val + ) + + +def cos( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.COS, input_val + ) + + +def tan( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.TAN, input_val + ) + + +def sinh( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.SINH, input_val + ) + + +def cosh( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.COSH, input_val + ) + + +def asin( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.ASIN, input_val + ) + + +def acos( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> 
TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.ACOS, input_val + ) + + +def atan( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.ATAN, input_val + ) + + +def asinh( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.ASINH, input_val + ) + + +def acosh( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.ACOSH, input_val + ) + + +def atanh( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.ATANH, input_val + ) + + +def ceil( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.CEIL, input_val + ) + + +def floor( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.FLOOR, input_val + ) + + +def logical_not( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and input_val.dtype != trt.bool: + input_val = cast_trt_tensor(network, input_val, trt.bool, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.NOT, input_val + ) + + +def sign( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, 
trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.SIGN, input_val + ) + + +def round( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.ROUND, input_val + ) + + +def isinf( + network: TRTNetwork, + target: Target, + source_ir: Optional[SourceIR], + name: str, + input_val: TRTTensor, +) -> TRTTensor: + if (isinstance(input_val, TRTTensor)) and ( + input_val.dtype == trt.int8 or input_val.dtype == trt.int32 + ): + input_val = cast_trt_tensor(network, input_val, trt.float32, name) + + return convert_unary( + network, target, source_ir, name, trt.UnaryOperation.ISINF, input_val ) diff --git a/tests/py/dynamo/converters/harness.py b/tests/py/dynamo/converters/harness.py index f6ff25fb77..c8ea5bb5c0 100644 --- a/tests/py/dynamo/converters/harness.py +++ b/tests/py/dynamo/converters/harness.py @@ -266,6 +266,7 @@ def run_test( precision=torch.float, check_dtype=True, disable_passes=False, + output_dtypes=None, ): mod.eval() mod = self.generate_graph( @@ -284,6 +285,7 @@ def run_test( interp = TRTInterpreter( mod, Input.from_tensors(inputs), + output_dtypes=output_dtypes, ) super().run_test( mod, @@ -306,6 +308,7 @@ def run_test_with_dynamic_shape( rtol=1e-03, atol=1e-03, disable_passes=False, + output_dtypes=None, ): mod.eval() inputs = [spec.example_tensor("opt_shape") for spec in input_specs] @@ -321,6 +324,7 @@ def run_test_with_dynamic_shape( interp = TRTInterpreter( mod, input_specs, + output_dtypes=output_dtypes, ) # Since the lowering is based on optimal shape. We need to test with # different shape(for ex. 
max shape) for testing dynamic shape diff --git a/tests/py/dynamo/converters/test_abs_aten.py b/tests/py/dynamo/converters/test_abs_aten.py new file mode 100644 index 0000000000..27f1334c52 --- /dev/null +++ b/tests/py/dynamo/converters/test_abs_aten.py @@ -0,0 +1,52 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestAbsConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_abs_float(self, input_shape, dtype): + class abs(nn.Module): + def forward(self, input): + return torch.abs(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + abs(), + inputs, + expected_ops={torch.ops.aten.abs.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_abs_int(self, input_shape, dtype, low, high): + class abs(nn.Module): + def forward(self, input): + return torch.abs(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + abs(), + inputs, + expected_ops={torch.ops.aten.abs.default}, + output_dtypes=[torch.int], + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_acos_aten.py b/tests/py/dynamo/converters/test_acos_aten.py new file mode 100644 index 0000000000..e0c185de48 --- /dev/null +++ b/tests/py/dynamo/converters/test_acos_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestAcosConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_acos_float(self, input_shape, dtype): + class acos(nn.Module): + def forward(self, input): + return torch.acos(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + acos(), + inputs, + expected_ops={torch.ops.aten.acos.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_acos_int(self, input_shape, dtype, low, high): + class acos(nn.Module): + def forward(self, input): + return torch.acos(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + acos(), + inputs, + expected_ops={torch.ops.aten.acos.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_acosh_aten.py b/tests/py/dynamo/converters/test_acosh_aten.py new file mode 100644 index 0000000000..8fd2be738b --- /dev/null +++ b/tests/py/dynamo/converters/test_acosh_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestAcoshConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_acosh_float(self, input_shape, dtype): + class acosh(nn.Module): + def 
forward(self, input): + return torch.acosh(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + acosh(), + inputs, + expected_ops={torch.ops.aten.acosh.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_acosh_int(self, input_shape, dtype, low, high): + class acosh(nn.Module): + def forward(self, input): + return torch.acosh(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + acosh(), + inputs, + expected_ops={torch.ops.aten.acosh.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_asin_aten.py b/tests/py/dynamo/converters/test_asin_aten.py new file mode 100644 index 0000000000..3f0dc6ec8d --- /dev/null +++ b/tests/py/dynamo/converters/test_asin_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestAsinConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_asin_float(self, input_shape, dtype): + class asin(nn.Module): + def forward(self, input): + return torch.asin(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + asin(), + inputs, + expected_ops={torch.ops.aten.asin.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_asin_int(self, input_shape, dtype, low, high): + class asin(nn.Module): + def forward(self, input): + return torch.asin(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + asin(), + inputs, + expected_ops={torch.ops.aten.asin.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_asinh_aten.py b/tests/py/dynamo/converters/test_asinh_aten.py new file mode 100644 index 0000000000..53ae22cac5 --- /dev/null +++ b/tests/py/dynamo/converters/test_asinh_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestAsinhConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_asinh_float(self, input_shape, dtype): + class asinh(nn.Module): + def forward(self, input): + return torch.asinh(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + asinh(), + inputs, + expected_ops={torch.ops.aten.asinh.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_asinh_int(self, input_shape, dtype, low, high): + class asinh(nn.Module): + def forward(self, input): + return torch.asinh(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + asinh(), + inputs, + expected_ops={torch.ops.aten.asinh.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_atan_aten.py b/tests/py/dynamo/converters/test_atan_aten.py 
new file mode 100644 index 0000000000..2129591f73 --- /dev/null +++ b/tests/py/dynamo/converters/test_atan_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestAtanConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_atan_float(self, input_shape, dtype): + class atan(nn.Module): + def forward(self, input): + return torch.atan(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + atan(), + inputs, + expected_ops={torch.ops.aten.atan.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_atan_int(self, input_shape, dtype, low, high): + class atan(nn.Module): + def forward(self, input): + return torch.atan(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + atan(), + inputs, + expected_ops={torch.ops.aten.atan.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_atanh_aten.py b/tests/py/dynamo/converters/test_atanh_aten.py new file mode 100644 index 0000000000..ae8c57be7f --- /dev/null +++ b/tests/py/dynamo/converters/test_atanh_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestAtanhConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_atanh_float(self, input_shape, dtype): + class atanh(nn.Module): + def forward(self, input): + return torch.atanh(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + atanh(), + inputs, + expected_ops={torch.ops.aten.atanh.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_atanh_int(self, input_shape, dtype, low, high): + class atanh(nn.Module): + def forward(self, input): + return torch.atanh(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + atanh(), + inputs, + expected_ops={torch.ops.aten.atanh.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_ceil_aten.py b/tests/py/dynamo/converters/test_ceil_aten.py new file mode 100644 index 0000000000..3b29ea7040 --- /dev/null +++ b/tests/py/dynamo/converters/test_ceil_aten.py @@ -0,0 +1,52 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestCeilConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_ceil_float(self, input_shape, dtype): + class ceil(nn.Module): + def forward(self, input): + return torch.ceil(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + ceil(), + inputs, + 
expected_ops={torch.ops.aten.ceil.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_ceil_int(self, input_shape, dtype, low, high): + class ceil(nn.Module): + def forward(self, input): + return torch.ceil(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + ceil(), + inputs, + expected_ops={torch.ops.aten.ceil.default}, + check_dtype=False, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_cos_aten.py b/tests/py/dynamo/converters/test_cos_aten.py new file mode 100644 index 0000000000..9e49709bd2 --- /dev/null +++ b/tests/py/dynamo/converters/test_cos_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestCosConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_cos_float(self, input_shape, dtype): + class cos(nn.Module): + def forward(self, input): + return torch.cos(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + cos(), + inputs, + expected_ops={torch.ops.aten.cos.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_cos_int(self, input_shape, dtype, low, high): + class cos(nn.Module): + def forward(self, input): + return torch.cos(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + cos(), + inputs, + expected_ops={torch.ops.aten.cos.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_cosh_aten.py b/tests/py/dynamo/converters/test_cosh_aten.py new file mode 100644 index 0000000000..7d1881d26b --- /dev/null +++ b/tests/py/dynamo/converters/test_cosh_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestCoshConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_cosh_float(self, input_shape, dtype): + class cosh(nn.Module): + def forward(self, input): + return torch.cosh(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + cosh(), + inputs, + expected_ops={torch.ops.aten.cosh.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_cosh_int(self, input_shape, dtype, low, high): + class cosh(nn.Module): + def forward(self, input): + return torch.cosh(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + cosh(), + inputs, + expected_ops={torch.ops.aten.cosh.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_exp_aten.py b/tests/py/dynamo/converters/test_exp_aten.py new file mode 100644 index 0000000000..7245f90a65 --- /dev/null +++ b/tests/py/dynamo/converters/test_exp_aten.py @@ -0,0 +1,51 @@ +import torch +import 
torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestExpConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_exp_float(self, input_shape, dtype): + class exp(nn.Module): + def forward(self, input): + return torch.exp(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + exp(), + inputs, + expected_ops={torch.ops.aten.exp.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_exp_int(self, input_shape, dtype, low, high): + class exp(nn.Module): + def forward(self, input): + return torch.exp(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + exp(), + inputs, + expected_ops={torch.ops.aten.exp.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_floor_aten.py b/tests/py/dynamo/converters/test_floor_aten.py new file mode 100644 index 0000000000..11284a10e7 --- /dev/null +++ b/tests/py/dynamo/converters/test_floor_aten.py @@ -0,0 +1,52 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestFloorConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_floor_float(self, input_shape, dtype): + class floor(nn.Module): + def forward(self, input): + return torch.floor(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + floor(), + inputs, + expected_ops={torch.ops.aten.floor.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_floor_int(self, input_shape, dtype, low, high): + class floor(nn.Module): + def forward(self, input): + return torch.floor(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + floor(), + inputs, + expected_ops={torch.ops.aten.floor.default}, + check_dtype=False, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_isinf_aten.py b/tests/py/dynamo/converters/test_isinf_aten.py new file mode 100644 index 0000000000..3975e14cab --- /dev/null +++ b/tests/py/dynamo/converters/test_isinf_aten.py @@ -0,0 +1,64 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestIsInfConverter(DispatchTestCase): + @parameterized.expand( + [ + ( + torch.tensor( + [ + 1.23, + -4.56, + float("inf"), + float("-inf"), + -100.0, + float("nan"), + 0.13, + -0.13, + 3.14159265, + ] + ), + ), + ] + ) + def test_isinf_float(self, data): + class isinf(nn.Module): + def forward(self, input): + return torch.isinf(input) + + inputs = [data] + self.run_test( + isinf(), + inputs, + expected_ops={torch.ops.aten.isinf.default}, + output_dtypes=[torch.bool], + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, 
-10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_isinf_int(self, input_shape, dtype, low, high): + class isinf(nn.Module): + def forward(self, input): + return torch.isinf(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + isinf(), + inputs, + expected_ops={torch.ops.aten.isinf.default}, + output_dtypes=[torch.bool], + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_log_aten.py b/tests/py/dynamo/converters/test_log_aten.py new file mode 100644 index 0000000000..fe5ec06aa3 --- /dev/null +++ b/tests/py/dynamo/converters/test_log_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestLogConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_log_float(self, input_shape, dtype): + class log(nn.Module): + def forward(self, input): + return torch.log(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + log(), + inputs, + expected_ops={torch.ops.aten.log.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_log_int(self, input_shape, dtype, low, high): + class log(nn.Module): + def forward(self, input): + return torch.log(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + log(), + inputs, + expected_ops={torch.ops.aten.log.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_logical_not_aten.py b/tests/py/dynamo/converters/test_logical_not_aten.py new file mode 100644 index 0000000000..52bd66254f --- /dev/null +++ b/tests/py/dynamo/converters/test_logical_not_aten.py @@ -0,0 +1,71 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestLogicalNotConverter(DispatchTestCase): + @parameterized.expand( + [ + (torch.tensor([True, False, False, True]),), + (torch.tensor([[True, False, True], [True, False, False]]),), + ] + ) + def test_logical_not_bool(self, data): + class logical_not(nn.Module): + def forward(self, input): + return torch.logical_not(input) + + inputs = [data] + self.run_test( + logical_not(), + inputs, + expected_ops={torch.ops.aten.logical_not.default}, + output_dtypes=[torch.bool], + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 3), + ((1, 20), torch.int32, -2, 2), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_logical_not_int(self, input_shape, dtype, low, high): + class logical_not(nn.Module): + def forward(self, input): + return torch.logical_not(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + logical_not(), + inputs, + expected_ops={torch.ops.aten.logical_not.default}, + output_dtypes=[torch.bool], + ) + + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 5), torch.float), + ((2, 3, 4), torch.float), + ] + ) + def test_logical_not_float(self, input_shape, dtype): + class logical_not(nn.Module): + def forward(self, input): + return torch.logical_not(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] 
+ self.run_test( + logical_not(), + inputs, + expected_ops={torch.ops.aten.logical_not.default}, + output_dtypes=[torch.bool], + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_recip_aten.py b/tests/py/dynamo/converters/test_recip_aten.py new file mode 100644 index 0000000000..5c92901c3c --- /dev/null +++ b/tests/py/dynamo/converters/test_recip_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestRecipConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_recip_float(self, input_shape, dtype): + class recip(nn.Module): + def forward(self, input): + return torch.reciprocal(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + recip(), + inputs, + expected_ops={torch.ops.aten.reciprocal.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_recip_int(self, input_shape, dtype, low, high): + class recip(nn.Module): + def forward(self, input): + return torch.reciprocal(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + recip(), + inputs, + expected_ops={torch.ops.aten.reciprocal.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_round_aten.py b/tests/py/dynamo/converters/test_round_aten.py new file mode 100644 index 0000000000..719f825978 --- /dev/null +++ b/tests/py/dynamo/converters/test_round_aten.py @@ -0,0 +1,52 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestRoundConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_round_float(self, input_shape, dtype): + class round(nn.Module): + def forward(self, input): + return torch.round(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + round(), + inputs, + expected_ops={torch.ops.aten.round.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_round_int(self, input_shape, dtype, low, high): + class round(nn.Module): + def forward(self, input): + return torch.round(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + round(), + inputs, + expected_ops={torch.ops.aten.round.default}, + check_dtype=False, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_sign_aten.py b/tests/py/dynamo/converters/test_sign_aten.py new file mode 100644 index 0000000000..2553bc1157 --- /dev/null +++ b/tests/py/dynamo/converters/test_sign_aten.py @@ -0,0 +1,52 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestSignConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), 
torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_sign_float(self, input_shape, dtype): + class sign(nn.Module): + def forward(self, input): + return torch.sign(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + sign(), + inputs, + expected_ops={torch.ops.aten.sign.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, -2, 2), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -100, 100), + ] + ) + def test_sign_int(self, input_shape, dtype, low, high): + class sign(nn.Module): + def forward(self, input): + return torch.sign(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + sign(), + inputs, + expected_ops={torch.ops.aten.sign.default}, + check_dtype=False, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_sin_aten.py b/tests/py/dynamo/converters/test_sin_aten.py new file mode 100644 index 0000000000..8ffa6e430f --- /dev/null +++ b/tests/py/dynamo/converters/test_sin_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestSinConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_sin_float(self, input_shape, dtype): + class sin(nn.Module): + def forward(self, input): + return torch.sin(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + sin(), + inputs, + expected_ops={torch.ops.aten.sin.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_sin_int(self, input_shape, dtype, low, high): + class sin(nn.Module): + def forward(self, input): + return torch.sin(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + sin(), + inputs, + expected_ops={torch.ops.aten.sin.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_sinh_aten.py b/tests/py/dynamo/converters/test_sinh_aten.py new file mode 100644 index 0000000000..0d5fe38c6b --- /dev/null +++ b/tests/py/dynamo/converters/test_sinh_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestSinhConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_sinh_float(self, input_shape, dtype): + class sinh(nn.Module): + def forward(self, input): + return torch.sinh(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + sinh(), + inputs, + expected_ops={torch.ops.aten.sinh.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_sinh_int(self, input_shape, dtype, low, high): + class sinh(nn.Module): + def forward(self, input): + return torch.sinh(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + sinh(), + inputs, + 
expected_ops={torch.ops.aten.sinh.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_sqrt_aten.py b/tests/py/dynamo/converters/test_sqrt_aten.py new file mode 100644 index 0000000000..a04d0c82f4 --- /dev/null +++ b/tests/py/dynamo/converters/test_sqrt_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestSqrtConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_sqrt_float(self, input_shape, dtype): + class sqrt(nn.Module): + def forward(self, input): + return torch.sqrt(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + sqrt(), + inputs, + expected_ops={torch.ops.aten.sqrt.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_sqrt_int(self, input_shape, dtype, low, high): + class sqrt(nn.Module): + def forward(self, input): + return torch.sqrt(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + sqrt(), + inputs, + expected_ops={torch.ops.aten.sqrt.default}, + ) + + +if __name__ == "__main__": + run_tests() diff --git a/tests/py/dynamo/converters/test_tan_aten.py b/tests/py/dynamo/converters/test_tan_aten.py new file mode 100644 index 0000000000..6b27781068 --- /dev/null +++ b/tests/py/dynamo/converters/test_tan_aten.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +from harness import DispatchTestCase +from parameterized import parameterized +from torch.testing._internal.common_utils import run_tests +from torch_tensorrt import Input + + +class TestTanConverter(DispatchTestCase): + @parameterized.expand( + [ + ((10,), torch.float), + ((1, 20), torch.float), + ((2, 3, 4), torch.float), + ((2, 3, 4, 5), torch.float), + ] + ) + def test_tan_float(self, input_shape, dtype): + class tan(nn.Module): + def forward(self, input): + return torch.tan(input) + + inputs = [torch.randn(input_shape, dtype=dtype)] + self.run_test( + tan(), + inputs, + expected_ops={torch.ops.aten.tan.default}, + ) + + @parameterized.expand( + [ + ((10,), torch.int, 0, 5), + ((1, 20), torch.int32, -10, 10), + ((2, 3, 4), torch.int, -5, 5), + ] + ) + def test_tan_int(self, input_shape, dtype, low, high): + class tan(nn.Module): + def forward(self, input): + return torch.tan(input) + + inputs = [torch.randint(low, high, input_shape, dtype=dtype)] + self.run_test( + tan(), + inputs, + expected_ops={torch.ops.aten.tan.default}, + ) + + +if __name__ == "__main__": + run_tests()
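
Note on the converter pattern (illustrative, not part of the patch): every new function added to impl/unary/ops.py follows the same two-step shape — promote int8/int32 inputs to float32, since most TensorRT unary layers do not accept integer tensors, then emit the corresponding trt.UnaryOperation via convert_unary. The sketch below factors that shared pattern into a single helper purely for illustration; the name generic_unary is hypothetical, while cast_trt_tensor and convert_unary are the same utilities imported in the diff above.

# Illustrative sketch only -- not part of the patch.
from typing import Optional

import tensorrt as trt
from torch.fx.node import Target
from torch_tensorrt.dynamo._SourceIR import SourceIR
from torch_tensorrt.dynamo.conversion.converter_utils import cast_trt_tensor
from torch_tensorrt.dynamo.conversion.impl.unary.base import convert_unary
from torch_tensorrt.fx.types import TRTNetwork, TRTTensor


def generic_unary(  # hypothetical helper name, for illustration
    network: TRTNetwork,
    target: Target,
    source_ir: Optional[SourceIR],
    name: str,
    input_val: TRTTensor,
    operation: trt.UnaryOperation,
) -> TRTTensor:
    # Promote integer inputs to float32 before applying the unary layer,
    # mirroring what the converters in the patch do for EXP, LOG, SQRT, etc.
    if isinstance(input_val, TRTTensor) and input_val.dtype in (trt.int8, trt.int32):
        input_val = cast_trt_tensor(network, input_val, trt.float32, name)
    return convert_unary(
        network, target, source_ir, name, operation, input_val
    )


# Under this sketch, exp(x) would reduce to:
#   generic_unary(network, target, SourceIR.ATEN, name, args[0], trt.UnaryOperation.EXP)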