diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 1269e18978..1fe4f18ada 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -32,12 +32,17 @@ repos:
   hooks:
   - id: pyupgrade
     args: [--py37-plus]
-    name: Upgrade code
+    name: Upgrade code excluding monai networks
     exclude: |
-      (?x)^(
-          versioneer.py|
-          monai/_version.py
-      )$
+      (?x)(
+          ^versioneer.py|
+          ^monai/_version.py|
+          ^monai/networks/  # no PEP 604 for torchscript tensorrt
+      )
+  - id: pyupgrade
+    args: [--py37-plus, --keep-runtime-typing]
+    name: Upgrade monai networks
+    files: (?x)(^monai/networks/)
 
 - repo: https://github.com/asottile/yesqa
   rev: v1.4.0
diff --git a/monai/networks/nets/basic_unet.py b/monai/networks/nets/basic_unet.py
index a29d620ced..7fc57edc42 100644
--- a/monai/networks/nets/basic_unet.py
+++ b/monai/networks/nets/basic_unet.py
@@ -12,7 +12,7 @@ from __future__ import annotations
 
 from collections.abc import Sequence
-from typing import Any
+from typing import Optional
 
 import torch
 import torch.nn as nn
@@ -150,7 +150,7 @@ def __init__(
         self.convs = TwoConv(spatial_dims, cat_chns + up_chns, out_chns, act, norm, bias, dropout)
         self.is_pad = is_pad
 
-    def forward(self, x: torch.Tensor, x_e: Any):
+    def forward(self, x: torch.Tensor, x_e: Optional[torch.Tensor]):
         """
 
         Args:
@@ -159,7 +159,7 @@ def forward(self, x: torch.Tensor, x_e: Any):
         """
         x_0 = self.upsample(x)
 
-        if torch.jit.isinstance(x_e, torch.Tensor):
+        if x_e is not None and torch.jit.isinstance(x_e, torch.Tensor):
             if self.is_pad:
                 # handling spatial shapes due to the 2x maxpooling with odd edge lengths.
                 dimensions = len(x.shape) - 2
diff --git a/monai/networks/nets/dints.py b/monai/networks/nets/dints.py
index 437789ef0c..6e3420d136 100644
--- a/monai/networks/nets/dints.py
+++ b/monai/networks/nets/dints.py
@@ -13,6 +13,7 @@
 
 import datetime
 import warnings
+from typing import Optional
 
 import numpy as np
 import torch
@@ -40,7 +41,7 @@ class CellInterface(torch.nn.Module):
     """interface for torchscriptable Cell"""
 
-    def forward(self, x: torch.Tensor, weight) -> torch.Tensor:  # type: ignore
+    def forward(self, x: torch.Tensor, weight: Optional[torch.Tensor]) -> torch.Tensor:  # type: ignore
         pass
 
@@ -170,7 +171,7 @@ def __init__(self, c: int, ops: dict, arch_code_c=None):
             if arch_c > 0:
                 self.ops.append(ops[op_name](c))
 
-    def forward(self, x: torch.Tensor, weight: torch.Tensor | None = None):
+    def forward(self, x: torch.Tensor, weight: Optional[torch.Tensor] = None):
         """
         Args:
             x: input tensor.
@@ -298,7 +299,7 @@ def __init__(
 
         self.op = MixedOp(c, self.OPS, arch_code_c)
 
-    def forward(self, x: torch.Tensor, weight: torch.Tensor | None) -> torch.Tensor:
+    def forward(self, x: torch.Tensor, weight: Optional[torch.Tensor]) -> torch.Tensor:
         """
         Args:
             x: input tensor
diff --git a/monai/networks/nets/dynunet.py b/monai/networks/nets/dynunet.py
index a761f5993a..59e1e4a758 100644
--- a/monai/networks/nets/dynunet.py
+++ b/monai/networks/nets/dynunet.py
@@ -9,9 +9,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import annotations
+# isort: dont-add-import: from __future__ import annotations
 
-from collections.abc import Sequence
+from typing import List, Optional, Sequence, Tuple, Union
 
 import torch
 import torch.nn as nn
@@ -32,7 +32,7 @@ class DynUNetSkipLayer(nn.Module):
     forward passes of the network.
     """
 
-    heads: list[torch.Tensor] | None
+    heads: Optional[List[torch.Tensor]]
 
     def __init__(self, index, downsample, upsample, next_layer, heads=None, super_head=None):
         super().__init__()
@@ -132,13 +132,13 @@ def __init__(
         spatial_dims: int,
         in_channels: int,
         out_channels: int,
-        kernel_size: Sequence[Sequence[int] | int],
-        strides: Sequence[Sequence[int] | int],
-        upsample_kernel_size: Sequence[Sequence[int] | int],
-        filters: Sequence[int] | None = None,
-        dropout: tuple | str | float | None = None,
-        norm_name: tuple | str = ("INSTANCE", {"affine": True}),
-        act_name: tuple | str = ("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
+        kernel_size: Sequence[Union[Sequence[int], int]],
+        strides: Sequence[Union[Sequence[int], int]],
+        upsample_kernel_size: Sequence[Union[Sequence[int], int]],
+        filters: Optional[Sequence[int]] = None,
+        dropout: Optional[Union[Tuple, str, float]] = None,
+        norm_name: Union[Tuple, str] = ("INSTANCE", {"affine": True}),
+        act_name: Union[Tuple, str] = ("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
         deep_supervision: bool = False,
         deep_supr_num: int = 1,
         res_block: bool = False,
@@ -169,7 +169,7 @@ def __init__(
         self.deep_supervision = deep_supervision
         self.deep_supr_num = deep_supr_num
         # initialize the typed list of supervision head outputs so that Torchscript can recognize what's going on
-        self.heads: list[torch.Tensor] = [torch.rand(1)] * self.deep_supr_num
+        self.heads: List[torch.Tensor] = [torch.rand(1)] * self.deep_supr_num
         if self.deep_supervision:
             self.deep_supervision_heads = self.get_deep_supervision_heads()
             self.check_deep_supr_num()
@@ -305,24 +305,30 @@ def get_output_block(self, idx: int):
     def get_downsamples(self):
         inp, out = self.filters[:-2], self.filters[1:-1]
         strides, kernel_size = self.strides[1:-1], self.kernel_size[1:-1]
-        return self.get_module_list(inp, out, kernel_size, strides, self.conv_block)
+        return self.get_module_list(inp, out, kernel_size, strides, self.conv_block)  # type: ignore
 
     def get_upsamples(self):
         inp, out = self.filters[1:][::-1], self.filters[:-1][::-1]
         strides, kernel_size = self.strides[1:][::-1], self.kernel_size[1:][::-1]
         upsample_kernel_size = self.upsample_kernel_size[::-1]
         return self.get_module_list(
-            inp, out, kernel_size, strides, UnetUpBlock, upsample_kernel_size, trans_bias=self.trans_bias
+            inp,  # type: ignore
+            out,  # type: ignore
+            kernel_size,
+            strides,
+            UnetUpBlock,  # type: ignore
+            upsample_kernel_size,
+            trans_bias=self.trans_bias,
         )
 
     def get_module_list(
         self,
-        in_channels: Sequence[int],
-        out_channels: Sequence[int],
-        kernel_size: Sequence[Sequence[int] | int],
-        strides: Sequence[Sequence[int] | int],
-        conv_block: type[nn.Module],
-        upsample_kernel_size: Sequence[Sequence[int] | int] | None = None,
+        in_channels: List[int],
+        out_channels: List[int],
+        kernel_size: Sequence[Union[Sequence[int], int]],
+        strides: Sequence[Union[Sequence[int], int]],
+        conv_block: nn.Module,
+        upsample_kernel_size: Optional[Sequence[Union[Sequence[int], int]]] = None,
        trans_bias: bool = False,
     ):
         layers = []