diff --git a/references/classification/transforms.py b/references/classification/transforms.py
index 7788c9e5c3f..fb8cf62703e 100644
--- a/references/classification/transforms.py
+++ b/references/classification/transforms.py
@@ -40,11 +40,11 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
         """
         if batch.ndim != 4:
             raise ValueError("Batch ndim should be 4. Got {}".format(batch.ndim))
-        elif target.ndim != 1:
+        if target.ndim != 1:
             raise ValueError("Target ndim should be 1. Got {}".format(target.ndim))
-        elif not batch.is_floating_point():
+        if not batch.is_floating_point():
             raise TypeError("Batch dtype should be a float tensor. Got {}.".format(batch.dtype))
-        elif target.dtype != torch.int64:
+        if target.dtype != torch.int64:
             raise TypeError("Target dtype should be torch.int64. Got {}".format(target.dtype))
 
         if not self.inplace:
@@ -116,11 +116,11 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
         """
         if batch.ndim != 4:
             raise ValueError("Batch ndim should be 4. Got {}".format(batch.ndim))
-        elif target.ndim != 1:
+        if target.ndim != 1:
             raise ValueError("Target ndim should be 1. Got {}".format(target.ndim))
-        elif not batch.is_floating_point():
+        if not batch.is_floating_point():
             raise TypeError("Batch dtype should be a float tensor. Got {}.".format(batch.dtype))
-        elif target.dtype != torch.int64:
+        if target.dtype != torch.int64:
             raise TypeError("Target dtype should be torch.int64. Got {}".format(target.dtype))
 
         if not self.inplace:
diff --git a/references/detection/transforms.py b/references/detection/transforms.py
index 787bb75a5c5..d7af27260ba 100644
--- a/references/detection/transforms.py
+++ b/references/detection/transforms.py
@@ -104,7 +104,7 @@ def forward(
         if isinstance(image, torch.Tensor):
             if image.ndimension() not in {2, 3}:
                 raise ValueError("image should be 2/3 dimensional. Got {} dimensions.".format(image.ndimension()))
-            elif image.ndimension() == 2:
+            if image.ndimension() == 2:
                 image = image.unsqueeze(0)
 
         orig_w, orig_h = F.get_image_size(image)
@@ -186,7 +186,7 @@ def forward(
         if isinstance(image, torch.Tensor):
             if image.ndimension() not in {2, 3}:
                 raise ValueError("image should be 2/3 dimensional. Got {} dimensions.".format(image.ndimension()))
-            elif image.ndimension() == 2:
+            if image.ndimension() == 2:
                 image = image.unsqueeze(0)
 
         if torch.rand(1) < self.p:
@@ -245,7 +245,7 @@ def forward(
         if isinstance(image, torch.Tensor):
             if image.ndimension() not in {2, 3}:
                 raise ValueError("image should be 2/3 dimensional. Got {} dimensions.".format(image.ndimension()))
-            elif image.ndimension() == 2:
+            if image.ndimension() == 2:
                 image = image.unsqueeze(0)
 
         r = torch.rand(7)
diff --git a/test/datasets_utils.py b/test/datasets_utils.py
index 646babdda1e..a921e1c9bf1 100644
--- a/test/datasets_utils.py
+++ b/test/datasets_utils.py
@@ -486,7 +486,7 @@ def _inject_fake_data(self, tmpdir, config):
                 "The method 'inject_fake_data' needs to return at least an integer indicating the number of "
                 "examples for the current configuration."
             )
-        elif isinstance(info, int):
+        if isinstance(info, int):
             info = dict(num_examples=info)
         elif not isinstance(info, dict):
             raise UsageError(
diff --git a/torchvision/datasets/imagenet.py b/torchvision/datasets/imagenet.py
index 0fdb3395a5e..ef3d13f5540 100644
--- a/torchvision/datasets/imagenet.py
+++ b/torchvision/datasets/imagenet.py
@@ -48,7 +48,7 @@ def __init__(self, root: str, split: str = "train", download: Optional[str] = No
                 "directory."
             )
             raise RuntimeError(msg)
-        elif download is False:
+        if download is False:
             msg = "The use of the download flag is deprecated, since the dataset "
             "is no longer publicly accessible."
             warnings.warn(msg, RuntimeWarning)
diff --git a/torchvision/datasets/inaturalist.py b/torchvision/datasets/inaturalist.py
index 1e2d09d39f8..b1ad8f844a6 100644
--- a/torchvision/datasets/inaturalist.py
+++ b/torchvision/datasets/inaturalist.py
@@ -214,11 +214,10 @@ def category_name(self, category_type: str, category_id: int) -> str:
         else:
             if category_type not in self.categories_index:
                 raise ValueError(f"Invalid category type '{category_type}'")
-            else:
-                for name, id in self.categories_index[category_type].items():
-                    if id == category_id:
-                        return name
-                raise ValueError(f"Invalid category id {category_id} for {category_type}")
+            for name, id in self.categories_index[category_type].items():
+                if id == category_id:
+                    return name
+            raise ValueError(f"Invalid category id {category_id} for {category_type}")
 
     def _check_integrity(self) -> bool:
         return os.path.exists(self.root) and len(os.listdir(self.root)) > 0
diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py
index 1bb1be347f9..30b1932fdc5 100644
--- a/torchvision/models/detection/_utils.py
+++ b/torchvision/models/detection/_utils.py
@@ -276,8 +276,7 @@ def __call__(self, match_quality_matrix: Tensor) -> Tensor:
             # empty targets or proposals not supported during training
             if match_quality_matrix.shape[0] == 0:
                 raise ValueError("No ground-truth boxes available for one of the images " "during training")
-            else:
-                raise ValueError("No proposal boxes available for one of the images " "during training")
+            raise ValueError("No proposal boxes available for one of the images " "during training")
 
         # match_quality_matrix is M (gt) x N (predicted)
         # Max over gt elements (dim 0) to find best gt candidate for each prediction
diff --git a/torchvision/models/efficientnet.py b/torchvision/models/efficientnet.py
index b9a5913ea77..d4c94828fcd 100644
--- a/torchvision/models/efficientnet.py
+++ b/torchvision/models/efficientnet.py
@@ -172,7 +172,7 @@ def __init__(
 
         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
-        elif not (
+        if not (
             isinstance(inverted_residual_setting, Sequence)
             and all([isinstance(s, MBConvConfig) for s in inverted_residual_setting])
         ):
diff --git a/torchvision/models/mobilenetv3.py b/torchvision/models/mobilenetv3.py
index 00a6a200c70..f7b189900a0 100644
--- a/torchvision/models/mobilenetv3.py
+++ b/torchvision/models/mobilenetv3.py
@@ -151,7 +151,7 @@ def __init__(
 
         if not inverted_residual_setting:
             raise ValueError("The inverted_residual_setting should not be empty")
-        elif not (
+        if not (
             isinstance(inverted_residual_setting, Sequence)
             and all([isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting])
         ):
diff --git a/torchvision/models/segmentation/segmentation.py b/torchvision/models/segmentation/segmentation.py
index c19e36e4705..c26683e6a7e 100644
--- a/torchvision/models/segmentation/segmentation.py
+++ b/torchvision/models/segmentation/segmentation.py
@@ -100,9 +100,8 @@ def _load_weights(model: nn.Module, arch_type: str, backbone: str, progress: boo
     model_url = model_urls.get(arch, None)
     if model_url is None:
         raise NotImplementedError("pretrained {} is not supported as of now".format(arch))
-    else:
-        state_dict = load_state_dict_from_url(model_url, progress=progress)
-        model.load_state_dict(state_dict)
+    state_dict = load_state_dict_from_url(model_url, progress=progress)
+    model.load_state_dict(state_dict)
 
 
 def _segm_lraspp_mobilenetv3(backbone_name: str, num_classes: int, pretrained_backbone: bool = True) -> LRASPP:
diff --git a/torchvision/models/shufflenetv2.py b/torchvision/models/shufflenetv2.py
index a9bb58fc9d1..626beb46670 100644
--- a/torchvision/models/shufflenetv2.py
+++ b/torchvision/models/shufflenetv2.py
@@ -161,9 +161,8 @@ def _shufflenetv2(arch: str, pretrained: bool, progress: bool, *args: Any, **kwa
         model_url = model_urls[arch]
         if model_url is None:
             raise NotImplementedError("pretrained {} is not supported as of now".format(arch))
-        else:
-            state_dict = load_state_dict_from_url(model_url, progress=progress)
-            model.load_state_dict(state_dict)
+        state_dict = load_state_dict_from_url(model_url, progress=progress)
+        model.load_state_dict(state_dict)
 
     return model
 
diff --git a/torchvision/prototype/datasets/benchmark.py b/torchvision/prototype/datasets/benchmark.py
index 55290e02906..f0ec14efaf0 100644
--- a/torchvision/prototype/datasets/benchmark.py
+++ b/torchvision/prototype/datasets/benchmark.py
@@ -250,7 +250,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
     def delta(self):
         if self._start is None:
             raise RuntimeError()
-        elif self._stop is None:
+        if self._stop is None:
             raise RuntimeError()
         return self._stop - self._start
 
diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py
index 9578134cae0..ff55a75d94b 100644
--- a/torchvision/transforms/functional.py
+++ b/torchvision/transforms/functional.py
@@ -228,11 +228,11 @@ def to_pil_image(pic, mode=None):
     if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
         raise TypeError("pic should be Tensor or ndarray. Got {}.".format(type(pic)))
 
-    elif isinstance(pic, torch.Tensor):
+    if isinstance(pic, torch.Tensor):
         if pic.ndimension() not in {2, 3}:
             raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndimension()))
 
-        elif pic.ndimension() == 2:
+        if pic.ndimension() == 2:
             # if 2D image, add channel dimension (CHW)
             pic = pic.unsqueeze(0)
 
@@ -244,7 +244,7 @@ def to_pil_image(pic, mode=None):
         if pic.ndim not in {2, 3}:
             raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndim))
 
-        elif pic.ndim == 2:
+        if pic.ndim == 2:
             # if 2D image, add channel dimension (HWC)
             pic = np.expand_dims(pic, 2)
 
diff --git a/torchvision/utils.py b/torchvision/utils.py
index a71e0f234b4..9a2cf02502f 100644
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -176,11 +176,11 @@ def draw_bounding_boxes(
 
     if not isinstance(image, torch.Tensor):
         raise TypeError(f"Tensor expected, got {type(image)}")
-    elif image.dtype != torch.uint8:
+    if image.dtype != torch.uint8:
         raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
-    elif image.dim() != 3:
+    if image.dim() != 3:
         raise ValueError("Pass individual images, not batches")
-    elif image.size(0) not in {1, 3}:
+    if image.size(0) not in {1, 3}:
         raise ValueError("Only grayscale and RGB images are supported")
 
     if image.size(0) == 1:
@@ -254,11 +254,11 @@ def draw_segmentation_masks(
     if not isinstance(image, torch.Tensor):
         raise TypeError(f"The image must be a tensor, got {type(image)}")
-    elif image.dtype != torch.uint8:
+    if image.dtype != torch.uint8:
         raise ValueError(f"The image dtype must be uint8, got {image.dtype}")
-    elif image.dim() != 3:
+    if image.dim() != 3:
         raise ValueError("Pass individual images, not batches")
-    elif image.size()[0] != 3:
+    if image.size()[0] != 3:
         raise ValueError("Pass an RGB image. Other Image formats are not supported")
 
     if masks.ndim == 2:
         masks = masks[None, :, :]
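
Note on the pattern: every hunk above applies the same cleanup. Once a branch ends in an unconditional raise, a trailing elif/else contributes nothing to control flow, so each check can become a flat if guard and any else body can be dedented. A minimal sketch of the idea, assuming a hypothetical _check_batch helper that is not part of torchvision:

import torch


def _check_batch(batch: torch.Tensor) -> None:
    # Hypothetical validator, not part of torchvision; it only illustrates
    # the flattened guard-clause style used throughout the diff above.
    if batch.ndim != 4:
        raise ValueError(f"Batch ndim should be 4. Got {batch.ndim}")
    # The raise above already exits the function, so a plain `if` here is
    # equivalent to the old `elif` and keeps every check at one indent level.
    if not batch.is_floating_point():
        raise TypeError(f"Batch dtype should be a float tensor. Got {batch.dtype}.")


_check_batch(torch.rand(8, 3, 224, 224))  # valid input: no exception raised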