Skip to content

Refactor unnecessary `else` / `elif` when the `if` block contains a `raise` statement #4604

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into the base branch from the contributor's branch
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions references/classification/transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,11 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""
if batch.ndim != 4:
raise ValueError("Batch ndim should be 4. Got {}".format(batch.ndim))
elif target.ndim != 1:
if target.ndim != 1:
raise ValueError("Target ndim should be 1. Got {}".format(target.ndim))
elif not batch.is_floating_point():
if not batch.is_floating_point():
raise TypeError("Batch dtype should be a float tensor. Got {}.".format(batch.dtype))
elif target.dtype != torch.int64:
if target.dtype != torch.int64:
raise TypeError("Target dtype should be torch.int64. Got {}".format(target.dtype))

if not self.inplace:
Expand Down Expand Up @@ -116,11 +116,11 @@ def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""
if batch.ndim != 4:
raise ValueError("Batch ndim should be 4. Got {}".format(batch.ndim))
elif target.ndim != 1:
if target.ndim != 1:
raise ValueError("Target ndim should be 1. Got {}".format(target.ndim))
elif not batch.is_floating_point():
if not batch.is_floating_point():
raise TypeError("Batch dtype should be a float tensor. Got {}.".format(batch.dtype))
elif target.dtype != torch.int64:
if target.dtype != torch.int64:
raise TypeError("Target dtype should be torch.int64. Got {}".format(target.dtype))

if not self.inplace:
Expand Down
6 changes: 3 additions & 3 deletions references/detection/transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def forward(
if isinstance(image, torch.Tensor):
if image.ndimension() not in {2, 3}:
raise ValueError("image should be 2/3 dimensional. Got {} dimensions.".format(image.ndimension()))
elif image.ndimension() == 2:
if image.ndimension() == 2:
image = image.unsqueeze(0)

orig_w, orig_h = F.get_image_size(image)
Expand Down Expand Up @@ -186,7 +186,7 @@ def forward(
if isinstance(image, torch.Tensor):
if image.ndimension() not in {2, 3}:
raise ValueError("image should be 2/3 dimensional. Got {} dimensions.".format(image.ndimension()))
elif image.ndimension() == 2:
if image.ndimension() == 2:
image = image.unsqueeze(0)

if torch.rand(1) < self.p:
Expand Down Expand Up @@ -245,7 +245,7 @@ def forward(
if isinstance(image, torch.Tensor):
if image.ndimension() not in {2, 3}:
raise ValueError("image should be 2/3 dimensional. Got {} dimensions.".format(image.ndimension()))
elif image.ndimension() == 2:
if image.ndimension() == 2:
image = image.unsqueeze(0)

r = torch.rand(7)
Expand Down
2 changes: 1 addition & 1 deletion test/datasets_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -486,7 +486,7 @@ def _inject_fake_data(self, tmpdir, config):
"The method 'inject_fake_data' needs to return at least an integer indicating the number of "
"examples for the current configuration."
)
elif isinstance(info, int):
if isinstance(info, int):
info = dict(num_examples=info)
elif not isinstance(info, dict):
raise UsageError(
Expand Down
2 changes: 1 addition & 1 deletion torchvision/datasets/imagenet.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def __init__(self, root: str, split: str = "train", download: Optional[str] = No
"directory."
)
raise RuntimeError(msg)
elif download is False:
if download is False:
msg = "The use of the download flag is deprecated, since the dataset " "is no longer publicly accessible."
warnings.warn(msg, RuntimeWarning)

Expand Down
9 changes: 4 additions & 5 deletions torchvision/datasets/inaturalist.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,11 +214,10 @@ def category_name(self, category_type: str, category_id: int) -> str:
else:
if category_type not in self.categories_index:
raise ValueError(f"Invalid category type '{category_type}'")
else:
for name, id in self.categories_index[category_type].items():
if id == category_id:
return name
raise ValueError(f"Invalid category id {category_id} for {category_type}")
for name, id in self.categories_index[category_type].items():
if id == category_id:
return name
raise ValueError(f"Invalid category id {category_id} for {category_type}")

def _check_integrity(self) -> bool:
return os.path.exists(self.root) and len(os.listdir(self.root)) > 0
Expand Down
3 changes: 1 addition & 2 deletions torchvision/models/detection/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -276,8 +276,7 @@ def __call__(self, match_quality_matrix: Tensor) -> Tensor:
# empty targets or proposals not supported during training
if match_quality_matrix.shape[0] == 0:
raise ValueError("No ground-truth boxes available for one of the images " "during training")
else:
raise ValueError("No proposal boxes available for one of the images " "during training")
raise ValueError("No proposal boxes available for one of the images " "during training")

# match_quality_matrix is M (gt) x N (predicted)
# Max over gt elements (dim 0) to find best gt candidate for each prediction
Expand Down
2 changes: 1 addition & 1 deletion torchvision/models/efficientnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ def __init__(

if not inverted_residual_setting:
raise ValueError("The inverted_residual_setting should not be empty")
elif not (
if not (
isinstance(inverted_residual_setting, Sequence)
and all([isinstance(s, MBConvConfig) for s in inverted_residual_setting])
):
Expand Down
2 changes: 1 addition & 1 deletion torchvision/models/mobilenetv3.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ def __init__(

if not inverted_residual_setting:
raise ValueError("The inverted_residual_setting should not be empty")
elif not (
if not (
isinstance(inverted_residual_setting, Sequence)
and all([isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting])
):
Expand Down
5 changes: 2 additions & 3 deletions torchvision/models/segmentation/segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,9 +100,8 @@ def _load_weights(model: nn.Module, arch_type: str, backbone: str, progress: boo
model_url = model_urls.get(arch, None)
if model_url is None:
raise NotImplementedError("pretrained {} is not supported as of now".format(arch))
else:
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)


def _segm_lraspp_mobilenetv3(backbone_name: str, num_classes: int, pretrained_backbone: bool = True) -> LRASPP:
Expand Down
5 changes: 2 additions & 3 deletions torchvision/models/shufflenetv2.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,9 +161,8 @@ def _shufflenetv2(arch: str, pretrained: bool, progress: bool, *args: Any, **kwa
model_url = model_urls[arch]
if model_url is None:
raise NotImplementedError("pretrained {} is not supported as of now".format(arch))
else:
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)

return model

Expand Down
2 changes: 1 addition & 1 deletion torchvision/prototype/datasets/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
def delta(self):
if self._start is None:
raise RuntimeError()
elif self._stop is None:
if self._stop is None:
raise RuntimeError()
return self._stop - self._start

Expand Down
6 changes: 3 additions & 3 deletions torchvision/transforms/functional.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,11 +228,11 @@ def to_pil_image(pic, mode=None):
if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
raise TypeError("pic should be Tensor or ndarray. Got {}.".format(type(pic)))

elif isinstance(pic, torch.Tensor):
if isinstance(pic, torch.Tensor):
if pic.ndimension() not in {2, 3}:
raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndimension()))

elif pic.ndimension() == 2:
if pic.ndimension() == 2:
# if 2D image, add channel dimension (CHW)
pic = pic.unsqueeze(0)

Expand All @@ -244,7 +244,7 @@ def to_pil_image(pic, mode=None):
if pic.ndim not in {2, 3}:
raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndim))

elif pic.ndim == 2:
if pic.ndim == 2:
# if 2D image, add channel dimension (HWC)
pic = np.expand_dims(pic, 2)

Expand Down
12 changes: 6 additions & 6 deletions torchvision/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,11 +176,11 @@ def draw_bounding_boxes(

if not isinstance(image, torch.Tensor):
raise TypeError(f"Tensor expected, got {type(image)}")
elif image.dtype != torch.uint8:
if image.dtype != torch.uint8:
raise ValueError(f"Tensor uint8 expected, got {image.dtype}")
elif image.dim() != 3:
if image.dim() != 3:
raise ValueError("Pass individual images, not batches")
elif image.size(0) not in {1, 3}:
if image.size(0) not in {1, 3}:
raise ValueError("Only grayscale and RGB images are supported")

if image.size(0) == 1:
Expand Down Expand Up @@ -254,11 +254,11 @@ def draw_segmentation_masks(

if not isinstance(image, torch.Tensor):
raise TypeError(f"The image must be a tensor, got {type(image)}")
elif image.dtype != torch.uint8:
if image.dtype != torch.uint8:
raise ValueError(f"The image dtype must be uint8, got {image.dtype}")
elif image.dim() != 3:
if image.dim() != 3:
raise ValueError("Pass individual images, not batches")
elif image.size()[0] != 3:
if image.size()[0] != 3:
raise ValueError("Pass an RGB image. Other Image formats are not supported")
if masks.ndim == 2:
masks = masks[None, :, :]
Expand Down