diff --git a/torchvision/prototype/transforms/functional/_color.py b/torchvision/prototype/transforms/functional/_color.py
index 7cbf8885ca9..8460f9c6422 100644
--- a/torchvision/prototype/transforms/functional/_color.py
+++ b/torchvision/prototype/transforms/functional/_color.py
@@ -227,7 +227,7 @@ def equalize_image_tensor(image: torch.Tensor) -> torch.Tensor:
     if image.numel() == 0:
         return image
 
-    return _equalize_image_tensor_vec(image.view(-1, height, width)).view(image.shape)
+    return _equalize_image_tensor_vec(image.view(-1, height, width)).reshape(image.shape)
 
 
 equalize_image_pil = _FP.equalize
diff --git a/torchvision/transforms/functional_tensor.py b/torchvision/transforms/functional_tensor.py
index 20b76fbf079..4944c75fab8 100644
--- a/torchvision/transforms/functional_tensor.py
+++ b/torchvision/transforms/functional_tensor.py
@@ -875,7 +875,7 @@ def _scale_channel(img_chan: Tensor) -> Tensor:
     if img_chan.is_cuda:
         hist = torch.histc(img_chan.to(torch.float32), bins=256, min=0, max=255)
     else:
-        hist = torch.bincount(img_chan.view(-1), minlength=256)
+        hist = torch.bincount(img_chan.reshape(-1), minlength=256)
 
     nonzero_hist = hist[hist != 0]
     step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode="floor")
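
A minimal sketch (not part of the patch) of the behavior these two one-line changes rely on: `Tensor.view` only succeeds when the requested shape is compatible with the tensor's existing strides, while `Tensor.reshape` returns a view when possible and otherwise falls back to a copy. The non-contiguous input below is only an illustrative assumption about how the original `.view()` calls could fail, not a case taken from the patch.

```python
import torch

# A non-contiguous tensor: permuting the dims changes strides but not storage.
t = torch.arange(24).reshape(2, 3, 4).permute(0, 2, 1)

flat = t.reshape(-1)  # works: copies silently because a pure view is impossible

try:
    t.view(-1)  # raises RuntimeError: view size is not compatible with strides
except RuntimeError as e:
    print(e)
```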