From f2e4a0bb98590de02b0c6c9ea0f354cc5a3b09b5 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Wed, 9 Nov 2022 17:30:40 +0000
Subject: [PATCH 1/5] Performance optimization on adjust_hue_image_tensor

---
 .../prototype/transforms/functional/_color.py | 22 ++++++++++---------
 .../prototype/transforms/functional/_meta.py  |  1 +
 .../transforms/functional/_type_conversion.py |  2 +-
 3 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/torchvision/prototype/transforms/functional/_color.py b/torchvision/prototype/transforms/functional/_color.py
index 12fa5288abc..994956ce66a 100644
--- a/torchvision/prototype/transforms/functional/_color.py
+++ b/torchvision/prototype/transforms/functional/_color.py
@@ -189,21 +189,21 @@ def _rgb_to_hsv(image: torch.Tensor) -> torch.Tensor:
     channels_range = maxc - minc
     # Since `eqc => channels_range = 0`, replacing denominator with 1 when `eqc` is fine.
     ones = torch.ones_like(maxc)
-    s = channels_range / torch.where(eqc, ones, maxc)
+    s = channels_range.div_(torch.where(eqc, ones, maxc))
     # Note that `eqc => maxc = minc = r = g = b`. So the following calculation
     # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it
     # would not matter what values `rc`, `gc`, and `bc` have here, and thus
     # replacing denominator with 1 when `eqc` is fine.
     channels_range_divisor = torch.where(eqc, ones, channels_range).unsqueeze_(dim=-3)
-    rc, gc, bc = ((maxc.unsqueeze(dim=-3) - image) / channels_range_divisor).unbind(dim=-3)
+    rc, gc, bc = ((maxc.unsqueeze(dim=-3) - image).div_(channels_range_divisor)).unbind(dim=-3)
 
     mask_maxc_neq_r = maxc != r
     mask_maxc_eq_g = maxc == g
     mask_maxc_neq_g = ~mask_maxc_eq_g
 
-    hr = (bc - gc).mul_(~mask_maxc_neq_r)
-    hg = (2.0 + rc).sub_(bc).mul_(mask_maxc_eq_g & mask_maxc_neq_r)
-    hb = (4.0 + gc).sub_(rc).mul_(mask_maxc_neq_g & mask_maxc_neq_r)
+    hg = rc.add(2.0).sub_(bc).mul_(mask_maxc_eq_g & mask_maxc_neq_r)
+    hr = bc.sub_(gc).mul_(~mask_maxc_neq_r)
+    hb = gc.add_(4.0).sub_(rc).mul_(mask_maxc_neq_g & mask_maxc_neq_r)
 
     h = hr.add_(hg).add_(hb)
     h = h.mul_(1.0 / 6.0).add_(1.0).fmod_(1.0)
@@ -212,14 +212,16 @@ def _hsv_to_rgb(img: torch.Tensor) -> torch.Tensor:
     h, s, v = img.unbind(dim=-3)
-    h6 = h * 6
+    h6 = h.mul(6)
     i = torch.floor(h6)
-    f = h6 - i
+    f = h6.sub_(i)
     i = i.to(dtype=torch.int32)
 
-    p = (v * (1.0 - s)).clamp_(0.0, 1.0)
-    q = (v * (1.0 - s * f)).clamp_(0.0, 1.0)
-    t = (v * (1.0 - s * (1.0 - f))).clamp_(0.0, 1.0)
+    sxf = s * f
+    one_minus_s = 1.0 - s
+    q = (1.0 - sxf).mul_(v).clamp_(0.0, 1.0)
+    t = sxf.add_(one_minus_s).mul_(v).clamp_(0.0, 1.0)
+    p = one_minus_s.mul_(v).clamp_(0.0, 1.0)
     i.remainder_(6)
 
     mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1)
diff --git a/torchvision/prototype/transforms/functional/_meta.py b/torchvision/prototype/transforms/functional/_meta.py
index 8bcd8176733..ef07c866653 100644
--- a/torchvision/prototype/transforms/functional/_meta.py
+++ b/torchvision/prototype/transforms/functional/_meta.py
@@ -164,6 +164,7 @@ def convert_format_bounding_box(
     if new_format == old_format:
         return bounding_box
 
+    # TODO: Add _xywh_to_cxcywh and _cxcywh_to_xywh to improve performance
    if old_format == BoundingBoxFormat.XYWH:
        bounding_box = _xywh_to_xyxy(bounding_box, inplace)
    elif old_format == BoundingBoxFormat.CXCYWH:
diff --git a/torchvision/prototype/transforms/functional/_type_conversion.py b/torchvision/prototype/transforms/functional/_type_conversion.py
index 712ca62ecb5..de3a850adbe 100644
--- a/torchvision/prototype/transforms/functional/_type_conversion.py
+++ b/torchvision/prototype/transforms/functional/_type_conversion.py
@@ -1,4 +1,3 @@
-import unittest.mock
 from typing import Any, Dict, Tuple, Union
 
 import numpy as np
@@ -20,6 +19,7 @@ def decode_image_with_pil(encoded_image: torch.Tensor) -> features.Image:
 
 @torch.jit.unused
 def decode_video_with_av(encoded_video: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]:
+    import unittest.mock
     with unittest.mock.patch("torchvision.io.video.os.path.exists", return_value=True):
         return read_video(ReadOnlyTensorBuffer(encoded_video))  # type: ignore[arg-type]
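The first patch swaps out-of-place tensor ops for in-place variants (`div_`, `sub_`, `mul_`, `fmod_`) on intermediates that have no other users, cutting temporary allocations on the RGB-to-HSV-and-back path of `adjust_hue_image_tensor`. A minimal sketch of how such a kernel change can be timed with `torch.utils.benchmark`; the input shape and the import path for the kernel are illustrative assumptions, not taken from this series:

    import torch
    import torch.utils.benchmark as benchmark

    # Hypothetical input: a batch of float RGB images in [0, 1].
    image = torch.rand(4, 3, 224, 224)

    timer = benchmark.Timer(
        stmt="adjust_hue_image_tensor(image, 0.1)",
        setup="from torchvision.prototype.transforms.functional import adjust_hue_image_tensor",
        globals={"image": image},
    )
    # Runs the statement repeatedly and reports the measured times.
    print(timer.blocked_autorange())

Running this once before and once after the series and comparing the medians shows whether the in-place rewrite pays off.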
From 76218acc782676145f789bbe0981f02391369794 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Wed, 9 Nov 2022 17:48:17 +0000
Subject: [PATCH 2/5] handle ints

---
 torchvision/prototype/transforms/functional/_color.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/torchvision/prototype/transforms/functional/_color.py b/torchvision/prototype/transforms/functional/_color.py
index 994956ce66a..63c4b8d581e 100644
--- a/torchvision/prototype/transforms/functional/_color.py
+++ b/torchvision/prototype/transforms/functional/_color.py
@@ -189,13 +189,13 @@ def _rgb_to_hsv(image: torch.Tensor) -> torch.Tensor:
     channels_range = maxc - minc
     # Since `eqc => channels_range = 0`, replacing denominator with 1 when `eqc` is fine.
     ones = torch.ones_like(maxc)
-    s = channels_range.div_(torch.where(eqc, ones, maxc))
+    s = channels_range / torch.where(eqc, ones, maxc)
     # Note that `eqc => maxc = minc = r = g = b`. So the following calculation
     # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it
     # would not matter what values `rc`, `gc`, and `bc` have here, and thus
     # replacing denominator with 1 when `eqc` is fine.
     channels_range_divisor = torch.where(eqc, ones, channels_range).unsqueeze_(dim=-3)
-    rc, gc, bc = ((maxc.unsqueeze(dim=-3) - image).div_(channels_range_divisor)).unbind(dim=-3)
+    rc, gc, bc = ((maxc.unsqueeze(dim=-3) - image) / channels_range_divisor).unbind(dim=-3)
 
     mask_maxc_neq_r = maxc != r
     mask_maxc_eq_g = maxc == g

From 78230f43a02e0a7593047bfaf2f952a0039197fc Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Wed, 9 Nov 2022 18:07:04 +0000
Subject: [PATCH 3/5] Inplace logical ops

---
 torchvision/prototype/transforms/functional/_color.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/torchvision/prototype/transforms/functional/_color.py b/torchvision/prototype/transforms/functional/_color.py
index 63c4b8d581e..5118b161a53 100644
--- a/torchvision/prototype/transforms/functional/_color.py
+++ b/torchvision/prototype/transforms/functional/_color.py
@@ -199,11 +199,10 @@ def _rgb_to_hsv(image: torch.Tensor) -> torch.Tensor:
 
     mask_maxc_neq_r = maxc != r
     mask_maxc_eq_g = maxc == g
-    mask_maxc_neq_g = ~mask_maxc_eq_g
 
     hg = rc.add(2.0).sub_(bc).mul_(mask_maxc_eq_g & mask_maxc_neq_r)
     hr = bc.sub_(gc).mul_(~mask_maxc_neq_r)
-    hb = gc.add_(4.0).sub_(rc).mul_(mask_maxc_neq_g & mask_maxc_neq_r)
+    hb = gc.add_(4.0).sub_(rc).mul_(mask_maxc_neq_r.logical_and_(mask_maxc_eq_g.logical_not_()))
 
     h = hr.add_(hg).add_(hb)
     h = h.mul_(1.0 / 6.0).add_(1.0).fmod_(1.0)
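Patch 2 reverts the two in-place divisions from patch 1: for integer images, `maxc - minc` is itself an integer tensor, true division promotes to float, and PyTorch refuses to write float results back into an integer tensor in place. Patch 3 then recovers some of the savings by making the boolean mask ops in-place instead, which is always safe since `bool & bool` stays `bool`. A minimal standalone sketch of the failure mode the revert avoids (the values are illustrative):

    import torch

    num = torch.tensor([6, 8], dtype=torch.uint8)
    den = torch.tensor([3, 4], dtype=torch.uint8)

    # Out-of-place true division promotes to float and works fine.
    print(num / den)  # tensor([2., 2.])

    # In-place division would have to store float results in a uint8 tensor.
    try:
        num.div_(den)
    except RuntimeError as err:
        print(err)  # the Float result type can't be cast to the uint8 output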
From a99db93b77fe9dbde56bd55eb42ea62b52993114 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Wed, 9 Nov 2022 18:26:15 +0000
Subject: [PATCH 4/5] Remove unnecessary casting.

---
 torchvision/prototype/transforms/functional/_color.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchvision/prototype/transforms/functional/_color.py b/torchvision/prototype/transforms/functional/_color.py
index cb1d4a266cc..fa93973f668 100644
--- a/torchvision/prototype/transforms/functional/_color.py
+++ b/torchvision/prototype/transforms/functional/_color.py
@@ -235,7 +235,7 @@ def _hsv_to_rgb(img: torch.Tensor) -> torch.Tensor:
     a3 = torch.stack((p, p, t, v, v, q), dim=-3)
     a4 = torch.stack((a1, a2, a3), dim=-4)
 
-    return (a4.mul_(mask.to(dtype=img.dtype).unsqueeze(dim=-4))).sum(dim=-3)
+    return (a4.mul_(mask.unsqueeze(dim=-4))).sum(dim=-3)
 
 
 def adjust_hue_image_tensor(image: torch.Tensor, hue_factor: float) -> torch.Tensor:

From 6d9cea1ef80d807d7053cb1838e4c027f9f94942 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Wed, 9 Nov 2022 19:08:15 +0000
Subject: [PATCH 5/5] Fix linter.

---
 torchvision/prototype/transforms/functional/_type_conversion.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/torchvision/prototype/transforms/functional/_type_conversion.py b/torchvision/prototype/transforms/functional/_type_conversion.py
index de3a850adbe..c99d3d9affc 100644
--- a/torchvision/prototype/transforms/functional/_type_conversion.py
+++ b/torchvision/prototype/transforms/functional/_type_conversion.py
@@ -20,6 +20,7 @@ def decode_image_with_pil(encoded_image: torch.Tensor) -> features.Image:
 
 @torch.jit.unused
 def decode_video_with_av(encoded_video: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]:
     import unittest.mock
+
     with unittest.mock.patch("torchvision.io.video.os.path.exists", return_value=True):
         return read_video(ReadOnlyTensorBuffer(encoded_video))  # type: ignore[arg-type]
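Patch 4 drops the explicit `mask.to(dtype=img.dtype)` cast before the in-place multiply: multiplying a floating-point tensor in place by a boolean tensor is already valid, because type promotion of `float * bool` yields `float`, which matches the dtype of the in-place operand, so the cast only allocated an extra temporary. A minimal standalone sketch of that promotion rule:

    import torch

    a = torch.tensor([0.5, 1.5, 2.5])
    mask = torch.tensor([True, False, True])

    # float * bool promotes to float, so writing the result back into `a` is legal.
    a.mul_(mask)
    print(a)  # tensor([0.5000, 0.0000, 2.5000])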