
Commit 0874338

Rename convert_bounding_box_format => convert_format_bounding_box (#6582)
* Rename `convert_bounding_box_format` => `convert_format_bounding_box`
* Add missed replacement.
1 parent 321f655 commit 0874338
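
For orientation, this commit is a pure rename: the signature and keyword arguments are unchanged, only the function name moves. A minimal before/after sketch of a call site, assuming the prototype API as it exists in this commit:

    from torchvision.prototype import features
    from torchvision.prototype.transforms import functional as F

    box = features.BoundingBox(
        [10, 20, 30, 40], format=features.BoundingBoxFormat.XYXY, image_size=(100, 100)
    )

    # Before this commit:
    #   F.convert_bounding_box_format(box, old_format=box.format, new_format=...)
    # After this commit:
    xywh = F.convert_format_bounding_box(
        box, old_format=box.format, new_format=features.BoundingBoxFormat.XYWH
    )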

File tree

9 files changed: +40 -40 lines changed

test/test_prototype_transforms_functional.py

Lines changed: 15 additions & 15 deletions

@@ -19,7 +19,7 @@
 from torch import jit
 from torchvision.prototype import features
 from torchvision.prototype.transforms.functional._geometry import _center_crop_compute_padding
-from torchvision.prototype.transforms.functional._meta import convert_bounding_box_format
+from torchvision.prototype.transforms.functional._meta import convert_format_bounding_box
 from torchvision.transforms.functional import _get_perspective_coeffs


@@ -633,7 +633,7 @@ def _compute_expected_bbox(bbox, angle_, translate_, scale_, shear_, center_):
         affine_matrix = _compute_affine_matrix(angle_, translate_, scale_, shear_, center_)
         affine_matrix = affine_matrix[:2, :]

-        bbox_xyxy = convert_bounding_box_format(
+        bbox_xyxy = convert_format_bounding_box(
             bbox, old_format=bbox.format, new_format=features.BoundingBoxFormat.XYXY
         )
         points = np.array(
@@ -658,7 +658,7 @@ def _compute_expected_bbox(bbox, angle_, translate_, scale_, shear_, center_):
             dtype=torch.float32,
             device=bbox.device,
         )
-        return convert_bounding_box_format(
+        return convert_format_bounding_box(
             out_bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox.format, copy=False
         )

@@ -835,7 +835,7 @@ def _compute_expected_bbox(bbox, angle_, expand_, center_):
         affine_matrix = affine_matrix[:2, :]

         image_size = bbox.image_size
-        bbox_xyxy = convert_bounding_box_format(
+        bbox_xyxy = convert_format_bounding_box(
             bbox, old_format=bbox.format, new_format=features.BoundingBoxFormat.XYXY
         )
         points = np.array(
@@ -876,7 +876,7 @@ def _compute_expected_bbox(bbox, angle_, expand_, center_):
             dtype=torch.float32,
             device=bbox.device,
         )
-        return convert_bounding_box_format(
+        return convert_format_bounding_box(
             out_bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox.format, copy=False
         )

@@ -1097,7 +1097,7 @@ def test_correctness_crop_bounding_box(device, format, top, left, height, width,
     ]
     in_boxes = features.BoundingBox(in_boxes, format=features.BoundingBoxFormat.XYXY, image_size=size, device=device)
     if format != features.BoundingBoxFormat.XYXY:
-        in_boxes = convert_bounding_box_format(in_boxes, features.BoundingBoxFormat.XYXY, format)
+        in_boxes = convert_format_bounding_box(in_boxes, features.BoundingBoxFormat.XYXY, format)

     output_boxes = F.crop_bounding_box(
         in_boxes,
@@ -1107,7 +1107,7 @@ def test_correctness_crop_bounding_box(device, format, top, left, height, width,
     )

     if format != features.BoundingBoxFormat.XYXY:
-        output_boxes = convert_bounding_box_format(output_boxes, format, features.BoundingBoxFormat.XYXY)
+        output_boxes = convert_format_bounding_box(output_boxes, format, features.BoundingBoxFormat.XYXY)

     torch.testing.assert_close(output_boxes.tolist(), expected_bboxes)

@@ -1213,12 +1213,12 @@ def _compute_expected_bbox(bbox, top_, left_, height_, width_, size_):
         in_boxes, format=features.BoundingBoxFormat.XYXY, image_size=image_size, device=device
     )
     if format != features.BoundingBoxFormat.XYXY:
-        in_boxes = convert_bounding_box_format(in_boxes, features.BoundingBoxFormat.XYXY, format)
+        in_boxes = convert_format_bounding_box(in_boxes, features.BoundingBoxFormat.XYXY, format)

     output_boxes = F.resized_crop_bounding_box(in_boxes, format, top, left, height, width, size)

     if format != features.BoundingBoxFormat.XYXY:
-        output_boxes = convert_bounding_box_format(output_boxes, format, features.BoundingBoxFormat.XYXY)
+        output_boxes = convert_format_bounding_box(output_boxes, format, features.BoundingBoxFormat.XYXY)

     torch.testing.assert_close(output_boxes, expected_bboxes)

@@ -1268,12 +1268,12 @@ def _compute_expected_bbox(bbox, padding_):

         bbox_format = bbox.format
         bbox_dtype = bbox.dtype
-        bbox = convert_bounding_box_format(bbox, old_format=bbox_format, new_format=features.BoundingBoxFormat.XYXY)
+        bbox = convert_format_bounding_box(bbox, old_format=bbox_format, new_format=features.BoundingBoxFormat.XYXY)

         bbox[0::2] += pad_left
         bbox[1::2] += pad_up

-        bbox = convert_bounding_box_format(
+        bbox = convert_format_bounding_box(
             bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox_format, copy=False
         )
         if bbox.dtype != bbox_dtype:
@@ -1396,7 +1396,7 @@ def _compute_expected_bbox(bbox, pcoeffs_):
             ]
         )

-        bbox_xyxy = convert_bounding_box_format(
+        bbox_xyxy = convert_format_bounding_box(
             bbox, old_format=bbox.format, new_format=features.BoundingBoxFormat.XYXY
         )
         points = np.array(
@@ -1423,7 +1423,7 @@ def _compute_expected_bbox(bbox, pcoeffs_):
             dtype=torch.float32,
             device=bbox.device,
         )
-        return convert_bounding_box_format(
+        return convert_format_bounding_box(
             out_bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox.format, copy=False
         )

@@ -1528,7 +1528,7 @@ def test_correctness_center_crop_bounding_box(device, output_size):
     def _compute_expected_bbox(bbox, output_size_):
         format_ = bbox.format
         image_size_ = bbox.image_size
-        bbox = convert_bounding_box_format(bbox, format_, features.BoundingBoxFormat.XYWH)
+        bbox = convert_format_bounding_box(bbox, format_, features.BoundingBoxFormat.XYWH)

         if len(output_size_) == 1:
             output_size_.append(output_size_[-1])
@@ -1548,7 +1548,7 @@ def _compute_expected_bbox(bbox, output_size_):
             dtype=bbox.dtype,
             device=bbox.device,
         )
-        return convert_bounding_box_format(out_bbox, features.BoundingBoxFormat.XYWH, format_, copy=False)
+        return convert_format_bounding_box(out_bbox, features.BoundingBoxFormat.XYWH, format_, copy=False)

     for bboxes in make_bounding_boxes(
         image_sizes=[(32, 32), (24, 33), (32, 25)],
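
Every hunk above follows the same round-trip pattern: convert the box to XYXY, apply the geometric change in corner coordinates, then convert back to the original format. A standalone sketch of that pattern in plain torch (the helpers and the offset are illustrative stand-ins, not code from the test file):

    import torch

    def xywh_to_xyxy(b):
        # Illustrative helper standing in for convert_format_bounding_box.
        x, y, w, h = b.unbind(-1)
        return torch.stack((x, y, x + w, y + h), -1)

    def xyxy_to_xywh(b):
        x1, y1, x2, y2 = b.unbind(-1)
        return torch.stack((x1, y1, x2 - x1, y2 - y1), -1)

    box_xywh = torch.tensor([10., 20., 20., 40.])
    box_xyxy = xywh_to_xyxy(box_xywh)            # work in corner coordinates
    box_xyxy += torch.tensor([5., 5., 5., 5.])   # e.g. a pad/crop offset
    print(xyxy_to_xywh(box_xyxy))                # tensor([15., 25., 20., 40.])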

torchvision/prototype/features/_bounding_box.py

Lines changed: 1 addition & 1 deletion

@@ -65,7 +65,7 @@ def to_format(self, format: Union[str, BoundingBoxFormat]) -> BoundingBox:
             format = BoundingBoxFormat.from_str(format.upper())

         return BoundingBox.new_like(
-            self, self._F.convert_bounding_box_format(self, old_format=self.format, new_format=format), format=format
+            self, self._F.convert_format_bounding_box(self, old_format=self.format, new_format=format), format=format
         )

     def horizontal_flip(self) -> BoundingBox:
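
to_format is the user-facing wrapper over the renamed kernel; per the hunk above it accepts either the enum or a case-insensitive string (upper-cased and parsed via from_str). A hedged usage sketch, assuming the prototype BoundingBox API:

    from torchvision.prototype import features

    box = features.BoundingBox(
        [10, 20, 30, 60], format=features.BoundingBoxFormat.XYXY, image_size=(100, 100)
    )
    # Strings and enum members are both accepted:
    print(box.to_format("cxcywh"))  # center-x, center-y, width, height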

torchvision/prototype/transforms/_augment.py

Lines changed: 2 additions & 2 deletions

@@ -254,7 +254,7 @@ def _copy_paste(
     # There is a similar +1 in other reference implementations:
     # https://github.com/pytorch/vision/blob/b6feccbc4387766b76a3e22b13815dbbbfa87c0f/torchvision/models/detection/roi_heads.py#L418-L422
     xyxy_boxes[:, 2:] += 1
-    boxes = F.convert_bounding_box_format(
+    boxes = F.convert_format_bounding_box(
         xyxy_boxes, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox_format, copy=False
     )
     out_target["boxes"] = torch.cat([boxes, paste_boxes])
@@ -263,7 +263,7 @@ def _copy_paste(
     out_target["labels"] = torch.cat([labels, paste_labels])

     # Check for degenerated boxes and remove them
-    boxes = F.convert_bounding_box_format(
+    boxes = F.convert_format_bounding_box(
         out_target["boxes"], old_format=bbox_format, new_format=features.BoundingBoxFormat.XYXY
     )
     degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
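
The degenerate-box check at the end of this hunk is plain tensor logic over any XYXY batch; a small standalone illustration:

    import torch

    # XYXY boxes; the last one is degenerate (x2 <= x1).
    boxes = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 15.], [8., 8., 3., 12.]])
    degenerate = (boxes[:, 2:] <= boxes[:, :2]).any(dim=1)
    print(boxes[~degenerate])  # keeps only the first two boxes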

torchvision/prototype/transforms/_geometry.py

Lines changed: 1 addition & 1 deletion

@@ -655,7 +655,7 @@ def _get_params(self, sample: Any) -> Dict[str, Any]:
                     continue

                 # check for any valid boxes with centers within the crop area
-                xyxy_bboxes = F.convert_bounding_box_format(
+                xyxy_bboxes = F.convert_format_bounding_box(
                     bboxes, old_format=bboxes.format, new_format=features.BoundingBoxFormat.XYXY, copy=True
                 )
                 cx = 0.5 * (xyxy_bboxes[..., 0] + xyxy_bboxes[..., 2])
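
The conversion to XYXY right before this is what makes the center test a one-liner: box centers fall straight out of the corner coordinates. A standalone illustration:

    import torch

    xyxy = torch.tensor([[10., 20., 30., 60.]])
    cx = 0.5 * (xyxy[..., 0] + xyxy[..., 2])
    cy = 0.5 * (xyxy[..., 1] + xyxy[..., 3])
    print(cx.item(), cy.item())  # 20.0 40.0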

torchvision/prototype/transforms/_meta.py

Lines changed: 1 addition & 1 deletion

@@ -17,7 +17,7 @@ def __init__(self, format: Union[str, features.BoundingBoxFormat]) -> None:
         self.format = format

     def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
-        output = F.convert_bounding_box_format(inpt, old_format=inpt.format, new_format=params["format"])
+        output = F.convert_format_bounding_box(inpt, old_format=inpt.format, new_format=params["format"])
         return features.BoundingBox.new_like(inpt, output, format=params["format"])
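
This _transform belongs to the format-conversion transform defined in this file; the hunk only shows its __init__ and _transform, so the class name and export below are assumptions. A hedged usage sketch:

    from torchvision.prototype import features, transforms

    # Assumed class name; only __init__ and _transform appear in the hunk.
    convert = transforms.ConvertBoundingBoxFormat("xyxy")

    box = features.BoundingBox(
        [20., 40., 20., 40.], format=features.BoundingBoxFormat.CXCYWH, image_size=(100, 100)
    )
    print(convert(box))  # the same box, re-expressed as corner coordinates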

torchvision/prototype/transforms/_misc.py

Lines changed: 1 addition & 1 deletion

@@ -163,7 +163,7 @@ def _get_params(self, sample: Any) -> Dict[str, Any]:
         # be in XYXY format only to calculate the width and height internally. Thus, if the box is in XYWH or CXCYWH
         # format, we need to convert first just to afterwards compute the width and height again, although they were
         # there in the first place for these formats.
-        bounding_box = F.convert_bounding_box_format(
+        bounding_box = F.convert_format_bounding_box(
             bounding_box, old_format=bounding_box.format, new_format=features.BoundingBoxFormat.XYXY
         )
         valid_indices = remove_small_boxes(bounding_box, min_size=self.min_size)
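
remove_small_boxes comes from torchvision.ops and expects XYXY input, which is why the conversion above precedes it; it returns the indices of boxes whose width and height both reach min_size. A standalone illustration:

    import torch
    from torchvision.ops import remove_small_boxes

    boxes_xyxy = torch.tensor([[0., 0., 50., 50.], [10., 10., 12., 11.]])
    keep = remove_small_boxes(boxes_xyxy, min_size=5.0)
    print(keep)  # tensor([0]); the 2x1 box falls below min_size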

torchvision/prototype/transforms/functional/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -3,7 +3,7 @@
 from torchvision.transforms import InterpolationMode  # usort: skip
 from ._meta import (
     clamp_bounding_box,
-    convert_bounding_box_format,
+    convert_format_bounding_box,
     convert_color_space_image_tensor,
     convert_color_space_image_pil,
     convert_color_space,

torchvision/prototype/transforms/functional/_geometry.py

Lines changed: 15 additions & 15 deletions

@@ -17,7 +17,7 @@
 )
 from torchvision.transforms.functional_tensor import _parse_pad_padding

-from ._meta import convert_bounding_box_format, get_dimensions_image_pil, get_dimensions_image_tensor
+from ._meta import convert_format_bounding_box, get_dimensions_image_pil, get_dimensions_image_tensor


 # shortcut type
@@ -37,13 +37,13 @@ def horizontal_flip_bounding_box(
 ) -> torch.Tensor:
     shape = bounding_box.shape

-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)

     bounding_box[:, [0, 2]] = image_size[1] - bounding_box[:, [2, 0]]

-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(shape)

@@ -70,13 +70,13 @@ def vertical_flip_bounding_box(
 ) -> torch.Tensor:
     shape = bounding_box.shape

-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)

     bounding_box[:, [1, 3]] = image_size[0] - bounding_box[:, [3, 1]]

-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(shape)

@@ -362,15 +362,15 @@ def affine_bounding_box(
     center: Optional[List[float]] = None,
 ) -> torch.Tensor:
     original_shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)

     out_bboxes = _affine_bounding_box_xyxy(bounding_box, image_size, angle, translate, scale, shear, center)

     # out_bboxes should be of shape [N boxes, 4]

-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(original_shape)

@@ -530,13 +530,13 @@ def rotate_bounding_box(
         center = None

     original_shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)

     out_bboxes = _affine_bounding_box_xyxy(bounding_box, image_size, angle=-angle, center=center, expand=expand)

-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(original_shape)

@@ -704,15 +704,15 @@ def crop_bounding_box(
     top: int,
     left: int,
 ) -> torch.Tensor:
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     )

     # Crop or implicit pad if left and/or top have negative values:
     bounding_box[..., 0::2] -= left
     bounding_box[..., 1::2] -= top

-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     )

@@ -758,7 +758,7 @@ def perspective_bounding_box(
         raise ValueError("Argument perspective_coeffs should have 8 float values")

     original_shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)

@@ -828,7 +828,7 @@ def perspective_bounding_box(

     # out_bboxes should be of shape [N boxes, 4]

-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(original_shape)

@@ -900,7 +900,7 @@ def elastic_bounding_box(
     displacement = displacement.to(bounding_box.device)

     original_shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)

@@ -926,7 +926,7 @@ def elastic_bounding_box(
     out_bbox_maxs, _ = torch.max(transformed_points, dim=1)
     out_bboxes = torch.cat([out_bbox_mins, out_bbox_maxs], dim=1)

-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(original_shape)
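
All of these kernels share the convert, mutate, convert-back structure; the geometric work itself is one line of tensor indexing. A standalone illustration of the horizontal-flip update from the first hunk:

    import torch

    image_width = 100
    boxes = torch.tensor([[10., 20., 30., 40.]])  # XYXY
    # Mirror the x-coordinates and swap them so that x1 <= x2 still holds:
    boxes[:, [0, 2]] = image_width - boxes[:, [2, 0]]
    print(boxes)  # tensor([[70., 20., 90., 40.]])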

torchvision/prototype/transforms/functional/_meta.py

Lines changed: 3 additions & 3 deletions

@@ -79,7 +79,7 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor) -> torch.Tensor:
     return torch.stack((cx, cy, w, h), dim=-1)


-def convert_bounding_box_format(
+def convert_format_bounding_box(
     bounding_box: torch.Tensor, old_format: BoundingBoxFormat, new_format: BoundingBoxFormat, copy: bool = True
 ) -> torch.Tensor:
     if new_format == old_format:
@@ -106,10 +106,10 @@ def clamp_bounding_box(
 ) -> torch.Tensor:
     # TODO: (PERF) Possible speed up clamping if we have different implementations for each bbox format.
     # Not sure if they yield equivalent results.
-    xyxy_boxes = convert_bounding_box_format(bounding_box, format, BoundingBoxFormat.XYXY)
+    xyxy_boxes = convert_format_bounding_box(bounding_box, format, BoundingBoxFormat.XYXY)
     xyxy_boxes[..., 0::2].clamp_(min=0, max=image_size[1])
     xyxy_boxes[..., 1::2].clamp_(min=0, max=image_size[0])
-    return convert_bounding_box_format(xyxy_boxes, BoundingBoxFormat.XYXY, format, copy=False)
+    return convert_format_bounding_box(xyxy_boxes, BoundingBoxFormat.XYXY, format, copy=False)


 def _split_alpha(image: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
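
The renamed function dispatches to private helpers such as _xyxy_to_cxcywh, whose closing stack call opens the hunk above. A sketch re-deriving that direction of the conversion (illustrative, not the library code itself):

    import torch

    def xyxy_to_cxcywh(xyxy: torch.Tensor) -> torch.Tensor:
        # Corners -> center + size, mirroring the private helper's arithmetic.
        x1, y1, x2, y2 = xyxy.unbind(dim=-1)
        cx = (x1 + x2) / 2
        cy = (y1 + y2) / 2
        w = x2 - x1
        h = y2 - y1
        return torch.stack((cx, cy, w, h), dim=-1)

    print(xyxy_to_cxcywh(torch.tensor([10., 20., 30., 60.])))
    # tensor([20., 40., 20., 40.])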
