Commit 2399c0a

NicolasHug authored and facebook-github-bot committed
[fbsync] separate transforms v2 legacy test utils (#7842)
Summary: (Note: this ignores all push blocking failures!)

Reviewed By: matteobettini

Differential Revision: D48900374

fbshipit-source-id: 69750e3297c88df98e5bb523364317ca05ec4722
1 parent 1178cb2 commit 2399c0a

12 files changed, +754 -594 lines changed
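
In short: this commit moves the transforms v2 test helpers out of test/common_utils.py into a new test/transforms_v2_legacy_utils.py module, renaming the single-sample maker make_bounding_box to make_bounding_boxes along the way (the old plural generator becomes make_multiple_bounding_boxes). A minimal before/after sketch of the import change, assembled from the hunks below (not an exhaustive list of the moved names):

    # Before: assertions and legacy v2 makers mixed in one module.
    # from common_utils import assert_equal, make_bounding_box, make_image, make_video

    # After: generic assertions stay in common_utils; the legacy makers move,
    # with the rename applied.
    from common_utils import assert_equal
    from transforms_v2_legacy_utils import make_bounding_boxes, make_image, make_video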

test/common_utils.py  +4 -469

Large diffs are not rendered by default.
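
The collapsed diff removes far more than it adds (+4 -469), consistent with the maker and loader helpers (make_bounding_boxes, make_image, make_detection_mask, TensorLoader, from_loader, from_loaders, combinations_grid, DEFAULT_EXTRA_DIMS, and friends) relocating to test/transforms_v2_legacy_utils.py. As a purely hypothetical sketch of the shape such a maker takes (the real implementation is not visible in this view), a bounding-box maker compatible with the call sites below might look like:

    import torch
    from torchvision import datapoints


    def make_bounding_boxes(format="XYXY", canvas_size=(32, 32), batch_dims=(), dtype=None):
        # Hypothetical reconstruction based on the call sites in this commit:
        # returns a random BoundingBoxes datapoint of shape (*batch_dims, 4)
        # with coordinates inside canvas_size=(height, width). Only the XYXY
        # branch is sketched; the real helper also handles XYWH and CXCYWH.
        assert str(format).endswith("XYXY"), "sketch only implements XYXY"
        height, width = canvas_size
        x1 = torch.rand(batch_dims) * width / 2
        y1 = torch.rand(batch_dims) * height / 2
        x2 = x1 + torch.rand(batch_dims) * width / 2
        y2 = y1 + torch.rand(batch_dims) * height / 2
        data = torch.stack([x1, y1, x2, y2], dim=-1).to(dtype or torch.float32)
        return datapoints.BoundingBoxes(data, format=format, canvas_size=canvas_size)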

test/prototype_common_utils.py  +2 -2
@@ -4,12 +4,12 @@

 import pytest
 import torch
-
-from common_utils import combinations_grid, DEFAULT_EXTRA_DIMS, from_loader, from_loaders, TensorLoader
 from torch.nn.functional import one_hot

 from torchvision.prototype import datapoints

+from transforms_v2_legacy_utils import combinations_grid, DEFAULT_EXTRA_DIMS, from_loader, from_loaders, TensorLoader
+

 @dataclasses.dataclass
 class LabelLoader(TensorLoader):
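
Among the moved names, combinations_grid is worth recalling when reading the tests: it expands keyword lists into a list of kwargs dicts for parametrization. A sketch, assuming the moved copy keeps the behavior this helper has long had in torchvision's test suite:

    import itertools


    def combinations_grid(**kwargs):
        # One dict per element of the Cartesian product of the value lists.
        return [dict(zip(kwargs.keys(), values)) for values in itertools.product(*kwargs.values())]


    # combinations_grid(device=["cpu", "cuda"], fill=[0, 1])
    # -> [{"device": "cpu", "fill": 0}, {"device": "cpu", "fill": 1},
    #     {"device": "cuda", "fill": 0}, {"device": "cuda", "fill": 1}]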

test/test_datapoints.py  +13 -13
@@ -2,7 +2,7 @@

 import pytest
 import torch
-from common_utils import assert_equal, make_bounding_box, make_image, make_segmentation_mask, make_video
+from common_utils import assert_equal, make_bounding_boxes, make_image, make_segmentation_mask, make_video
 from PIL import Image

 from torchvision import datapoints
@@ -68,7 +68,7 @@ def test_new_requires_grad(data, input_requires_grad, expected_requires_grad):
     assert datapoint.requires_grad is expected_requires_grad


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 def test_isinstance(make_input):
     assert isinstance(make_input(), torch.Tensor)

@@ -80,7 +80,7 @@ def test_wrapping_no_copy():
     assert image.data_ptr() == tensor.data_ptr()


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 def test_to_wrapping(make_input):
     dp = make_input()

@@ -90,7 +90,7 @@ def test_to_wrapping(make_input):
     assert dp_to.dtype is torch.float64


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 @pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
 def test_to_datapoint_reference(make_input, return_type):
     tensor = torch.rand((3, 16, 16), dtype=torch.float64)
@@ -104,7 +104,7 @@ def test_to_datapoint_reference(make_input, return_type):
     assert type(tensor) is torch.Tensor


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 @pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
 def test_clone_wrapping(make_input, return_type):
     dp = make_input()
@@ -116,7 +116,7 @@ def test_clone_wrapping(make_input, return_type):
     assert dp_clone.data_ptr() != dp.data_ptr()


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 @pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
 def test_requires_grad__wrapping(make_input, return_type):
     dp = make_input(dtype=torch.float)
@@ -131,7 +131,7 @@ def test_requires_grad__wrapping(make_input, return_type):
     assert dp_requires_grad.requires_grad


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 @pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
 def test_detach_wrapping(make_input, return_type):
     dp = make_input(dtype=torch.float).requires_grad_(True)
@@ -170,7 +170,7 @@ def test_force_subclass_with_metadata(return_type):
     datapoints.set_return_type("tensor")


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 @pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
 def test_other_op_no_wrapping(make_input, return_type):
     dp = make_input()
@@ -182,7 +182,7 @@ def test_other_op_no_wrapping(make_input, return_type):
     assert type(output) is (type(dp) if return_type == "datapoint" else torch.Tensor)


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 @pytest.mark.parametrize(
     "op",
     [
@@ -199,7 +199,7 @@ def test_no_tensor_output_op_no_wrapping(make_input, op):
     assert type(output) is not type(dp)


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 @pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
 def test_inplace_op_no_wrapping(make_input, return_type):
     dp = make_input()
@@ -212,7 +212,7 @@ def test_inplace_op_no_wrapping(make_input, return_type):
     assert type(dp) is original_type


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 def test_wrap(make_input):
     dp = make_input()

@@ -225,7 +225,7 @@ def test_wrap(make_input):
     assert dp_new.data_ptr() == output.data_ptr()


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 @pytest.mark.parametrize("requires_grad", [False, True])
 def test_deepcopy(make_input, requires_grad):
     dp = make_input(dtype=torch.float)
@@ -242,7 +242,7 @@ def test_deepcopy(make_input, requires_grad):
     assert dp_deepcopied.requires_grad is requires_grad


-@pytest.mark.parametrize("make_input", [make_image, make_bounding_box, make_segmentation_mask, make_video])
+@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video])
 @pytest.mark.parametrize("return_type", ["Tensor", "datapoint"])
 @pytest.mark.parametrize(
     "op",

test/test_prototype_transforms.py  +16 -16
@@ -4,21 +4,21 @@
 import pytest
 import torch

-from common_utils import (
-    assert_equal,
-    DEFAULT_EXTRA_DIMS,
-    make_bounding_box,
-    make_detection_mask,
-    make_image,
-    make_video,
-)
+from common_utils import assert_equal

 from prototype_common_utils import make_label

 from torchvision.datapoints import BoundingBoxes, BoundingBoxFormat, Image, Mask, Video
 from torchvision.prototype import datapoints, transforms
 from torchvision.transforms.v2.functional import clamp_bounding_boxes, InterpolationMode, pil_to_tensor, to_pil_image
 from torchvision.transforms.v2.utils import check_type, is_pure_tensor
+from transforms_v2_legacy_utils import (
+    DEFAULT_EXTRA_DIMS,
+    make_bounding_boxes,
+    make_detection_mask,
+    make_image,
+    make_video,
+)

 BATCH_EXTRA_DIMS = [extra_dims for extra_dims in DEFAULT_EXTRA_DIMS if extra_dims]

@@ -167,7 +167,7 @@ def test__get_params(self, mocker):

         flat_inputs = [
             make_image(size=canvas_size, color_space="RGB"),
-            make_bounding_box(format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=batch_shape),
+            make_bounding_boxes(format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=batch_shape),
         ]
         params = transform._get_params(flat_inputs)

@@ -202,7 +202,7 @@ def test__transform_culling(self, mocker):
             ),
         )

-        bounding_boxes = make_bounding_box(
+        bounding_boxes = make_bounding_boxes(
             format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(batch_size,)
         )
         masks = make_detection_mask(size=canvas_size, batch_dims=(batch_size,))
@@ -240,7 +240,7 @@ def test__transform_bounding_boxes_clamping(self, mocker):
             ),
         )

-        bounding_boxes = make_bounding_box(
+        bounding_boxes = make_bounding_boxes(
             format=BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(batch_size,)
         )
         mock = mocker.patch(
@@ -283,7 +283,7 @@ class TestPermuteDimensions:
     def test_call(self, dims, inverse_dims):
         sample = dict(
             image=make_image(),
-            bounding_boxes=make_bounding_box(format=BoundingBoxFormat.XYXY),
+            bounding_boxes=make_bounding_boxes(format=BoundingBoxFormat.XYXY),
             video=make_video(),
             str="str",
             int=0,
@@ -327,7 +327,7 @@ class TestTransposeDimensions:
     def test_call(self, dims):
         sample = dict(
             image=make_image(),
-            bounding_boxes=make_bounding_box(format=BoundingBoxFormat.XYXY),
+            bounding_boxes=make_bounding_boxes(format=BoundingBoxFormat.XYXY),
             video=make_video(),
             str="str",
             int=0,
@@ -389,7 +389,7 @@ def make_datapoints():

     pil_image = to_pil_image(make_image(size=size, color_space="RGB"))
     target = {
-        "boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
+        "boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
         "labels": make_label(extra_dims=(num_objects,), categories=80),
         "masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
     }
@@ -398,7 +398,7 @@ def make_datapoints():

     tensor_image = torch.Tensor(make_image(size=size, color_space="RGB"))
     target = {
-        "boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
+        "boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
         "labels": make_label(extra_dims=(num_objects,), categories=80),
         "masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
     }
@@ -407,7 +407,7 @@ def make_datapoints():

     datapoint_image = make_image(size=size, color_space="RGB")
     target = {
-        "boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
+        "boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
         "labels": make_label(extra_dims=(num_objects,), categories=80),
         "masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
     }
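
The three make_datapoints() hunks build the same detection-style target around different image containers (PIL, plain tensor, datapoint). Pulled together, with illustrative values for the local variables size and num_objects, which are defined earlier in the function and not visible in this view:

    import torch
    from prototype_common_utils import make_label
    from torchvision.transforms.v2.functional import to_pil_image
    from transforms_v2_legacy_utils import make_bounding_boxes, make_detection_mask, make_image

    size = (256, 256)  # illustrative value
    num_objects = 4    # illustrative value

    pil_image = to_pil_image(make_image(size=size, color_space="RGB"))
    target = {
        "boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
        "labels": make_label(extra_dims=(num_objects,), categories=80),
        "masks": make_detection_mask(size=size, num_objects=num_objects, dtype=torch.long),
    }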

test/test_transforms_v2.py  +14 -16
@@ -11,25 +11,23 @@
 import torch
 import torchvision.transforms.v2 as transforms

-from common_utils import (
-    assert_equal,
-    assert_run_python_script,
-    cpu_and_cuda,
-    make_bounding_box,
+from common_utils import assert_equal, assert_run_python_script, cpu_and_cuda
+from torch.utils._pytree import tree_flatten, tree_unflatten
+from torchvision import datapoints
+from torchvision.ops.boxes import box_iou
+from torchvision.transforms.functional import to_pil_image
+from torchvision.transforms.v2 import functional as F
+from torchvision.transforms.v2.utils import check_type, is_pure_tensor, query_chw
+from transforms_v2_legacy_utils import (
     make_bounding_boxes,
     make_detection_mask,
     make_image,
     make_images,
+    make_multiple_bounding_boxes,
     make_segmentation_mask,
     make_video,
     make_videos,
 )
-from torch.utils._pytree import tree_flatten, tree_unflatten
-from torchvision import datapoints
-from torchvision.ops.boxes import box_iou
-from torchvision.transforms.functional import to_pil_image
-from torchvision.transforms.v2 import functional as F
-from torchvision.transforms.v2.utils import check_type, is_pure_tensor, query_chw


 def make_vanilla_tensor_images(*args, **kwargs):
@@ -45,7 +43,7 @@ def make_pil_images(*args, **kwargs):


 def make_vanilla_tensor_bounding_boxes(*args, **kwargs):
-    for bounding_boxes in make_bounding_boxes(*args, **kwargs):
+    for bounding_boxes in make_multiple_bounding_boxes(*args, **kwargs):
         yield bounding_boxes.data


@@ -180,13 +178,13 @@ def test_common(self, transform, adapter, container_type, image_or_video, device
             image_datapoint=make_image(size=canvas_size),
             video_datapoint=make_video(size=canvas_size),
             image_pil=next(make_pil_images(sizes=[canvas_size], color_spaces=["RGB"])),
-            bounding_boxes_xyxy=make_bounding_box(
+            bounding_boxes_xyxy=make_bounding_boxes(
                 format=datapoints.BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(3,)
             ),
-            bounding_boxes_xywh=make_bounding_box(
+            bounding_boxes_xywh=make_bounding_boxes(
                 format=datapoints.BoundingBoxFormat.XYWH, canvas_size=canvas_size, batch_dims=(4,)
             ),
-            bounding_boxes_cxcywh=make_bounding_box(
+            bounding_boxes_cxcywh=make_bounding_boxes(
                 format=datapoints.BoundingBoxFormat.CXCYWH, canvas_size=canvas_size, batch_dims=(5,)
             ),
             bounding_boxes_degenerate_xyxy=datapoints.BoundingBoxes(
@@ -813,7 +811,7 @@ def test__transform(self, mocker):

         size = (32, 24)
         image = make_image(size)
-        bboxes = make_bounding_box(format="XYXY", canvas_size=size, batch_dims=(6,))
+        bboxes = make_bounding_boxes(format="XYXY", canvas_size=size, batch_dims=(6,))
         masks = make_detection_mask(size, num_objects=6)

         sample = [image, bboxes, masks]
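
This file makes the two-step rename visible: the old plural make_bounding_boxes was a generator yielding several samples and is now make_multiple_bounding_boxes, freeing the plural name for the former single-sample make_bounding_box. The updated generator wrapper from the hunk above, with its import spelled out:

    from transforms_v2_legacy_utils import make_multiple_bounding_boxes


    def make_vanilla_tensor_bounding_boxes(*args, **kwargs):
        # Unwrap each generated BoundingBoxes datapoint into its plain tensor data.
        for bounding_boxes in make_multiple_bounding_boxes(*args, **kwargs):
            yield bounding_boxes.data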

test/test_transforms_v2_consistency.py  +12 -14
@@ -12,17 +12,7 @@

 import torch
 import torchvision.transforms.v2 as v2_transforms
-from common_utils import (
-    ArgsKwargs,
-    assert_close,
-    assert_equal,
-    make_bounding_box,
-    make_detection_mask,
-    make_image,
-    make_images,
-    make_segmentation_mask,
-    set_rng_seed,
-)
+from common_utils import assert_close, assert_equal, set_rng_seed
 from torch import nn
 from torchvision import datapoints, transforms as legacy_transforms
 from torchvision._utils import sequence_to_str
@@ -32,6 +22,14 @@
 from torchvision.transforms.v2._utils import _get_fill
 from torchvision.transforms.v2.functional import to_pil_image
 from torchvision.transforms.v2.utils import query_size
+from transforms_v2_legacy_utils import (
+    ArgsKwargs,
+    make_bounding_boxes,
+    make_detection_mask,
+    make_image,
+    make_images,
+    make_segmentation_mask,
+)

 DEFAULT_MAKE_IMAGES_KWARGS = dict(color_spaces=["RGB"], extra_dims=[(4,)])

@@ -1090,7 +1088,7 @@ def make_label(extra_dims, categories):

     pil_image = to_pil_image(make_image(size=size, color_space="RGB"))
     target = {
-        "boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
+        "boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
         "labels": make_label(extra_dims=(num_objects,), categories=80),
     }
     if with_mask:
@@ -1100,7 +1098,7 @@ def make_label(extra_dims, categories):

     tensor_image = torch.Tensor(make_image(size=size, color_space="RGB", dtype=torch.float32))
     target = {
-        "boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
+        "boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
         "labels": make_label(extra_dims=(num_objects,), categories=80),
     }
     if with_mask:
@@ -1110,7 +1108,7 @@ def make_label(extra_dims, categories):

     datapoint_image = make_image(size=size, color_space="RGB", dtype=torch.float32)
     target = {
-        "boxes": make_bounding_box(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
+        "boxes": make_bounding_boxes(canvas_size=size, format="XYXY", batch_dims=(num_objects,), dtype=torch.float),
         "labels": make_label(extra_dims=(num_objects,), categories=80),
     }
     if with_mask: