[proto] testing API consistency per transform #6511

Closed
139 changes: 139 additions & 0 deletions test/test_prototype_transforms.py
@@ -1644,3 +1644,142 @@ def test__transform(self):
        assert isinstance(ohe_labels, features.OneHotLabel)
        assert ohe_labels.shape == (4, 3)
        assert ohe_labels.categories == labels.categories == categories


class TestAPIConsistency:
    @pytest.mark.parametrize("antialias", [True, False])
    @pytest.mark.parametrize(
        "inpt",
        [
            torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8),
            PIL.Image.new("RGB", (256, 256), 123),
            features.Image(torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8)),
        ],
    )
    def test_random_resized_crop(self, antialias, inpt):
        from torchvision.transforms import transforms as ref_transforms

        size = 224
        t_ref = ref_transforms.RandomResizedCrop(size, antialias=antialias)
        t = transforms.RandomResizedCrop(size, antialias=antialias)

        # Seed identically so both transforms sample the same random crop parameters.
        torch.manual_seed(12)
        expected_output = t_ref(inpt)

        torch.manual_seed(12)
        output = t(inpt)

        # PIL outputs cannot be compared numerically as-is; convert them to tensors first.
        if isinstance(inpt, PIL.Image.Image):
            expected_output = pil_to_tensor(expected_output)
            output = pil_to_tensor(output)

        torch.testing.assert_close(expected_output, output)

    @pytest.mark.parametrize(
        "inpt",
        [
            torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8),
            PIL.Image.new("RGB", (256, 256), 123),
            features.Image(torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8)),
        ],
    )
    def test_randaug(self, inpt):
        from torchvision.transforms import autoaugment as ref_transforms

        interpolation = InterpolationMode.BILINEAR
        t_ref = ref_transforms.RandAugment(interpolation=interpolation)
        t = transforms.RandAugment(interpolation=interpolation)

        torch.manual_seed(12)
        expected_output = t_ref(inpt)

        torch.manual_seed(12)
        output = t(inpt)

        if isinstance(inpt, PIL.Image.Image):
            expected_output = pil_to_tensor(expected_output)
            output = pil_to_tensor(output)

        torch.testing.assert_close(expected_output, output)

    @pytest.mark.parametrize(
        "inpt",
        [
            torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8),
            PIL.Image.new("RGB", (256, 256), 123),
            features.Image(torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8)),
        ],
    )
    def test_trivial_aug(self, inpt):
        from torchvision.transforms import autoaugment as ref_transforms

        interpolation = InterpolationMode.BILINEAR
        t_ref = ref_transforms.TrivialAugmentWide(interpolation=interpolation)
        t = transforms.TrivialAugmentWide(interpolation=interpolation)

        torch.manual_seed(12)
        expected_output = t_ref(inpt)

        torch.manual_seed(12)
        output = t(inpt)

        if isinstance(inpt, PIL.Image.Image):
            expected_output = pil_to_tensor(expected_output)
            output = pil_to_tensor(output)

        torch.testing.assert_close(expected_output, output)

    @pytest.mark.parametrize(
        "inpt",
        [
            torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8),
            PIL.Image.new("RGB", (256, 256), 123),
            features.Image(torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8)),
        ],
    )
    def test_augmix(self, inpt):
        from torchvision.transforms import autoaugment as ref_transforms

        interpolation = InterpolationMode.BILINEAR
        t_ref = ref_transforms.AugMix(interpolation=interpolation)
        t = transforms.AugMix(interpolation=interpolation)

        torch.manual_seed(12)
        expected_output = t_ref(inpt)

        torch.manual_seed(12)
        output = t(inpt)

        if isinstance(inpt, PIL.Image.Image):
            expected_output = pil_to_tensor(expected_output)
            output = pil_to_tensor(output)

        torch.testing.assert_close(expected_output, output)

    @pytest.mark.parametrize(
        "inpt",
        [
            torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8),
            PIL.Image.new("RGB", (256, 256), 123),
            features.Image(torch.randint(0, 256, size=(1, 3, 256, 256), dtype=torch.uint8)),
        ],
    )
    def test_aa(self, inpt):
        from torchvision.transforms import autoaugment as ref_transforms

        interpolation = InterpolationMode.BILINEAR
        aa_policy = ref_transforms.AutoAugmentPolicy("imagenet")
        t_ref = ref_transforms.AutoAugment(aa_policy, interpolation=interpolation)
        t = transforms.AutoAugment(aa_policy, interpolation=interpolation)

        torch.manual_seed(12)
        expected_output = t_ref(inpt)

        torch.manual_seed(12)
        output = t(inpt)

        if isinstance(inpt, PIL.Image.Image):
            expected_output = pil_to_tensor(expected_output)
            output = pil_to_tensor(output)

        torch.testing.assert_close(expected_output, output)
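
Every test in this class follows the same pattern: seed the RNG, run the stable (reference) transform and the prototype transform on the same input, and compare the outputs. As a reading aid only, here is a minimal sketch of that shared check distilled into a standalone helper; the name `assert_consistent` is hypothetical and not part of the PR, and the sketch assumes the same torch/PIL/torchvision imports the test module already uses.

# Hypothetical helper illustrating the shared consistency check (not part of the PR).
import torch
import PIL.Image
from torchvision.transforms.functional import pil_to_tensor


def assert_consistent(t_ref, t, inpt, seed=12):
    # Seed identically so both transforms sample the same random parameters.
    torch.manual_seed(seed)
    expected_output = t_ref(inpt)

    torch.manual_seed(seed)
    output = t(inpt)

    # PIL outputs cannot be compared numerically as-is; convert them to tensors first.
    if isinstance(inpt, PIL.Image.Image):
        expected_output = pil_to_tensor(expected_output)
        output = pil_to_tensor(output)

    torch.testing.assert_close(expected_output, output)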