23 | 23 | os.path.dirname(os.path.abspath(__file__)), 'assets', 'grace_hopper_517x606.jpg')
24 | 24 |
25 | 25 |
| 26 | +def cycle_over(objs): |
| 27 | + objs = list(objs) |
| 28 | + for idx, obj in enumerate(objs): |
| 29 | + yield obj, objs[:idx] + objs[idx + 1:] |
| 30 | + |
| 31 | + |
| 32 | +def int_dtypes(): |
| 33 | + yield from iter( |
| 34 | + (torch.uint8, torch.int8, torch.int16, torch.short, torch.int32, torch.int, torch.int64, torch.long,) |
| 35 | + ) |
| 36 | + |
| 37 | + |
| 38 | +def float_dtypes(): |
| 39 | + yield from iter((torch.float32, torch.float, torch.float64, torch.double)) |
| 40 | + |
| 41 | + |
26 | 42 | class Tester(unittest.TestCase):
27 | 43 |
28 | 44 | def test_crop(self):
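The `cycle_over` helper added above pairs each element with the list of all remaining elements, so every conversion direction is exercised once per input dtype. A standalone sketch of the iteration it produces, using plain strings in place of dtypes:

```python
def cycle_over(objs):
    objs = list(objs)
    for idx, obj in enumerate(objs):
        yield obj, objs[:idx] + objs[idx + 1:]

# Every element is paired once with all the others:
for current, others in cycle_over(["uint8", "int16", "int32"]):
    print(current, others)
# uint8 ['int16', 'int32']
# int16 ['uint8', 'int32']
# int32 ['uint8', 'int16']
```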
@@ -510,6 +526,100 @@ def test_to_tensor(self):
510 | 526 | output = trans(img)
511 | 527 | self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))
512 | 528 |
| 529 | + def test_convert_image_dtype_float_to_float(self): |
| 530 | + for input_dtype, output_dtypes in cycle_over(float_dtypes()): |
| 531 | + input_image = torch.tensor((0.0, 1.0), dtype=input_dtype) |
| 532 | + for output_dtype in output_dtypes: |
| 533 | + with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): |
| 534 | + transform = transforms.ConvertImageDtype(output_dtype) |
| 535 | + output_image = transform(input_image) |
| 536 | + |
| 537 | + actual_min, actual_max = output_image.tolist() |
| 538 | + desired_min, desired_max = 0.0, 1.0 |
| 539 | + |
| 540 | + self.assertAlmostEqual(actual_min, desired_min) |
| 541 | + self.assertAlmostEqual(actual_max, desired_max) |
| 542 | + |
| 543 | + def test_convert_image_dtype_float_to_int(self): |
| 544 | + for input_dtype in float_dtypes(): |
| 545 | + input_image = torch.tensor((0.0, 1.0), dtype=input_dtype) |
| 546 | + for output_dtype in int_dtypes(): |
| 547 | + with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): |
| 548 | + transform = transforms.ConvertImageDtype(output_dtype) |
| 549 | + |
| 550 | + if (input_dtype == torch.float32 and output_dtype in (torch.int32, torch.int64)) or ( |
| 551 | + input_dtype == torch.float64 and output_dtype == torch.int64 |
| 552 | + ): |
| 553 | + with self.assertRaises(RuntimeError): |
| 554 | + transform(input_image) |
| 555 | + else: |
| 556 | + output_image = transform(input_image) |
| 557 | + |
| 558 | + actual_min, actual_max = output_image.tolist() |
| 559 | + desired_min, desired_max = 0, torch.iinfo(output_dtype).max |
| 560 | + |
| 561 | + self.assertEqual(actual_min, desired_min) |
| 562 | + self.assertEqual(actual_max, desired_max) |
| 563 | + |
| 564 | + def test_convert_image_dtype_int_to_float(self): |
| 565 | + for input_dtype in int_dtypes(): |
| 566 | + input_image = torch.tensor((0, torch.iinfo(input_dtype).max), dtype=input_dtype) |
| 567 | + for output_dtype in float_dtypes(): |
| 568 | + with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): |
| 569 | + transform = transforms.ConvertImageDtype(output_dtype) |
| 570 | + output_image = transform(input_image) |
| 571 | + |
| 572 | + actual_min, actual_max = output_image.tolist() |
| 573 | + desired_min, desired_max = 0.0, 1.0 |
| 574 | + |
| 575 | + self.assertAlmostEqual(actual_min, desired_min) |
| 576 | + self.assertGreaterEqual(actual_min, desired_min) |
| 577 | + self.assertAlmostEqual(actual_max, desired_max) |
| 578 | + self.assertLessEqual(actual_max, desired_max) |
| 579 | + |
| 580 | + def test_convert_image_dtype_int_to_int(self): |
| 581 | + for input_dtype, output_dtypes in cycle_over(int_dtypes()): |
| 582 | + input_max = torch.iinfo(input_dtype).max |
| 583 | + input_image = torch.tensor((0, input_max), dtype=input_dtype) |
| 584 | + for output_dtype in output_dtypes: |
| 585 | + output_max = torch.iinfo(output_dtype).max |
| 586 | + |
| 587 | + with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): |
| 588 | + transform = transforms.ConvertImageDtype(output_dtype) |
| 589 | + output_image = transform(input_image) |
| 590 | + |
| 591 | + actual_min, actual_max = output_image.tolist() |
| 592 | + desired_min, desired_max = 0, output_max |
| 593 | + |
| 594 | + # see https://github.com/pytorch/vision/pull/2078#issuecomment-641036236 for details |
| 595 | + if input_max >= output_max: |
| 596 | + error_term = 0 |
| 597 | + else: |
| 598 | + error_term = 1 - (torch.iinfo(output_dtype).max + 1) // (torch.iinfo(input_dtype).max + 1) |
| 599 | + |
| 600 | + self.assertEqual(actual_min, desired_min) |
| 601 | + self.assertEqual(actual_max, desired_max + error_term) |
| 602 | + |
| 603 | + def test_convert_image_dtype_int_to_int_consistency(self): |
| 604 | + for input_dtype, output_dtypes in cycle_over(int_dtypes()): |
| 605 | + input_max = torch.iinfo(input_dtype).max |
| 606 | + input_image = torch.tensor((0, input_max), dtype=input_dtype) |
| 607 | + for output_dtype in output_dtypes: |
| 608 | + output_max = torch.iinfo(output_dtype).max |
| 609 | + if output_max <= input_max: |
| 610 | + continue |
| 611 | + |
| 612 | + with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): |
| 613 | + transform = transforms.ConvertImageDtype(output_dtype) |
| 614 | + inverse_transform = transforms.ConvertImageDtype(input_dtype) |
| 615 | + output_image = inverse_transform(transform(input_image)) |
| 616 | + |
| 617 | + actual_min, actual_max = output_image.tolist() |
| 618 | + desired_min, desired_max = 0, input_max |
| 619 | + |
| 620 | + self.assertEqual(actual_min, desired_min) |
| 621 | + self.assertEqual(actual_max, desired_max) |
| 622 | + |
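The error_term in test_convert_image_dtype_int_to_int above encodes the expectation (see the linked pytorch/vision#2078 comment) that widening an integer dtype multiplies by the ratio of the two value ranges, so the input maximum does not land exactly on the output maximum. A worked instance of that arithmetic, purely illustrative:

```python
import torch

input_dtype, output_dtype = torch.uint8, torch.int16
input_max = torch.iinfo(input_dtype).max    # 255
output_max = torch.iinfo(output_dtype).max  # 32767

# Widening multiplies by the ratio of the (max + 1) ranges ...
factor = (output_max + 1) // (input_max + 1)  # 32768 // 256 == 128
error_term = 1 - factor                       # -127

# ... so 255 maps to 255 * 128 == 32640, i.e. output_max + error_term.
assert input_max * factor == output_max + error_term
```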
513 | 623 | @unittest.skipIf(accimage is None, 'accimage not available')
514 | 624 | def test_accimage_to_tensor(self):
515 | 625 | trans = transforms.ToTensor()
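The RuntimeError branch in test_convert_image_dtype_float_to_int is consistent with a representability limit: float32 carries a 24-bit significand and float64 a 53-bit one, so the maxima of torch.int32 and torch.int64 cannot be stored exactly in float32, nor torch.int64's maximum in float64. A standalone sketch of that rounding, independent of the transform itself:

```python
import torch

# int32's maximum rounds up when stored as float32, so an exact conversion back is impossible.
int32_max = torch.iinfo(torch.int32).max                    # 2147483647
print(torch.tensor(int32_max, dtype=torch.float32).item())  # 2147483648.0, off by one

# float64 has the same problem with int64's maximum.
int64_max = torch.iinfo(torch.int64).max                    # 9223372036854775807
print(torch.tensor(int64_max, dtype=torch.float64).item())  # 9.223372036854776e+18
```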