|
19 | 19 | import pathlib
|
20 | 20 | import pickle
|
21 | 21 | from torchvision import datasets
|
| 22 | +import torch |
22 | 23 |
|
23 | 24 |
|
24 | 25 | try:
|
@@ -470,6 +471,84 @@ def test_repr_smoke(self):
|
470 | 471 | self.assertIsInstance(repr(dataset), str)
|
471 | 472 |
|
472 | 473 |
|
class Caltech101TestCase(datasets_utils.ImageDatasetTestCase):
    """Tests for ``torchvision.datasets.Caltech101`` against injected fake data.

    Covers all three ``target_type`` configurations ("category", "annotation",
    and both combined). Requires ``scipy`` because the dataset reads the
    ``.mat`` annotation files via ``scipy.io``.
    """

    DATASET_CLASS = datasets.Caltech101
    # A sample is (PIL image, target); the target is an int category index,
    # an annotation contour array, or a tuple of both when combined.
    FEATURE_TYPES = (PIL.Image.Image, (int, np.ndarray, tuple))

    CONFIGS = datasets_utils.combinations_grid(target_type=("category", "annotation", ["category", "annotation"]))
    REQUIRED_PACKAGES = ("scipy",)

    def inject_fake_data(self, tmpdir, config):
        """Create a minimal on-disk Caltech101 layout and return the sample count."""
        root = pathlib.Path(tmpdir) / "caltech101"
        images = root / "101_ObjectCategories"
        annotations = root / "Annotations"

        # (image folder name, annotation folder name) pairs: the real dataset
        # maps some image categories to differently named annotation folders.
        categories = (("Faces", "Faces_2"), ("helicopter", "helicopter"), ("ying_yang", "ying_yang"))
        num_images_per_category = 2

        for image_category, annotation_category in categories:
            datasets_utils.create_image_folder(
                root=images,
                name=image_category,
                file_name_fn=lambda idx: f"image_{idx + 1:04d}.jpg",
                num_examples=num_images_per_category,
            )
            self._create_annotation_folder(
                root=annotations,
                name=annotation_category,
                file_name_fn=lambda idx: f"annotation_{idx + 1:04d}.mat",
                num_examples=num_images_per_category,
            )

        # This is included in the original archive, but is removed by the dataset. Thus, an empty directory suffices.
        os.makedirs(images / "BACKGROUND_Google")

        return num_images_per_category * len(categories)

    def _create_annotation_folder(self, root, name, file_name_fn, num_examples):
        """Create ``root/name`` and fill it with ``num_examples`` fake .mat files."""
        root = pathlib.Path(root) / name
        os.makedirs(root)

        for idx in range(num_examples):
            self._create_annotation_file(root, file_name_fn(idx))

    def _create_annotation_file(self, root, name):
        """Write a single .mat file with a random ``obj_contour`` of 3-5 points."""
        mdict = dict(obj_contour=torch.rand((2, torch.randint(3, 6, size=())), dtype=torch.float64).numpy())
        datasets_utils.lazy_importer.scipy.io.savemat(str(pathlib.Path(root) / name), mdict)

    def test_combined_targets(self):
        """Combined targets must match the individually requested ones in count and type."""
        target_types = ["category", "annotation"]

        # First collect each target when requested on its own ...
        individual_targets = []
        for target_type in target_types:
            with self.create_dataset(target_type=target_type) as (dataset, _):
                _, target = dataset[0]
                individual_targets.append(target)

        # ... then request them together.
        with self.create_dataset(target_type=target_types) as (dataset, _):
            _, combined_targets = dataset[0]

        actual = len(individual_targets)
        expected = len(combined_targets)
        self.assertEqual(
            actual,
            expected,
            # Fixed garbled message: was "does not match the the number targets".
            f"The number of the returned combined targets does not match the number of targets if requested "
            f"individually: {actual} != {expected}",
        )

        for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets):
            with self.subTest(target_type=target_type):
                actual = type(combined_target)
                expected = type(individual_target)
                self.assertIs(
                    actual,
                    expected,
                    f"Type of the combined target does not match the type of the corresponding individual target: "
                    f"{actual} is not {expected}",
                )

473 | 552 | class Caltech256TestCase(datasets_utils.ImageDatasetTestCase):
|
474 | 553 | DATASET_CLASS = datasets.Caltech256
|
475 | 554 |
|
|
0 commit comments