Commit 211f294
Remove relative imports.
1 parent ad60b50 commit 211f294

File tree

14 files changed: +122 -133 lines changed
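The change is mechanical throughout: type aliases such as ImageType, TensorImageType, FillType, and ImageTypeJIT are re-exported from the features package, and the transforms modules drop relative imports of the private _image and _feature submodules in favor of qualified references through the torchvision.prototype.features namespace they already import. A minimal sketch of the pattern (the transform class here is hypothetical, for illustration only):

    from typing import Any, Dict

    # Before: a relative import reaching into a private sibling module.
    #     from ..features._image import ImageType
    # After: one absolute import; the alias resolves through the public namespace.
    from torchvision.prototype import features


    class ExampleTransform:  # hypothetical class, not part of the commit
        def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> features.ImageType:
            return inpt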
torchvision/prototype/features/__init__.py

Lines changed: 11 additions & 2 deletions

@@ -1,6 +1,15 @@
 from ._bounding_box import BoundingBox, BoundingBoxFormat
 from ._encoded import EncodedData, EncodedImage, EncodedVideo
-from ._feature import _Feature, is_simple_tensor
-from ._image import ColorSpace, Image
+from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor
+from ._image import (
+    ColorSpace,
+    Image,
+    ImageType,
+    ImageTypeJIT,
+    LegacyImageType,
+    LegacyImageTypeJIT,
+    TensorImageType,
+    TensorImageTypeJIT,
+)
 from ._label import Label, OneHotLabel
 from ._mask import Mask

torchvision/prototype/transforms/_augment.py

Lines changed: 11 additions & 8 deletions

@@ -10,8 +10,6 @@
 from torchvision.prototype import features
 from torchvision.prototype.transforms import functional as F, InterpolationMode
 
-from ..features._image import ImageType, TensorImageType
-
 from ._transform import _RandomApplyTransform
 from ._utils import has_any, query_chw
 
@@ -94,7 +92,7 @@ def _get_params(self, sample: Any) -> Dict[str, Any]:
 
         return dict(i=i, j=j, h=h, w=w, v=v)
 
-    def _transform(self, inpt: ImageType, params: Dict[str, Any]) -> ImageType:
+    def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> features.ImageType:
         if params["v"] is not None:
             inpt = F.erase(inpt, **params, inplace=self.inplace)
 
@@ -205,15 +203,15 @@ def __init__(
 
     def _copy_paste(
         self,
-        image: TensorImageType,
+        image: features.TensorImageType,
         target: Dict[str, Any],
-        paste_image: TensorImageType,
+        paste_image: features.TensorImageType,
         paste_target: Dict[str, Any],
         random_selection: torch.Tensor,
         blending: bool,
         resize_interpolation: F.InterpolationMode,
         antialias: Optional[bool],
-    ) -> Tuple[TensorImageType, Dict[str, Any]]:
+    ) -> Tuple[features.TensorImageType, Dict[str, Any]]:
 
         paste_masks = paste_target["masks"].new_like(paste_target["masks"], paste_target["masks"][random_selection])
         paste_boxes = paste_target["boxes"].new_like(paste_target["boxes"], paste_target["boxes"][random_selection])
@@ -280,7 +278,9 @@ def _copy_paste(
 
         return image, out_target
 
-    def _extract_image_targets(self, flat_sample: List[Any]) -> Tuple[List[TensorImageType], List[Dict[str, Any]]]:
+    def _extract_image_targets(
+        self, flat_sample: List[Any]
+    ) -> Tuple[List[features.TensorImageType], List[Dict[str, Any]]]:
         # fetch all images, bboxes, masks and labels from unstructured input
         # with List[image], List[BoundingBox], List[Mask], List[Label]
         images, bboxes, masks, labels = [], [], [], []
@@ -309,7 +309,10 @@ def _extract_image_targets(self, flat_sample: List[Any]) -> Tuple[List[TensorIma
         return images, targets
 
     def _insert_outputs(
-        self, flat_sample: List[Any], output_images: List[TensorImageType], output_targets: List[Dict[str, Any]]
+        self,
+        flat_sample: List[Any],
+        output_images: List[features.TensorImageType],
+        output_targets: List[Dict[str, Any]],
     ) -> None:
         c0, c1, c2, c3 = 0, 0, 0, 0
         for i, obj in enumerate(flat_sample):

torchvision/prototype/transforms/_auto_augment.py

Lines changed: 9 additions & 12 deletions

@@ -9,9 +9,6 @@
 from torchvision.prototype.transforms import AutoAugmentPolicy, functional as F, InterpolationMode, Transform
 from torchvision.prototype.transforms.functional._meta import get_chw
 
-from ..features._feature import FillType
-from ..features._image import ImageType
-
 from ._utils import _isinstance, _setup_fill_arg
 
 K = TypeVar("K")
@@ -23,7 +20,7 @@ def __init__(
         self,
         *,
         interpolation: InterpolationMode = InterpolationMode.NEAREST,
-        fill: Union[FillType, Dict[Type, FillType]] = None,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = None,
     ) -> None:
         super().__init__()
         self.interpolation = interpolation
@@ -38,7 +35,7 @@ def _extract_image(
         self,
         sample: Any,
         unsupported_types: Tuple[Type, ...] = (features.BoundingBox, features.Mask),
-    ) -> Tuple[int, ImageType]:
+    ) -> Tuple[int, features.ImageType]:
         sample_flat, _ = tree_flatten(sample)
         images = []
         for id, inpt in enumerate(sample_flat):
@@ -62,12 +59,12 @@ def _put_into_sample(self, sample: Any, id: int, item: Any) -> Any:
 
     def _apply_image_transform(
         self,
-        image: ImageType,
+        image: features.ImageType,
         transform_id: str,
         magnitude: float,
         interpolation: InterpolationMode,
-        fill: Dict[Type, FillType],
-    ) -> ImageType:
+        fill: Dict[Type, features.FillType],
+    ) -> features.ImageType:
         fill_ = fill[type(image)]
         fill_ = F._geometry._convert_fill_arg(fill_)
 
@@ -180,7 +177,7 @@ def __init__(
         self,
         policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET,
         interpolation: InterpolationMode = InterpolationMode.NEAREST,
-        fill: Union[FillType, Dict[Type, FillType]] = None,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = None,
     ) -> None:
         super().__init__(interpolation=interpolation, fill=fill)
         self.policy = policy
@@ -340,7 +337,7 @@ def __init__(
         magnitude: int = 9,
         num_magnitude_bins: int = 31,
         interpolation: InterpolationMode = InterpolationMode.NEAREST,
-        fill: Union[FillType, Dict[Type, FillType]] = None,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = None,
     ) -> None:
         super().__init__(interpolation=interpolation, fill=fill)
         self.num_ops = num_ops
@@ -396,7 +393,7 @@ def __init__(
         self,
         num_magnitude_bins: int = 31,
         interpolation: InterpolationMode = InterpolationMode.NEAREST,
-        fill: Union[FillType, Dict[Type, FillType]] = None,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = None,
     ):
         super().__init__(interpolation=interpolation, fill=fill)
         self.num_magnitude_bins = num_magnitude_bins
@@ -456,7 +453,7 @@ def __init__(
         alpha: float = 1.0,
         all_ops: bool = True,
         interpolation: InterpolationMode = InterpolationMode.BILINEAR,
-        fill: Union[FillType, Dict[Type, FillType]] = None,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = None,
     ) -> None:
         super().__init__(interpolation=interpolation, fill=fill)
         self._PARAMETER_MAX = 10

torchvision/prototype/transforms/_color.py

Lines changed: 2 additions & 4 deletions

@@ -6,8 +6,6 @@
 from torchvision.prototype import features
 from torchvision.prototype.transforms import functional as F, Transform
 
-from ..features._image import ImageType
-
 from ._transform import _RandomApplyTransform
 from ._utils import query_chw
 
@@ -112,7 +110,7 @@ def _get_params(self, sample: Any) -> Dict[str, Any]:
             channel_permutation=torch.randperm(num_channels) if torch.rand(()) < self.p else None,
         )
 
-    def _permute_channels(self, inpt: ImageType, permutation: torch.Tensor) -> ImageType:
+    def _permute_channels(self, inpt: features.ImageType, permutation: torch.Tensor) -> features.ImageType:
         if isinstance(inpt, PIL.Image.Image):
             inpt = F.pil_to_tensor(inpt)
 
@@ -125,7 +123,7 @@ def _permute_channels(self, inpt: ImageType, permutation: torch.Tensor) -> Image
 
         return output
 
-    def _transform(self, inpt: ImageType, params: Dict[str, Any]) -> ImageType:
+    def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> features.ImageType:
         if params["brightness"]:
             inpt = F.adjust_brightness(
                 inpt, brightness_factor=ColorJitter._generate_value(self.brightness[0], self.brightness[1])

torchvision/prototype/transforms/_deprecated.py

Lines changed: 2 additions & 4 deletions

@@ -10,8 +10,6 @@
 from torchvision.transforms import functional as _F
 from typing_extensions import Literal
 
-from ..features._image import ImageType
-
 from ._transform import _RandomApplyTransform
 from ._utils import query_chw
 
@@ -54,7 +52,7 @@ def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None:
         super().__init__()
         self.num_output_channels = num_output_channels
 
-    def _transform(self, inpt: ImageType, params: Dict[str, Any]) -> ImageType:
+    def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> features.ImageType:
         output = _F.rgb_to_grayscale(inpt, num_output_channels=self.num_output_channels)
         if isinstance(inpt, features.Image):
             output = features.Image.new_like(inpt, output, color_space=features.ColorSpace.GRAY)
@@ -83,7 +81,7 @@ def _get_params(self, sample: Any) -> Dict[str, Any]:
         num_input_channels, _, _ = query_chw(sample)
         return dict(num_input_channels=num_input_channels)
 
-    def _transform(self, inpt: ImageType, params: Dict[str, Any]) -> ImageType:
+    def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> features.ImageType:
         output = _F.rgb_to_grayscale(inpt, num_output_channels=params["num_input_channels"])
         if isinstance(inpt, features.Image):
             output = features.Image.new_like(inpt, output, color_space=features.ColorSpace.GRAY)

torchvision/prototype/transforms/_geometry.py

Lines changed: 11 additions & 14 deletions

@@ -12,9 +12,6 @@
 
 from typing_extensions import Literal
 
-from ..features._feature import FillType
-from ..features._image import ImageType
-
 from ._transform import _RandomApplyTransform
 from ._utils import (
     _check_padding_arg,
@@ -181,8 +178,8 @@ def __init__(self, size: Union[int, Sequence[int]]) -> None:
         self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
 
     def _transform(
-        self, inpt: ImageType, params: Dict[str, Any]
-    ) -> Tuple[ImageType, ImageType, ImageType, ImageType, ImageType]:
+        self, inpt: features.ImageType, params: Dict[str, Any]
+    ) -> Tuple[features.ImageType, features.ImageType, features.ImageType, features.ImageType, features.ImageType]:
         return F.five_crop(inpt, self.size)
 
     def forward(self, *inputs: Any) -> Any:
@@ -203,7 +200,7 @@ def __init__(self, size: Union[int, Sequence[int]], vertical_flip: bool = False)
         self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
         self.vertical_flip = vertical_flip
 
-    def _transform(self, inpt: ImageType, params: Dict[str, Any]) -> List[ImageType]:
+    def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> List[features.ImageType]:
         return F.ten_crop(inpt, self.size, vertical_flip=self.vertical_flip)
 
     def forward(self, *inputs: Any) -> Any:
@@ -216,7 +213,7 @@ class Pad(Transform):
     def __init__(
         self,
         padding: Union[int, Sequence[int]],
-        fill: Union[FillType, Dict[Type, FillType]] = 0,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,
         padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
     ) -> None:
         super().__init__()
@@ -243,7 +240,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
 class RandomZoomOut(_RandomApplyTransform):
     def __init__(
         self,
-        fill: Union[FillType, Dict[Type, FillType]] = 0,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,
         side_range: Sequence[float] = (1.0, 4.0),
         p: float = 0.5,
     ) -> None:
@@ -285,7 +282,7 @@ def __init__(
         degrees: Union[numbers.Number, Sequence],
         interpolation: InterpolationMode = InterpolationMode.NEAREST,
         expand: bool = False,
-        fill: Union[FillType, Dict[Type, FillType]] = 0,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,
         center: Optional[List[float]] = None,
     ) -> None:
         super().__init__()
@@ -325,7 +322,7 @@ def __init__(
         scale: Optional[Sequence[float]] = None,
         shear: Optional[Union[float, Sequence[float]]] = None,
         interpolation: InterpolationMode = InterpolationMode.NEAREST,
-        fill: Union[FillType, Dict[Type, FillType]] = 0,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,
         center: Optional[List[float]] = None,
     ) -> None:
         super().__init__()
@@ -404,7 +401,7 @@ def __init__(
         size: Union[int, Sequence[int]],
         padding: Optional[Union[int, Sequence[int]]] = None,
         pad_if_needed: bool = False,
-        fill: Union[FillType, Dict[Type, FillType]] = 0,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,
         padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
     ) -> None:
         super().__init__()
@@ -494,7 +491,7 @@ class RandomPerspective(_RandomApplyTransform):
     def __init__(
         self,
         distortion_scale: float = 0.5,
-        fill: Union[FillType, Dict[Type, FillType]] = 0,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,
         interpolation: InterpolationMode = InterpolationMode.BILINEAR,
         p: float = 0.5,
     ) -> None:
@@ -570,7 +567,7 @@ def __init__(
         self,
         alpha: Union[float, Sequence[float]] = 50.0,
         sigma: Union[float, Sequence[float]] = 5.0,
-        fill: Union[FillType, Dict[Type, FillType]] = 0,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,
         interpolation: InterpolationMode = InterpolationMode.BILINEAR,
     ) -> None:
         super().__init__()
@@ -783,7 +780,7 @@ class FixedSizeCrop(Transform):
     def __init__(
         self,
         size: Union[int, Sequence[int]],
-        fill: Union[FillType, Dict[Type, FillType]] = 0,
+        fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,
         padding_mode: str = "constant",
     ) -> None:
         super().__init__()

torchvision/prototype/transforms/_meta.py

Lines changed: 2 additions & 4 deletions

@@ -6,8 +6,6 @@
 from torchvision.prototype import features
 from torchvision.prototype.transforms import functional as F, Transform
 
-from ..features._image import ImageType, TensorImageType
-
 
 class ConvertBoundingBoxFormat(Transform):
     _transformed_types = (features.BoundingBox,)
@@ -30,7 +28,7 @@ def __init__(self, dtype: torch.dtype = torch.float32) -> None:
         super().__init__()
         self.dtype = dtype
 
-    def _transform(self, inpt: TensorImageType, params: Dict[str, Any]) -> TensorImageType:
+    def _transform(self, inpt: features.TensorImageType, params: Dict[str, Any]) -> features.TensorImageType:
         output = F.convert_image_dtype(inpt, dtype=self.dtype)
         return output if features.is_simple_tensor(inpt) else features.Image.new_like(inpt, output, dtype=self.dtype)  # type: ignore[arg-type]
 
@@ -56,7 +54,7 @@ def __init__(
 
         self.copy = copy
 
-    def _transform(self, inpt: ImageType, params: Dict[str, Any]) -> ImageType:
+    def _transform(self, inpt: features.ImageType, params: Dict[str, Any]) -> features.ImageType:
         return F.convert_color_space(
             inpt, color_space=self.color_space, old_color_space=self.old_color_space, copy=self.copy
         )

torchvision/prototype/transforms/_misc.py

Lines changed: 2 additions & 4 deletions

@@ -8,8 +8,6 @@
 from torchvision.prototype import features
 from torchvision.prototype.transforms import functional as F, Transform
 
-from ..features._image import TensorImageType
-
 from ._utils import _setup_size, has_any, query_bounding_box
 
 
@@ -70,7 +68,7 @@ def forward(self, *inputs: Any) -> Any:
 
         return super().forward(*inputs)
 
-    def _transform(self, inpt: TensorImageType, params: Dict[str, Any]) -> torch.Tensor:
+    def _transform(self, inpt: features.TensorImageType, params: Dict[str, Any]) -> torch.Tensor:
         # Image instance after linear transformation is not Image anymore due to unknown data range
         # Thus we will return Tensor for input Image
 
@@ -103,7 +101,7 @@ def __init__(self, mean: Sequence[float], std: Sequence[float], inplace: bool =
         self.std = list(std)
         self.inplace = inplace
 
-    def _transform(self, inpt: TensorImageType, params: Dict[str, Any]) -> torch.Tensor:
+    def _transform(self, inpt: features.TensorImageType, params: Dict[str, Any]) -> torch.Tensor:
         return F.normalize(inpt, mean=self.mean, std=self.std, inplace=self.inplace)
 
     def forward(self, *inpts: Any) -> Any:

torchvision/prototype/transforms/functional/_augment.py

Lines changed: 2 additions & 4 deletions

@@ -5,8 +5,6 @@
 from torchvision.transforms import functional_tensor as _FT
 from torchvision.transforms.functional import pil_to_tensor, to_pil_image
 
-from ...features._image import ImageTypeJIT
-
 erase_image_tensor = _FT.erase
 
 
@@ -20,14 +18,14 @@ def erase_image_pil(
 
 
 def erase(
-    inpt: ImageTypeJIT,
+    inpt: features.ImageTypeJIT,
     i: int,
     j: int,
     h: int,
     w: int,
     v: torch.Tensor,
     inplace: bool = False,
-) -> ImageTypeJIT:
+) -> features.ImageTypeJIT:
     if isinstance(inpt, torch.Tensor):
         output = erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
         if not torch.jit.is_scripting() and isinstance(inpt, features.Image):
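For reference, a minimal usage sketch of the erase kernel after this change (argument values are arbitrary; this exercises the plain-tensor branch shown in the hunk above):

    import torch

    from torchvision.prototype.transforms import functional as F

    img = torch.rand(3, 32, 32)   # simple CHW tensor
    patch = torch.zeros(3, 8, 8)  # replacement values for the erased region
    out = F.erase(img, i=4, j=4, h=8, w=8, v=patch)
    assert isinstance(out, torch.Tensor)  # simple tensor in, simple tensor out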
