diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py
index 7bd15dde4c2..d36e68c2b6f 100644
--- a/torchvision/transforms/functional.py
+++ b/torchvision/transforms/functional.py
@@ -346,6 +346,12 @@ def resize(img: Tensor, size: List[int], interpolation: InterpolationMode = Inte
     If the image is torch Tensor, it is expected
     to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
 
+    .. warning::
+        The output image might be different depending on its type: when downsampling, the interpolation of PIL images
+        and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
+        in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
+        types.
+
     Args:
         img (PIL Image or Tensor): Image to be resized.
         size (sequence or int): Desired output size. If size is a sequence like
diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py
index 2c4a10598b4..7c25b000ce8 100644
--- a/torchvision/transforms/transforms.py
+++ b/torchvision/transforms/transforms.py
@@ -229,6 +229,12 @@ class Resize(torch.nn.Module):
     If the image is torch Tensor, it is expected
     to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
 
+    .. warning::
+        The output image might be different depending on its type: when downsampling, the interpolation of PIL images
+        and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
+        in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
+        types.
+
     Args:
         size (sequence or int): Desired output size. If size is a sequence like
            (h, w), output size will be matched to this. If size is an int,
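
A minimal sketch of the behavior this warning documents (not part of the patch): downsampling the same image through the PIL path and the tensor path of `transforms.Resize` gives visibly different results, because only PIL applies antialiasing. The test image and comparison below are illustrative assumptions, not code from this PR.

```python
import torch
from torchvision import transforms
from torchvision.transforms import functional as F

# A high-frequency test image (alternating 0/255 columns) makes the
# antialiasing difference easy to see when downsampling.
img_tensor = ((torch.arange(256 * 256).reshape(1, 256, 256) % 2) * 255).to(torch.uint8)
img_pil = F.to_pil_image(img_tensor)  # same content, PIL mode "L"

resize = transforms.Resize(64)  # downsampling; default bilinear interpolation

# PIL path: PIL resizes with antialiasing, so the stripes average toward gray.
out_pil = F.to_tensor(resize(img_pil)) * 255

# Tensor path: plain bilinear sampling, no antialiasing filter.
out_tensor = resize(img_tensor).to(torch.float32)

# The mean absolute difference is large on this input, showing why mixing
# input types between training and serving can change model behavior.
print((out_pil - out_tensor).abs().mean())
```

Later torchvision releases added an `antialias` argument to the tensor resize path to narrow this gap; that flag does not exist at the time of this patch and is not assumed above.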