From 844cc698f3415da11b7c244797fb981bf1f7fd31 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Fri, 24 Feb 2023 15:20:06 +0000 Subject: [PATCH 1/2] Change betastatus doc warning --- docs/source/beta_status.py | 15 ++++++-- docs/source/transforms.rst | 8 ++--- torchvision/transforms/v2/_augment.py | 2 +- torchvision/transforms/v2/_auto_augment.py | 8 ++--- torchvision/transforms/v2/_color.py | 20 +++++------ torchvision/transforms/v2/_container.py | 8 ++--- torchvision/transforms/v2/_deprecated.py | 2 +- torchvision/transforms/v2/_geometry.py | 36 +++++++++---------- torchvision/transforms/v2/_meta.py | 6 ++-- torchvision/transforms/v2/_misc.py | 12 +++---- torchvision/transforms/v2/_temporal.py | 2 +- torchvision/transforms/v2/_type_conversion.py | 6 ++-- 12 files changed, 68 insertions(+), 57 deletions(-) diff --git a/docs/source/beta_status.py b/docs/source/beta_status.py index 925894df5c5..4a0fdc72c0f 100644 --- a/docs/source/beta_status.py +++ b/docs/source/beta_status.py @@ -4,15 +4,26 @@ class BetaStatus(Directive): has_content = True + text = "The {api_name} is in Beta stage, and backward compatibility is not guaranteed." def run(self): - api_name = " ".join(self.content) - text = f"The {api_name} is in Beta stage, and backward compatibility is not guaranteed." + text = self.text.format(api_name=" ".join(self.content)) return [nodes.warning("", nodes.paragraph("", "", nodes.Text(text)))] +class V2BetaStatus(BetaStatus): + text = ( + "The {api_name} is in Beta stage, and while we do not expect major breaking changes, " + "some APIs may still change according to user feedback. Please submit any feedback you may have " + "in this issue: https://github.com/pytorch/vision/issues/6753, and you can also check " + "out https://github.com/pytorch/vision/issues/7319 to learn " + "more about the APIs that we suspect might involve future changes." 
+ ) + + def setup(app): app.add_directive("betastatus", BetaStatus) + app.add_directive("v2betastatus", V2BetaStatus) return { "version": "0.1", "parallel_read_safe": True, diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 22e0889a480..0d6961bbe79 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -16,10 +16,10 @@ Transforming and augmenting images :ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`. Note that these transforms are still BETA, and while we don't expect major breaking changes in the future, some APIs may still change according to user - feedback. Please submit any feedback you may have in - https://github.com/pytorch/vision/issues/6753, and you can also check out - https://github.com/pytorch/vision/issues/7319 to learn more about the APIs - that we suspect might involve future changes. + feedback. Please submit any feedback you may have `here + `_, and you can also check + out `this issue `_ to learn + more about the APIs that we suspect might involve future changes. Transforms are common image transformations available in the ``torchvision.transforms`` module. They can be chained together using diff --git a/torchvision/transforms/v2/_augment.py b/torchvision/transforms/v2/_augment.py index 0df7e0f249a..937e3508a87 100644 --- a/torchvision/transforms/v2/_augment.py +++ b/torchvision/transforms/v2/_augment.py @@ -15,7 +15,7 @@ class RandomErasing(_RandomApplyTransform): """[BETA] Randomly select a rectangle region in the input image or video and erase its pixels. - .. betastatus:: RandomErasing transform + .. v2betastatus:: RandomErasing transform This transform does not support PIL Image. 'Random Erasing Data Augmentation' by Zhong et al. 
See https://arxiv.org/abs/1708.04896 diff --git a/torchvision/transforms/v2/_auto_augment.py b/torchvision/transforms/v2/_auto_augment.py index 2cd88c1a74d..34c0ced43d2 100644 --- a/torchvision/transforms/v2/_auto_augment.py +++ b/torchvision/transforms/v2/_auto_augment.py @@ -165,7 +165,7 @@ class AutoAugment(_AutoAugmentBase): r"""[BETA] AutoAugment data augmentation method based on `"AutoAugment: Learning Augmentation Strategies from Data" `_. - .. betastatus:: AutoAugment transform + .. v2betastatus:: AutoAugment transform This transformation works on images and videos only. @@ -342,7 +342,7 @@ class RandAugment(_AutoAugmentBase): `"RandAugment: Practical automated data augmentation with a reduced search space" `_. - .. betastatus:: RandAugment transform + .. v2betastatus:: RandAugment transform This transformation works on images and videos only. @@ -425,7 +425,7 @@ class TrivialAugmentWide(_AutoAugmentBase): r"""[BETA] Dataset-independent data-augmentation with TrivialAugment Wide, as described in `"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" `_. - .. betastatus:: TrivialAugmentWide transform + .. v2betastatus:: TrivialAugmentWide transform This transformation works on images and videos only. @@ -496,7 +496,7 @@ class AugMix(_AutoAugmentBase): r"""[BETA] AugMix data augmentation method based on `"AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty" `_. - .. betastatus:: AugMix transform + .. v2betastatus:: AugMix transform This transformation works on images and videos only. diff --git a/torchvision/transforms/v2/_color.py b/torchvision/transforms/v2/_color.py index 237e8d6181a..4ad534c988b 100644 --- a/torchvision/transforms/v2/_color.py +++ b/torchvision/transforms/v2/_color.py @@ -13,7 +13,7 @@ class Grayscale(Transform): """[BETA] Convert images or videos to grayscale. - .. betastatus:: Grayscale transform + .. 
v2betastatus:: Grayscale transform If the input is a :class:`torch.Tensor`, it is expected to have [..., 3 or 1, H, W] shape, where ... means an arbitrary number of leading dimensions @@ -42,7 +42,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomGrayscale(_RandomApplyTransform): """[BETA] Randomly convert image or videos to grayscale with a probability of p (default 0.1). - .. betastatus:: RandomGrayscale transform + .. v2betastatus:: RandomGrayscale transform If the input is a :class:`torch.Tensor`, it is expected to have [..., 3 or 1, H, W] shape, where ... means an arbitrary number of leading dimensions @@ -76,7 +76,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class ColorJitter(Transform): """[BETA] Randomly change the brightness, contrast, saturation and hue of an image or video. - .. betastatus:: ColorJitter transform + .. v2betastatus:: ColorJitter transform If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. @@ -182,7 +182,7 @@ class RandomPhotometricDistort(Transform): """[BETA] Randomly distorts the image or video as used in `SSD: Single Shot MultiBox Detector `_. - .. betastatus:: RandomPhotometricDistort transform + .. v2betastatus:: RandomPhotometricDistort transform This transform relies on :class:`~torchvision.transforms.v2.ColorJitter` under the hood to adjust the contrast, saturation, hue, brightness, and also @@ -282,7 +282,7 @@ def _transform( class RandomEqualize(_RandomApplyTransform): """[BETA] Equalize the histogram of the given image or video with a given probability. - .. betastatus:: RandomEqualize transform + .. v2betastatus:: RandomEqualize transform If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. 
@@ -301,7 +301,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomInvert(_RandomApplyTransform): """[BETA] Inverts the colors of the given image or video with a given probability. - .. betastatus:: RandomInvert transform + .. v2betastatus:: RandomInvert transform If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format, where ... means it can have an arbitrary number of leading dimensions. @@ -321,7 +321,7 @@ class RandomPosterize(_RandomApplyTransform): """[BETA] Posterize the image or video with a given probability by reducing the number of bits for each color channel. - .. betastatus:: RandomPosterize transform + .. v2betastatus:: RandomPosterize transform If the input is a :class:`torch.Tensor`, it should be of type torch.uint8, and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. @@ -346,7 +346,7 @@ class RandomSolarize(_RandomApplyTransform): """[BETA] Solarize the image or video with a given probability by inverting all pixel values above a threshold. - .. betastatus:: RandomSolarize transform + .. v2betastatus:: RandomSolarize transform If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format, where ... means it can have an arbitrary number of leading dimensions. @@ -370,7 +370,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomAutocontrast(_RandomApplyTransform): """[BETA] Autocontrast the pixels of the given image or video with a given probability. - .. betastatus:: RandomAutocontrast transform + .. v2betastatus:: RandomAutocontrast transform If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. @@ -389,7 +389,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomAdjustSharpness(_RandomApplyTransform): """[BETA] Adjust the sharpness of the image or video with a given probability. - .. 
betastatus:: RandomAdjustSharpness transform + .. v2betastatus:: RandomAdjustSharpness transform If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. diff --git a/torchvision/transforms/v2/_container.py b/torchvision/transforms/v2/_container.py index 2f34a58902e..fffef4157bd 100644 --- a/torchvision/transforms/v2/_container.py +++ b/torchvision/transforms/v2/_container.py @@ -10,7 +10,7 @@ class Compose(Transform): """[BETA] Composes several transforms together. - .. betastatus:: Compose transform + .. v2betastatus:: Compose transform This transform does not support torchscript. Please, see the note below. @@ -61,7 +61,7 @@ def extra_repr(self) -> str: class RandomApply(Transform): """[BETA] Apply randomly a list of transformations with a given probability. - .. betastatus:: RandomApply transform + .. v2betastatus:: RandomApply transform .. note:: In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of @@ -116,7 +116,7 @@ def extra_repr(self) -> str: class RandomChoice(Transform): """[BETA] Apply single transformation randomly picked from a list. - .. betastatus:: RandomChoice transform + .. v2betastatus:: RandomChoice transform This transform does not support torchscript. @@ -155,7 +155,7 @@ def forward(self, *inputs: Any) -> Any: class RandomOrder(Transform): """[BETA] Apply a list of transformations in a random order. - .. betastatus:: RandomOrder transform + .. v2betastatus:: RandomOrder transform This transform does not support torchscript. diff --git a/torchvision/transforms/v2/_deprecated.py b/torchvision/transforms/v2/_deprecated.py index b5544ecfd49..e900e853d2b 100644 --- a/torchvision/transforms/v2/_deprecated.py +++ b/torchvision/transforms/v2/_deprecated.py @@ -12,7 +12,7 @@ class ToTensor(Transform): """[BETA] Convert a PIL Image or ndarray to tensor and scale the values accordingly. - .. 
betastatus:: ToTensor transform + .. v2betastatus:: ToTensor transform .. warning:: :class:`v2.ToTensor` is deprecated and will be removed in a future release. diff --git a/torchvision/transforms/v2/_geometry.py b/torchvision/transforms/v2/_geometry.py index b2618bb892f..59791c30b9d 100644 --- a/torchvision/transforms/v2/_geometry.py +++ b/torchvision/transforms/v2/_geometry.py @@ -28,7 +28,7 @@ class RandomHorizontalFlip(_RandomApplyTransform): """[BETA] Horizontally flip the input with a given probability. - .. betastatus:: RandomHorizontalFlip transform + .. v2betastatus:: RandomHorizontalFlip transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -48,7 +48,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomVerticalFlip(_RandomApplyTransform): """[BETA] Vertically flip the input with a given probability. - .. betastatus:: RandomVerticalFlip transform + .. v2betastatus:: RandomVerticalFlip transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -68,7 +68,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class Resize(Transform): """[BETA] Resize the input to the given size. - .. betastatus:: Resize transform + .. v2betastatus:: Resize transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -162,7 +162,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class CenterCrop(Transform): """[BETA] Crop the input at the center. - .. betastatus:: CenterCrop transform + .. 
v2betastatus:: CenterCrop transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -190,7 +190,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomResizedCrop(Transform): """[BETA] Crop a random portion of the input and resize it to a given size. - .. betastatus:: RandomResizedCrop transform + .. v2betastatus:: RandomResizedCrop transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -316,7 +316,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class FiveCrop(Transform): """[BETA] Crop the image or video into four corners and the central crop. - .. betastatus:: FiveCrop transform + .. v2betastatus:: FiveCrop transform If the input is a :class:`torch.Tensor` or a :class:`~torchvision.datapoints.Image` or a :class:`~torchvision.datapoints.Video` it can have arbitrary number of leading batch dimensions. @@ -379,7 +379,7 @@ class TenCrop(Transform): """[BETA] Crop the image or video into four corners and the central crop plus the flipped version of these (horizontal flipping is used by default). - .. betastatus:: TenCrop transform + .. v2betastatus:: TenCrop transform If the input is a :class:`torch.Tensor` or a :class:`~torchvision.datapoints.Image` or a :class:`~torchvision.datapoints.Video` it can have arbitrary number of leading batch dimensions. @@ -437,7 +437,7 @@ def _transform( class Pad(Transform): """[BETA] Pad the input on all sides with the given "pad" value. - .. betastatus:: Pad transform + .. v2betastatus:: Pad transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. 
:class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -512,7 +512,7 @@ class RandomZoomOut(_RandomApplyTransform): """[BETA] "Zoom out" transformation from `"SSD: Single Shot MultiBox Detector" `_. - .. betastatus:: RandomZoomOut transform + .. v2betastatus:: RandomZoomOut transform This transformation randomly pads images, videos, bounding boxes and masks creating a zoom out effect. Output spatial size is randomly sampled from original size up to a maximum size configured @@ -581,7 +581,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomRotation(Transform): """[BETA] Rotate the input by angle. - .. betastatus:: RandomRotation transform + .. v2betastatus:: RandomRotation transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -654,7 +654,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomAffine(Transform): """[BETA] Random affine transformation the input keeping center invariant. - .. betastatus:: RandomAffine transform + .. v2betastatus:: RandomAffine transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -775,7 +775,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomCrop(Transform): """[BETA] Crop the input at a random location. - .. betastatus:: RandomCrop transform + .. v2betastatus:: RandomCrop transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) 
@@ -930,7 +930,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomPerspective(_RandomApplyTransform): """[BETA] Perform a random perspective transformation of the input with a given probability. - .. betastatus:: RandomPerspective transform + .. v2betastatus:: RandomPerspective transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -1016,7 +1016,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class ElasticTransform(Transform): """[BETA] Transform the input with elastic transformations. - .. betastatus:: RandomPerspective transform + .. v2betastatus:: ElasticTransform transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -1108,7 +1108,7 @@ class RandomIoUCrop(Transform): """[BETA] Random IoU crop transformation from `"SSD: Single Shot MultiBox Detector" `_. - .. betastatus:: RandomIoUCrop transform + .. v2betastatus:: RandomIoUCrop transform This transformation requires an image or video data and ``datapoints.BoundingBox`` in the input. @@ -1232,7 +1232,7 @@ class ScaleJitter(Transform): """[BETA] Perform Large Scale Jitter on the input according to `"Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" `_. - .. betastatus:: ScaleJitter transform + .. v2betastatus:: ScaleJitter transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -1298,7 +1298,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomShortestSize(Transform): """[BETA] Randomly resize the input. - .. 
betastatus:: RandomShortestSize transform + .. v2betastatus:: RandomShortestSize transform If the input is a :class:`torch.Tensor` or a ``Datapoint`` (e.g. :class:`~torchvision.datapoints.Image`, :class:`~torchvision.datapoints.Video`, :class:`~torchvision.datapoints.BoundingBox` etc.) @@ -1366,7 +1366,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomResize(Transform): """[BETA] Randomly resize the input. - .. betastatus:: RandomResize transform + .. v2betastatus:: RandomResize transform This transformation can be used together with ``RandomCrop`` as data augmentations to train models on image segmentation task. diff --git a/torchvision/transforms/v2/_meta.py b/torchvision/transforms/v2/_meta.py index 7f28e25c602..b7e2a42259f 100644 --- a/torchvision/transforms/v2/_meta.py +++ b/torchvision/transforms/v2/_meta.py @@ -11,7 +11,7 @@ class ConvertBoundingBoxFormat(Transform): """[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY". - .. betastatus:: ConvertBoundingBoxFormat transform + .. v2betastatus:: ConvertBoundingBoxFormat transform Args: format (str or datapoints.BoundingBoxFormat): output bounding box format. @@ -34,7 +34,7 @@ def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> da class ConvertDtype(Transform): """[BETA] Convert input image or video to the given ``dtype`` and scale the values accordingly. - .. betastatus:: ConvertDtype transform + .. v2betastatus:: ConvertDtype transform This function does not support PIL Image. @@ -77,7 +77,7 @@ class ClampBoundingBox(Transform): The clamping is done according to the bounding boxes' ``spatial_size`` meta-data. - .. betastatus:: ClampBoundingBox transform + .. 
v2betastatus:: ClampBoundingBox transform """ diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index 40d57856292..c9b9025ebd9 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -24,7 +24,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class Lambda(Transform): """[BETA] Apply a user-defined function as a transform. - .. betastatus:: Lambda transform + .. v2betastatus:: Lambda transform This transform does not support torchscript. @@ -55,7 +55,7 @@ def extra_repr(self) -> str: class LinearTransformation(Transform): """[BETA] Transform a tensor image or video with a square transformation matrix and a mean_vector computed offline. - .. betastatus:: LinearTransformation transform + .. v2betastatus:: LinearTransformation transform This transform does not support PIL Image. Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and @@ -138,7 +138,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class Normalize(Transform): """[BETA] Normalize a tensor image or video with mean and standard deviation. - .. betastatus:: Normalize transform + .. v2betastatus:: Normalize transform This transform does not support PIL Image. Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n`` @@ -178,7 +178,7 @@ def _transform( class GaussianBlur(Transform): """[BETA] Blurs image with randomly chosen Gaussian blur. - .. betastatus:: GausssianBlur transform + .. v2betastatus:: GaussianBlur transform If the input is a Tensor, it is expected to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions. @@ -225,7 +225,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class ToDtype(Transform): """[BETA] Converts the input to a specific dtype - this does not scale values. - .. 
v2betastatus:: ToDtype transform Args: dtype (``torch.dtype`` or dict of ``Datapoint`` -> ``torch.dtype``): The dtype to convert to. @@ -258,7 +258,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class SanitizeBoundingBox(Transform): """[BETA] Remove degenerate/invalid bounding boxes and their corresponding labels and masks. - .. betastatus:: SanitizeBoundingBox transform + .. v2betastatus:: SanitizeBoundingBox transform This transform removes bounding boxes and their associated labels/masks that: diff --git a/torchvision/transforms/v2/_temporal.py b/torchvision/transforms/v2/_temporal.py index ad7526bc4a4..df4ad66643a 100644 --- a/torchvision/transforms/v2/_temporal.py +++ b/torchvision/transforms/v2/_temporal.py @@ -9,7 +9,7 @@ class UniformTemporalSubsample(Transform): """[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video. - .. betastatus:: UniformTemporalSubsample transform + .. v2betastatus:: UniformTemporalSubsample transform Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension. diff --git a/torchvision/transforms/v2/_type_conversion.py b/torchvision/transforms/v2/_type_conversion.py index 92de314608c..60f44c5d3db 100644 --- a/torchvision/transforms/v2/_type_conversion.py +++ b/torchvision/transforms/v2/_type_conversion.py @@ -13,7 +13,7 @@ class PILToTensor(Transform): """[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values. - .. betastatus:: PILToTensor transform + .. v2betastatus:: PILToTensor transform This transform does not support torchscript. @@ -30,7 +30,7 @@ class ToImageTensor(Transform): """[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image` ; this does not scale values. - .. betastatus:: ToImageTensor transform + .. v2betastatus:: ToImageTensor transform This transform does not support torchscript. 
""" @@ -46,7 +46,7 @@ def _transform( class ToImagePIL(Transform): """[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values. - .. betastatus:: ToImagePIL transform + .. v2betastatus:: ToImagePIL transform This transform does not support torchscript. From 68dcf9e226621b5c775b756859d97aa7c41ce54f Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Fri, 24 Feb 2023 15:25:01 +0000 Subject: [PATCH 2/2] Also update import warning --- torchvision/__init__.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/torchvision/__init__.py b/torchvision/__init__.py index f29da9cf644..eed24091a52 100644 --- a/torchvision/__init__.py +++ b/torchvision/__init__.py @@ -100,10 +100,11 @@ def _is_tracing(): _WARN_ABOUT_BETA_TRANSFORMS = True _BETA_TRANSFORMS_WARNING = ( "The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. " - "While we will try our best to maintain backward compatibility, " - "some APIs or behaviors might change without a deprecation cycle. " - "To help us improve these new features, please provide your feedback " - "here: https://github.com/pytorch/vision/issues/6753." + "While we do not expect major breaking changes, some APIs may still change " + "according to user feedback. Please submit any feedback you may have in " + "this issue: https://github.com/pytorch/vision/issues/6753, and you can also " + "check out https://github.com/pytorch/vision/issues/7319 to learn more about " + "the APIs that we suspect might involve future changes. " "You can silence this warning by calling torchvision.disable_beta_transform_warning()." )