From 8253305152d1776b234266f247ccc7e8dc224598 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Thu, 12 Dec 2024 17:39:17 +0100 Subject: [PATCH 01/27] Added Keypoints to the library --- torchvision/tv_tensors/__init__.py | 3 ++ torchvision/tv_tensors/_keypoints.py | 78 ++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+) create mode 100644 torchvision/tv_tensors/_keypoints.py diff --git a/torchvision/tv_tensors/__init__.py b/torchvision/tv_tensors/__init__.py index 1ba47f60a36..984d8070ac3 100644 --- a/torchvision/tv_tensors/__init__.py +++ b/torchvision/tv_tensors/__init__.py @@ -6,6 +6,7 @@ from ._torch_function_helpers import set_return_type from ._tv_tensor import TVTensor from ._video import Video +from ._keypoints import KeyPoints # TODO: Fix this. We skip this method as it leads to @@ -31,5 +32,7 @@ def wrap(wrappee, *, like, **kwargs): format=kwargs.get("format", like.format), canvas_size=kwargs.get("canvas_size", like.canvas_size), ) + elif isinstance(like, KeyPoints): + return KeyPoints(wrappee, canvas_size=kwargs.get('canvas_size', like.canvas_size)) else: return wrappee.as_subclass(type(like)) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py new file mode 100644 index 00000000000..410d4cf6859 --- /dev/null +++ b/torchvision/tv_tensors/_keypoints.py @@ -0,0 +1,78 @@ +from __future__ import annotations +from typing import TYPE_CHECKING, Any, Mapping, MutableSequence, Optional, Sequence, Tuple, Union +from torch.utils._pytree import tree_flatten +import torch +from ._tv_tensor import TVTensor + + +class KeyPoints(TVTensor): + """:class:`torch.Tensor` subclass for tensors with shape ``[..., 2]`` that represent points in an image. + + Each point is represented by its XY coordinates. + + Args: + data: Any data that can be turned into a tensor with :func:`torch.as_tensor`. + canvas_size (two-tuple of ints): Height and width of the corresponding image or video. + dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from + ``data``. + device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a + :class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU. + requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and + ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``. + """ + + canvas_size: Tuple[int, int] + + def __new__( + cls, data: Any, *, dtype: Optional[torch.dtype] = None, + device: Optional[Union[torch.device, str, int]] = None, + requires_grad: Optional[bool] = None, canvas_size: Tuple[int, int] + ): + tensor: torch.Tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) + if tensor.ndim == 1: + tensor = tensor.unsqueeze(0) + elif tensor.shape[-1] != 2: + raise ValueError(f"Expected a tensor of shape (..., 2), not {tensor.shape}") + points = tensor.as_subclass(cls) + points.canvas_size = canvas_size + return points + + if TYPE_CHECKING: + # EVIL: Just so that MYPY+PYLANCE+others stop shouting that everything is wrong when initializeing the TVTensor + # Not read or defined at Runtime (only at linting time). + # TODO: Add this to all TVTensors + def __init__( + self, data: Any, *, dtype: Optional[torch.dtype] = None, + device: Optional[Union[torch.device, str, int]] = None, + requires_grad: Optional[bool] = None, canvas_size: Tuple[int, int] + ): + ... 
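+
+    # Intended usage (illustrative sketch by the editor, not part of the public docs):
+    #
+    #   kp = KeyPoints([[0.0, 0.0], [5.0, 10.0]], canvas_size=(32, 48))
+    #   kp.canvas_size        # (32, 48) -- carried as metadata next to the tensor data
+    #   kp.to(torch.float64)  # still a KeyPoints; the metadata is re-attached by _wrap_output below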
+ + @classmethod + def _wrap_output( + cls, output: Any, args: Sequence[Any] = (), kwargs: Optional[Mapping[str, Any]] = None, + ) -> Any: + # Mostly copied over from the BoundingBoxes TVTensor, minor improvements. + # This copies over the metadata. + # For BoundingBoxes, that included format, but we only support one format here ! + flat_params, _ = tree_flatten(args + (tuple(kwargs.values()) if kwargs else ())) # type: ignore[operator] + first_bbox_from_args = next(x for x in flat_params if isinstance(x, KeyPoints)) + canvas_size: Tuple[int, int] = first_bbox_from_args.canvas_size + + if isinstance(output, torch.Tensor) and not isinstance(output, KeyPoints): + output = KeyPoints(output, canvas_size=canvas_size) + elif isinstance(output, tuple): + # NB: output is checked against sequence because it has already been checked against Tensor + # Since a Tensor is a sequence of Tensor, had it not been the case, we may have had silent + # or complex errors + output = tuple( + KeyPoints(part, canvas_size=canvas_size) + for part in output + ) + elif isinstance(output, MutableSequence): + for i, part in enumerate(output): + output[i] = KeyPoints(part, canvas_size=canvas_size) + return output + + def __repr__(self, *, tensor_contents: Any = None) -> str: + return self._make_repr(canvas_size=self.canvas_size) From 484561d493ae06ecb50a4094f31e7479504e3a23 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Fri, 13 Dec 2024 15:50:47 +0100 Subject: [PATCH 02/27] Improved KeyPoints to be exported --- torchvision/tv_tensors/__init__.py | 16 +++++++++++++--- torchvision/tv_tensors/_keypoints.py | 2 +- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/torchvision/tv_tensors/__init__.py b/torchvision/tv_tensors/__init__.py index 984d8070ac3..3a56bf88330 100644 --- a/torchvision/tv_tensors/__init__.py +++ b/torchvision/tv_tensors/__init__.py @@ -1,3 +1,4 @@ +from typing import TypeVar import torch from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat @@ -9,11 +10,14 @@ from ._keypoints import KeyPoints +_WRAP_LIKE_T = TypeVar("_WRAP_LIKE_T", bound=TVTensor) + + # TODO: Fix this. We skip this method as it leads to # RecursionError: maximum recursion depth exceeded while calling a Python object # Until `disable` is removed, there will be graph breaks after all calls to functional transforms @torch.compiler.disable -def wrap(wrappee, *, like, **kwargs): +def wrap(wrappee: torch.Tensor, *, like: _WRAP_LIKE_T, **kwargs) -> _WRAP_LIKE_T: """Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``. If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of @@ -27,12 +31,18 @@ def wrap(wrappee, *, like, **kwargs): Ignored otherwise. 
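+
+    Example (illustrative; the ``KeyPoints`` branch is handled below)::
+
+        from torchvision import tv_tensors
+        kp = tv_tensors.KeyPoints([[0, 0], [5, 5]], canvas_size=(10, 10))
+        out = tv_tensors.wrap(torch.rand(2, 2), like=kp)  # KeyPoints with canvas_size (10, 10)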
""" if isinstance(like, BoundingBoxes): - return BoundingBoxes._wrap( + return BoundingBoxes._wrap( # type:ignore wrappee, format=kwargs.get("format", like.format), canvas_size=kwargs.get("canvas_size", like.canvas_size), ) elif isinstance(like, KeyPoints): - return KeyPoints(wrappee, canvas_size=kwargs.get('canvas_size', like.canvas_size)) + return KeyPoints(wrappee, canvas_size=kwargs.get('canvas_size', like.canvas_size)) # type:ignore else: return wrappee.as_subclass(type(like)) + + +__all__: list[str] = [ + "wrap", "KeyPoints", "Video", "TVTensor", "set_return_type", + "Mask", "Image", "BoundingBoxFormat", "BoundingBoxes" +] diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 410d4cf6859..d044bb77824 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -1,7 +1,7 @@ from __future__ import annotations from typing import TYPE_CHECKING, Any, Mapping, MutableSequence, Optional, Sequence, Tuple, Union -from torch.utils._pytree import tree_flatten import torch +from torch.utils._pytree import tree_flatten from ._tv_tensor import TVTensor From 3255890a384043788d2485129b9b3f0dbb170297 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Fri, 13 Dec 2024 15:51:11 +0100 Subject: [PATCH 03/27] Added kernels to support the keypoints --- torchvision/transforms/v2/_misc.py | 2 +- torchvision/transforms/v2/_utils.py | 16 +- .../transforms/v2/functional/__init__.py | 13 + .../transforms/v2/functional/_geometry.py | 348 ++++++++++++++++-- torchvision/transforms/v2/functional/_meta.py | 31 ++ torchvision/transforms/v2/functional/_misc.py | 64 ++++ 6 files changed, 451 insertions(+), 23 deletions(-) diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index d38a6ad8767..ccb5968cd59 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -9,7 +9,7 @@ from torchvision import transforms as _transforms, tv_tensors from torchvision.transforms.v2 import functional as F, Transform -from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_bounding_boxes, has_any, is_pure_tensor +from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_all_keypoints, get_bounding_boxes, has_any, is_pure_tensor # TODO: do we want/need to expose this? diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index dd65ca4d9c9..4e6e76418ec 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -4,7 +4,7 @@ import numbers from contextlib import suppress -from typing import Any, Callable, Dict, List, Literal, Sequence, Tuple, Type, Union +from typing import Any, Callable, Dict, Iterable, List, Literal, Sequence, Tuple, Type, Union import PIL.Image import torch @@ -162,6 +162,20 @@ def get_bounding_boxes(flat_inputs: List[Any]) -> tv_tensors.BoundingBoxes: raise ValueError("No bounding boxes were found in the sample") +def get_all_keypoints(flat_inputs: List[Any]) -> Iterable[tv_tensors.KeyPoints]: + """Yields all KeyPoints in the input. 
+ + Raises: + ValueError: No KeyPoints can be found + """ + generator = (inpt for inpt in flat_inputs if isinstance(inpt, tv_tensors.KeyPoints)) + try: + yield next(generator) + except StopIteration: + raise ValueError("No Keypoints were found in the sample.") + return generator + + def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]: """Return Channel, Height, and Width.""" chws = { diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index d5705d55c4b..cbc6e02b2fb 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -5,6 +5,7 @@ from ._meta import ( clamp_bounding_boxes, convert_bounding_box_format, + convert_box_to_points, get_dimensions_image, get_dimensions_video, get_dimensions, @@ -67,21 +68,25 @@ ) from ._geometry import ( affine, + affine_keypoints, affine_bounding_boxes, affine_image, affine_mask, affine_video, center_crop, + center_crop_keypoints, center_crop_bounding_boxes, center_crop_image, center_crop_mask, center_crop_video, crop, + crop_keypoints, crop_bounding_boxes, crop_image, crop_mask, crop_video, elastic, + elastic_keypoints, elastic_bounding_boxes, elastic_image, elastic_mask, @@ -92,31 +97,37 @@ five_crop_video, hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file horizontal_flip, + horizontal_flip_keypoints, horizontal_flip_bounding_boxes, horizontal_flip_image, horizontal_flip_mask, horizontal_flip_video, pad, + pad_keypoints, pad_bounding_boxes, pad_image, pad_mask, pad_video, perspective, + perspectice_keypoints, perspective_bounding_boxes, perspective_image, perspective_mask, perspective_video, resize, + resize_keypoints, resize_bounding_boxes, resize_image, resize_mask, resize_video, resized_crop, + resized_crop_keypoints, resized_crop_bounding_boxes, resized_crop_image, resized_crop_mask, resized_crop_video, rotate, + rotate_keypoints, rotate_bounding_boxes, rotate_image, rotate_mask, @@ -129,6 +140,7 @@ vertical_flip_image, vertical_flip_mask, vertical_flip_video, + vertical_flip_keypoints, vflip, ) from ._misc import ( @@ -143,6 +155,7 @@ normalize_image, normalize_video, sanitize_bounding_boxes, + sanitize_keypoints, to_dtype, to_dtype_image, to_dtype_video, diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index da080e437c9..a80b246630a 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -22,7 +22,7 @@ from torchvision.utils import _log_api_usage_once -from ._meta import _get_size_image_pil, clamp_bounding_boxes, convert_bounding_box_format +from ._meta import _get_size_image_pil, clamp_bounding_boxes, clamp_keypoints, convert_bounding_box_format from ._utils import _FillTypeJIT, _get_kernel, _register_five_ten_crop_kernel_internal, _register_kernel_internal @@ -65,6 +65,11 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: return horizontal_flip_image(mask) +@_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def horizontal_flip_keypoints(kp: tv_tensors.KeyPoints): + return kp.sub_(kp.canvas_size[1]).neg_() + + def horizontal_flip_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, canvas_size: Tuple[int, int] ) -> torch.Tensor: @@ -122,6 +127,11 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: return vertical_flip_image(mask) 
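For illustration (editor's sketch, not part of the diff): once registered, the keypoint kernels are reached through the same functional entry points as every other TVTensor, without new user-facing API:

    import torch
    from torchvision import tv_tensors
    from torchvision.transforms.v2 import functional as F

    kp = tv_tensors.KeyPoints([[1.0, 2.0], [4.0, 3.0]], canvas_size=(32, 48))
    out = F.horizontal_flip(kp)  # dispatches to horizontal_flip_keypoints registered above
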
+@_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def vertical_flip_keypoints(kp: tv_tensors.KeyPoints): + return kp.sub_(kp.canvas_size[1]).neg_() + + def vertical_flip_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, canvas_size: Tuple[int, int] ) -> torch.Tensor: @@ -333,6 +343,38 @@ def _resize_mask_dispatch( return tv_tensors.wrap(output, like=inpt) +def resize_keypoints( + kp: torch.Tensor, size: Optional[List[int]], + canvas_size: Tuple[int, int], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + max_size: Optional[int] = None, + antialias: Optional[bool] = True, +): + old_height, old_width = canvas_size + new_height, new_width = _compute_resized_output_size(canvas_size, size=size, max_size=max_size) + + w_ratio = new_width / old_width + h_ratio = new_height / old_height + ratios = torch.tensor([w_ratio, h_ratio]) + kp.data = kp.data.mul(ratios).to(kp.dtype) + + return kp, (new_height, new_width) + + +@_register_kernel_internal(resize, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _resize_keypoints_dispatch( + kp: tv_tensors.KeyPoints, size: Optional[List[int]], + interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, + max_size: Optional[int] = None, + antialias: Optional[bool] = True, +) -> tv_tensors.KeyPoints: + out, canvas_size = resize_keypoints( + kp.as_subclass(torch.Tensor), size, canvas_size=kp.canvas_size, interpolation=interpolation, + max_size=max_size, antialias=antialias + ) + return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) + + def resize_bounding_boxes( bounding_boxes: torch.Tensor, canvas_size: Tuple[int, int], @@ -758,6 +800,67 @@ def _affine_image_pil( return _FP.affine(image, matrix, interpolation=pil_modes_mapping[interpolation], fill=fill) +def _affine_keypoints_with_expand( + keypoints: torch.Tensor, + canvas_size: Tuple[int, int], + angle: Union[int, float], + translate: List[float], + scale: float, + shear: List[float], + center: Optional[List[float]] = None, + expand: bool = False, +) -> Tuple[torch.Tensor, Tuple[int, int]]: + if keypoints.numel() == 0: + return keypoints, canvas_size + + original_dtype = keypoints.dtype + keypoints = keypoints.clone() if keypoints.is_floating_point() else keypoints.float() + dtype = keypoints.dtype + device = keypoints.device + + angle, translate, shear, center = _affine_parse_args( + angle, translate, scale, shear, InterpolationMode.NEAREST, center + ) + + if center is None: + height, width = canvas_size + center = [width * 0.5, height * 0.5] + + affine_vector = _get_inverse_affine_matrix(center, angle, translate, scale, shear, inverted=False) + transposed_affine_matrix = ( + torch.tensor( + affine_vector, + dtype=dtype, + device=device, + ) + .reshape(2, 3) + .T + ) + # 1) Unlike bounding box (whose implmentation we stole) we're already a bunch of points. 
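+    #    Appending a column of ones makes the points homogeneous ([x, y, 1]), so the single
+    #    matmul below applies both the rotation/scale/shear part and the translation of the
+    #    2x3 affine matrix in one step.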
+ keypoints = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=device, dtype=dtype)], dim=-1) + # 2) Now let's transform the points using affine matrix + keypoints = torch.matmul(keypoints, transposed_affine_matrix).to(original_dtype) + + return keypoints, canvas_size + + +@_register_kernel_internal(affine, tv_tensors.KeyPoints) +def affine_keypoints( + keypoints: tv_tensors.KeyPoints, + angle: Union[int, float], + translate: List[float], + scale: float, + shear: List[float], + center: Optional[List[float]] = None, +): + return _affine_keypoints_with_expand( + keypoints=keypoints.as_subclass(torch.Tensor), + canvas_size=keypoints.canvas_size, + angle=angle, translate=translate, scale=scale, shear=shear, + center=center, expand=False + ) + + def _affine_bounding_boxes_with_expand( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1055,6 +1158,29 @@ def _rotate_image_pil( ) +def rotate_keypoints( + keypoints: tv_tensors.KeyPoints, + angle: float, + interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, + expand: bool = False, + center: Optional[List[float]] = None, + fill: _FillTypeJIT = None, +) -> Tuple[torch.Tensor, Tuple[int, int]]: + return _affine_keypoints_with_expand( + keypoints=keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, + angle=-angle, translate=[0.0, 0.0], scale=1.0, + shear=[0.0, 0.0], center=center, expand=expand, + ) + + +@_register_kernel_internal(rotate, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) +def _rotate_keypoints_dispatch( + kp: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[List[float]] = None, **kwargs +) -> tv_tensors.KeyPoints: + out, canvas_size = rotate_keypoints(kp, angle, center=center, expand=expand, **kwargs) + return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) + + def rotate_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1318,6 +1444,36 @@ def pad_mask( return output +def pad_keypoints( + keypoints: torch.Tensor, + canvas_size: Tuple[int, int], + padding: List[int], + padding_mode: str = "constant" +): + SUPPORTED_MODES = ["constant"] + if padding_mode not in SUPPORTED_MODES: + # TODO: add support of other padding modes + raise ValueError( + f"Padding mode '{padding_mode}' is not supported with KeyPoints" + f" (supported modes are {', '.join(SUPPORTED_MODES)})" + ) + left, right, top, bottom = _parse_pad_padding(padding) + pad = torch.tensor([left, top], dtype=keypoints.dtype, device=keypoints.device) + canvas_size = (canvas_size[0] + top + bottom, canvas_size[1] + left + right) + return clamp_keypoints(keypoints + pad, canvas_size), canvas_size + + +@_register_kernel_internal(pad, tv_tensors.KeyPoints, tv_tensors_wrapper=False) +def _pad( + keypoints: tv_tensors.KeyPoints, padding: List[int], padding_mode: str = "constant", **kwargs +) -> tv_tensors.KeyPoints: + output, canvas_size = pad_keypoints( + keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, + padding=padding, padding_mode=padding_mode + ) + return tv_tensors.wrap(output, like=keypoints, canvas_size=canvas_size) + + def pad_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1404,6 +1560,26 @@ def crop_image(image: torch.Tensor, top: int, left: int, height: int, width: int _register_kernel_internal(crop, PIL.Image.Image)(_crop_image_pil) +def crop_keypoints( + kp: torch.Tensor, + top: int, + left: int, + height: int, + width: int, +) -> Tuple[torch.Tensor, Tuple[int, int]]: 
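+    # Cropping only shifts the points into the crop's coordinate frame (origin at (left, top));
+    # the subtraction below is in place, and the result is clamped to the new canvas size.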
+ + kp.sub_(torch.tensor([left, top], dtype=kp.dtype, device=kp.device)) + canvas_size = (height, width) + + return clamp_keypoints(kp, canvas_size=canvas_size), canvas_size + + +@_register_kernel_internal(crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int) -> tv_tensors.KeyPoints: + out, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) + return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) + + def crop_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1577,6 +1753,51 @@ def _perspective_image_pil( return _FP.perspective(image, perspective_coeffs, interpolation=pil_modes_mapping[interpolation], fill=fill) +def perspectice_keypoints( + kp: torch.Tensor, + canvas_size: Tuple[int, int], + startpoints: Optional[List[List[int]]], + endpoints: Optional[List[List[int]]], + coefficients: Optional[List[float]] = None, +): + if kp.numel() == 0: + return kp + dtype = kp.dtype if torch.is_floating_point(kp) else torch.float32 + device = kp.device + + perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients) + + denom = perspective_coeffs[0] * perspective_coeffs[4] - perspective_coeffs[1] * perspective_coeffs[3] + if denom == 0: + raise RuntimeError( + f"Provided perspective_coeffs {perspective_coeffs} can not be inverted to transform bounding boxes. " + f"Denominator is zero, denom={denom}" + ) + + theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) + + numer_points = torch.matmul(kp, theta1.T) + denom_points = torch.matmul(kp, theta2.T) + transformed_points = numer_points.div_(denom_points) + return clamp_keypoints(transformed_points, canvas_size) + + +@_register_kernel_internal(perspective, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _perspective_keypoints_dispatch( + inpt: tv_tensors.BoundingBoxes, + startpoints: Optional[List[List[int]]], + endpoints: Optional[List[List[int]]], + coefficients: Optional[List[float]] = None, + **kwargs, +) -> tv_tensors.BoundingBoxes: + output = perspectice_keypoints( + inpt.as_subclass(torch.Tensor), + canvas_size=inpt.canvas_size, startpoints=startpoints, + endpoints=endpoints, coefficients=coefficients, + ) + return tv_tensors.wrap(output, like=inpt) + + def perspective_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1618,26 +1839,7 @@ def perspective_bounding_boxes( f"Denominator is zero, denom={denom}" ) - inv_coeffs = [ - (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, - (-perspective_coeffs[1] + perspective_coeffs[2] * perspective_coeffs[7]) / denom, - (perspective_coeffs[1] * perspective_coeffs[5] - perspective_coeffs[2] * perspective_coeffs[4]) / denom, - (-perspective_coeffs[3] + perspective_coeffs[5] * perspective_coeffs[6]) / denom, - (perspective_coeffs[0] - perspective_coeffs[2] * perspective_coeffs[6]) / denom, - (-perspective_coeffs[0] * perspective_coeffs[5] + perspective_coeffs[2] * perspective_coeffs[3]) / denom, - (-perspective_coeffs[4] * perspective_coeffs[6] + perspective_coeffs[3] * perspective_coeffs[7]) / denom, - (-perspective_coeffs[0] * perspective_coeffs[7] + perspective_coeffs[1] * perspective_coeffs[6]) / denom, - ] - - theta1 = torch.tensor( - [[inv_coeffs[0], inv_coeffs[1], inv_coeffs[2]], [inv_coeffs[3], inv_coeffs[4], inv_coeffs[5]]], - dtype=dtype, - device=device, - ) - - theta2 
= torch.tensor( - [[inv_coeffs[6], inv_coeffs[7], 1.0], [inv_coeffs[6], inv_coeffs[7], 1.0]], dtype=dtype, device=device - ) + theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) # 1) Let's transform bboxes into a tensor of 4 points (top-left, top-right, bottom-left, bottom-right corners). # Tensor of points has shape (N * 4, 3), where N is the number of bboxes @@ -1671,6 +1873,33 @@ def perspective_bounding_boxes( ).reshape(original_shape) +def _compute_perspective_thetas( + perspective_coeffs: List[float], dtype: torch.dtype, device: torch.device, denom: float, / +) -> Tuple[torch.Tensor, torch.Tensor]: + inv_coeffs = [ + (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, + (-perspective_coeffs[1] + perspective_coeffs[2] * perspective_coeffs[7]) / denom, + (perspective_coeffs[1] * perspective_coeffs[5] - perspective_coeffs[2] * perspective_coeffs[4]) / denom, + (-perspective_coeffs[3] + perspective_coeffs[5] * perspective_coeffs[6]) / denom, + (perspective_coeffs[0] - perspective_coeffs[2] * perspective_coeffs[6]) / denom, + (-perspective_coeffs[0] * perspective_coeffs[5] + perspective_coeffs[2] * perspective_coeffs[3]) / denom, + (-perspective_coeffs[4] * perspective_coeffs[6] + perspective_coeffs[3] * perspective_coeffs[7]) / denom, + (-perspective_coeffs[0] * perspective_coeffs[7] + perspective_coeffs[1] * perspective_coeffs[6]) / denom, + ] + + theta1 = torch.tensor( + [[inv_coeffs[0], inv_coeffs[1], inv_coeffs[2]], [inv_coeffs[3], inv_coeffs[4], inv_coeffs[5]]], + dtype=dtype, + device=device, + ) + + theta2 = torch.tensor( + [[inv_coeffs[6], inv_coeffs[7], 1.0], [inv_coeffs[6], inv_coeffs[7], 1.0]], dtype=dtype, device=device + ) + + return theta1, theta2 + + @_register_kernel_internal(perspective, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) def _perspective_bounding_boxes_dispatch( inpt: tv_tensors.BoundingBoxes, @@ -1831,6 +2060,48 @@ def _create_identity_grid(size: Tuple[int, int], device: torch.device, dtype: to return base_grid +def elastic_keypoints( + kp: torch.Tensor, + canvas_size: Tuple[int, int], + displacement: torch.Tensor +) -> torch.Tensor: + expected_shape = (1, canvas_size[0], canvas_size[1], 2) + if not isinstance(displacement, torch.Tensor): + raise TypeError("Argument displacement should be a Tensor") + elif displacement.shape != expected_shape: + raise ValueError(f"Argument displacement shape should be {expected_shape}, but given {displacement.shape}") + + if kp.numel() == 0: + return kp + + device = kp.device + dtype = kp.dtype if torch.is_floating_point(kp) else torch.float32 + + if displacement.dtype != dtype or displacement.device != device: + displacement = displacement.to(dtype=dtype, device=device) + + id_grid = _create_identity_grid(canvas_size, device=device, dtype=dtype) + inv_grid = id_grid.sub_(displacement) + + index_xy = kp.to(dtype=torch.long) + index_x, index_y = index_xy[:, 0], index_xy[:, 1] + + t_size = torch.tensor(canvas_size[::-1], device=displacement.device, dtype=displacement.dtype) + transformed_points = inv_grid[0, index_y, index_x, :].add_(1).mul_(0.5 * t_size).sub_(0.5) + + return clamp_keypoints(transformed_points, canvas_size=canvas_size) + + +@_register_kernel_internal(elastic, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _elastic_keypoints_dispatch( + inpt: tv_tensors.BoundingBoxes, displacement: torch.Tensor, **kwargs +): + output = elastic_keypoints( + inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, displacement=displacement + ) + 
return tv_tensors.wrap(output, like=inpt) + + def elastic_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -2011,6 +2282,26 @@ def _center_crop_image_pil(image: PIL.Image.Image, output_size: List[int]) -> PI return _crop_image_pil(image, crop_top, crop_left, crop_height, crop_width) +def center_crop_keypoints( + inpt: torch.Tensor, canvas_size: Tuple[int, int], output_size: List[int] +): + crop_height, crop_width = _center_crop_parse_output_size(output_size) + crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, *canvas_size) + return crop_keypoints( + inpt, top=crop_top, left=crop_left, height=crop_height, width=crop_width + ) + + +@_register_kernel_internal(center_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _center_crop_keypoints_dispatch( + inpt: tv_tensors.KeyPoints, output_size: List[int] +) -> tv_tensors.KeyPoints: + output, canvas_size = center_crop_keypoints( + inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, output_size=output_size + ) + return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size) + + def center_crop_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -2146,6 +2437,21 @@ def _resized_crop_image_pil_dispatch( ) +def resized_crop_keypoints( + kp: torch.Tensor, top: int, left: int, height: int, width: int, size: List[int], +) -> Tuple[torch.Tensor, Tuple[int, int]]: + kp, canvas_size = crop_keypoints(kp, top, left, height, width) + return resize_keypoints(kp, size=size, canvas_size=canvas_size) + + +@_register_kernel_internal(resized_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _resized_crop_dispatch(inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs): + out, canvas_size = resized_crop_bounding_boxes( + inpt.as_subclass(torch.Tensor), format=inpt.format, top=top, left=left, height=height, width=width, size=size + ) + return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) + + def resized_crop_bounding_boxes( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index b90e5fb7b5b..022c1cf7f25 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -176,6 +176,24 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: return xyxy +def _xyxy_to_points( + bounding_boxes: torch.Tensor +) -> torch.Tensor: + return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2) + + +def convert_box_to_points( + bounding_boxes: tv_tensors.BoundingBoxes +) -> tv_tensors.KeyPoints: + bbox = _convert_bounding_box_format( + bounding_boxes.as_subclass(torch.Tensor), + old_format=bounding_boxes.format, + new_format=BoundingBoxFormat.XYXY, + inplace=False + ) + return tv_tensors.KeyPoints(_xyxy_to_points(bbox), canvas_size=bounding_boxes.canvas_size) + + def _convert_bounding_box_format( bounding_boxes: torch.Tensor, old_format: BoundingBoxFormat, new_format: BoundingBoxFormat, inplace: bool = False ) -> torch.Tensor: @@ -254,6 +272,19 @@ def _clamp_bounding_boxes( return out_boxes.to(in_dtype) +def clamp_keypoints( + inpt: torch.Tensor, + canvas_size: Tuple[int, int] +) -> torch.Tensor: + if not torch.jit.is_scripting(): + _log_api_usage_once(clamp_bounding_boxes) + dtype = inpt.dtype + inpt = inpt.float() + inpt[..., 0].clamp_(0, canvas_size[1]) + inpt[..., 1].clamp_(0, 
canvas_size[0]) + return inpt.to(dtype=dtype) + + def clamp_bounding_boxes( inpt: torch.Tensor, format: Optional[BoundingBoxFormat] = None, diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index f40bf117753..b4559ab95e4 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -320,6 +320,7 @@ def to_dtype_video(video: torch.Tensor, dtype: torch.dtype = torch.float, scale: return to_dtype_image(video, dtype, scale=scale) +@_register_kernel_internal(to_dtype, tv_tensors.KeyPoints, tv_tensor_wrapper=False) @_register_kernel_internal(to_dtype, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) @_register_kernel_internal(to_dtype, tv_tensors.Mask, tv_tensor_wrapper=False) def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: bool = False) -> torch.Tensor: @@ -327,6 +328,69 @@ def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: boo return inpt.to(dtype) +def sanitize_keypoints( + keypoints: torch.Tensor, + canvas_size: Optional[Tuple[int, int]] = None +) -> Tuple[torch.Tensor, torch.Tensor]: + """Removes degenerate/invalid keypoints and returns the corresponding indexing mask. + + This removes the keypoints that are outside of their corresponing image. + You may want to first call :func:`~torchvision.transforms.v2.functional.clam_keypoints` + first to avoid undesired removals. + + .. note:: + Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes` + + Raises: + ValueError: If the keypoints are not passed as a two dimensional tensor. + + Args: + keypoints (torch.Tensor or class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being removed + canvas_size (Optional[Tuple[int, int]], optional): The canvas_size of the bounding boxes + (size of the corresponding image/video). + Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.KeyPoints` object. + + Returns: + out (tuple of Tensors): The subset of valid bounding boxes, and the corresponding indexing mask. + The mask can then be used to subset other tensors (e.g. labels) that are associated with the bounding boxes. + """ + if not keypoints.ndim == 2: + if keypoints.ndim < 2: + raise ValueError("Cannot sanitize a single Keypoint") + raise ValueError( + "Cannot sanitize KeyPoints structure that are not 2D. " + f"Expected shape to be (N, 2), got {keypoints.shape} ({keypoints.ndim=}, not 2)" + ) + if torch.jit.is_scripting() or is_pure_tensor(keypoints): + if canvas_size is None: + raise ValueError( + "canvas_size cannot be None if keypoints is a pure tensor. " + f"Got canvas_size={canvas_size}." + "Set that to appropriate values or pass keypoints as a tv_tensors.KeyPoints object." 
+ ) + valid = _get_sanitize_keypoints_mask( + keypoints, canvas_size=canvas_size, + ) + return keypoints[valid], valid + if not isinstance(keypoints, tv_tensors.KeyPoints): + raise ValueError("keypoints must be a tv_tensors.KeyPoints instance or a pure tensor.") + valid = _get_sanitize_keypoints_mask( + keypoints, canvas_size=keypoints.canvas_size, + ) + return tv_tensors.wrap(keypoints[valid], like=keypoints), valid + + +def _get_sanitize_keypoints_mask( + keypoints: torch.Tensor, + canvas_size: Tuple[int, int], +) -> torch.Tensor: + image_h, image_w = canvas_size + x = keypoints[:, 0] + y = keypoints[:, 1] + + return (0 < x) & (x < image_w) & (0 < y) & (y < image_h) + + def sanitize_bounding_boxes( bounding_boxes: torch.Tensor, format: Optional[tv_tensors.BoundingBoxFormat] = None, From 7436636b631c6a2397af438ca0195d948956061a Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Fri, 13 Dec 2024 15:51:22 +0100 Subject: [PATCH 04/27] Added tests for keypoints --- test/common_utils.py | 12 ++++++ test/test_transforms_v2.py | 20 ++++++++++ test/test_transforms_v2_utils.py | 54 +++++++++++++++------------ test/test_tv_tensors.py | 63 +++++++++++++++++++++++++++----- 4 files changed, 115 insertions(+), 34 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index 99c7931587d..0fafdce5d9e 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -8,6 +8,7 @@ import shutil import sys import tempfile +from typing import Sequence, Tuple import warnings from subprocess import CalledProcessError, check_output, STDOUT @@ -402,6 +403,17 @@ def make_image_pil(*args, **kwargs): return to_pil_image(make_image(*args, **kwargs)) +def make_keypoints(canvas_size: Tuple[int, int] = DEFAULT_SIZE, num_points: int | Sequence[int] = 4, dtype=None, device='cpu'): + """Make the KeyPoints for testing purposes""" + if isinstance(num_points, int): + num_points = [num_points] + half_point: Tuple[int, ...] = tuple(num_points) + (1,) + y = torch.randint(0, canvas_size[0] - 1, half_point, dtype=dtype, device=device) + x = torch.randint(0, canvas_size[1] - 1, half_point, dtype=dtype, device=device) + points = torch.cat((x, y), dim=-1) + return tv_tensors.KeyPoints(points, canvas_size=canvas_size) + + def make_bounding_boxes( canvas_size=DEFAULT_SIZE, *, diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index fb49525ecfe..35a0befb896 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -31,6 +31,7 @@ make_image, make_image_pil, make_image_tensor, + make_keypoints, make_segmentation_mask, make_video, make_video_tensor, @@ -223,6 +224,7 @@ def check_functional_kernel_signature_match(functional, *, kernel, input_type): # explicitly passed to the kernel. 
explicit_metadata = { tv_tensors.BoundingBoxes: {"format", "canvas_size"}, + tv_tensors.KeyPoints: {"canvas_size"} } kernel_params = [param for param in kernel_params if param.name not in explicit_metadata.get(input_type, set())] @@ -327,6 +329,18 @@ def _make_transform_sample(transform, *, image_or_video, adapter): canvas_size=size, device=device, ), + keypoints=make_keypoints(), keypoints_degenerate=tv_tensors.KeyPoints( + [ + [0, 1], # left edge + [1, 0], # top edge + [0, 0], # top left corner + [size[1], 1], # right edge + [size[1], 0], # top right corner + [1, size[0]], # bottom edge + [0, size[0]], # bottom left corner + [size[1], size[0]] # bottom right corner + ], canvas_size=size, device=device + ), detection_mask=make_detection_masks(size, device=device), segmentation_mask=make_segmentation_mask(size, device=device), int=0, @@ -680,6 +694,7 @@ def test_functional(self, size, make_input): (F.resize_image, torch.Tensor), (F._geometry._resize_image_pil, PIL.Image.Image), (F.resize_image, tv_tensors.Image), + (F.resize_keypoints, tv_tensors.KeyPoints), (F.resize_bounding_boxes, tv_tensors.BoundingBoxes), (F.resize_mask, tv_tensors.Mask), (F.resize_video, tv_tensors.Video), @@ -1035,6 +1050,7 @@ def test_functional(self, make_input): (F.horizontal_flip_image, torch.Tensor), (F._geometry._horizontal_flip_image_pil, PIL.Image.Image), (F.horizontal_flip_image, tv_tensors.Image), + (F.horizontal_flip_keypoints, tv_tensors.KeyPoints), (F.horizontal_flip_bounding_boxes, tv_tensors.BoundingBoxes), (F.horizontal_flip_mask, tv_tensors.Mask), (F.horizontal_flip_video, tv_tensors.Video), @@ -1203,6 +1219,7 @@ def test_functional(self, make_input): (F.affine_image, torch.Tensor), (F._geometry._affine_image_pil, PIL.Image.Image), (F.affine_image, tv_tensors.Image), + (F.affine_keypoints, tv_tensors.KeyPoints), (F.affine_bounding_boxes, tv_tensors.BoundingBoxes), (F.affine_mask, tv_tensors.Mask), (F.affine_video, tv_tensors.Video), @@ -1485,6 +1502,7 @@ def test_functional(self, make_input): (F.vertical_flip_image, torch.Tensor), (F._geometry._vertical_flip_image_pil, PIL.Image.Image), (F.vertical_flip_image, tv_tensors.Image), + (F.vertical_flip_keypoints, tv_tensors.KeyPoints), (F.vertical_flip_bounding_boxes, tv_tensors.BoundingBoxes), (F.vertical_flip_mask, tv_tensors.Mask), (F.vertical_flip_video, tv_tensors.Video), @@ -1627,6 +1645,7 @@ def test_functional(self, make_input): (F.rotate_image, torch.Tensor), (F._geometry._rotate_image_pil, PIL.Image.Image), (F.rotate_image, tv_tensors.Image), + (F.rotate_keypoints, tv_tensors.KeyPoints), (F.rotate_bounding_boxes, tv_tensors.BoundingBoxes), (F.rotate_mask, tv_tensors.Mask), (F.rotate_video, tv_tensors.Video), @@ -2332,6 +2351,7 @@ def test_error(self, T): F.to_pil_image(imgs[0]), tv_tensors.Mask(torch.rand(12, 12)), tv_tensors.BoundingBoxes(torch.rand(2, 4), format="XYXY", canvas_size=12), + tv_tensors.KeyPoints(torch.rand(4, 2), canvas_size=(12, 12)) ): with pytest.raises(ValueError, match="does not support PIL images, "): cutmix_mixup(input_with_bad_type) diff --git a/test/test_transforms_v2_utils.py b/test/test_transforms_v2_utils.py index 53222c6a2c8..cda255d0173 100644 --- a/test/test_transforms_v2_utils.py +++ b/test/test_transforms_v2_utils.py @@ -4,7 +4,7 @@ import torch import torchvision.transforms.v2._utils -from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_masks, make_image +from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_masks, make_image, make_keypoints from torchvision import 
tv_tensors from torchvision.transforms.v2._utils import has_all, has_any @@ -14,29 +14,32 @@ IMAGE = make_image(DEFAULT_SIZE, color_space="RGB") BOUNDING_BOX = make_bounding_boxes(DEFAULT_SIZE, format=tv_tensors.BoundingBoxFormat.XYXY) MASK = make_detection_masks(DEFAULT_SIZE) +KEYPOINTS = make_keypoints(DEFAULT_SIZE) @pytest.mark.parametrize( ("sample", "types", "expected"), [ - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Mask,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.Mask), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), - ((MASK,), (tv_tensors.Image, tv_tensors.BoundingBoxes), False), - ((BOUNDING_BOX,), (tv_tensors.Image, tv_tensors.Mask), False), - ((IMAGE,), (tv_tensors.BoundingBoxes, tv_tensors.Mask), False), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Mask,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.KeyPoints), True), + ((MASK,), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.KeyPoints), False), + ((BOUNDING_BOX,), (tv_tensors.Image, tv_tensors.Mask, tv_tensors.KeyPoints), False), + ((IMAGE,), (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), False), + ((KEYPOINTS,), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), False), ( - (IMAGE, BOUNDING_BOX, MASK), - (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), + (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), + (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), True, ), - ((), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), False), - ((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, tv_tensors.Image),), True), - ((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False), - ((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True), + ((), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), False), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (lambda obj: isinstance(obj, tv_tensors.Image),), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (lambda _: False,), False), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (lambda _: True,), True), ((IMAGE,), (tv_tensors.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor), True), ( (torch.Tensor(IMAGE),), @@ -57,15 +60,18 @@ def test_has_any(sample, types, expected): @pytest.mark.parametrize( ("sample", "types", "expected"), [ - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Mask,), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.Mask), True), - ((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image,), True), + 
((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Mask,), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Mask, tv_tensors.KeyPoints), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), True), ( - (IMAGE, BOUNDING_BOX, MASK), - (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), + (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), + (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), True, ), ((BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes), False), diff --git a/test/test_tv_tensors.py b/test/test_tv_tensors.py index ed75ae35ecd..6d4eed9c579 100644 --- a/test/test_tv_tensors.py +++ b/test/test_tv_tensors.py @@ -2,7 +2,7 @@ import pytest import torch -from common_utils import assert_equal, make_bounding_boxes, make_image, make_segmentation_mask, make_video +from common_utils import assert_equal, make_bounding_boxes, make_image, make_keypoints, make_segmentation_mask, make_video from PIL import Image from torchvision import tv_tensors @@ -49,6 +49,20 @@ def test_bbox_dim_error(): tv_tensors.BoundingBoxes(data_3d, format="XYXY", canvas_size=(32, 32)) +@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 2)), [[0, 0,], [2, 2,]], [1, 2,]]) +def test_keypoints_instance(data): + kpoint = tv_tensors.KeyPoints(data, canvas_size=(32, 32)) + assert isinstance(kpoint, tv_tensors.KeyPoints) + assert type(kpoint) is tv_tensors.KeyPoints + assert kpoint.shape[-1] == 2 + + +def test_keypoints_shape_error(): + data_3d = [(0, 1, 2)] + with pytest.raises(ValueError, match="shape"): + tv_tensors.KeyPoints(torch.tensor(data_3d), canvas_size=(11, 7)) + + @pytest.mark.parametrize( ("data", "input_requires_grad", "expected_requires_grad"), [ @@ -68,7 +82,9 @@ def test_new_requires_grad(data, input_requires_grad, expected_requires_grad): assert tv_tensor.requires_grad is expected_requires_grad -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) def test_isinstance(make_input): assert isinstance(make_input(), torch.Tensor) @@ -80,7 +96,9 @@ def test_wrapping_no_copy(): assert image.data_ptr() == tensor.data_ptr() -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) def test_to_wrapping(make_input): dp = make_input() @@ -90,7 +108,9 @@ def test_to_wrapping(make_input): assert dp_to.dtype is torch.float64 -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_to_tv_tensor_reference(make_input, 
return_type): tensor = torch.rand((3, 16, 16), dtype=torch.float64) @@ -104,7 +124,9 @@ def test_to_tv_tensor_reference(make_input, return_type): assert type(tensor) is torch.Tensor -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_clone_wrapping(make_input, return_type): dp = make_input() @@ -116,7 +138,9 @@ def test_clone_wrapping(make_input, return_type): assert dp_clone.data_ptr() != dp.data_ptr() -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_requires_grad__wrapping(make_input, return_type): dp = make_input(dtype=torch.float) @@ -131,7 +155,9 @@ def test_requires_grad__wrapping(make_input, return_type): assert dp_requires_grad.requires_grad -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_detach_wrapping(make_input, return_type): dp = make_input(dtype=torch.float).requires_grad_(True) @@ -148,29 +174,42 @@ def test_force_subclass_with_metadata(return_type): # Largely the same as above, we additionally check that the metadata is preserved format, canvas_size = "XYXY", (32, 32) bbox = tv_tensors.BoundingBoxes([[0, 0, 5, 5], [2, 2, 7, 7]], format=format, canvas_size=canvas_size) + kpoints = tv_tensors.KeyPoints([[0, 0], [2, 2]], canvas_size=canvas_size) tv_tensors.set_return_type(return_type) bbox = bbox.clone() + kpoints = kpoints.clone() if return_type == "TVTensor": + assert kpoints.canvas_size == canvas_size assert bbox.format, bbox.canvas_size == (format, canvas_size) bbox = bbox.to(torch.float64) + kpoints = kpoints.to(torch.float64) if return_type == "TVTensor": + assert kpoints.canvas_size == canvas_size assert bbox.format, bbox.canvas_size == (format, canvas_size) bbox = bbox.detach() + kpoints = kpoints.detach() if return_type == "TVTensor": + assert kpoints.canvas_size == canvas_size assert bbox.format, bbox.canvas_size == (format, canvas_size) assert not bbox.requires_grad + assert not kpoints.requires_grad bbox.requires_grad_(True) + kpoints.requires_grad_(True) if return_type == "TVTensor": + assert kpoints.canvas_size == canvas_size assert bbox.format, bbox.canvas_size == (format, canvas_size) assert bbox.requires_grad + assert kpoints.requires_grad tv_tensors.set_return_type("tensor") -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_other_op_no_wrapping(make_input, return_type): dp = make_input() @@ -182,7 +221,9 @@ def test_other_op_no_wrapping(make_input, return_type): assert type(output) is (type(dp) if return_type == "TVTensor" else torch.Tensor) -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, 
make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize( "op", [ @@ -199,7 +240,9 @@ def test_no_tensor_output_op_no_wrapping(make_input, op): assert type(output) is not type(dp) -@pytest.mark.parametrize("make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video]) +@pytest.mark.parametrize("make_input", [ + make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints +]) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_inplace_op_no_wrapping(make_input, return_type): dp = make_input() From b35cba60ae2190c78bd890a746f184fe64f15158 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Fri, 13 Dec 2024 15:52:08 +0100 Subject: [PATCH 05/27] Applied ufmt formatting --- torchvision/transforms/v2/_misc.py | 10 +- .../transforms/v2/functional/__init__.py | 22 ++--- .../transforms/v2/functional/_geometry.py | 91 +++++++++++-------- torchvision/transforms/v2/functional/_meta.py | 15 +-- torchvision/transforms/v2/functional/_misc.py | 9 +- torchvision/tv_tensors/__init__.py | 16 +++- torchvision/tv_tensors/_keypoints.py | 31 +++++-- 7 files changed, 113 insertions(+), 81 deletions(-) diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index ccb5968cd59..f0b83c14bd8 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -9,7 +9,15 @@ from torchvision import transforms as _transforms, tv_tensors from torchvision.transforms.v2 import functional as F, Transform -from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_all_keypoints, get_bounding_boxes, has_any, is_pure_tensor +from ._utils import ( + _parse_labels_getter, + _setup_number_or_seq, + _setup_size, + get_all_keypoints, + get_bounding_boxes, + has_any, + is_pure_tensor, +) # TODO: do we want/need to expose this? 
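A usage sketch (editor's illustration, not part of the patch) for the sanitization helper added in patch 03; note that, unlike bounding boxes, points lying exactly on the canvas edge are dropped:

    import torch
    from torchvision import tv_tensors
    from torchvision.transforms.v2 import functional as F

    kp = tv_tensors.KeyPoints([[0, 0], [3, 4], [50, 2]], canvas_size=(10, 10))
    kept, mask = F.sanitize_keypoints(kp)
    # kept contains only [3, 4]; mask can be used to subset associated labels
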
diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index cbc6e02b2fb..ec649759a72 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -68,27 +68,27 @@ ) from ._geometry import ( affine, - affine_keypoints, affine_bounding_boxes, affine_image, + affine_keypoints, affine_mask, affine_video, center_crop, - center_crop_keypoints, center_crop_bounding_boxes, center_crop_image, + center_crop_keypoints, center_crop_mask, center_crop_video, crop, - crop_keypoints, crop_bounding_boxes, crop_image, + crop_keypoints, crop_mask, crop_video, elastic, - elastic_keypoints, elastic_bounding_boxes, elastic_image, + elastic_keypoints, elastic_mask, elastic_transform, elastic_video, @@ -97,39 +97,39 @@ five_crop_video, hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file horizontal_flip, - horizontal_flip_keypoints, horizontal_flip_bounding_boxes, horizontal_flip_image, + horizontal_flip_keypoints, horizontal_flip_mask, horizontal_flip_video, pad, - pad_keypoints, pad_bounding_boxes, pad_image, + pad_keypoints, pad_mask, pad_video, - perspective, perspectice_keypoints, + perspective, perspective_bounding_boxes, perspective_image, perspective_mask, perspective_video, resize, - resize_keypoints, resize_bounding_boxes, resize_image, + resize_keypoints, resize_mask, resize_video, resized_crop, - resized_crop_keypoints, resized_crop_bounding_boxes, resized_crop_image, + resized_crop_keypoints, resized_crop_mask, resized_crop_video, rotate, - rotate_keypoints, rotate_bounding_boxes, rotate_image, + rotate_keypoints, rotate_mask, rotate_video, ten_crop, @@ -138,9 +138,9 @@ vertical_flip, vertical_flip_bounding_boxes, vertical_flip_image, + vertical_flip_keypoints, vertical_flip_mask, vertical_flip_video, - vertical_flip_keypoints, vflip, ) from ._misc import ( diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index a80b246630a..792965433f1 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -344,7 +344,8 @@ def _resize_mask_dispatch( def resize_keypoints( - kp: torch.Tensor, size: Optional[List[int]], + kp: torch.Tensor, + size: Optional[List[int]], canvas_size: Tuple[int, int], interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, @@ -363,14 +364,19 @@ def resize_keypoints( @_register_kernel_internal(resize, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _resize_keypoints_dispatch( - kp: tv_tensors.KeyPoints, size: Optional[List[int]], + kp: tv_tensors.KeyPoints, + size: Optional[List[int]], interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, antialias: Optional[bool] = True, ) -> tv_tensors.KeyPoints: out, canvas_size = resize_keypoints( - kp.as_subclass(torch.Tensor), size, canvas_size=kp.canvas_size, interpolation=interpolation, - max_size=max_size, antialias=antialias + kp.as_subclass(torch.Tensor), + size, + canvas_size=kp.canvas_size, + interpolation=interpolation, + max_size=max_size, + antialias=antialias, ) return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) @@ -856,8 +862,12 @@ def affine_keypoints( return _affine_keypoints_with_expand( keypoints=keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, - angle=angle, translate=translate, scale=scale, shear=shear, - center=center, 
expand=False + angle=angle, + translate=translate, + scale=scale, + shear=shear, + center=center, + expand=False, ) @@ -1167,9 +1177,14 @@ def rotate_keypoints( fill: _FillTypeJIT = None, ) -> Tuple[torch.Tensor, Tuple[int, int]]: return _affine_keypoints_with_expand( - keypoints=keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, - angle=-angle, translate=[0.0, 0.0], scale=1.0, - shear=[0.0, 0.0], center=center, expand=expand, + keypoints=keypoints.as_subclass(torch.Tensor), + canvas_size=keypoints.canvas_size, + angle=-angle, + translate=[0.0, 0.0], + scale=1.0, + shear=[0.0, 0.0], + center=center, + expand=expand, ) @@ -1445,10 +1460,7 @@ def pad_mask( def pad_keypoints( - keypoints: torch.Tensor, - canvas_size: Tuple[int, int], - padding: List[int], - padding_mode: str = "constant" + keypoints: torch.Tensor, canvas_size: Tuple[int, int], padding: List[int], padding_mode: str = "constant" ): SUPPORTED_MODES = ["constant"] if padding_mode not in SUPPORTED_MODES: @@ -1468,8 +1480,10 @@ def _pad( keypoints: tv_tensors.KeyPoints, padding: List[int], padding_mode: str = "constant", **kwargs ) -> tv_tensors.KeyPoints: output, canvas_size = pad_keypoints( - keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, - padding=padding, padding_mode=padding_mode + keypoints.as_subclass(torch.Tensor), + canvas_size=keypoints.canvas_size, + padding=padding, + padding_mode=padding_mode, ) return tv_tensors.wrap(output, like=keypoints, canvas_size=canvas_size) @@ -1575,7 +1589,9 @@ def crop_keypoints( @_register_kernel_internal(crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int) -> tv_tensors.KeyPoints: +def crop_keypoints_dispatch( + inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int +) -> tv_tensors.KeyPoints: out, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) @@ -1792,8 +1808,10 @@ def _perspective_keypoints_dispatch( ) -> tv_tensors.BoundingBoxes: output = perspectice_keypoints( inpt.as_subclass(torch.Tensor), - canvas_size=inpt.canvas_size, startpoints=startpoints, - endpoints=endpoints, coefficients=coefficients, + canvas_size=inpt.canvas_size, + startpoints=startpoints, + endpoints=endpoints, + coefficients=coefficients, ) return tv_tensors.wrap(output, like=inpt) @@ -2060,11 +2078,7 @@ def _create_identity_grid(size: Tuple[int, int], device: torch.device, dtype: to return base_grid -def elastic_keypoints( - kp: torch.Tensor, - canvas_size: Tuple[int, int], - displacement: torch.Tensor -) -> torch.Tensor: +def elastic_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int], displacement: torch.Tensor) -> torch.Tensor: expected_shape = (1, canvas_size[0], canvas_size[1], 2) if not isinstance(displacement, torch.Tensor): raise TypeError("Argument displacement should be a Tensor") @@ -2093,12 +2107,8 @@ def elastic_keypoints( @_register_kernel_internal(elastic, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _elastic_keypoints_dispatch( - inpt: tv_tensors.BoundingBoxes, displacement: torch.Tensor, **kwargs -): - output = elastic_keypoints( - inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, displacement=displacement - ) +def _elastic_keypoints_dispatch(inpt: tv_tensors.BoundingBoxes, displacement: torch.Tensor, **kwargs): + output = elastic_keypoints(inpt.as_subclass(torch.Tensor), 
canvas_size=inpt.canvas_size, displacement=displacement) return tv_tensors.wrap(output, like=inpt) @@ -2282,20 +2292,14 @@ def _center_crop_image_pil(image: PIL.Image.Image, output_size: List[int]) -> PI return _crop_image_pil(image, crop_top, crop_left, crop_height, crop_width) -def center_crop_keypoints( - inpt: torch.Tensor, canvas_size: Tuple[int, int], output_size: List[int] -): +def center_crop_keypoints(inpt: torch.Tensor, canvas_size: Tuple[int, int], output_size: List[int]): crop_height, crop_width = _center_crop_parse_output_size(output_size) crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, *canvas_size) - return crop_keypoints( - inpt, top=crop_top, left=crop_left, height=crop_height, width=crop_width - ) + return crop_keypoints(inpt, top=crop_top, left=crop_left, height=crop_height, width=crop_width) @_register_kernel_internal(center_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _center_crop_keypoints_dispatch( - inpt: tv_tensors.KeyPoints, output_size: List[int] -) -> tv_tensors.KeyPoints: +def _center_crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, output_size: List[int]) -> tv_tensors.KeyPoints: output, canvas_size = center_crop_keypoints( inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, output_size=output_size ) @@ -2438,14 +2442,21 @@ def _resized_crop_image_pil_dispatch( def resized_crop_keypoints( - kp: torch.Tensor, top: int, left: int, height: int, width: int, size: List[int], + kp: torch.Tensor, + top: int, + left: int, + height: int, + width: int, + size: List[int], ) -> Tuple[torch.Tensor, Tuple[int, int]]: kp, canvas_size = crop_keypoints(kp, top, left, height, width) return resize_keypoints(kp, size=size, canvas_size=canvas_size) @_register_kernel_internal(resized_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _resized_crop_dispatch(inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs): +def _resized_crop_dispatch( + inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs +): out, canvas_size = resized_crop_bounding_boxes( inpt.as_subclass(torch.Tensor), format=inpt.format, top=top, left=left, height=height, width=width, size=size ) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 022c1cf7f25..2a29d87d6d7 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -176,20 +176,16 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: return xyxy -def _xyxy_to_points( - bounding_boxes: torch.Tensor -) -> torch.Tensor: +def _xyxy_to_points(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2) -def convert_box_to_points( - bounding_boxes: tv_tensors.BoundingBoxes -) -> tv_tensors.KeyPoints: +def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), old_format=bounding_boxes.format, new_format=BoundingBoxFormat.XYXY, - inplace=False + inplace=False, ) return tv_tensors.KeyPoints(_xyxy_to_points(bbox), canvas_size=bounding_boxes.canvas_size) @@ -272,10 +268,7 @@ def _clamp_bounding_boxes( return out_boxes.to(in_dtype) -def clamp_keypoints( - inpt: torch.Tensor, - canvas_size: Tuple[int, int] -) -> torch.Tensor: +def clamp_keypoints(inpt: torch.Tensor, canvas_size: Tuple[int, int]) -> 
torch.Tensor: if not torch.jit.is_scripting(): _log_api_usage_once(clamp_bounding_boxes) dtype = inpt.dtype diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index b4559ab95e4..42a85a2d9fe 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -329,8 +329,7 @@ def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: boo def sanitize_keypoints( - keypoints: torch.Tensor, - canvas_size: Optional[Tuple[int, int]] = None + keypoints: torch.Tensor, canvas_size: Optional[Tuple[int, int]] = None ) -> Tuple[torch.Tensor, torch.Tensor]: """Removes degenerate/invalid keypoints and returns the corresponding indexing mask. @@ -369,13 +368,15 @@ def sanitize_keypoints( "Set that to appropriate values or pass keypoints as a tv_tensors.KeyPoints object." ) valid = _get_sanitize_keypoints_mask( - keypoints, canvas_size=canvas_size, + keypoints, + canvas_size=canvas_size, ) return keypoints[valid], valid if not isinstance(keypoints, tv_tensors.KeyPoints): raise ValueError("keypoints must be a tv_tensors.KeyPoints instance or a pure tensor.") valid = _get_sanitize_keypoints_mask( - keypoints, canvas_size=keypoints.canvas_size, + keypoints, + canvas_size=keypoints.canvas_size, ) return tv_tensors.wrap(keypoints[valid], like=keypoints), valid diff --git a/torchvision/tv_tensors/__init__.py b/torchvision/tv_tensors/__init__.py index 3a56bf88330..e1c6b2202df 100644 --- a/torchvision/tv_tensors/__init__.py +++ b/torchvision/tv_tensors/__init__.py @@ -1,13 +1,14 @@ from typing import TypeVar + import torch from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat from ._image import Image +from ._keypoints import KeyPoints from ._mask import Mask from ._torch_function_helpers import set_return_type from ._tv_tensor import TVTensor from ._video import Video -from ._keypoints import KeyPoints _WRAP_LIKE_T = TypeVar("_WRAP_LIKE_T", bound=TVTensor) @@ -37,12 +38,19 @@ def wrap(wrappee: torch.Tensor, *, like: _WRAP_LIKE_T, **kwargs) -> _WRAP_LIKE_T canvas_size=kwargs.get("canvas_size", like.canvas_size), ) elif isinstance(like, KeyPoints): - return KeyPoints(wrappee, canvas_size=kwargs.get('canvas_size', like.canvas_size)) # type:ignore + return KeyPoints(wrappee, canvas_size=kwargs.get("canvas_size", like.canvas_size)) # type:ignore else: return wrappee.as_subclass(type(like)) __all__: list[str] = [ - "wrap", "KeyPoints", "Video", "TVTensor", "set_return_type", - "Mask", "Image", "BoundingBoxFormat", "BoundingBoxes" + "wrap", + "KeyPoints", + "Video", + "TVTensor", + "set_return_type", + "Mask", + "Image", + "BoundingBoxFormat", + "BoundingBoxes", ] diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index d044bb77824..e4e5e307511 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -1,7 +1,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Mapping, MutableSequence, Optional, Sequence, Tuple, Union + +from typing import Any, Mapping, MutableSequence, Optional, Sequence, Tuple, TYPE_CHECKING, Union + import torch from torch.utils._pytree import tree_flatten + from ._tv_tensor import TVTensor @@ -24,9 +27,13 @@ class KeyPoints(TVTensor): canvas_size: Tuple[int, int] def __new__( - cls, data: Any, *, dtype: Optional[torch.dtype] = None, + cls, + data: Any, + *, + dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, - requires_grad: 
Optional[bool] = None, canvas_size: Tuple[int, int] + requires_grad: Optional[bool] = None, + canvas_size: Tuple[int, int], ): tensor: torch.Tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) if tensor.ndim == 1: @@ -42,15 +49,22 @@ def __new__( # Not read or defined at Runtime (only at linting time). # TODO: Add this to all TVTensors def __init__( - self, data: Any, *, dtype: Optional[torch.dtype] = None, + self, + data: Any, + *, + dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, - requires_grad: Optional[bool] = None, canvas_size: Tuple[int, int] + requires_grad: Optional[bool] = None, + canvas_size: Tuple[int, int], ): ... @classmethod def _wrap_output( - cls, output: Any, args: Sequence[Any] = (), kwargs: Optional[Mapping[str, Any]] = None, + cls, + output: Any, + args: Sequence[Any] = (), + kwargs: Optional[Mapping[str, Any]] = None, ) -> Any: # Mostly copied over from the BoundingBoxes TVTensor, minor improvements. # This copies over the metadata. @@ -65,10 +79,7 @@ def _wrap_output( # NB: output is checked against sequence because it has already been checked against Tensor # Since a Tensor is a sequence of Tensor, had it not been the case, we may have had silent # or complex errors - output = tuple( - KeyPoints(part, canvas_size=canvas_size) - for part in output - ) + output = tuple(KeyPoints(part, canvas_size=canvas_size) for part in output) elif isinstance(output, MutableSequence): for i, part in enumerate(output): output[i] = KeyPoints(part, canvas_size=canvas_size) From a19ec0b9daf2331b8e808c2815a27f5864fa8a4b Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Mon, 16 Dec 2024 13:14:48 +0100 Subject: [PATCH 06/27] Fixed the bugs found while testing --- test/common_utils.py | 7 +- test/test_transforms_v2.py | 16 +++-- test/test_transforms_v2_utils.py | 2 +- torchvision/transforms/v2/_augment.py | 2 +- torchvision/transforms/v2/_utils.py | 1 + .../transforms/v2/functional/_geometry.py | 65 ++++++++++++++----- torchvision/transforms/v2/functional/_meta.py | 5 ++ 7 files changed, 72 insertions(+), 26 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index 0fafdce5d9e..bfc2a75ecb7 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -403,7 +403,9 @@ def make_image_pil(*args, **kwargs): return to_pil_image(make_image(*args, **kwargs)) -def make_keypoints(canvas_size: Tuple[int, int] = DEFAULT_SIZE, num_points: int | Sequence[int] = 4, dtype=None, device='cpu'): +def make_keypoints( + canvas_size: Tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device='cpu' +) -> tv_tensors.KeyPoints: """Make the KeyPoints for testing purposes""" if isinstance(num_points, int): num_points = [num_points] @@ -411,7 +413,8 @@ def make_keypoints(canvas_size: Tuple[int, int] = DEFAULT_SIZE, num_points: int y = torch.randint(0, canvas_size[0] - 1, half_point, dtype=dtype, device=device) x = torch.randint(0, canvas_size[1] - 1, half_point, dtype=dtype, device=device) points = torch.cat((x, y), dim=-1) - return tv_tensors.KeyPoints(points, canvas_size=canvas_size) + keypoints = tv_tensors.KeyPoints(points, canvas_size=canvas_size) + return keypoints def make_bounding_boxes( diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 35a0befb896..229ef58b0d8 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -329,7 +329,7 @@ def _make_transform_sample(transform, *, image_or_video, adapter): canvas_size=size, device=device, 
), - keypoints=make_keypoints(), keypoints_degenerate=tv_tensors.KeyPoints( + keypoints=make_keypoints(canvas_size=size), keypoints_degenerate=tv_tensors.KeyPoints( [ [0, 1], # left edge [1, 0], # top edge @@ -2351,8 +2351,9 @@ def test_error(self, T): F.to_pil_image(imgs[0]), tv_tensors.Mask(torch.rand(12, 12)), tv_tensors.BoundingBoxes(torch.rand(2, 4), format="XYXY", canvas_size=12), - tv_tensors.KeyPoints(torch.rand(4, 2), canvas_size=(12, 12)) + tv_tensors.KeyPoints(torch.rand(2, 2), canvas_size=(12, 12)) ): + print(type(input_with_bad_type), cutmix_mixup) with pytest.raises(ValueError, match="does not support PIL images, "): cutmix_mixup(input_with_bad_type) @@ -2760,8 +2761,9 @@ def test_functional_signature(self, kernel, input_type): check_functional_kernel_signature_match(F.elastic, kernel=kernel, input_type=input_type) @pytest.mark.parametrize( - "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + "make_input", [ + make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints + ], ) def test_displacement_error(self, make_input): input = make_input() @@ -2773,8 +2775,10 @@ def test_displacement_error(self, make_input): F.elastic(input, displacement=torch.rand(F.get_size(input))) @pytest.mark.parametrize( - "make_input", - [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], + "make_input", [ + make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, + make_keypoints + ], ) # ElasticTransform needs larger images to avoid the needed internal padding being larger than the actual image @pytest.mark.parametrize("size", [(163, 163), (72, 333), (313, 95)]) diff --git a/test/test_transforms_v2_utils.py b/test/test_transforms_v2_utils.py index cda255d0173..813a3cd93e6 100644 --- a/test/test_transforms_v2_utils.py +++ b/test/test_transforms_v2_utils.py @@ -26,7 +26,7 @@ ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.BoundingBoxes), True), ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.Mask), True), ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), - ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.KeyPoints), True), + ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.KeyPoints,), True), ((MASK,), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.KeyPoints), False), ((BOUNDING_BOX,), (tv_tensors.Image, tv_tensors.Mask, tv_tensors.KeyPoints), False), ((IMAGE,), (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), False), diff --git a/torchvision/transforms/v2/_augment.py b/torchvision/transforms/v2/_augment.py index 93d4ba45d65..4237eaba425 100644 --- a/torchvision/transforms/v2/_augment.py +++ b/torchvision/transforms/v2/_augment.py @@ -156,7 +156,7 @@ def forward(self, *inputs): flat_inputs, spec = tree_flatten(inputs) needs_transform_list = self._needs_transform_list(flat_inputs) - if has_any(flat_inputs, PIL.Image.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask): + if has_any(flat_inputs, PIL.Image.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints): raise ValueError(f"{type(self).__name__}() does not support PIL images, bounding boxes and masks.") labels = self._labels_getter(inputs) diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index 4e6e76418ec..91258829f3b 100644 --- 
a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -205,6 +205,7 @@ def query_size(flat_inputs: List[Any]) -> Tuple[int, int]: tv_tensors.Video, tv_tensors.Mask, tv_tensors.BoundingBoxes, + tv_tensors.KeyPoints, ), ) } diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 792965433f1..a27a6b158ae 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -65,9 +65,15 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: return horizontal_flip_image(mask) +def horizontal_flip_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int]): + kp[0] = kp[0].sub_(canvas_size[1]).neg_() + return kp + + @_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def horizontal_flip_keypoints(kp: tv_tensors.KeyPoints): - return kp.sub_(kp.canvas_size[1]).neg_() +def _horizontal_flip_keypoints_dispatch(kp: tv_tensors.KeyPoints): + out = horizontal_flip_keypoints(kp.as_subclass(torch.Tensor), canvas_size=kp.canvas_size) + return tv_tensors.wrap(out, like=kp) def horizontal_flip_bounding_boxes( @@ -127,9 +133,10 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: return vertical_flip_image(mask) -@_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +@_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def vertical_flip_keypoints(kp: tv_tensors.KeyPoints): - return kp.sub_(kp.canvas_size[1]).neg_() + kp[1] = kp[1].sub_(kp.canvas_size[0]).neg_() + return kp def vertical_flip_bounding_boxes( @@ -850,9 +857,9 @@ def _affine_keypoints_with_expand( return keypoints, canvas_size -@_register_kernel_internal(affine, tv_tensors.KeyPoints) def affine_keypoints( - keypoints: tv_tensors.KeyPoints, + keypoints: torch.Tensor, + canvas_size: Tuple[int, int], angle: Union[int, float], translate: List[float], scale: float, @@ -860,8 +867,8 @@ def affine_keypoints( center: Optional[List[float]] = None, ): return _affine_keypoints_with_expand( - keypoints=keypoints.as_subclass(torch.Tensor), - canvas_size=keypoints.canvas_size, + keypoints=keypoints, + canvas_size=canvas_size, angle=angle, translate=translate, scale=scale, @@ -871,6 +878,28 @@ def affine_keypoints( ) +@_register_kernel_internal(affine, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _affine_keypoints_dispatch( + inpt: tv_tensors.BoundingBoxes, + angle: Union[int, float], + translate: List[float], + scale: float, + shear: List[float], + center: Optional[List[float]] = None, + **kwargs, +) -> tv_tensors.BoundingBoxes: + output, canvas_size = affine_keypoints( + inpt.as_subclass(torch.Tensor), + canvas_size=inpt.canvas_size, + angle=angle, + translate=translate, + scale=scale, + shear=shear, + center=center, + ) + return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size) + + def _affine_bounding_boxes_with_expand( bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, @@ -1188,7 +1217,7 @@ def rotate_keypoints( ) -@_register_kernel_internal(rotate, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) +@_register_kernel_internal(rotate, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _rotate_keypoints_dispatch( kp: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[List[float]] = None, **kwargs ) -> tv_tensors.KeyPoints: @@ -1475,8 +1504,8 @@ def pad_keypoints( return clamp_keypoints(keypoints + pad, canvas_size), canvas_size 
-@_register_kernel_internal(pad, tv_tensors.KeyPoints, tv_tensors_wrapper=False) -def _pad( +@_register_kernel_internal(pad, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _pad_keypoints_dispatch( keypoints: tv_tensors.KeyPoints, padding: List[int], padding_mode: str = "constant", **kwargs ) -> tv_tensors.KeyPoints: output, canvas_size = pad_keypoints( @@ -1589,7 +1618,7 @@ def crop_keypoints( @_register_kernel_internal(crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def crop_keypoints_dispatch( +def _crop_keypoints_dispatch( inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int ) -> tv_tensors.KeyPoints: out, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) @@ -1786,11 +1815,12 @@ def perspectice_keypoints( denom = perspective_coeffs[0] * perspective_coeffs[4] - perspective_coeffs[1] * perspective_coeffs[3] if denom == 0: raise RuntimeError( - f"Provided perspective_coeffs {perspective_coeffs} can not be inverted to transform bounding boxes. " + f"Provided perspective_coeffs {perspective_coeffs} can not be inverted to transform keypoints. " f"Denominator is zero, denom={denom}" ) theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) + kp = torch.cat([kp, torch.ones(kp.shape[0], 1, device=kp.device)], dim=-1) numer_points = torch.matmul(kp, theta1.T) denom_points = torch.matmul(kp, theta2.T) @@ -1892,7 +1922,7 @@ def perspective_bounding_boxes( def _compute_perspective_thetas( - perspective_coeffs: List[float], dtype: torch.dtype, device: torch.device, denom: float, / + perspective_coeffs: List[float], dtype: torch.dtype, device: torch.device, denom: float, ) -> Tuple[torch.Tensor, torch.Tensor]: inv_coeffs = [ (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, @@ -2099,6 +2129,9 @@ def elastic_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int], displaceme index_xy = kp.to(dtype=torch.long) index_x, index_y = index_xy[:, 0], index_xy[:, 1] + # Unlike bounding boxes, this may not work well. 
+ index_x.clamp_(0, inv_grid.shape[2] - 1) + index_y.clamp_(0, inv_grid.shape[1] - 1) t_size = torch.tensor(canvas_size[::-1], device=displacement.device, dtype=displacement.dtype) transformed_points = inv_grid[0, index_y, index_x, :].add_(1).mul_(0.5 * t_size).sub_(0.5) @@ -2457,8 +2490,8 @@ def resized_crop_keypoints( def _resized_crop_dispatch( inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs ): - out, canvas_size = resized_crop_bounding_boxes( - inpt.as_subclass(torch.Tensor), format=inpt.format, top=top, left=left, height=height, width=width, size=size + out, canvas_size = resized_crop_keypoints( + inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width, size=size ) return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 2a29d87d6d7..a7f528ecc3e 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -121,6 +121,11 @@ def get_size_bounding_boxes(bounding_box: tv_tensors.BoundingBoxes) -> List[int] return list(bounding_box.canvas_size) +@_register_kernel_internal(get_size, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def get_size_keypoints(keypoints: tv_tensors.KeyPoints) -> List[int]: + return list(keypoints.canvas_size) + + def get_num_frames(inpt: torch.Tensor) -> int: if torch.jit.is_scripting(): return get_num_frames_video(inpt) From 5f4b18825fcfaa0499023d51f15be8dd6c6823c4 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Tue, 17 Dec 2024 11:48:48 +0100 Subject: [PATCH 07/27] Improved documentation to take KeyPoints into account --- docs/source/tv_tensors.rst | 1 + gallery/transforms/plot_tv_tensors.py | 11 ++++++++++- torchvision/transforms/v2/functional/_meta.py | 10 +++++++++- torchvision/tv_tensors/_keypoints.py | 5 ++++- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/docs/source/tv_tensors.rst b/docs/source/tv_tensors.rst index cb8a3c45fa9..d292012fdf8 100644 --- a/docs/source/tv_tensors.rst +++ b/docs/source/tv_tensors.rst @@ -21,6 +21,7 @@ info. Image Video + KeyPoints BoundingBoxFormat BoundingBoxes Mask diff --git a/gallery/transforms/plot_tv_tensors.py b/gallery/transforms/plot_tv_tensors.py index 5bce37aa374..2c6ebbf9031 100644 --- a/gallery/transforms/plot_tv_tensors.py +++ b/gallery/transforms/plot_tv_tensors.py @@ -46,11 +46,12 @@ # Under the hood, they are needed in :mod:`torchvision.transforms.v2` to correctly dispatch to the appropriate function # for the input data. # -# :mod:`torchvision.tv_tensors` supports four types of TVTensors: +# :mod:`torchvision.tv_tensors` supports five types of TVTensors: # # * :class:`~torchvision.tv_tensors.Image` # * :class:`~torchvision.tv_tensors.Video` # * :class:`~torchvision.tv_tensors.BoundingBoxes` +# * :class:`~torchvision.tv_tensors.KeyPoints` # * :class:`~torchvision.tv_tensors.Mask` # # What can I do with a TVTensor? @@ -96,6 +97,7 @@ # :class:`~torchvision.tv_tensors.BoundingBoxes` requires the coordinate format as well as the size of the # corresponding image (``canvas_size``) alongside the actual values. These # metadata are required to properly transform the bounding boxes. +# In a similar fashion, :class:`~torchvision.tv_tensors.KeyPoints` also require the ``canvas_size`` metadata to be added. 
bboxes = tv_tensors.BoundingBoxes( [[17, 16, 344, 495], [0, 10, 0, 10]], @@ -104,6 +106,13 @@ ) print(bboxes) + +keypoints = tv_tensors.KeyPoints( + [[17, 16], [344, 495], [0, 10], [0, 10]], + canvas_size=image.shape[-2:] +) +print(keypoints) + # %% # Using ``tv_tensors.wrap()`` # ^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index a7f528ecc3e..1a9692caa22 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -182,10 +182,18 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: def _xyxy_to_points(bounding_boxes: torch.Tensor) -> torch.Tensor: - return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2) + return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]] def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: + """Converts a set of bounding boxes to its edge points. + + Args: + bounding_boxes (tv_tensors.BoundingBoxes): A set of ``N`` bounding boxes (of shape ``[N, 4]``) + + Returns: + tv_tensors.KeyPoints: The edges, of shape ``[N, 4, 2]`` + """ bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), old_format=bounding_boxes.format, diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index e4e5e307511..79997d004fd 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -13,6 +13,9 @@ class KeyPoints(TVTensor): Each point is represented by its XY coordinates. + KeyPoints can be converted from :class:`torchvision.tv_tensors.BoundingBoxes` + by :func:`torchvision.transforms.v2.functional.convert_box_to_points`. + Args: data: Any data that can be turned into a tensor with :func:`torch.as_tensor`. canvas_size (two-tuple of ints): Height and width of the corresponding image or video. @@ -47,7 +50,7 @@ def __new__( if TYPE_CHECKING: # EVIL: Just so that MYPY+PYLANCE+others stop shouting that everything is wrong when initializeing the TVTensor # Not read or defined at Runtime (only at linting time). 
- # TODO: Add this to all TVTensors + # TODO: BOUNDING BOXES needs something similar def __init__( self, data: Any, From cabce1ca1514c1427999c74a657429713c6d586c Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Tue, 17 Dec 2024 11:51:54 +0100 Subject: [PATCH 08/27] Applied ufmt check --- torchvision/transforms/v2/functional/_geometry.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index a27a6b158ae..135ee3be8bb 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -1922,7 +1922,10 @@ def perspective_bounding_boxes( def _compute_perspective_thetas( - perspective_coeffs: List[float], dtype: torch.dtype, device: torch.device, denom: float, + perspective_coeffs: List[float], + dtype: torch.dtype, + device: torch.device, + denom: float, ) -> Tuple[torch.Tensor, torch.Tensor]: inv_coeffs = [ (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, From d1b27ad96a1e05f99593dd6320abbb52ee426602 Mon Sep 17 00:00:00 2001 From: SCHOEPP Alexandre Date: Tue, 17 Dec 2024 14:52:16 +0100 Subject: [PATCH 09/27] Fixed the hflip not being along the right coordinate --- torchvision/transforms/v2/functional/_geometry.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 135ee3be8bb..7e2c8785692 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -66,7 +66,7 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: def horizontal_flip_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int]): - kp[0] = kp[0].sub_(canvas_size[1]).neg_() + kp[..., 0] = kp[..., 0].sub_(canvas_size[1]).neg_() return kp @@ -135,7 +135,7 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: @_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def vertical_flip_keypoints(kp: tv_tensors.KeyPoints): - kp[1] = kp[1].sub_(kp.canvas_size[0]).neg_() + kp[..., 1] = kp[..., 1].sub_(kp.canvas_size[0]).neg_() return kp @@ -363,8 +363,8 @@ def resize_keypoints( w_ratio = new_width / old_width h_ratio = new_height / old_height - ratios = torch.tensor([w_ratio, h_ratio]) - kp.data = kp.data.mul(ratios).to(kp.dtype) + ratios = torch.tensor([w_ratio, h_ratio], device=kp.device) + kp = kp.mul(ratios).to(kp.dtype) return kp, (new_height, new_width) @@ -880,14 +880,14 @@ def affine_keypoints( @_register_kernel_internal(affine, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _affine_keypoints_dispatch( - inpt: tv_tensors.BoundingBoxes, + inpt: tv_tensors.KeyPoints, angle: Union[int, float], translate: List[float], scale: float, shear: List[float], center: Optional[List[float]] = None, **kwargs, -) -> tv_tensors.BoundingBoxes: +) -> tv_tensors.KeyPoints: output, canvas_size = affine_keypoints( inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, @@ -2490,7 +2490,7 @@ def resized_crop_keypoints( @_register_kernel_internal(resized_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _resized_crop_dispatch( +def _resized_crop_keypoints_dispatch( inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs ): out, canvas_size = resized_crop_keypoints( From 5a8c5b422b26374203369bf39452f32f5817b834 Mon Sep 17 00:00:00 2001 
From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 19:36:58 +0200 Subject: [PATCH 10/27] Fixed order of arguments --- torchvision/tv_tensors/_keypoints.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 79997d004fd..5e184053c0a 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -33,10 +33,10 @@ def __new__( cls, data: Any, *, + canvas_size: Tuple[int, int], dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, requires_grad: Optional[bool] = None, - canvas_size: Tuple[int, int], ): tensor: torch.Tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad) if tensor.ndim == 1: @@ -55,10 +55,10 @@ def __init__( self, data: Any, *, + canvas_size: Tuple[int, int], dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, requires_grad: Optional[bool] = None, - canvas_size: Tuple[int, int], ): ... From dea31e266a8ce114d6815b611c555cfae59789ed Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 19:41:18 +0200 Subject: [PATCH 11/27] Reworked logic of the conditions to better handle mutable/non mutable sequences in wrap_output --- torchvision/tv_tensors/_keypoints.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 5e184053c0a..e00c58d5134 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -78,14 +78,16 @@ def _wrap_output( if isinstance(output, torch.Tensor) and not isinstance(output, KeyPoints): output = KeyPoints(output, canvas_size=canvas_size) - elif isinstance(output, tuple): - # NB: output is checked against sequence because it has already been checked against Tensor - # Since a Tensor is a sequence of Tensor, had it not been the case, we may have had silent - # or complex errors - output = tuple(KeyPoints(part, canvas_size=canvas_size) for part in output) elif isinstance(output, MutableSequence): + # For lists and list-like object we don't try to create a new object, we just set the values in the list + # This allows us to conserve the type of complex list-like object that may not follow the initialization API of lists for i, part in enumerate(output): output[i] = KeyPoints(part, canvas_size=canvas_size) + elif isinstance(output, Sequence): + # Non-mutable sequences handled here (like tuples) + # Every sequence that is not a mutable sequence is a non-mutable sequence + # We have to use a tuple here, since we know its initialization api, unlike for `output` + output = tuple(KeyPoints(part, canvas_size=canvas_size) for part in output) return output def __repr__(self, *, tensor_contents: Any = None) -> str: From 71e20a540ddac6f6161006b98d356fedf72e758b Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 19:43:52 +0200 Subject: [PATCH 12/27] Renamed out variable to be more similar with _resized_crop_bounding_boxes_dispatch --- torchvision/transforms/v2/functional/_geometry.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 7e2c8785692..488e9c70473 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -2493,10 +2493,10 @@ def resized_crop_keypoints( def 
_resized_crop_keypoints_dispatch( inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs ): - out, canvas_size = resized_crop_keypoints( + output, canvas_size = resized_crop_keypoints( inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width, size=size ) - return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) + return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size) def resized_crop_bounding_boxes( From 2f77527a3c87d28f1dea689db202cb4070445ea4 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 19:45:16 +0200 Subject: [PATCH 13/27] renamed _xyxy_to_points to _xyxy_to_keypoints for consistency --- torchvision/transforms/v2/functional/_meta.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index a9f6ed1b782..d45d2a534f0 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -181,8 +181,7 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor: return xyxy - -def _xyxy_to_points(bounding_boxes: torch.Tensor) -> torch.Tensor: +def _xyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]] @@ -201,7 +200,7 @@ def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensor new_format=BoundingBoxFormat.XYXY, inplace=False, ) - return tv_tensors.KeyPoints(_xyxy_to_points(bbox), canvas_size=bounding_boxes.canvas_size) + return tv_tensors.KeyPoints(_xyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size) def _cxcywhr_to_xywhr(cxcywhr: torch.Tensor, inplace: bool) -> torch.Tensor: From 517a6dedb5730177d582ee815265d9012bb71b14 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 20:12:07 +0200 Subject: [PATCH 14/27] clarified _xyxy_to_points and changed the name of its caller for the sake of consistency with the other bounding_boxes converters in _meta.py --- test/test_transforms_v2.py | 20 +++++++++++++++++++ .../transforms/v2/functional/__init__.py | 2 +- torchvision/transforms/v2/functional/_meta.py | 3 ++- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index aaced22dc4f..e77489c6206 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -6260,3 +6260,23 @@ def test_different_sizes(self, make_input1, make_input2, query): def test_no_valid_input(self, query): with pytest.raises(TypeError, match="No image"): query(["blah"]) + + @pytest.mark.parametrize( + 'boxes', [ + tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4)) + ] + ) + def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes): + # TODO: this test can't handle rotated boxes yet + kp = F.convert_bounding_boxes_to_points(boxes) + assert kp.shape == boxes.shape + (2, ) + assert kp.dtype == boxes.dtype + # kp is a list of A, B, C, D polygons. 
+ # If we use A | C, we should get back the XYXY format of bounding box + reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) + reconverted_bbox = F.convert_bounding_box_format( + tv_tensors.BoundingBoxes( + reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size + ), new_format=boxes.format + ) + assert (reconverted_bbox == boxes).all(), f"Invalid reconversion : {reconverted_bbox}" diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index ec649759a72..e32ef73f7c1 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -5,7 +5,7 @@ from ._meta import ( clamp_bounding_boxes, convert_bounding_box_format, - convert_box_to_points, + convert_bounding_boxes_to_points, get_dimensions_image, get_dimensions_video, get_dimensions, diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index d45d2a534f0..e24e3817be4 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -185,7 +185,7 @@ def _xyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]] -def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: +def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: """Converts a set of bounding boxes to its edge points. Args: @@ -194,6 +194,7 @@ def convert_box_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensor Returns: tv_tensors.KeyPoints: The edges, of shape ``[N, 4, 2]`` """ + # TODO: support rotated BBOX bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), old_format=bounding_boxes.format, From 63ed4a5a8b041b534c54481fd0cc7b3407bde802 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 20:13:44 +0200 Subject: [PATCH 15/27] Renamed half_point to more explicit single_coord_shape --- test/common_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index eca7997c102..d01d365a21a 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -409,9 +409,9 @@ def make_keypoints( """Make the KeyPoints for testing purposes""" if isinstance(num_points, int): num_points = [num_points] - half_point: Tuple[int, ...] = tuple(num_points) + (1,) - y = torch.randint(0, canvas_size[0] - 1, half_point, dtype=dtype, device=device) - x = torch.randint(0, canvas_size[1] - 1, half_point, dtype=dtype, device=device) + single_coord_shape: Tuple[int, ...] = tuple(num_points) + (1,) + y = torch.randint(0, canvas_size[0] - 1, single_coord_shape, dtype=dtype, device=device) + x = torch.randint(0, canvas_size[1] - 1, single_coord_shape, dtype=dtype, device=device) points = torch.cat((x, y), dim=-1) keypoints = tv_tensors.KeyPoints(points, canvas_size=canvas_size) return keypoints From 166c1ecc2fbcf5e720486ff52de3d3fb5e922f34 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 20:48:26 +0200 Subject: [PATCH 16/27] Integrated KeyPoints better in the transforms. 
It now warns alongside BoundingBoxes and Masks in RandomErasing, AutoAugment,
 and FiveCrop; SanitizeBoundingBoxes can now handle them, and its documentation
 now states that the underlying logic relies on masks and keypoints having a
 given shape

---
 test/test_transforms_v2.py                 |  2 +-
 torchvision/transforms/v2/_augment.py      |  2 +-
 torchvision/transforms/v2/_auto_augment.py |  3 ++-
 torchvision/transforms/v2/_geometry.py     |  4 +--
 torchvision/transforms/v2/_misc.py         | 31 +++++++++++++---------
 5 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py
index e77489c6206..f501d9b7619 100644
--- a/test/test_transforms_v2.py
+++ b/test/test_transforms_v2.py
@@ -3487,7 +3487,7 @@ def _sample_input_adapter(self, transform, input, device):
         adapted_input = {}
         image_or_video_found = False
         for key, value in input.items():
-            if isinstance(value, (tv_tensors.BoundingBoxes, tv_tensors.Mask)):
+            if isinstance(value, (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, tv_tensors.Mask)):
                 # AA transforms don't support bounding boxes or masks
                 continue
             elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor, PIL.Image.Image)):
diff --git a/torchvision/transforms/v2/_augment.py b/torchvision/transforms/v2/_augment.py
index 4237eaba425..3a4cc8ee29d 100644
--- a/torchvision/transforms/v2/_augment.py
+++ b/torchvision/transforms/v2/_augment.py
@@ -89,7 +89,7 @@ def __init__(
         self._log_ratio = torch.log(torch.tensor(self.ratio))
 
     def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any:
-        if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)):
+        if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, tv_tensors.Mask)):
             warnings.warn(
                 f"{type(self).__name__}() is currently passing through inputs of type "
                 f"tv_tensors.{type(inpt).__name__}. This will likely change in the future."
diff --git a/torchvision/transforms/v2/_auto_augment.py b/torchvision/transforms/v2/_auto_augment.py
index 4dd7ba343aa..1ff32629f38 100644
--- a/torchvision/transforms/v2/_auto_augment.py
+++ b/torchvision/transforms/v2/_auto_augment.py
@@ -46,7 +46,7 @@ def _get_random_item(self, dct: Dict[str, Tuple[Callable, bool]]) -> Tuple[str,
     def _flatten_and_extract_image_or_video(
         self,
         inputs: Any,
-        unsupported_types: Tuple[Type, ...] = (tv_tensors.BoundingBoxes, tv_tensors.Mask),
+        unsupported_types: Tuple[Type, ...] = (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints),
     ) -> Tuple[Tuple[List[Any], TreeSpec, int], ImageOrVideo]:
         flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
         needs_transform_list = self._needs_transform_list(flat_inputs)
@@ -194,6 +194,7 @@ class AutoAugment(_AutoAugmentBase):
         fill (sequence or number, optional): Pixel fill value for the area outside the transformed
             image. If given a number, the value is used for all bands respectively.
""" + _v1_transform_cls = _transforms.AutoAugment _AUGMENTATION_SPACE = { diff --git a/torchvision/transforms/v2/_geometry.py b/torchvision/transforms/v2/_geometry.py index c615515b943..02d8a98a7a9 100644 --- a/torchvision/transforms/v2/_geometry.py +++ b/torchvision/transforms/v2/_geometry.py @@ -356,7 +356,7 @@ def __init__(self, size: Union[int, Sequence[int]]) -> None: self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any: - if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)): + if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, tv_tensors.Mask)): warnings.warn( f"{type(self).__name__}() is currently passing through inputs of type " f"tv_tensors.{type(inpt).__name__}. This will likely change in the future." @@ -401,7 +401,7 @@ def __init__(self, size: Union[int, Sequence[int]], vertical_flip: bool = False) self.vertical_flip = vertical_flip def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any: - if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)): + if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints, tv_tensors.Mask)): warnings.warn( f"{type(self).__name__}() is currently passing through inputs of type " f"tv_tensors.{type(inpt).__name__}. This will likely change in the future." diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index f0b83c14bd8..b389db0c7d4 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -9,15 +9,7 @@ from torchvision import transforms as _transforms, tv_tensors from torchvision.transforms.v2 import functional as F, Transform -from ._utils import ( - _parse_labels_getter, - _setup_number_or_seq, - _setup_size, - get_all_keypoints, - get_bounding_boxes, - has_any, - is_pure_tensor, -) +from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_bounding_boxes, has_any, is_pure_tensor # TODO: do we want/need to expose this? @@ -348,9 +340,9 @@ def transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class SanitizeBoundingBoxes(Transform): - """Remove degenerate/invalid bounding boxes and their corresponding labels and masks. + """Remove degenerate/invalid bounding boxes and their corresponding labels, masks and keypoints. - This transform removes bounding boxes and their associated labels/masks that: + This transform removes bounding boxes and their associated labels, masks and keypoints that: - are below a given ``min_size`` or ``min_area``: by default this also removes degenerate boxes that have e.g. X2 <= X1. - have any coordinate outside of their corresponding image. You may want to @@ -366,6 +358,14 @@ class SanitizeBoundingBoxes(Transform): may modify bounding boxes but once at the end should be enough in most cases. + .. note:: + This transform requires that any :class:`~torchvision.tv_tensor.KeyPoints` or + :class:`~torchvision.tv_tensor.Mask` provided has to match the bounding boxes in shape. + + If the bounding boxes are of shape ``[N, K]``, then the + KeyPoints have to be of shape ``[N, ..., 2]`` or ``[N, 2]`` + and the masks have to be of shape ``[N, ..., H, W]`` or ``[N, H, W]`` + Args: min_size (float, optional): The size below which bounding boxes are removed. Default is 1. min_area (float, optional): The area below which bounding boxes are removed. Default is 1. 
@@ -445,10 +445,15 @@ def forward(self, *inputs: Any) -> Any: return tree_unflatten(flat_outputs, spec) def transform(self, inpt: Any, params: Dict[str, Any]) -> Any: + # For every object in the flattened input of the `forward` method, we apply transform + # The params contain the list of valid indices of the (N, K) bbox set + + # We suppose here that any KeyPoints or Masks TVTensors is of shape (N, ..., 2) and (N, ..., H, W) respectively + # TODO: check this. is_label = params["labels"] is not None and any(inpt is label for label in params["labels"]) - is_bounding_boxes_or_mask = isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)) + is_bbox_mask_or_kpoints = isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints)) - if not (is_label or is_bounding_boxes_or_mask): + if not (is_label or is_bbox_mask_or_kpoints): return inpt output = inpt[params["valid"]] From 1cc3b6fb80a15a12d8b2a7172f2449472cac35c1 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Wed, 30 Apr 2025 21:08:26 +0200 Subject: [PATCH 17/27] Fixed _geometry.py post botched merge request --- .../transforms/v2/functional/_geometry.py | 148 +++++++++--------- 1 file changed, 74 insertions(+), 74 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index c747a9af80c..a74a211b9e7 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -66,15 +66,15 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: return horizontal_flip_image(mask) -def horizontal_flip_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int]): - kp[..., 0] = kp[..., 0].sub_(canvas_size[1]).neg_() - return kp +def horizontal_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]): + keypoints[..., 0] = keypoints[..., 0].sub_(canvas_size[1]).neg_() + return keypoints @_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _horizontal_flip_keypoints_dispatch(kp: tv_tensors.KeyPoints): - out = horizontal_flip_keypoints(kp.as_subclass(torch.Tensor), canvas_size=kp.canvas_size) - return tv_tensors.wrap(out, like=kp) +def _horizontal_flip_keypoints_dispatch(keypoints: tv_tensors.KeyPoints): + out = horizontal_flip_keypoints(keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size) + return tv_tensors.wrap(out, like=keypoints) def horizontal_flip_bounding_boxes( @@ -135,9 +135,9 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: @_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def vertical_flip_keypoints(kp: tv_tensors.KeyPoints): - kp[..., 1] = kp[..., 1].sub_(kp.canvas_size[0]).neg_() - return kp +def vertical_flip_keypoints(keypoints: tv_tensors.KeyPoints): + keypoints[..., 1] = keypoints[..., 1].sub_(keypoints.canvas_size[0]).neg_() + return keypoints def vertical_flip_bounding_boxes( @@ -352,9 +352,9 @@ def _resize_mask_dispatch( def resize_keypoints( - kp: torch.Tensor, - size: Optional[List[int]], - canvas_size: Tuple[int, int], + keypoints: torch.Tensor, + size: Optional[list[int]], + canvas_size: tuple[int, int], interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, antialias: Optional[bool] = True, @@ -364,29 +364,29 @@ def resize_keypoints( w_ratio = new_width / old_width h_ratio = new_height / old_height - ratios = torch.tensor([w_ratio, h_ratio], device=kp.device) - kp = 
kp.mul(ratios).to(kp.dtype) + ratios = torch.tensor([w_ratio, h_ratio], device=keypoints.device) + keypoints = keypoints.mul(ratios).to(keypoints.dtype) - return kp, (new_height, new_width) + return keypoints, (new_height, new_width) @_register_kernel_internal(resize, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _resize_keypoints_dispatch( - kp: tv_tensors.KeyPoints, - size: Optional[List[int]], + keypoints: tv_tensors.KeyPoints, + size: Optional[list[int]], interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, antialias: Optional[bool] = True, ) -> tv_tensors.KeyPoints: out, canvas_size = resize_keypoints( - kp.as_subclass(torch.Tensor), + keypoints.as_subclass(torch.Tensor), size, - canvas_size=kp.canvas_size, + canvas_size=keypoints.canvas_size, interpolation=interpolation, max_size=max_size, antialias=antialias, ) - return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) + return tv_tensors.wrap(out, like=keypoints, canvas_size=canvas_size) def resize_bounding_boxes( @@ -816,14 +816,14 @@ def _affine_image_pil( def _affine_keypoints_with_expand( keypoints: torch.Tensor, - canvas_size: Tuple[int, int], + canvas_size: tuple[int, int], angle: Union[int, float], - translate: List[float], + translate: list[float], scale: float, - shear: List[float], - center: Optional[List[float]] = None, + shear: list[float], + center: Optional[list[float]] = None, expand: bool = False, -) -> Tuple[torch.Tensor, Tuple[int, int]]: +) -> tuple[torch.Tensor, tuple[int, int]]: if keypoints.numel() == 0: return keypoints, canvas_size @@ -860,12 +860,12 @@ def _affine_keypoints_with_expand( def affine_keypoints( keypoints: torch.Tensor, - canvas_size: Tuple[int, int], + canvas_size: tuple[int, int], angle: Union[int, float], - translate: List[float], + translate: list[float], scale: float, - shear: List[float], - center: Optional[List[float]] = None, + shear: list[float], + center: Optional[list[float]] = None, ): return _affine_keypoints_with_expand( keypoints=keypoints, @@ -883,10 +883,10 @@ def affine_keypoints( def _affine_keypoints_dispatch( inpt: tv_tensors.KeyPoints, angle: Union[int, float], - translate: List[float], + translate: list[float], scale: float, - shear: List[float], - center: Optional[List[float]] = None, + shear: list[float], + center: Optional[list[float]] = None, **kwargs, ) -> tv_tensors.KeyPoints: output, canvas_size = affine_keypoints( @@ -1203,9 +1203,9 @@ def rotate_keypoints( angle: float, interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, expand: bool = False, - center: Optional[List[float]] = None, + center: Optional[list[float]] = None, fill: _FillTypeJIT = None, -) -> Tuple[torch.Tensor, Tuple[int, int]]: +) -> tuple[torch.Tensor, tuple[int, int]]: return _affine_keypoints_with_expand( keypoints=keypoints.as_subclass(torch.Tensor), canvas_size=keypoints.canvas_size, @@ -1220,10 +1220,10 @@ def rotate_keypoints( @_register_kernel_internal(rotate, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _rotate_keypoints_dispatch( - kp: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[List[float]] = None, **kwargs + keypoints: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[list[float]] = None, **kwargs ) -> tv_tensors.KeyPoints: - out, canvas_size = rotate_keypoints(kp, angle, center=center, expand=expand, **kwargs) - return tv_tensors.wrap(out, like=kp, canvas_size=canvas_size) + out, canvas_size = rotate_keypoints(keypoints, angle, center=center, 
expand=expand, **kwargs) + return tv_tensors.wrap(out, like=keypoints, canvas_size=canvas_size) def rotate_bounding_boxes( @@ -1490,7 +1490,7 @@ def pad_mask( def pad_keypoints( - keypoints: torch.Tensor, canvas_size: Tuple[int, int], padding: List[int], padding_mode: str = "constant" + keypoints: torch.Tensor, canvas_size: tuple[int, int], padding: list[int], padding_mode: str = "constant" ): SUPPORTED_MODES = ["constant"] if padding_mode not in SUPPORTED_MODES: @@ -1507,7 +1507,7 @@ def pad_keypoints( @_register_kernel_internal(pad, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _pad_keypoints_dispatch( - keypoints: tv_tensors.KeyPoints, padding: List[int], padding_mode: str = "constant", **kwargs + keypoints: tv_tensors.KeyPoints, padding: list[int], padding_mode: str = "constant", **kwargs ) -> tv_tensors.KeyPoints: output, canvas_size = pad_keypoints( keypoints.as_subclass(torch.Tensor), @@ -1605,17 +1605,17 @@ def crop_image(image: torch.Tensor, top: int, left: int, height: int, width: int def crop_keypoints( - kp: torch.Tensor, + keypoints: torch.Tensor, top: int, left: int, height: int, width: int, -) -> Tuple[torch.Tensor, Tuple[int, int]]: +) -> tuple[torch.Tensor, tuple[int, int]]: - kp.sub_(torch.tensor([left, top], dtype=kp.dtype, device=kp.device)) + keypoints.sub_(torch.tensor([left, top], dtype=keypoints.dtype, device=keypoints.device)) canvas_size = (height, width) - return clamp_keypoints(kp, canvas_size=canvas_size), canvas_size + return clamp_keypoints(keypoints, canvas_size=canvas_size), canvas_size @_register_kernel_internal(crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) @@ -1800,16 +1800,16 @@ def _perspective_image_pil( def perspectice_keypoints( - kp: torch.Tensor, - canvas_size: Tuple[int, int], - startpoints: Optional[List[List[int]]], - endpoints: Optional[List[List[int]]], - coefficients: Optional[List[float]] = None, + keypoints: torch.Tensor, + canvas_size: tuple[int, int], + startpoints: Optional[list[list[int]]], + endpoints: Optional[list[list[int]]], + coefficients: Optional[list[float]] = None, ): - if kp.numel() == 0: - return kp - dtype = kp.dtype if torch.is_floating_point(kp) else torch.float32 - device = kp.device + if keypoints.numel() == 0: + return keypoints + dtype = keypoints.dtype if torch.is_floating_point(keypoints) else torch.float32 + device = keypoints.device perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients) @@ -1821,10 +1821,10 @@ def perspectice_keypoints( ) theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) - kp = torch.cat([kp, torch.ones(kp.shape[0], 1, device=kp.device)], dim=-1) + keypoints = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=keypoints.device)], dim=-1) - numer_points = torch.matmul(kp, theta1.T) - denom_points = torch.matmul(kp, theta2.T) + numer_points = torch.matmul(keypoints, theta1.T) + denom_points = torch.matmul(keypoints, theta2.T) transformed_points = numer_points.div_(denom_points) return clamp_keypoints(transformed_points, canvas_size) @@ -1832,9 +1832,9 @@ def perspectice_keypoints( @_register_kernel_internal(perspective, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _perspective_keypoints_dispatch( inpt: tv_tensors.BoundingBoxes, - startpoints: Optional[List[List[int]]], - endpoints: Optional[List[List[int]]], - coefficients: Optional[List[float]] = None, + startpoints: Optional[list[list[int]]], + endpoints: Optional[list[list[int]]], + coefficients: Optional[list[float]] = None, **kwargs, ) -> 
tv_tensors.BoundingBoxes: output = perspectice_keypoints( @@ -1923,11 +1923,11 @@ def perspective_bounding_boxes( def _compute_perspective_thetas( - perspective_coeffs: List[float], + perspective_coeffs: list[float], dtype: torch.dtype, device: torch.device, denom: float, -) -> Tuple[torch.Tensor, torch.Tensor]: +) -> tuple[torch.Tensor, torch.Tensor]: inv_coeffs = [ (perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom, (-perspective_coeffs[1] + perspective_coeffs[2] * perspective_coeffs[7]) / denom, @@ -2112,18 +2112,18 @@ def _create_identity_grid(size: tuple[int, int], device: torch.device, dtype: to return base_grid -def elastic_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int], displacement: torch.Tensor) -> torch.Tensor: +def elastic_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int], displacement: torch.Tensor) -> torch.Tensor: expected_shape = (1, canvas_size[0], canvas_size[1], 2) if not isinstance(displacement, torch.Tensor): raise TypeError("Argument displacement should be a Tensor") elif displacement.shape != expected_shape: raise ValueError(f"Argument displacement shape should be {expected_shape}, but given {displacement.shape}") - if kp.numel() == 0: - return kp + if keypoints.numel() == 0: + return keypoints - device = kp.device - dtype = kp.dtype if torch.is_floating_point(kp) else torch.float32 + device = keypoints.device + dtype = keypoints.dtype if torch.is_floating_point(keypoints) else torch.float32 if displacement.dtype != dtype or displacement.device != device: displacement = displacement.to(dtype=dtype, device=device) @@ -2131,7 +2131,7 @@ def elastic_keypoints(kp: torch.Tensor, canvas_size: Tuple[int, int], displaceme id_grid = _create_identity_grid(canvas_size, device=device, dtype=dtype) inv_grid = id_grid.sub_(displacement) - index_xy = kp.to(dtype=torch.long) + index_xy = keypoints.to(dtype=torch.long) index_x, index_y = index_xy[:, 0], index_xy[:, 1] # Unlike bounding boxes, this may not work well. 
index_x.clamp_(0, inv_grid.shape[2] - 1) @@ -2329,14 +2329,14 @@ def _center_crop_image_pil(image: PIL.Image.Image, output_size: list[int]) -> PI return _crop_image_pil(image, crop_top, crop_left, crop_height, crop_width) -def center_crop_keypoints(inpt: torch.Tensor, canvas_size: Tuple[int, int], output_size: List[int]): +def center_crop_keypoints(inpt: torch.Tensor, canvas_size: tuple[int, int], output_size: list[int]): crop_height, crop_width = _center_crop_parse_output_size(output_size) crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, *canvas_size) return crop_keypoints(inpt, top=crop_top, left=crop_left, height=crop_height, width=crop_width) @_register_kernel_internal(center_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _center_crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, output_size: List[int]) -> tv_tensors.KeyPoints: +def _center_crop_keypoints_dispatch(inpt: tv_tensors.KeyPoints, output_size: list[int]) -> tv_tensors.KeyPoints: output, canvas_size = center_crop_keypoints( inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, output_size=output_size ) @@ -2479,20 +2479,20 @@ def _resized_crop_image_pil_dispatch( def resized_crop_keypoints( - kp: torch.Tensor, + keypoints: torch.Tensor, top: int, left: int, height: int, width: int, - size: List[int], -) -> Tuple[torch.Tensor, Tuple[int, int]]: - kp, canvas_size = crop_keypoints(kp, top, left, height, width) - return resize_keypoints(kp, size=size, canvas_size=canvas_size) + size: list[int], +) -> tuple[torch.Tensor, tuple[int, int]]: + keypoints, canvas_size = crop_keypoints(keypoints, top, left, height, width) + return resize_keypoints(keypoints, size=size, canvas_size=canvas_size) @_register_kernel_internal(resized_crop, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _resized_crop_keypoints_dispatch( - inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs + inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: list[int], **kwargs ): output, canvas_size = resized_crop_keypoints( inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width, size=size From 841de7701955249951b4facddd80982ca23a7e3a Mon Sep 17 00:00:00 2001 From: Antoine Simoulin Date: Sat, 3 May 2025 16:37:51 -0700 Subject: [PATCH 18/27] Review python 3.9 type hint and lint --- test/common_utils.py | 6 +- test/test_transforms_v2.py | 50 ++++++----- test/test_transforms_v2_utils.py | 6 +- test/test_tv_tensors.py | 84 ++++++++++++------- torchvision/transforms/v2/_utils.py | 4 +- .../transforms/v2/functional/_geometry.py | 4 +- torchvision/transforms/v2/functional/_meta.py | 2 +- torchvision/transforms/v2/functional/_misc.py | 8 +- 8 files changed, 103 insertions(+), 61 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index bf0fe92ae3e..600cb5a13b7 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -8,8 +8,8 @@ import shutil import sys import tempfile -from typing import Sequence, Tuple import warnings +from collections.abc import Sequence from subprocess import CalledProcessError, check_output, STDOUT import numpy as np @@ -402,12 +402,12 @@ def make_image_pil(*args, **kwargs): def make_keypoints( - canvas_size: Tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device='cpu' + canvas_size: tuple[int, int] = DEFAULT_SIZE, *, num_points: int | Sequence[int] = 4, dtype=None, device="cpu" ) -> tv_tensors.KeyPoints: """Make the KeyPoints for testing 
purposes""" if isinstance(num_points, int): num_points = [num_points] - single_coord_shape: Tuple[int, ...] = tuple(num_points) + (1,) + single_coord_shape: tuple[int, ...] = tuple(num_points) + (1,) y = torch.randint(0, canvas_size[0] - 1, single_coord_shape, dtype=dtype, device=device) x = torch.randint(0, canvas_size[1] - 1, single_coord_shape, dtype=dtype, device=device) points = torch.cat((x, y), dim=-1) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 701d668b6f2..2cc72db15ea 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -231,10 +231,7 @@ def check_functional_kernel_signature_match(functional, *, kernel, input_type): if issubclass(input_type, tv_tensors.TVTensor): # We filter out metadata that is implicitly passed to the functional through the input tv_tensor, but has to be # explicitly passed to the kernel. - explicit_metadata = { - tv_tensors.BoundingBoxes: {"format", "canvas_size"}, - tv_tensors.KeyPoints: {"canvas_size"} - } + explicit_metadata = {tv_tensors.BoundingBoxes: {"format", "canvas_size"}, tv_tensors.KeyPoints: {"canvas_size"}} kernel_params = [param for param in kernel_params if param.name not in explicit_metadata.get(input_type, set())] functional_params = iter(functional_params) @@ -338,7 +335,8 @@ def _make_transform_sample(transform, *, image_or_video, adapter): canvas_size=size, device=device, ), - keypoints=make_keypoints(canvas_size=size), keypoints_degenerate=tv_tensors.KeyPoints( + keypoints=make_keypoints(canvas_size=size), + keypoints_degenerate=tv_tensors.KeyPoints( [ [0, 1], # left edge [1, 0], # top edge @@ -347,8 +345,10 @@ def _make_transform_sample(transform, *, image_or_video, adapter): [size[1], 0], # top right corner [1, size[0]], # bottom edge [0, size[0]], # bottom left corner - [size[1], size[0]] # bottom right corner - ], canvas_size=size, device=device + [size[1], size[0]], # bottom right corner + ], + canvas_size=size, + device=device, ), detection_mask=make_detection_masks(size, device=device), segmentation_mask=make_segmentation_mask(size, device=device), @@ -2362,7 +2362,7 @@ def test_error(self, T): F.to_pil_image(imgs[0]), tv_tensors.Mask(torch.rand(12, 12)), tv_tensors.BoundingBoxes(torch.rand(2, 4), format="XYXY", canvas_size=12), - tv_tensors.KeyPoints(torch.rand(2, 2), canvas_size=(12, 12)) + tv_tensors.KeyPoints(torch.rand(2, 2), canvas_size=(12, 12)), ): print(type(input_with_bad_type), cutmix_mixup) with pytest.raises(ValueError, match="does not support PIL images, "): @@ -2772,8 +2772,15 @@ def test_functional_signature(self, kernel, input_type): check_functional_kernel_signature_match(F.elastic, kernel=kernel, input_type=input_type) @pytest.mark.parametrize( - "make_input", [ - make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints + "make_input", + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, ], ) def test_displacement_error(self, make_input): @@ -2786,9 +2793,15 @@ def test_displacement_error(self, make_input): F.elastic(input, displacement=torch.rand(F.get_size(input))) @pytest.mark.parametrize( - "make_input", [ - make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video, - make_keypoints + "make_input", + [ + make_image_tensor, + make_image_pil, + make_image, + make_bounding_boxes, + make_segmentation_mask, + make_video, + make_keypoints, ], ) # ElasticTransform needs 
larger images to avoid the needed internal padding being larger than the actual image @@ -6297,21 +6310,18 @@ def test_no_valid_input(self, query): query(["blah"]) @pytest.mark.parametrize( - 'boxes', [ - tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4)) - ] + "boxes", [tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))] ) def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes): # TODO: this test can't handle rotated boxes yet kp = F.convert_bounding_boxes_to_points(boxes) - assert kp.shape == boxes.shape + (2, ) + assert kp.shape == boxes.shape + (2,) assert kp.dtype == boxes.dtype # kp is a list of A, B, C, D polygons. # If we use A | C, we should get back the XYXY format of bounding box reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes( - reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size - ), new_format=boxes.format + tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size), + new_format=boxes.format, ) assert (reconverted_bbox == boxes).all(), f"Invalid reconversion : {reconverted_bbox}" diff --git a/test/test_transforms_v2_utils.py b/test/test_transforms_v2_utils.py index 813a3cd93e6..dab6d525a38 100644 --- a/test/test_transforms_v2_utils.py +++ b/test/test_transforms_v2_utils.py @@ -68,7 +68,11 @@ def test_has_any(sample, types, expected): ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True), ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Mask, tv_tensors.KeyPoints), True), ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.KeyPoints), True), - ((IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), True), + ( + (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), + (tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), + True, + ), ( (IMAGE, BOUNDING_BOX, MASK, KEYPOINTS), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask, tv_tensors.KeyPoints), diff --git a/test/test_tv_tensors.py b/test/test_tv_tensors.py index a29c942db67..0c06bc9c929 100644 --- a/test/test_tv_tensors.py +++ b/test/test_tv_tensors.py @@ -2,7 +2,14 @@ import pytest import torch -from common_utils import assert_equal, make_bounding_boxes, make_image, make_keypoints, make_segmentation_mask, make_video +from common_utils import ( + assert_equal, + make_bounding_boxes, + make_image, + make_keypoints, + make_segmentation_mask, + make_video, +) from PIL import Image from torchvision import tv_tensors @@ -49,7 +56,26 @@ def test_bbox_dim_error(): tv_tensors.BoundingBoxes(data_3d, format="XYXY", canvas_size=(32, 32)) -@pytest.mark.parametrize("data", [torch.randint(0, 32, size=(5, 2)), [[0, 0,], [2, 2,]], [1, 2,]]) +@pytest.mark.parametrize( + "data", + [ + torch.randint(0, 32, size=(5, 2)), + [ + [ + 0, + 0, + ], + [ + 2, + 2, + ], + ], + [ + 1, + 2, + ], + ], +) def test_keypoints_instance(data): kpoint = tv_tensors.KeyPoints(data, canvas_size=(32, 32)) assert isinstance(kpoint, tv_tensors.KeyPoints) @@ -82,9 +108,9 @@ def test_new_requires_grad(data, input_requires_grad, expected_requires_grad): assert tv_tensor.requires_grad is expected_requires_grad -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + 
"make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) def test_isinstance(make_input): assert isinstance(make_input(), torch.Tensor) @@ -96,9 +122,9 @@ def test_wrapping_no_copy(): assert image.data_ptr() == tensor.data_ptr() -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) def test_to_wrapping(make_input): dp = make_input() @@ -108,9 +134,9 @@ def test_to_wrapping(make_input): assert dp_to.dtype is torch.float64 -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_to_tv_tensor_reference(make_input, return_type): tensor = torch.rand((3, 16, 16), dtype=torch.float64) @@ -124,9 +150,9 @@ def test_to_tv_tensor_reference(make_input, return_type): assert type(tensor) is torch.Tensor -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_clone_wrapping(make_input, return_type): dp = make_input() @@ -138,9 +164,9 @@ def test_clone_wrapping(make_input, return_type): assert dp_clone.data_ptr() != dp.data_ptr() -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_requires_grad__wrapping(make_input, return_type): dp = make_input(dtype=torch.float) @@ -155,9 +181,9 @@ def test_requires_grad__wrapping(make_input, return_type): assert dp_requires_grad.requires_grad -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_detach_wrapping(make_input, return_type): dp = make_input(dtype=torch.float).requires_grad_(True) @@ -212,9 +238,9 @@ def test_force_subclass_with_metadata(return_type): tv_tensors.set_return_type("tensor") -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_other_op_no_wrapping(make_input, return_type): dp = make_input() @@ -226,9 +252,9 @@ def test_other_op_no_wrapping(make_input, return_type): assert type(output) is (type(dp) if return_type == "TVTensor" else torch.Tensor) -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + 
"make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize( "op", [ @@ -245,9 +271,9 @@ def test_no_tensor_output_op_no_wrapping(make_input, op): assert type(output) is not type(dp) -@pytest.mark.parametrize("make_input", [ - make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints -]) +@pytest.mark.parametrize( + "make_input", [make_image, make_bounding_boxes, make_segmentation_mask, make_video, make_keypoints] +) @pytest.mark.parametrize("return_type", ["Tensor", "TVTensor"]) def test_inplace_op_no_wrapping(make_input, return_type): dp = make_input() diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index 34fb8ee4170..5add3c7bc20 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -2,10 +2,10 @@ import collections.abc import numbers -from collections.abc import Sequence +from collections.abc import Iterable, Sequence from contextlib import suppress -from typing import Any, Callable, Literal, Sequence, Iterable +from typing import Any, Callable, Literal import PIL.Image import torch diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index a74a211b9e7..44eafcdb31f 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -2112,7 +2112,9 @@ def _create_identity_grid(size: tuple[int, int], device: torch.device, dtype: to return base_grid -def elastic_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int], displacement: torch.Tensor) -> torch.Tensor: +def elastic_keypoints( + keypoints: torch.Tensor, canvas_size: tuple[int, int], displacement: torch.Tensor +) -> torch.Tensor: expected_shape = (1, canvas_size[0], canvas_size[1], 2) if not isinstance(displacement, torch.Tensor): raise TypeError("Argument displacement should be a Tensor") diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index bd3cbd3c699..348e14bda14 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -122,7 +122,7 @@ def get_size_bounding_boxes(bounding_box: tv_tensors.BoundingBoxes) -> list[int] @_register_kernel_internal(get_size, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def get_size_keypoints(keypoints: tv_tensors.KeyPoints) -> List[int]: +def get_size_keypoints(keypoints: tv_tensors.KeyPoints) -> list[int]: return list(keypoints.canvas_size) diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index ccd750eba0f..bddd8e27721 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -329,8 +329,8 @@ def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: boo def sanitize_keypoints( - keypoints: torch.Tensor, canvas_size: Optional[Tuple[int, int]] = None -) -> Tuple[torch.Tensor, torch.Tensor]: + keypoints: torch.Tensor, canvas_size: Optional[tuple[int, int]] = None +) -> tuple[torch.Tensor, torch.Tensor]: """Removes degenerate/invalid keypoints and returns the corresponding indexing mask. This removes the keypoints that are outside of their corresponing image. 
@@ -345,7 +345,7 @@ def sanitize_keypoints( Args: keypoints (torch.Tensor or class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being removed - canvas_size (Optional[Tuple[int, int]], optional): The canvas_size of the bounding boxes + canvas_size (Optional[tuple[int, int]], optional): The canvas_size of the bounding boxes (size of the corresponding image/video). Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.KeyPoints` object. @@ -383,7 +383,7 @@ def sanitize_keypoints( def _get_sanitize_keypoints_mask( keypoints: torch.Tensor, - canvas_size: Tuple[int, int], + canvas_size: tuple[int, int], ) -> torch.Tensor: image_h, image_w = canvas_size x = keypoints[:, 0] From ff6ab48397ef000d0b4a95fb3afe3c6dbe19588e Mon Sep 17 00:00:00 2001 From: Antoine Simoulin Date: Sat, 3 May 2025 16:55:53 -0700 Subject: [PATCH 19/27] Add specific keypoint tests --- test/test_transforms_v2.py | 569 +++++++++++++++++- .../transforms/v2/functional/__init__.py | 4 +- .../transforms/v2/functional/_geometry.py | 102 +++- torchvision/transforms/v2/functional/_meta.py | 36 +- 4 files changed, 671 insertions(+), 40 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 2cc72db15ea..e3d59dda6b8 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -574,6 +574,45 @@ def affine_bounding_boxes(bounding_boxes): ) +def reference_affine_keypoints_helper(keypoints, *, affine_matrix, new_canvas_size=None, clamp=True): + canvas_size = new_canvas_size or keypoints.canvas_size + + def affine_keypoints(keypoints): + dtype = keypoints.dtype + device = keypoints.device + + # Go to float before converting to prevent precision loss + x, y = keypoints.to(dtype=torch.float64, device="cpu", copy=True).squeeze(0).tolist() + + points = np.array([[x, y, 1.0]]) + transformed_points = np.matmul(points, affine_matrix.astype(points.dtype).T) + + output = torch.Tensor( + [ + float(transformed_points[0, 0]), + float(transformed_points[0, 1]), + ] + ) + + if clamp: + # It is important to clamp before casting, especially for CXCYWH format, dtype=int64 + output = F.clamp_keypoints( + output, + canvas_size=canvas_size, + ) + else: + # We leave the bounding box as float64 so the caller gets the full precision to perform any additional + # operation + dtype = output.dtype + + return output.to(dtype=dtype, device=device) + + return tv_tensors.KeyPoints( + torch.cat([affine_keypoints(k) for k in keypoints.reshape(-1, 2).unbind()], dim=0).reshape(keypoints.shape), + canvas_size=canvas_size, + ) + + class TestResize: INPUT_SIZE = (17, 11) OUTPUT_SIZES = [17, [17], (17,), None, [12, 13], (12, 13)] @@ -673,6 +712,28 @@ def test_kernel_bounding_boxes(self, format, size, use_max_size, dtype, device): check_scripted_vs_eager=not isinstance(size, int), ) + @pytest.mark.parametrize("size", OUTPUT_SIZES) + @pytest.mark.parametrize("use_max_size", [True, False]) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, size, use_max_size, dtype, device): + if not (max_size_kwarg := self._make_max_size_kwarg(use_max_size=use_max_size, size=size)): + return + + keypoints = make_keypoints( + canvas_size=self.INPUT_SIZE, + dtype=dtype, + device=device, + ) + check_kernel( + F.resize_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + size=size, + **max_size_kwarg, + check_scripted_vs_eager=not isinstance(size, int), + ) + @pytest.mark.parametrize("make_mask", 
[make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.resize_mask, make_mask(self.INPUT_SIZE), size=self.OUTPUT_SIZES[-1]) @@ -781,6 +842,28 @@ def _reference_resize_bounding_boxes(self, bounding_boxes, *, size, max_size=Non new_canvas_size=(new_height, new_width), ) + def _reference_resize_keypoints(self, keypoints, *, size, max_size=None): + old_height, old_width = keypoints.canvas_size + new_height, new_width = self._compute_output_size( + input_size=keypoints.canvas_size, size=size, max_size=max_size + ) + + if (old_height, old_width) == (new_height, new_width): + return keypoints + + affine_matrix = np.array( + [ + [new_width / old_width, 0, 0], + [0, new_height / old_height, 0], + ], + ) + + return reference_affine_keypoints_helper( + keypoints, + affine_matrix=affine_matrix, + new_canvas_size=(new_height, new_width), + ) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize("size", OUTPUT_SIZES) @pytest.mark.parametrize("use_max_size", [True, False]) @@ -797,6 +880,21 @@ def test_bounding_boxes_correctness(self, format, size, use_max_size, fn): self._check_output_size(bounding_boxes, actual, size=size, **max_size_kwarg) torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("size", OUTPUT_SIZES) + @pytest.mark.parametrize("use_max_size", [True, False]) + @pytest.mark.parametrize("fn", [F.resize, transform_cls_to_functional(transforms.Resize)]) + def test_keypoints_correctness(self, size, use_max_size, fn): + if not (max_size_kwarg := self._make_max_size_kwarg(use_max_size=use_max_size, size=size)): + return + + keypoints = make_keypoints(canvas_size=self.INPUT_SIZE) + + actual = fn(keypoints, size=size, **max_size_kwarg) + expected = self._reference_resize_keypoints(keypoints, size=size, **max_size_kwarg) + + self._check_output_size(keypoints, actual, size=size, **max_size_kwarg) + torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("interpolation", set(transforms.InterpolationMode) - set(INTERPOLATION_MODES)) @pytest.mark.parametrize( "make_input", @@ -1039,6 +1137,16 @@ def test_kernel_bounding_boxes(self, format, dtype, device): canvas_size=bounding_boxes.canvas_size, ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + check_kernel( + F.horizontal_flip_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.horizontal_flip_mask, make_mask()) @@ -1097,6 +1205,16 @@ def _reference_horizontal_flip_bounding_boxes(self, bounding_boxes): return reference_affine_bounding_boxes_helper(bounding_boxes, affine_matrix=affine_matrix) + def _reference_horizontal_flip_keypoints(self, keypoints): + affine_matrix = np.array( + [ + [-1, 0, keypoints.canvas_size[1]], + [0, 1, 0], + ], + ) + + return reference_affine_keypoints_helper(keypoints, affine_matrix=affine_matrix) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize( "fn", [F.horizontal_flip, transform_cls_to_functional(transforms.RandomHorizontalFlip, p=1)] @@ -1109,6 +1227,17 @@ def test_bounding_boxes_correctness(self, format, fn): torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize( + "fn", [F.horizontal_flip, 
transform_cls_to_functional(transforms.RandomHorizontalFlip, p=1)] + ) + def test_keypoints_correctness(self, fn): + keypoints = make_keypoints() + + actual = fn(keypoints) + expected = self._reference_horizontal_flip_keypoints(keypoints) + + torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize( "make_input", [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], @@ -1210,6 +1339,24 @@ def test_kernel_bounding_boxes(self, param, value, format, dtype, device): check_scripted_vs_eager=not (param == "shear" and isinstance(value, (int, float))), ) + @param_value_parametrization( + angle=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["angle"], + translate=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["translate"], + shear=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["shear"], + center=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["center"], + ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, param, value, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + self._check_kernel( + F.affine_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + **{param: value}, + check_scripted_vs_eager=not (param == "shear" and isinstance(value, (int, float))), + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): self._check_kernel(F.affine_mask, make_mask()) @@ -1346,6 +1493,17 @@ def _reference_affine_bounding_boxes(self, bounding_boxes, *, angle, translate, ), ) + def _reference_affine_keypoints(self, keypoints, *, angle, translate, scale, shear, center): + if center is None: + center = [s * 0.5 for s in keypoints.canvas_size[::-1]] + + return reference_affine_keypoints_helper( + keypoints, + affine_matrix=self._compute_affine_matrix( + angle=angle, translate=translate, scale=scale, shear=shear, center=center + ), + ) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize("angle", _CORRECTNESS_AFFINE_KWARGS["angle"]) @pytest.mark.parametrize("translate", _CORRECTNESS_AFFINE_KWARGS["translate"]) @@ -1392,6 +1550,50 @@ def test_transform_bounding_boxes_correctness(self, format, center, seed): torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("angle", _CORRECTNESS_AFFINE_KWARGS["angle"]) + @pytest.mark.parametrize("translate", _CORRECTNESS_AFFINE_KWARGS["translate"]) + @pytest.mark.parametrize("scale", _CORRECTNESS_AFFINE_KWARGS["scale"]) + @pytest.mark.parametrize("shear", _CORRECTNESS_AFFINE_KWARGS["shear"]) + @pytest.mark.parametrize("center", _CORRECTNESS_AFFINE_KWARGS["center"]) + def test_functional_keypoints_correctness(self, angle, translate, scale, shear, center): + keypoints = make_keypoints() + + actual = F.affine( + keypoints, + angle=angle, + translate=translate, + scale=scale, + shear=shear, + center=center, + ) + expected = self._reference_affine_keypoints( + keypoints, + angle=angle, + translate=translate, + scale=scale, + shear=shear, + center=center, + ) + + torch.testing.assert_close(actual, expected) + + @pytest.mark.parametrize("center", _CORRECTNESS_AFFINE_KWARGS["center"]) + @pytest.mark.parametrize("seed", list(range(5))) + def test_transform_keypoints_correctness(self, center, seed): + keypoints = make_keypoints() + + transform = transforms.RandomAffine(**self._CORRECTNESS_TRANSFORM_AFFINE_RANGES, center=center) + + torch.manual_seed(seed) + params = transform.make_params([keypoints]) + + torch.manual_seed(seed) + 
actual = transform(keypoints) + + expected = self._reference_affine_keypoints(keypoints, **params, center=center) + + torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("degrees", _EXHAUSTIVE_TYPE_TRANSFORM_AFFINE_RANGES["degrees"]) @pytest.mark.parametrize("translate", _EXHAUSTIVE_TYPE_TRANSFORM_AFFINE_RANGES["translate"]) @pytest.mark.parametrize("scale", _EXHAUSTIVE_TYPE_TRANSFORM_AFFINE_RANGES["scale"]) @@ -1493,6 +1695,16 @@ def test_kernel_bounding_boxes(self, format, dtype, device): canvas_size=bounding_boxes.canvas_size, ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + check_kernel( + F.vertical_flip_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.vertical_flip_mask, make_mask()) @@ -1549,6 +1761,16 @@ def _reference_vertical_flip_bounding_boxes(self, bounding_boxes): return reference_affine_bounding_boxes_helper(bounding_boxes, affine_matrix=affine_matrix) + def _reference_vertical_flip_keypoints(self, keypoints): + affine_matrix = np.array( + [ + [1, 0, 0], + [0, -1, keypoints.canvas_size[0]], + ], + ) + + return reference_affine_keypoints_helper(keypoints, affine_matrix=affine_matrix) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize("fn", [F.vertical_flip, transform_cls_to_functional(transforms.RandomVerticalFlip, p=1)]) def test_bounding_boxes_correctness(self, format, fn): @@ -1559,6 +1781,15 @@ def test_bounding_boxes_correctness(self, format, fn): torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize("fn", [F.vertical_flip, transform_cls_to_functional(transforms.RandomVerticalFlip, p=1)]) + def test_keypoints_correctness(self, fn): + keypoints = make_keypoints() + + actual = fn(keypoints) + expected = self._reference_vertical_flip_keypoints(keypoints) + + torch.testing.assert_close(actual, expected) + @pytest.mark.parametrize( "make_input", [make_image_tensor, make_image_pil, make_image, make_bounding_boxes, make_segmentation_mask, make_video], @@ -1636,6 +1867,27 @@ def test_kernel_bounding_boxes(self, param, value, format, dtype, device): **kwargs, ) + @param_value_parametrization( + angle=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["angle"], + expand=[False, True], + center=_EXHAUSTIVE_TYPE_AFFINE_KWARGS["center"], + ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.uint8]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, param, value, dtype, device): + kwargs = {param: value} + if param != "angle": + kwargs["angle"] = self._MINIMAL_AFFINE_KWARGS["angle"] + + keypoints = make_keypoints(dtype=dtype, device=device) + + check_kernel( + F.rotate_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + **kwargs, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.rotate_mask, make_mask(), **self._MINIMAL_AFFINE_KWARGS) @@ -1823,6 +2075,71 @@ def test_transform_bounding_boxes_correctness(self, format, expand, center, seed torch.testing.assert_close(actual, expected) torch.testing.assert_close(F.get_size(actual), F.get_size(expected), atol=2 if expand else 0, rtol=0) + def _recenter_keypoints_after_expand(self, keypoints, *, recenter_xy): 
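+        # ``recenter_xy`` comes from ``_compute_output_canvas_size``; with ``expand=True`` it is the
+        # offset of the expanded canvas, and subtracting it below puts the reference points in the
+        # same coordinate frame as the kernel output.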
+ x, y = recenter_xy + translate = [x, y] + return tv_tensors.wrap( + (keypoints.to(torch.float64) - torch.tensor(translate)).to(keypoints.dtype), like=keypoints + ) + + def _reference_rotate_keypoints(self, keypoints, *, angle, expand, center): + if center is None: + center = [s * 0.5 for s in keypoints.canvas_size[::-1]] + cx, cy = center + + a = np.cos(angle * np.pi / 180.0) + b = np.sin(angle * np.pi / 180.0) + affine_matrix = np.array( + [ + [a, b, cx - cx * a - b * cy], + [-b, a, cy + cx * b - a * cy], + ], + ) + + new_canvas_size, recenter_xy = self._compute_output_canvas_size( + expand=expand, canvas_size=keypoints.canvas_size, affine_matrix=affine_matrix + ) + + output = reference_affine_keypoints_helper( + keypoints, + affine_matrix=affine_matrix, + new_canvas_size=new_canvas_size, + clamp=False, + ) + + return F.clamp_keypoints(self._recenter_keypoints_after_expand(output, recenter_xy=recenter_xy)).to(keypoints) + + @pytest.mark.parametrize("angle", _CORRECTNESS_AFFINE_KWARGS["angle"]) + @pytest.mark.parametrize("expand", [False, True]) + @pytest.mark.parametrize("center", _CORRECTNESS_AFFINE_KWARGS["center"]) + def test_functional_keypoints_correctness(self, angle, expand, center): + keypoints = make_keypoints() + + actual = F.rotate(keypoints, angle=angle, expand=expand, center=center) + expected = self._reference_rotate_keypoints(keypoints, angle=angle, expand=expand, center=center) + + torch.testing.assert_close(actual, expected) + torch.testing.assert_close(F.get_size(actual), F.get_size(expected), atol=2 if expand else 0, rtol=0) + + @pytest.mark.parametrize("expand", [False, True]) + @pytest.mark.parametrize("center", _CORRECTNESS_AFFINE_KWARGS["center"]) + @pytest.mark.parametrize("seed", list(range(5))) + def test_transform_keypoints_correctness(self, expand, center, seed): + keypoints = make_keypoints() + + transform = transforms.RandomRotation(**self._CORRECTNESS_TRANSFORM_AFFINE_RANGES, expand=expand, center=center) + + torch.manual_seed(seed) + params = transform.make_params([keypoints]) + + torch.manual_seed(seed) + actual = transform(keypoints) + + expected = self._reference_rotate_keypoints(keypoints, **params, expand=expand, center=center) + + torch.testing.assert_close(actual, expected) + torch.testing.assert_close(F.get_size(actual), F.get_size(expected), atol=2 if expand else 0, rtol=0) + @pytest.mark.parametrize("degrees", _EXHAUSTIVE_TYPE_TRANSFORM_AFFINE_RANGES["degrees"]) @pytest.mark.parametrize("seed", list(range(10))) def test_transformmake_params_bounds(self, degrees, seed): @@ -2740,6 +3057,18 @@ def test_kernel_bounding_boxes(self, format, dtype, device): displacement=self._make_displacement(bounding_boxes), ) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_kernel_keypoints(self, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + + check_kernel( + F.elastic_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + displacement=self._make_displacement(keypoints), + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): mask = make_mask() @@ -2872,7 +3201,7 @@ def test_kernel_image(self, kwargs, dtype, device): @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) @pytest.mark.parametrize("device", cpu_and_cuda()) - def test_kernel_bounding_box(self, kwargs, format, dtype, device): + def 
test_kernel_bounding_boxes(self, kwargs, format, dtype, device): bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format, dtype=dtype, device=device) check_kernel(F.crop_bounding_boxes, bounding_boxes, format=format, **kwargs) @@ -3057,6 +3386,54 @@ def test_transform_bounding_boxes_correctness(self, output_size, format, dtype, assert_equal(actual, expected) assert_equal(F.get_size(actual), F.get_size(expected)) + def _reference_crop_keypoints(self, keypoints, *, top, left, height, width): + affine_matrix = np.array( + [ + [1, 0, -left], + [0, 1, -top], + ], + ) + return reference_affine_keypoints_helper( + keypoints, affine_matrix=affine_matrix, new_canvas_size=(height, width) + ) + + @pytest.mark.parametrize("kwargs", CORRECTNESS_CROP_KWARGS) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_functional_keypoints_correctness(self, kwargs, dtype, device): + keypoints = make_keypoints(self.INPUT_SIZE, dtype=dtype, device=device) + + actual = F.crop(keypoints, **kwargs) + expected = self._reference_crop_keypoints(keypoints, **kwargs) + + assert_equal(actual, expected, atol=1, rtol=0) + assert_equal(F.get_size(actual), F.get_size(expected)) + + @pytest.mark.parametrize("output_size", [(17, 11), (11, 17), (11, 11)]) + @pytest.mark.parametrize("dtype", [torch.float32, torch.int64]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + @pytest.mark.parametrize("seed", list(range(5))) + def test_transform_keypoints_correctness(self, output_size, dtype, device, seed): + input_size = (output_size[0] * 2, output_size[1] * 2) + keypoints = make_keypoints(input_size, dtype=dtype, device=device) + + transform = transforms.RandomCrop(output_size) + + with freeze_rng_state(): + torch.manual_seed(seed) + params = transform.make_params([keypoints]) + assert not params.pop("needs_pad") + del params["padding"] + assert params.pop("needs_crop") + + torch.manual_seed(seed) + actual = transform(keypoints) + + expected = self._reference_crop_keypoints(keypoints, **params) + + assert_equal(actual, expected) + assert_equal(F.get_size(actual), F.get_size(expected)) + def test_errors(self): with pytest.raises(ValueError, match="Please provide only two dimensions"): transforms.RandomCrop([10, 12, 14]) @@ -3795,6 +4172,31 @@ def _reference_resized_crop_bounding_boxes(self, bounding_boxes, *, top, left, h new_canvas_size=size, ) + def _reference_resized_crop_keypoints(self, keypoints, *, top, left, height, width, size): + new_height, new_width = size + + crop_affine_matrix = np.array( + [ + [1, 0, -left], + [0, 1, -top], + [0, 0, 1], + ], + ) + resize_affine_matrix = np.array( + [ + [new_width / width, 0, 0], + [0, new_height / height, 0], + [0, 0, 1], + ], + ) + affine_matrix = (resize_affine_matrix @ crop_affine_matrix)[:2, :] + + return reference_affine_keypoints_helper( + keypoints, + affine_matrix=affine_matrix, + new_canvas_size=size, + ) + @pytest.mark.parametrize("format", SUPPORTED_BOX_FORMATS) def test_functional_bounding_boxes_correctness(self, format): bounding_boxes = make_bounding_boxes(self.INPUT_SIZE, format=format) @@ -3807,6 +4209,15 @@ def test_functional_bounding_boxes_correctness(self, format): assert_equal(actual, expected) assert_equal(F.get_size(actual), F.get_size(expected)) + def test_functional_keypoints_correctness(self): + keypoints = make_keypoints(self.INPUT_SIZE) + + actual = F.resized_crop(keypoints, **self.CROP_KWARGS, size=self.OUTPUT_SIZE) + expected = 
self._reference_resized_crop_keypoints(keypoints, **self.CROP_KWARGS, size=self.OUTPUT_SIZE) + + assert_equal(actual, expected) + assert_equal(F.get_size(actual), F.get_size(expected)) + def test_transform_errors_warnings(self): with pytest.raises(ValueError, match="provide only two dimensions"): transforms.RandomResizedCrop(size=(1, 2, 3)) @@ -3892,6 +4303,26 @@ def test_kernel_bounding_boxes_errors(self, padding_mode): padding_mode=padding_mode, ) + def test_kernel_keypoints(self): + keypoints = make_keypoints() + check_kernel( + F.pad_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + padding=[1], + ) + + @pytest.mark.parametrize("padding_mode", ["symmetric", "edge", "reflect"]) + def test_kernel_keypoints_errors(self, padding_mode): + keypoints = make_keypoints() + with pytest.raises(ValueError, match=f"'{padding_mode}' is not supported"): + F.pad_keypoints( + keypoints, + canvas_size=keypoints.canvas_size, + padding=[1], + padding_mode=padding_mode, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.pad_mask, make_mask(), padding=[1]) @@ -4035,6 +4466,17 @@ def test_kernel_bounding_boxes(self, output_size, format): check_scripted_vs_eager=not isinstance(output_size, int), ) + @pytest.mark.parametrize("output_size", OUTPUT_SIZES) + def test_kernel_keypoints(self, output_size): + keypoints = make_keypoints(self.INPUT_SIZE) + check_kernel( + F.center_crop_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + output_size=output_size, + check_scripted_vs_eager=not isinstance(output_size, int), + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.center_crop_mask, make_mask(), output_size=self.OUTPUT_SIZES[0]) @@ -4114,6 +4556,37 @@ def test_bounding_boxes_correctness(self, output_size, format, dtype, device, fn assert_equal(actual, expected) + def _reference_center_crop_keypoints(self, keypoints, output_size): + image_height, image_width = keypoints.canvas_size + if isinstance(output_size, int): + output_size = (output_size, output_size) + elif len(output_size) == 1: + output_size *= 2 + crop_height, crop_width = output_size + + top = int(round((image_height - crop_height) / 2)) + left = int(round((image_width - crop_width) / 2)) + + affine_matrix = np.array( + [ + [1, 0, -left], + [0, 1, -top], + ], + ) + return reference_affine_keypoints_helper(keypoints, affine_matrix=affine_matrix, new_canvas_size=output_size) + + @pytest.mark.parametrize("output_size", OUTPUT_SIZES) + @pytest.mark.parametrize("dtype", [torch.int64, torch.float32]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + @pytest.mark.parametrize("fn", [F.center_crop, transform_cls_to_functional(transforms.CenterCrop)]) + def test_keypoints_correctness(self, output_size, dtype, device, fn): + keypoints = make_keypoints(self.INPUT_SIZE, dtype=dtype, device=device) + + actual = fn(keypoints, output_size) + expected = self._reference_center_crop_keypoints(keypoints, output_size) + + assert_equal(actual, expected) + class TestPerspective: COEFFICIENTS = [ @@ -4201,6 +4674,39 @@ def test_kernel_bounding_boxes_error(self): coefficients=[0.0] * 8, ) + @param_value_parametrization( + coefficients=COEFFICIENTS, + start_end_points=START_END_POINTS, + ) + def test_kernel_keypoints(self, param, value): + if param == "start_end_points": + kwargs = dict(zip(["startpoints", "endpoints"], value)) + else: + kwargs = {"startpoints": 
None, "endpoints": None, param: value} + + keypoints = make_keypoints() + + check_kernel( + F.perspective_keypoints, + keypoints, + canvas_size=keypoints.canvas_size, + **kwargs, + ) + + def test_kernel_keypoints_error(self): + keypoints = make_keypoints() + canvas_size = keypoints.canvas_size + keypoints = keypoints.as_subclass(torch.Tensor) + + with pytest.raises(RuntimeError, match="Denominator is zero"): + F.perspective_keypoints( + keypoints, + canvas_size=canvas_size, + startpoints=None, + endpoints=None, + coefficients=[0.0] * 8, + ) + @pytest.mark.parametrize("make_mask", [make_segmentation_mask, make_detection_masks]) def test_kernel_mask(self, make_mask): check_kernel(F.perspective_mask, make_mask(), **self.MINIMAL_KWARGS) @@ -4358,6 +4864,67 @@ def test_correctness_perspective_bounding_boxes(self, startpoints, endpoints, fo assert_close(actual, expected, rtol=0, atol=1) + def _reference_perspective_keypoints(self, keypoints, *, startpoints, endpoints): + canvas_size = keypoints.canvas_size + dtype = keypoints.dtype + device = keypoints.device + + coefficients = _get_perspective_coeffs(endpoints, startpoints) + + def perspective_keypoints(keypoints): + m1 = np.array( + [ + [coefficients[0], coefficients[1], coefficients[2]], + [coefficients[3], coefficients[4], coefficients[5]], + ] + ) + m2 = np.array( + [ + [coefficients[6], coefficients[7], 1.0], + [coefficients[6], coefficients[7], 1.0], + ] + ) + + # Go to float before converting to prevent precision loss + x, y = keypoints.to(dtype=torch.float64, device="cpu", copy=True).squeeze(0).tolist() + + points = np.array([[x, y, 1.0]]) + + numerator = points @ m1.T + denominator = points @ m2.T + transformed_points = numerator / denominator + + output = torch.Tensor( + [ + float(transformed_points[0, 0]), + float(transformed_points[0, 1]), + ] + ) + + # It is important to clamp before casting, especially for CXCYWH format, dtype=int64 + return F.clamp_keypoints( + output, + canvas_size=canvas_size, + ).to(dtype=dtype, device=device) + + return tv_tensors.KeyPoints( + torch.cat([perspective_keypoints(k) for k in keypoints.reshape(-1, 2).unbind()], dim=0).reshape( + keypoints.shape + ), + canvas_size=canvas_size, + ) + + @pytest.mark.parametrize(("startpoints", "endpoints"), START_END_POINTS) + @pytest.mark.parametrize("dtype", [torch.int64, torch.float32]) + @pytest.mark.parametrize("device", cpu_and_cuda()) + def test_correctness_perspective_keypoints(self, startpoints, endpoints, dtype, device): + keypoints = make_keypoints(dtype=dtype, device=device) + + actual = F.perspective(keypoints, startpoints=startpoints, endpoints=endpoints) + expected = self._reference_perspective_keypoints(keypoints, startpoints=startpoints, endpoints=endpoints) + + assert_close(actual, expected, rtol=0, atol=1) + class TestEqualize: @pytest.mark.parametrize("dtype", [torch.uint8, torch.float32]) diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py index e32ef73f7c1..e651bbd9257 100644 --- a/torchvision/transforms/v2/functional/__init__.py +++ b/torchvision/transforms/v2/functional/__init__.py @@ -4,6 +4,7 @@ from ._meta import ( clamp_bounding_boxes, + clamp_keypoints, convert_bounding_box_format, convert_bounding_boxes_to_points, get_dimensions_image, @@ -16,6 +17,7 @@ get_num_channels_video, get_num_channels, get_size_bounding_boxes, + get_size_keypoints, get_size_image, get_size_mask, get_size_video, @@ -108,10 +110,10 @@ pad_keypoints, pad_mask, pad_video, - perspectice_keypoints, 
perspective, perspective_bounding_boxes, perspective_image, + perspective_keypoints, perspective_mask, perspective_video, resize, diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 44eafcdb31f..50bf20e8ba0 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -67,8 +67,10 @@ def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor: def horizontal_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]): + shape = keypoints.shape + keypoints = keypoints.clone().reshape(-1, 2) keypoints[..., 0] = keypoints[..., 0].sub_(canvas_size[1]).neg_() - return keypoints + return keypoints.reshape(shape) @_register_kernel_internal(horizontal_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) @@ -134,10 +136,11 @@ def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor: return vertical_flip_image(mask) -@_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def vertical_flip_keypoints(keypoints: tv_tensors.KeyPoints): - keypoints[..., 1] = keypoints[..., 1].sub_(keypoints.canvas_size[0]).neg_() - return keypoints +def vertical_flip_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]) -> torch.Tensor: + shape = keypoints.shape + keypoints = keypoints.clone().reshape(-1, 2) + keypoints[..., 1] = keypoints[..., 1].sub_(canvas_size[0]).neg_() + return keypoints.reshape(shape) def vertical_flip_bounding_boxes( @@ -157,6 +160,12 @@ def vertical_flip_bounding_boxes( return bounding_boxes.reshape(shape) +@_register_kernel_internal(vertical_flip, tv_tensors.KeyPoints, tv_tensor_wrapper=False) +def _vertical_flip_keypoints_dispatch(inpt: tv_tensors.KeyPoints) -> tv_tensors.KeyPoints: + output = vertical_flip_keypoints(inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size) + return tv_tensors.wrap(output, like=inpt) + + @_register_kernel_internal(vertical_flip, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False) def _vertical_flip_bounding_boxes_dispatch(inpt: tv_tensors.BoundingBoxes) -> tv_tensors.BoundingBoxes: output = vertical_flip_bounding_boxes( @@ -828,6 +837,7 @@ def _affine_keypoints_with_expand( return keypoints, canvas_size original_dtype = keypoints.dtype + original_shape = keypoints.shape keypoints = keypoints.clone() if keypoints.is_floating_point() else keypoints.float() dtype = keypoints.dtype device = keypoints.device @@ -850,12 +860,40 @@ def _affine_keypoints_with_expand( .reshape(2, 3) .T ) - # 1) Unlike bounding box (whose implmentation we stole) we're already a bunch of points. - keypoints = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=device, dtype=dtype)], dim=-1) + + # 1) We transform points into a tensor of points with shape (N, 3), where N is the number of points. + points = keypoints.reshape(-1, 2) + points = torch.cat([points, torch.ones(points.shape[0], 1, device=device, dtype=dtype)], dim=-1) # 2) Now let's transform the points using affine matrix - keypoints = torch.matmul(keypoints, transposed_affine_matrix).to(original_dtype) + transformed_points = torch.matmul(points, transposed_affine_matrix) - return keypoints, canvas_size + if expand: + # Compute minimum point for transformed image frame: + # Points are Top-Left, Top-Right, Bottom-Left, Bottom-Right points. 
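+        # The four canvas corners are sent through the same affine matrix; their coordinate-wise
+        # minimum is then subtracted from the transformed keypoints so that everything lands at
+        # non-negative coordinates, and the expanded canvas size is recomputed further below.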
+ height, width = canvas_size + points = torch.tensor( + [ + [0.0, 0.0, 1.0], + [0.0, float(height), 1.0], + [float(width), float(height), 1.0], + [float(width), 0.0, 1.0], + ], + dtype=dtype, + device=device, + ) + new_points = torch.matmul(points, transposed_affine_matrix) + tr = torch.amin(new_points, dim=0, keepdim=True) + # Translate keypoints + transformed_points.sub_(tr) + # Estimate meta-data for image with inverted=True + affine_vector = _get_inverse_affine_matrix(center, angle, translate, scale, shear) + new_width, new_height = _compute_affine_output_size(affine_vector, width, height) + canvas_size = (new_height, new_width) + + out_kkpoints = clamp_keypoints(transformed_points, canvas_size=canvas_size).reshape(original_shape) + out_kkpoints = out_kkpoints.to(original_dtype) + + return out_kkpoints, canvas_size def affine_keypoints( @@ -1199,16 +1237,15 @@ def _rotate_image_pil( def rotate_keypoints( - keypoints: tv_tensors.KeyPoints, + keypoints: torch.Tensor, + canvas_size: tuple[int, int], angle: float, - interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, expand: bool = False, center: Optional[list[float]] = None, - fill: _FillTypeJIT = None, ) -> tuple[torch.Tensor, tuple[int, int]]: return _affine_keypoints_with_expand( - keypoints=keypoints.as_subclass(torch.Tensor), - canvas_size=keypoints.canvas_size, + keypoints=keypoints, + canvas_size=canvas_size, angle=-angle, translate=[0.0, 0.0], scale=1.0, @@ -1220,10 +1257,10 @@ def rotate_keypoints( @_register_kernel_internal(rotate, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _rotate_keypoints_dispatch( - keypoints: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[list[float]] = None, **kwargs + inpt: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[list[float]] = None, **kwargs ) -> tv_tensors.KeyPoints: - out, canvas_size = rotate_keypoints(keypoints, angle, center=center, expand=expand, **kwargs) - return tv_tensors.wrap(out, like=keypoints, canvas_size=canvas_size) + out, canvas_size = rotate_keypoints(inpt, canvas_size=inpt.canvas_size, angle=angle, center=center, expand=expand) + return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) def rotate_bounding_boxes( @@ -1612,7 +1649,7 @@ def crop_keypoints( width: int, ) -> tuple[torch.Tensor, tuple[int, int]]: - keypoints.sub_(torch.tensor([left, top], dtype=keypoints.dtype, device=keypoints.device)) + keypoints = keypoints - torch.tensor([left, top], dtype=keypoints.dtype, device=keypoints.device) canvas_size = (height, width) return clamp_keypoints(keypoints, canvas_size=canvas_size), canvas_size @@ -1622,8 +1659,8 @@ def crop_keypoints( def _crop_keypoints_dispatch( inpt: tv_tensors.KeyPoints, top: int, left: int, height: int, width: int ) -> tv_tensors.KeyPoints: - out, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) - return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size) + output, canvas_size = crop_keypoints(inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width) + return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size) def crop_bounding_boxes( @@ -1799,7 +1836,7 @@ def _perspective_image_pil( return _FP.perspective(image, perspective_coeffs, interpolation=pil_modes_mapping[interpolation], fill=fill) -def perspectice_keypoints( +def perspective_keypoints( keypoints: torch.Tensor, canvas_size: tuple[int, int], startpoints: Optional[list[list[int]]], @@ -1810,7 +1847,9 @@ def 
perspectice_keypoints( return keypoints dtype = keypoints.dtype if torch.is_floating_point(keypoints) else torch.float32 device = keypoints.device + original_shape = keypoints.shape + keypoints = keypoints.clone().reshape(-1, 2) perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients) denom = perspective_coeffs[0] * perspective_coeffs[4] - perspective_coeffs[1] * perspective_coeffs[3] @@ -1821,23 +1860,23 @@ def perspectice_keypoints( ) theta1, theta2 = _compute_perspective_thetas(perspective_coeffs, dtype, device, denom) - keypoints = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=keypoints.device)], dim=-1) + points = torch.cat([keypoints, torch.ones(keypoints.shape[0], 1, device=keypoints.device)], dim=-1) - numer_points = torch.matmul(keypoints, theta1.T) - denom_points = torch.matmul(keypoints, theta2.T) + numer_points = torch.matmul(points, theta1.T) + denom_points = torch.matmul(points, theta2.T) transformed_points = numer_points.div_(denom_points) - return clamp_keypoints(transformed_points, canvas_size) + return clamp_keypoints(transformed_points.to(keypoints.dtype), canvas_size).reshape(original_shape) @_register_kernel_internal(perspective, tv_tensors.KeyPoints, tv_tensor_wrapper=False) def _perspective_keypoints_dispatch( - inpt: tv_tensors.BoundingBoxes, + inpt: tv_tensors.KeyPoints, startpoints: Optional[list[list[int]]], endpoints: Optional[list[list[int]]], coefficients: Optional[list[float]] = None, **kwargs, -) -> tv_tensors.BoundingBoxes: - output = perspectice_keypoints( +) -> tv_tensors.KeyPoints: + output = perspective_keypoints( inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, startpoints=startpoints, @@ -2130,6 +2169,9 @@ def elastic_keypoints( if displacement.dtype != dtype or displacement.device != device: displacement = displacement.to(dtype=dtype, device=device) + original_shape = keypoints.shape + keypoints = keypoints.clone().reshape(-1, 2) + id_grid = _create_identity_grid(canvas_size, device=device, dtype=dtype) inv_grid = id_grid.sub_(displacement) @@ -2142,11 +2184,11 @@ def elastic_keypoints( t_size = torch.tensor(canvas_size[::-1], device=displacement.device, dtype=displacement.dtype) transformed_points = inv_grid[0, index_y, index_x, :].add_(1).mul_(0.5 * t_size).sub_(0.5) - return clamp_keypoints(transformed_points, canvas_size=canvas_size) + return clamp_keypoints(transformed_points.to(keypoints.dtype), canvas_size=canvas_size).reshape(original_shape) @_register_kernel_internal(elastic, tv_tensors.KeyPoints, tv_tensor_wrapper=False) -def _elastic_keypoints_dispatch(inpt: tv_tensors.BoundingBoxes, displacement: torch.Tensor, **kwargs): +def _elastic_keypoints_dispatch(inpt: tv_tensors.KeyPoints, displacement: torch.Tensor, **kwargs): output = elastic_keypoints(inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size, displacement=displacement) return tv_tensors.wrap(output, like=inpt) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 348e14bda14..4de86144de9 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -388,14 +388,12 @@ def _clamp_bounding_boxes( return out_boxes.to(in_dtype) -def clamp_keypoints(inpt: torch.Tensor, canvas_size: Tuple[int, int]) -> torch.Tensor: - if not torch.jit.is_scripting(): - _log_api_usage_once(clamp_bounding_boxes) - dtype = inpt.dtype - inpt = inpt.float() - inpt[..., 0].clamp_(0, canvas_size[1]) - inpt[..., 1].clamp_(0, canvas_size[0]) 
- return inpt.to(dtype=dtype) +def _clamp_keypoints(keypoints: torch.Tensor, canvas_size: tuple[int, int]) -> torch.Tensor: + dtype = keypoints.dtype + keypoints = keypoints.clone() if keypoints.is_floating_point() else keypoints.float() + keypoints[..., 0].clamp_(min=0, max=canvas_size[1]) + keypoints[..., 1].clamp_(min=0, max=canvas_size[0]) + return keypoints.to(dtype=dtype) def clamp_bounding_boxes( @@ -421,3 +419,25 @@ def clamp_bounding_boxes( raise TypeError( f"Input can either be a plain tensor or a bounding box tv_tensor, but got {type(inpt)} instead." ) + + +def clamp_keypoints( + inpt: torch.Tensor, + canvas_size: Optional[tuple[int, int]] = None, +) -> torch.Tensor: + """See :func:`~torchvision.transforms.v2.ClampKeyPoints` for details.""" + if not torch.jit.is_scripting(): + _log_api_usage_once(clamp_keypoints) + + if torch.jit.is_scripting() or is_pure_tensor(inpt): + + if canvas_size is None: + raise ValueError("For pure tensor inputs, `canvas_size` have to be passed.") + return _clamp_keypoints(inpt, canvas_size=canvas_size) + elif isinstance(inpt, tv_tensors.KeyPoints): + if canvas_size is not None: + raise ValueError("For keypoints tv_tensor inputs, `canvas_size` must not be passed.") + output = _clamp_keypoints(inpt.as_subclass(torch.Tensor), canvas_size=inpt.canvas_size) + return tv_tensors.wrap(output, like=inpt) + else: + raise TypeError(f"Input can either be a plain tensor or a keypoints tv_tensor, but got {type(inpt)} instead.") From 0de59e78c6641b63e19bab1a620dd12c4c0ed3d5 Mon Sep 17 00:00:00 2001 From: Antoine Simoulin Date: Sun, 4 May 2025 11:02:12 -0700 Subject: [PATCH 20/27] Adjust variable names --- torchvision/transforms/v2/functional/_geometry.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 50bf20e8ba0..27fe6a722bc 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -364,13 +364,14 @@ def resize_keypoints( keypoints: torch.Tensor, size: Optional[list[int]], canvas_size: tuple[int, int], - interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, - antialias: Optional[bool] = True, ): old_height, old_width = canvas_size new_height, new_width = _compute_resized_output_size(canvas_size, size=size, max_size=max_size) + if (new_height, new_width) == (old_height, old_width): + return keypoints, canvas_size + w_ratio = new_width / old_width h_ratio = new_height / old_height ratios = torch.tensor([w_ratio, h_ratio], device=keypoints.device) @@ -383,17 +384,13 @@ def resize_keypoints( def _resize_keypoints_dispatch( keypoints: tv_tensors.KeyPoints, size: Optional[list[int]], - interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, max_size: Optional[int] = None, - antialias: Optional[bool] = True, ) -> tv_tensors.KeyPoints: out, canvas_size = resize_keypoints( keypoints.as_subclass(torch.Tensor), size, canvas_size=keypoints.canvas_size, - interpolation=interpolation, max_size=max_size, - antialias=antialias, ) return tv_tensors.wrap(out, like=keypoints, canvas_size=canvas_size) @@ -1259,8 +1256,10 @@ def rotate_keypoints( def _rotate_keypoints_dispatch( inpt: tv_tensors.KeyPoints, angle: float, expand: bool = False, center: Optional[list[float]] = None, **kwargs ) -> tv_tensors.KeyPoints: - out, canvas_size = rotate_keypoints(inpt, canvas_size=inpt.canvas_size, angle=angle, center=center, 
expand=expand)
-    return tv_tensors.wrap(out, like=inpt, canvas_size=canvas_size)
+    output, canvas_size = rotate_keypoints(
+        inpt, canvas_size=inpt.canvas_size, angle=angle, center=center, expand=expand
+    )
+    return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
 
 
 def rotate_bounding_boxes(

From 4b62ef419d03d53bb66b6def63e96f2415a37ae3 Mon Sep 17 00:00:00 2001
From: "alexandre.schoepp"
Date: Mon, 5 May 2025 21:53:39 +0200
Subject: [PATCH 21/27] Improved documentation inside of the KeyPoints class
 definition

---
 torchvision/tv_tensors/_keypoints.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py
index e00c58d5134..c578d49cdce 100644
--- a/torchvision/tv_tensors/_keypoints.py
+++ b/torchvision/tv_tensors/_keypoints.py
@@ -14,7 +14,20 @@ class KeyPoints(TVTensor):
     Each point is represented by its XY coordinates.
 
     KeyPoints can be converted from :class:`torchvision.tv_tensors.BoundingBoxes`
-    by :func:`torchvision.transforms.v2.functional.convert_box_to_points`.
+    by :func:`torchvision.transforms.v2.functional.convert_bounding_boxes_to_points`.
+
+    KeyPoints may represent any object that can be represented by sequences of 2D points:
+    - `Polygonal chains`, including polylines, Bézier curves, etc.,
+      which should be of shape ``[N_chains, N_points, 2]``, i.e. ``[N_chains, N_segments + 1, 2]``
+    - Polygons, which should be of shape ``[N_polygons, N_points, 2]``, i.e. ``[N_polygons, N_sides, 2]``
+    - Skeletons, which could be of shape ``[N_skeletons, N_bones, 2, 2]`` for pose-estimation models
+
+    .. note::
+
+        Like for :class:`torchvision.tv_tensors.BoundingBoxes`, there should only ever be a single
+        instance of the :class:`torchvision.tv_tensors.KeyPoints` class per sample,
+        e.g. ``{"img": img, "points_of_interest": KeyPoints(...)}``,
+        although one :class:`torchvision.tv_tensors.KeyPoints` object can contain multiple key points.
 
     Args:
         data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
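To make the documented shapes and usage concrete, here is a minimal sketch based only on what this series adds: the ``KeyPoints(data, canvas_size=...)`` constructor and the new ``clamp_keypoints`` kernel. It assumes ``clamp_keypoints`` ends up exported from ``torchvision.transforms.v2.functional`` alongside its bounding-box counterpart; the coordinates, canvas size, and dict keys are illustrative only.

    import torch
    from torchvision import tv_tensors
    from torchvision.transforms.v2 import functional as F

    # A single polygon with four XY points on a 10x10 canvas: shape [1, 4, 2].
    polygon = torch.tensor([[[1.0, 1.0], [12.0, 1.0], [12.0, 12.0], [1.0, 12.0]]])
    points = tv_tensors.KeyPoints(polygon, canvas_size=(10, 10))

    # One KeyPoints instance per sample, alongside the image (keys are hypothetical).
    sample = {"img": torch.rand(3, 10, 10), "points_of_interest": points}

    # Coordinates outside the canvas are clamped; the KeyPoints subclass and its
    # canvas_size metadata are preserved through tv_tensors.wrap().
    clamped = F.clamp_keypoints(sample["points_of_interest"])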
From e99b82ae671a47d432613b457cb71ac56174a444 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 21:59:34 +0200 Subject: [PATCH 22/27] Improved convert_bounding_boxes_to_points to handle rotated bounding boxes and added tests for all formats --- test/test_transforms_v2.py | 41 ++++++++++++++----- torchvision/transforms/v2/functional/_meta.py | 26 +++++++++++- 2 files changed, 55 insertions(+), 12 deletions(-) diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index e3d59dda6b8..c3a9692a664 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -6877,18 +6877,39 @@ def test_no_valid_input(self, query): query(["blah"]) @pytest.mark.parametrize( - "boxes", [tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))] + "boxes", [ + tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 2., 2.]]), format="XYXY", canvas_size=(4, 4)), # [boxes0] + tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 1.]]), format="XYWH", canvas_size=(4, 4)), # [boxes1] + tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1., 1.]]), format="CXCYWH", canvas_size=(4, 4)), # [boxes2] + tv_tensors.BoundingBoxes(torch.tensor([[1.5, 1.5, 1., 1., 45]]), format="CXCYWHR", canvas_size=(4, 4)), # [boxes3] + tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 1., 45.]]), format="XYWHR", canvas_size=(4, 4)), # [boxes4] + tv_tensors.BoundingBoxes(torch.tensor([[1., 1., 1., 2., 2., 2., 2., 1.]]), format="XY" * 4, canvas_size=(4, 4)), # [boxes5] + ] ) def test_convert_bounding_boxes_to_points(self, boxes: tv_tensors.BoundingBoxes): - # TODO: this test can't handle rotated boxes yet kp = F.convert_bounding_boxes_to_points(boxes) - assert kp.shape == boxes.shape + (2,) + assert kp.shape == (boxes.shape[0], 4, 2) assert kp.dtype == boxes.dtype # kp is a list of A, B, C, D polygons. - # If we use A | C, we should get back the XYXY format of bounding box - reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) - reconverted_bbox = F.convert_bounding_box_format( - tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size), - new_format=boxes.format, - ) - assert (reconverted_bbox == boxes).all(), f"Invalid reconversion : {reconverted_bbox}" + + if F._meta.is_rotated_bounding_box_format(boxes.format): + # In the rotated case + # If we convert to XYXYXYXY format, we should get what we want. + reconverted = kp.reshape(-1, 8) + reconverted_bbox = F.convert_bounding_box_format( + tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXYXYXY, canvas_size=kp.canvas_size), + new_format=boxes.format + ) + assert ((reconverted_bbox - boxes).abs() < 1e-5).all(), ( # Rotational computations mean that we can't ensure exactitude. 
+ f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}\n\t" + f"Diff: {reconverted_bbox - boxes}" + ) + else: + # In the unrotated case + # If we use A | C, we should get back the XYXY format of bounding box + reconverted = torch.cat([kp[..., 0, :], kp[..., 2, :]], dim=-1) + reconverted_bbox = F.convert_bounding_box_format( + tv_tensors.BoundingBoxes(reconverted, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=kp.canvas_size), + new_format=boxes.format, + ) + assert (reconverted_bbox == boxes).all(), f"Invalid reconversion :\n\tGot: {reconverted_bbox}\n\tFrom: {boxes}" diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 4de86144de9..843d40de0c6 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -185,16 +185,38 @@ def _xyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: return bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]] +def _xyxyxyxy_to_keypoints(bounding_boxes: torch.Tensor) -> torch.Tensor: + return bounding_boxes[:, [[0, 1], [2, 3], [4, 5], [6, 7]]] + + def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -> tv_tensors.KeyPoints: """Converts a set of bounding boxes to its edge points. + .. note:: + + This handles rotated :class:`tv_tensors.BoundingBoxes` formats + by first converting them to XYXYXYXY format. + + Due to floating-point approximation, this may not be an exact computation. + Args: bounding_boxes (tv_tensors.BoundingBoxes): A set of ``N`` bounding boxes (of shape ``[N, 4]``) Returns: - tv_tensors.KeyPoints: The edges, of shape ``[N, 4, 2]`` + tv_tensors.KeyPoints: The edges, as a polygon of shape ``[N, 4, 2]`` """ - # TODO: support rotated BBOX + if is_rotated_bounding_box_format(bounding_boxes.format): + # We are working on a rotated bounding box + bbox = _convert_bounding_box_format( + bounding_boxes.as_subclass(torch.Tensor), + old_format=bounding_boxes.format, + new_format=BoundingBoxFormat.XYXYXYXY, + inplace=False, + ) + return tv_tensors.KeyPoints( + _xyxyxyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size + ) + bbox = _convert_bounding_box_format( bounding_boxes.as_subclass(torch.Tensor), old_format=bounding_boxes.format, From a869f39e609d5003a65c862aad343cd6fa187c75 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 22:05:50 +0200 Subject: [PATCH 23/27] Applied ufmt --- .../transforms/v2/functional/_geometry.py | 16 ++++------------ torchvision/transforms/v2/functional/_meta.py | 6 ++---- torchvision/tv_tensors/_keypoints.py | 3 +-- 3 files changed, 7 insertions(+), 18 deletions(-) diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index 27fe6a722bc..448199dbe0c 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -2677,9 +2677,7 @@ def five_crop_video( return five_crop_image(video, size) -def ten_crop( - inpt: torch.Tensor, size: list[int], vertical_flip: bool = False -) -> tuple[ +def ten_crop(inpt: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[ torch.Tensor, torch.Tensor, torch.Tensor, @@ -2703,9 +2701,7 @@ def ten_crop( @_register_five_ten_crop_kernel_internal(ten_crop, torch.Tensor) @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Image) -def ten_crop_image( - image: torch.Tensor, size: list[int], vertical_flip: bool = False -) -> tuple[ +def ten_crop_image(image: 
torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
     torch.Tensor,
     torch.Tensor,
     torch.Tensor,
@@ -2730,9 +2726,7 @@ def ten_crop_image(
 
 
 @_register_five_ten_crop_kernel_internal(ten_crop, PIL.Image.Image)
-def _ten_crop_image_pil(
-    image: PIL.Image.Image, size: list[int], vertical_flip: bool = False
-) -> tuple[
+def _ten_crop_image_pil(image: PIL.Image.Image, size: list[int], vertical_flip: bool = False) -> tuple[
     PIL.Image.Image,
     PIL.Image.Image,
     PIL.Image.Image,
@@ -2757,9 +2751,7 @@ def _ten_crop_image_pil(
 
 
 @_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Video)
-def ten_crop_video(
-    video: torch.Tensor, size: list[int], vertical_flip: bool = False
-) -> tuple[
+def ten_crop_video(video: torch.Tensor, size: list[int], vertical_flip: bool = False) -> tuple[
     torch.Tensor,
     torch.Tensor,
     torch.Tensor,
diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py
index 843d40de0c6..d6699235572 100644
--- a/torchvision/transforms/v2/functional/_meta.py
+++ b/torchvision/transforms/v2/functional/_meta.py
@@ -196,7 +196,7 @@ def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -
 
         This handles rotated :class:`tv_tensors.BoundingBoxes` formats
         by first converting them to XYXYXYXY format.
-    
+
         Due to floating-point approximation, this may not be an exact computation.
 
     Args:
@@ -213,9 +213,7 @@ def convert_bounding_boxes_to_points(bounding_boxes: tv_tensors.BoundingBoxes) -
             new_format=BoundingBoxFormat.XYXYXYXY,
             inplace=False,
         )
-    return tv_tensors.KeyPoints(
-        _xyxyxyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size
-    )
+        return tv_tensors.KeyPoints(_xyxyxyxy_to_keypoints(bbox), canvas_size=bounding_boxes.canvas_size)
 
     bbox = _convert_bounding_box_format(
         bounding_boxes.as_subclass(torch.Tensor),
diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py
index c578d49cdce..87520353cc3 100644
--- a/torchvision/tv_tensors/_keypoints.py
+++ b/torchvision/tv_tensors/_keypoints.py
@@ -72,8 +72,7 @@ def __init__(
         dtype: Optional[torch.dtype] = None,
         device: Optional[Union[torch.device, str, int]] = None,
         requires_grad: Optional[bool] = None,
-    ):
-        ...
+    ): ...
 
     @classmethod
     def _wrap_output(

From 6007b2cdbd298accbe05382662a38821ba4dccd7 Mon Sep 17 00:00:00 2001
From: "alexandre.schoepp"
Date: Mon, 5 May 2025 22:07:57 +0200
Subject: [PATCH 24/27] Adding a type:ignore[override] on KeyPoints.__repr__ as
 it also exists on BoundingBoxes.__repr__, whose signature was copied

---
 torchvision/tv_tensors/_keypoints.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py
index 87520353cc3..1e019e46446 100644
--- a/torchvision/tv_tensors/_keypoints.py
+++ b/torchvision/tv_tensors/_keypoints.py
@@ -102,5 +102,5 @@ def _wrap_output(
             output = tuple(KeyPoints(part, canvas_size=canvas_size) for part in output)
         return output
 
-    def __repr__(self, *, tensor_contents: Any = None) -> str:
+    def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
         return self._make_repr(canvas_size=self.canvas_size)

From b68b57bbe4c1ab54a02666f00d69519d6f7f763c Mon Sep 17 00:00:00 2001
From: "alexandre.schoepp"
Date: Mon, 5 May 2025 22:12:11 +0200
Subject: [PATCH 25/27] Fixed flake8 compliance on "..."
present in the line of the __init__ "function" definition --- torchvision/tv_tensors/_keypoints.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/torchvision/tv_tensors/_keypoints.py b/torchvision/tv_tensors/_keypoints.py index 1e019e46446..8e0b1a502fc 100644 --- a/torchvision/tv_tensors/_keypoints.py +++ b/torchvision/tv_tensors/_keypoints.py @@ -72,7 +72,8 @@ def __init__( dtype: Optional[torch.dtype] = None, device: Optional[Union[torch.device, str, int]] = None, requires_grad: Optional[bool] = None, - ): ... + ): + pass @classmethod def _wrap_output( From 801e24d61aa5337ebd274f45c46b113ec61e1316 Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 22:12:44 +0200 Subject: [PATCH 26/27] get_all_keypoints is now get_keypoints and returns the only keypoints object in the sample (as is assumed) --- torchvision/transforms/v2/_utils.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/torchvision/transforms/v2/_utils.py b/torchvision/transforms/v2/_utils.py index 5add3c7bc20..fd41b222b19 100644 --- a/torchvision/transforms/v2/_utils.py +++ b/torchvision/transforms/v2/_utils.py @@ -2,7 +2,7 @@ import collections.abc import numbers -from collections.abc import Iterable, Sequence +from collections.abc import Sequence from contextlib import suppress from typing import Any, Callable, Literal @@ -165,18 +165,16 @@ def get_bounding_boxes(flat_inputs: list[Any]) -> tv_tensors.BoundingBoxes: raise ValueError("No bounding boxes were found in the sample") -def get_all_keypoints(flat_inputs: list[Any]) -> Iterable[tv_tensors.KeyPoints]: - """Yields all KeyPoints in the input. +def get_keypoints(flat_inputs: list[Any]) -> tv_tensors.KeyPoints: + """Returns the KeyPoints in the input. - Raises: - ValueError: No KeyPoints can be found + Assumes only one ``KeyPoints`` object is present """ generator = (inpt for inpt in flat_inputs if isinstance(inpt, tv_tensors.KeyPoints)) try: - yield next(generator) + return next(generator) except StopIteration: raise ValueError("No Keypoints were found in the sample.") - return generator def query_chw(flat_inputs: list[Any]) -> tuple[int, int, int]: From 73a40a83c6cfe3df36b6f0c080f7f22b9c93081e Mon Sep 17 00:00:00 2001 From: "alexandre.schoepp" Date: Mon, 5 May 2025 22:31:10 +0200 Subject: [PATCH 27/27] Fixed docstring on sanitize_keypoints --- torchvision/transforms/v2/functional/_misc.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index bddd8e27721..35fc7e3110d 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -334,17 +334,24 @@ def sanitize_keypoints( """Removes degenerate/invalid keypoints and returns the corresponding indexing mask. This removes the keypoints that are outside of their corresponing image. - You may want to first call :func:`~torchvision.transforms.v2.functional.clam_keypoints` - first to avoid undesired removals. + + It is recommended to call it at the end of a pipeline, before passing the + input to the models. It is critical to call this transform if + :class:`~torchvision.transforms.v2.RandomIoUCrop` was called. + If you want to be extra careful, you may call it after all transforms that + may modify the key points but once at the end should be enough in most + cases. .. 
note:: - Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes` + + Points that touch the edge of the canvas are removed, unlike for :func:`sanitize_bounding_boxes`. Raises: ValueError: If the keypoints are not passed as a two dimensional tensor. Args: - keypoints (torch.Tensor or class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being removed + keypoints (torch.Tensor or :class:`~torchvision.tv_tensors.KeyPoints`): The Keypoints being sanitized. + Should be of shape ``[N, 2]`` canvas_size (Optional[tuple[int, int]], optional): The canvas_size of the bounding boxes (size of the corresponding image/video). Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.KeyPoints` object. @@ -372,8 +379,10 @@ def sanitize_keypoints( canvas_size=canvas_size, ) return keypoints[valid], valid + if not isinstance(keypoints, tv_tensors.KeyPoints): raise ValueError("keypoints must be a tv_tensors.KeyPoints instance or a pure tensor.") + valid = _get_sanitize_keypoints_mask( keypoints, canvas_size=keypoints.canvas_size,