# Ultralytics YOLO 🚀, AGPL-3.0 license
import math |
|
import random |
|
from copy import deepcopy |
|
from typing import Tuple, Union |
|
|
|
import cv2 |
|
import numpy as np |
|
import torch |
|
from PIL import Image |
|
|
|
from ultralytics.data.utils import polygons2masks, polygons2masks_overlap |
|
from ultralytics.utils import LOGGER, colorstr |
|
from ultralytics.utils.checks import check_version |
|
from ultralytics.utils.instance import Instances |
|
from ultralytics.utils.metrics import bbox_ioa |
|
from ultralytics.utils.ops import segment2box, xyxyxyxy2xywhr |
|
from ultralytics.utils.torch_utils import TORCHVISION_0_10, TORCHVISION_0_11, TORCHVISION_0_13 |
|
|
|
DEFAULT_MEAN = (0.0, 0.0, 0.0) |
|
DEFAULT_STD = (1.0, 1.0, 1.0) |
|
DEFAULT_CROP_FRACTION = 1.0 |
|
|
|
|
|
class BaseTransform: |
|
""" |
|
Base class for image transformations in the Ultralytics library. |
|
|
|
This class serves as a foundation for implementing various image processing operations, designed to be |
|
compatible with both classification and semantic segmentation tasks. |
|
|
|
Methods: |
|
apply_image: Applies image transformations to labels. |
|
apply_instances: Applies transformations to object instances in labels. |
|
apply_semantic: Applies semantic segmentation to an image. |
|
__call__: Applies all label transformations to an image, instances, and semantic masks. |
|
|
|
Examples: |
|
>>> transform = BaseTransform() |
|
        >>> labels = {"img": np.array(...), "instances": [...], "semantic": np.array(...)}
|
>>> transformed_labels = transform(labels) |
|
""" |
|
|
|
def __init__(self) -> None: |
|
""" |
|
Initializes the BaseTransform object. |
|
|
|
This constructor sets up the base transformation object, which can be extended for specific image |
|
processing tasks. It is designed to be compatible with both classification and semantic segmentation. |
|
|
|
Examples: |
|
>>> transform = BaseTransform() |
|
""" |
|
pass |
|
|
|
def apply_image(self, labels): |
|
""" |
|
Applies image transformations to labels. |
|
|
|
This method is intended to be overridden by subclasses to implement specific image transformation |
|
logic. In its base form, it returns the input labels unchanged. |
|
|
|
Args: |
|
labels (Any): The input labels to be transformed. The exact type and structure of labels may |
|
vary depending on the specific implementation. |
|
|
|
Returns: |
|
(Any): The transformed labels. In the base implementation, this is identical to the input. |
|
|
|
Examples: |
|
>>> transform = BaseTransform() |
|
>>> original_labels = [1, 2, 3] |
|
>>> transformed_labels = transform.apply_image(original_labels) |
|
>>> print(transformed_labels) |
|
[1, 2, 3] |
|
""" |
|
        return labels
|
|
|
def apply_instances(self, labels): |
|
""" |
|
Applies transformations to object instances in labels. |
|
|
|
This method is responsible for applying various transformations to object instances within the given |
|
labels. It is designed to be overridden by subclasses to implement specific instance transformation |
|
logic. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing label information, including object instances. |
|
|
|
Returns: |
|
(Dict): The modified labels dictionary with transformed object instances. |
|
|
|
Examples: |
|
>>> transform = BaseTransform() |
|
>>> labels = {"instances": Instances(xyxy=torch.rand(5, 4), cls=torch.randint(0, 80, (5,)))} |
|
>>> transformed_labels = transform.apply_instances(labels) |
|
""" |
|
        return labels
|
|
|
def apply_semantic(self, labels): |
|
""" |
|
Applies semantic segmentation transformations to an image. |
|
|
|
This method is intended to be overridden by subclasses to implement specific semantic segmentation |
|
transformations. In its base form, it does not perform any operations. |
|
|
|
Args: |
|
labels (Any): The input labels or semantic segmentation mask to be transformed. |
|
|
|
Returns: |
|
(Any): The transformed semantic segmentation mask or labels. |
|
|
|
Examples: |
|
>>> transform = BaseTransform() |
|
>>> semantic_mask = np.zeros((100, 100), dtype=np.uint8) |
|
>>> transformed_mask = transform.apply_semantic(semantic_mask) |
|
""" |
|
        return labels
|
|
|
def __call__(self, labels): |
|
""" |
|
Applies all label transformations to an image, instances, and semantic masks. |
|
|
|
This method orchestrates the application of various transformations defined in the BaseTransform class |
|
to the input labels. It sequentially calls the apply_image and apply_instances methods to process the |
|
image and object instances, respectively. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image data and annotations. Expected keys include 'img' for |
|
the image data, and 'instances' for object instances. |
|
|
|
Returns: |
|
(Dict): The input labels dictionary with transformed image and instances. |
|
|
|
Examples: |
|
>>> transform = BaseTransform() |
|
>>> labels = {"img": np.random.rand(640, 640, 3), "instances": []} |
|
>>> transformed_labels = transform(labels) |
|
""" |
|
        self.apply_image(labels)
        self.apply_instances(labels)
        self.apply_semantic(labels)
        return labels
|
|
|
|
|
class Compose: |
|
""" |
|
A class for composing multiple image transformations. |
|
|
|
Attributes: |
|
transforms (List[Callable]): A list of transformation functions to be applied sequentially. |
|
|
|
Methods: |
|
__call__: Applies a series of transformations to input data. |
|
append: Appends a new transform to the existing list of transforms. |
|
insert: Inserts a new transform at a specified index in the list of transforms. |
|
__getitem__: Retrieves a specific transform or a set of transforms using indexing. |
|
__setitem__: Sets a specific transform or a set of transforms using indexing. |
|
tolist: Converts the list of transforms to a standard Python list. |
|
|
|
Examples: |
|
>>> transforms = [RandomFlip(), RandomPerspective(30)] |
|
>>> compose = Compose(transforms) |
|
>>> transformed_data = compose(data) |
|
>>> compose.append(CenterCrop((224, 224))) |
|
>>> compose.insert(0, RandomFlip()) |
|
""" |
|
|
|
def __init__(self, transforms): |
|
""" |
|
Initializes the Compose object with a list of transforms. |
|
|
|
Args: |
|
transforms (List[Callable]): A list of callable transform objects to be applied sequentially. |
|
|
|
Examples: |
|
>>> from ultralytics.data.augment import Compose, RandomHSV, RandomFlip |
|
>>> transforms = [RandomHSV(), RandomFlip()] |
|
>>> compose = Compose(transforms) |
|
""" |
|
self.transforms = transforms if isinstance(transforms, list) else [transforms] |
|
|
|
def __call__(self, data): |
|
""" |
|
Applies a series of transformations to input data. This method sequentially applies each transformation in the |
|
Compose object's list of transforms to the input data. |
|
|
|
Args: |
|
data (Any): The input data to be transformed. This can be of any type, depending on the |
|
transformations in the list. |
|
|
|
Returns: |
|
(Any): The transformed data after applying all transformations in sequence. |
|
|
|
Examples: |
|
>>> transforms = [Transform1(), Transform2(), Transform3()] |
|
>>> compose = Compose(transforms) |
|
>>> transformed_data = compose(input_data) |
|
""" |
|
for t in self.transforms: |
|
data = t(data) |
|
return data |
|
|
|
def append(self, transform): |
|
""" |
|
Appends a new transform to the existing list of transforms. |
|
|
|
Args: |
|
transform (BaseTransform): The transformation to be added to the composition. |
|
|
|
Examples: |
|
>>> compose = Compose([RandomFlip(), RandomPerspective()]) |
|
>>> compose.append(RandomHSV()) |
|
""" |
|
self.transforms.append(transform) |
|
|
|
def insert(self, index, transform): |
|
""" |
|
Inserts a new transform at a specified index in the existing list of transforms. |
|
|
|
Args: |
|
index (int): The index at which to insert the new transform. |
|
transform (BaseTransform): The transform object to be inserted. |
|
|
|
Examples: |
|
>>> compose = Compose([Transform1(), Transform2()]) |
|
>>> compose.insert(1, Transform3()) |
|
>>> len(compose.transforms) |
|
3 |
|
""" |
|
self.transforms.insert(index, transform) |
|
|
|
def __getitem__(self, index: Union[list, int]) -> "Compose": |
|
""" |
|
Retrieves a specific transform or a set of transforms using indexing. |
|
|
|
Args: |
|
index (int | List[int]): Index or list of indices of the transforms to retrieve. |
|
|
|
Returns: |
|
(Compose): A new Compose object containing the selected transform(s). |
|
|
|
Raises: |
|
AssertionError: If the index is not of type int or list. |
|
|
|
Examples: |
|
>>> transforms = [RandomFlip(), RandomPerspective(10), RandomHSV(0.5, 0.5, 0.5)] |
|
>>> compose = Compose(transforms) |
|
>>> single_transform = compose[1] # Returns a Compose object with only RandomPerspective |
|
            >>> multiple_transforms = compose[[0, 1]]  # Returns a Compose object with RandomFlip and RandomPerspective
|
""" |
|
assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}" |
|
index = [index] if isinstance(index, int) else index |
|
return Compose([self.transforms[i] for i in index]) |
|
|
|
def __setitem__(self, index: Union[list, int], value: Union[list, int]) -> None: |
|
""" |
|
Sets one or more transforms in the composition using indexing. |
|
|
|
Args: |
|
index (int | List[int]): Index or list of indices to set transforms at. |
|
value (Any | List[Any]): Transform or list of transforms to set at the specified index(es). |
|
|
|
Raises: |
|
AssertionError: If index type is invalid, value type doesn't match index type, or index is out of range. |
|
|
|
Examples: |
|
>>> compose = Compose([Transform1(), Transform2(), Transform3()]) |
|
>>> compose[1] = NewTransform() # Replace second transform |
|
            >>> compose[[0, 1]] = [NewTransform1(), NewTransform2()]  # Replace first two transforms
|
""" |
|
assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}" |
|
if isinstance(index, list): |
|
assert isinstance(value, list), ( |
|
f"The indices should be the same type as values, but got {type(index)} and {type(value)}" |
|
) |
|
if isinstance(index, int): |
|
index, value = [index], [value] |
|
for i, v in zip(index, value): |
|
assert i < len(self.transforms), f"list index {i} out of range {len(self.transforms)}." |
|
self.transforms[i] = v |
|
|
|
def tolist(self): |
|
""" |
|
Converts the list of transforms to a standard Python list. |
|
|
|
Returns: |
|
(List): A list containing all the transform objects in the Compose instance. |
|
|
|
Examples: |
|
>>> transforms = [RandomFlip(), RandomPerspective(10), CenterCrop()] |
|
>>> compose = Compose(transforms) |
|
>>> transform_list = compose.tolist() |
|
>>> print(len(transform_list)) |
|
3 |
|
""" |
|
return self.transforms |
|
|
|
def __repr__(self): |
|
""" |
|
Returns a string representation of the Compose object. |
|
|
|
Returns: |
|
(str): A string representation of the Compose object, including the list of transforms. |
|
|
|
Examples: |
|
>>> transforms = [RandomFlip(), RandomPerspective(degrees=10, translate=0.1, scale=0.1)] |
|
>>> compose = Compose(transforms) |
|
>>> print(compose) |
|
Compose([ |
|
RandomFlip(), |
|
RandomPerspective(degrees=10, translate=0.1, scale=0.1) |
|
]) |
|
""" |
|
return f"{self.__class__.__name__}({', '.join([f'{t}' for t in self.transforms])})" |
|
|
|
|
|
class BaseMixTransform: |
|
""" |
|
Base class for mix transformations like MixUp and Mosaic. |
|
|
|
This class provides a foundation for implementing mix transformations on datasets. It handles the |
|
probability-based application of transforms and manages the mixing of multiple images and labels. |
|
|
|
Attributes: |
|
dataset (Any): The dataset object containing images and labels. |
|
pre_transform (Callable | None): Optional transform to apply before mixing. |
|
p (float): Probability of applying the mix transformation. |
|
|
|
Methods: |
|
__call__: Applies the mix transformation to the input labels. |
|
_mix_transform: Abstract method to be implemented by subclasses for specific mix operations. |
|
get_indexes: Abstract method to get indexes of images to be mixed. |
|
_update_label_text: Updates label text for mixed images. |
|
|
|
Examples: |
|
>>> class CustomMixTransform(BaseMixTransform): |
|
... def _mix_transform(self, labels): |
|
... # Implement custom mix logic here |
|
... return labels |
|
... |
|
... def get_indexes(self): |
|
... return [random.randint(0, len(self.dataset) - 1) for _ in range(3)] |
|
>>> dataset = YourDataset() |
|
>>> transform = CustomMixTransform(dataset, p=0.5) |
|
>>> mixed_labels = transform(original_labels) |
|
""" |
|
|
|
def __init__(self, dataset, pre_transform=None, p=0.0) -> None: |
|
""" |
|
Initializes the BaseMixTransform object for mix transformations like MixUp and Mosaic. |
|
|
|
This class serves as a base for implementing mix transformations in image processing pipelines. |
|
|
|
Args: |
|
dataset (Any): The dataset object containing images and labels for mixing. |
|
pre_transform (Callable | None): Optional transform to apply before mixing. |
|
p (float): Probability of applying the mix transformation. Should be in the range [0.0, 1.0]. |
|
|
|
Examples: |
|
>>> dataset = YOLODataset("path/to/data") |
|
>>> pre_transform = Compose([RandomFlip(), RandomPerspective()]) |
|
>>> mix_transform = BaseMixTransform(dataset, pre_transform, p=0.5) |
|
""" |
|
self.dataset = dataset |
|
self.pre_transform = pre_transform |
|
self.p = p |
|
|
|
def __call__(self, labels): |
|
""" |
|
Applies pre-processing transforms and mixup/mosaic transforms to labels data. |
|
|
|
This method determines whether to apply the mix transform based on a probability factor. If applied, it |
|
selects additional images, applies pre-transforms if specified, and then performs the mix transform. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing label data for an image. |
|
|
|
Returns: |
|
(Dict): The transformed labels dictionary, which may include mixed data from other images. |
|
|
|
Examples: |
|
>>> transform = BaseMixTransform(dataset, pre_transform=None, p=0.5) |
|
            >>> result = transform({"img": img, "bboxes": boxes, "cls": classes})
|
""" |
|
if random.uniform(0, 1) > self.p: |
|
return labels |
|
|
|
|
|
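        # Get index of one or three other images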
indexes = self.get_indexes() |
|
if isinstance(indexes, int): |
|
indexes = [indexes] |
|
|
|
|
|
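        # Get image and label info for the images to be mixed in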
mix_labels = [self.dataset.get_image_and_label(i) for i in indexes] |
|
|
|
if self.pre_transform is not None: |
|
for i, data in enumerate(mix_labels): |
|
mix_labels[i] = self.pre_transform(data) |
|
labels["mix_labels"] = mix_labels |
|
|
|
|
|
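        # Update cls and texts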
labels = self._update_label_text(labels) |
|
|
|
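        # Mosaic or MixUp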
labels = self._mix_transform(labels) |
|
labels.pop("mix_labels", None) |
|
return labels |
|
|
|
def _mix_transform(self, labels): |
|
""" |
|
Applies MixUp or Mosaic augmentation to the label dictionary. |
|
|
|
This method should be implemented by subclasses to perform specific mix transformations like MixUp or |
|
Mosaic. It modifies the input label dictionary in-place with the augmented data. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image and label data. Expected to have a 'mix_labels' key |
|
with a list of additional image and label data for mixing. |
|
|
|
Returns: |
|
(Dict): The modified labels dictionary with augmented data after applying the mix transform. |
|
|
|
Examples: |
|
>>> transform = BaseMixTransform(dataset) |
|
>>> labels = {"image": img, "bboxes": boxes, "mix_labels": [{"image": img2, "bboxes": boxes2}]} |
|
>>> augmented_labels = transform._mix_transform(labels) |
|
""" |
|
raise NotImplementedError |
|
|
|
def get_indexes(self): |
|
""" |
|
Gets a list of shuffled indexes for mosaic augmentation. |
|
|
|
Returns: |
|
(List[int]): A list of shuffled indexes from the dataset. |
|
|
|
Examples: |
|
>>> transform = BaseMixTransform(dataset) |
|
>>> indexes = transform.get_indexes() |
|
>>> print(indexes) # [3, 18, 7, 2] |
|
""" |
|
raise NotImplementedError |
|
|
|
@staticmethod |
|
def _update_label_text(labels): |
|
""" |
|
Updates label text and class IDs for mixed labels in image augmentation. |
|
|
|
This method processes the 'texts' and 'cls' fields of the input labels dictionary and any mixed labels, |
|
creating a unified set of text labels and updating class IDs accordingly. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing label information, including 'texts' and 'cls' fields, |
|
and optionally a 'mix_labels' field with additional label dictionaries. |
|
|
|
Returns: |
|
(Dict): The updated labels dictionary with unified text labels and updated class IDs. |
|
|
|
Examples: |
|
>>> labels = { |
|
... "texts": [["cat"], ["dog"]], |
|
... "cls": torch.tensor([[0], [1]]), |
|
... "mix_labels": [{"texts": [["bird"], ["fish"]], "cls": torch.tensor([[0], [1]])}], |
|
... } |
|
>>> updated_labels = self._update_label_text(labels) |
|
>>> print(updated_labels["texts"]) |
|
[['cat'], ['dog'], ['bird'], ['fish']] |
|
>>> print(updated_labels["cls"]) |
|
tensor([[0], |
|
[1]]) |
|
>>> print(updated_labels["mix_labels"][0]["cls"]) |
|
tensor([[2], |
|
[3]]) |
|
""" |
|
if "texts" not in labels: |
|
return labels |
|
|
|
mix_texts = sum([labels["texts"]] + [x["texts"] for x in labels["mix_labels"]], []) |
|
mix_texts = list({tuple(x) for x in mix_texts}) |
|
text2id = {text: i for i, text in enumerate(mix_texts)} |
|
|
|
for label in [labels] + labels["mix_labels"]: |
|
for i, cls in enumerate(label["cls"].squeeze(-1).tolist()): |
|
text = label["texts"][int(cls)] |
|
label["cls"][i] = text2id[tuple(text)] |
|
label["texts"] = mix_texts |
|
return labels |
|
|
|
|
|
class Mosaic(BaseMixTransform): |
|
""" |
|
Mosaic augmentation for image datasets. |
|
|
|
This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image. |
|
The augmentation is applied to a dataset with a given probability. |
|
|
|
Attributes: |
|
dataset: The dataset on which the mosaic augmentation is applied. |
|
imgsz (int): Image size (height and width) after mosaic pipeline of a single image. |
|
p (float): Probability of applying the mosaic augmentation. Must be in the range 0-1. |
|
n (int): The grid size, either 4 (for 2x2) or 9 (for 3x3). |
|
border (Tuple[int, int]): Border size for width and height. |
|
|
|
Methods: |
|
get_indexes: Returns a list of random indexes from the dataset. |
|
        _mix_transform: Applies mosaic transformation to the input image and labels.
|
_mosaic3: Creates a 1x3 image mosaic. |
|
_mosaic4: Creates a 2x2 image mosaic. |
|
_mosaic9: Creates a 3x3 image mosaic. |
|
_update_labels: Updates labels with padding. |
|
_cat_labels: Concatenates labels and clips mosaic border instances. |
|
|
|
Examples: |
|
>>> from ultralytics.data.augment import Mosaic |
|
>>> dataset = YourDataset(...) # Your image dataset |
|
>>> mosaic_aug = Mosaic(dataset, imgsz=640, p=0.5, n=4) |
|
>>> augmented_labels = mosaic_aug(original_labels) |
|
""" |
|
|
|
def __init__(self, dataset, imgsz=640, p=1.0, n=4): |
|
""" |
|
Initializes the Mosaic augmentation object. |
|
|
|
This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image. |
|
The augmentation is applied to a dataset with a given probability. |
|
|
|
Args: |
|
dataset (Any): The dataset on which the mosaic augmentation is applied. |
|
imgsz (int): Image size (height and width) after mosaic pipeline of a single image. |
|
p (float): Probability of applying the mosaic augmentation. Must be in the range 0-1. |
|
n (int): The grid size, either 4 (for 2x2) or 9 (for 3x3). |
|
|
|
Examples: |
|
>>> from ultralytics.data.augment import Mosaic |
|
>>> dataset = YourDataset(...) |
|
>>> mosaic_aug = Mosaic(dataset, imgsz=640, p=0.5, n=4) |
|
""" |
|
assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}." |
|
assert n in {4, 9}, "grid must be equal to 4 or 9." |
|
super().__init__(dataset=dataset, p=p) |
|
self.imgsz = imgsz |
|
self.border = (-imgsz // 2, -imgsz // 2) |
|
self.n = n |
|
|
|
def get_indexes(self, buffer=True): |
|
""" |
|
Returns a list of random indexes from the dataset for mosaic augmentation. |
|
|
|
This method selects random image indexes either from a buffer or from the entire dataset, depending on |
|
the 'buffer' parameter. It is used to choose images for creating mosaic augmentations. |
|
|
|
Args: |
|
buffer (bool): If True, selects images from the dataset buffer. If False, selects from the entire |
|
dataset. |
|
|
|
Returns: |
|
            (List[int]): A list of random image indexes. The list contains n-1 indexes, where n is the mosaic
                grid size: 3 indexes for a 2x2 mosaic (n=4) or 8 indexes for a 3x3 mosaic (n=9).
|
|
|
Examples: |
|
>>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4) |
|
>>> indexes = mosaic.get_indexes() |
|
>>> print(len(indexes)) # Output: 3 |
|
""" |
|
if buffer: |
|
return random.choices(list(self.dataset.buffer), k=self.n - 1) |
|
else: |
|
return [random.randint(0, len(self.dataset) - 1) for _ in range(self.n - 1)] |
|
|
|
def _mix_transform(self, labels): |
|
""" |
|
Applies mosaic augmentation to the input image and labels. |
|
|
|
This method combines multiple images (3, 4, or 9) into a single mosaic image based on the 'n' attribute. |
|
It ensures that rectangular annotations are not present and that there are other images available for |
|
mosaic augmentation. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image data and annotations. Expected keys include: |
|
- 'rect_shape': Should be None as rect and mosaic are mutually exclusive. |
|
- 'mix_labels': A list of dictionaries containing data for other images to be used in the mosaic. |
|
|
|
Returns: |
|
(Dict): A dictionary containing the mosaic-augmented image and updated annotations. |
|
|
|
Raises: |
|
AssertionError: If 'rect_shape' is not None or if 'mix_labels' is empty. |
|
|
|
Examples: |
|
>>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4) |
|
>>> augmented_data = mosaic._mix_transform(labels) |
|
""" |
|
assert labels.get("rect_shape", None) is None, "rect and mosaic are mutually exclusive." |
|
assert len(labels.get("mix_labels", [])), "There are no other images for mosaic augment." |
|
return ( |
|
self._mosaic3(labels) if self.n == 3 else self._mosaic4(labels) if self.n == 4 else self._mosaic9(labels) |
|
) |
|
|
|
def _mosaic3(self, labels): |
|
""" |
|
Creates a 1x3 image mosaic by combining three images. |
|
|
|
This method arranges three images in a horizontal layout, with the main image in the center and two |
|
additional images on either side. It's part of the Mosaic augmentation technique used in object detection. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image and label information for the main (center) image. |
|
Must include 'img' key with the image array, and 'mix_labels' key with a list of two |
|
dictionaries containing information for the side images. |
|
|
|
Returns: |
|
(Dict): A dictionary with the mosaic image and updated labels. Keys include: |
|
- 'img' (np.ndarray): The mosaic image array with shape (H, W, C). |
|
- Other keys from the input labels, updated to reflect the new image dimensions. |
|
|
|
Examples: |
|
>>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=3) |
|
>>> labels = { |
|
... "img": np.random.rand(480, 640, 3), |
|
... "mix_labels": [{"img": np.random.rand(480, 640, 3)} for _ in range(2)], |
|
... } |
|
>>> result = mosaic._mosaic3(labels) |
|
>>> print(result["img"].shape) |
|
(640, 640, 3) |
|
""" |
|
mosaic_labels = [] |
|
s = self.imgsz |
|
for i in range(3): |
|
labels_patch = labels if i == 0 else labels["mix_labels"][i - 1] |
|
|
|
img = labels_patch["img"] |
|
h, w = labels_patch.pop("resized_shape") |
|
|
|
|
|
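            # Place img in img3: i == 0 is the center tile, i == 1 the right tile, i == 2 the left tile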
if i == 0: |
|
img3 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) |
|
h0, w0 = h, w |
|
c = s, s, s + w, s + h |
|
elif i == 1: |
|
c = s + w0, s, s + w0 + w, s + h |
|
elif i == 2: |
|
c = s - w, s + h0 - h, s, s + h0 |
|
|
|
padw, padh = c[:2] |
|
x1, y1, x2, y2 = (max(x, 0) for x in c) |
|
|
|
img3[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] |
|
|
|
|
|
|
|
labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1]) |
|
mosaic_labels.append(labels_patch) |
|
final_labels = self._cat_labels(mosaic_labels) |
|
|
|
final_labels["img"] = img3[-self.border[0] : self.border[0], -self.border[1] : self.border[1]] |
|
return final_labels |
|
|
|
def _mosaic4(self, labels): |
|
""" |
|
Creates a 2x2 image mosaic from four input images. |
|
|
|
This method combines four images into a single mosaic image by placing them in a 2x2 grid. It also |
|
updates the corresponding labels for each image in the mosaic. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image data and labels for the base image (index 0) and three |
|
additional images (indices 1-3) in the 'mix_labels' key. |
|
|
|
Returns: |
|
(Dict): A dictionary containing the mosaic image and updated labels. The 'img' key contains the mosaic |
|
image as a numpy array, and other keys contain the combined and adjusted labels for all four images. |
|
|
|
Examples: |
|
>>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4) |
|
>>> labels = { |
|
... "img": np.random.rand(480, 640, 3), |
|
... "mix_labels": [{"img": np.random.rand(480, 640, 3)} for _ in range(3)], |
|
... } |
|
>>> result = mosaic._mosaic4(labels) |
|
>>> assert result["img"].shape == (1280, 1280, 3) |
|
""" |
|
mosaic_labels = [] |
|
s = self.imgsz |
|
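        # Random mosaic center x, y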
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) |
|
for i in range(4): |
|
labels_patch = labels if i == 0 else labels["mix_labels"][i - 1] |
|
|
|
img = labels_patch["img"] |
|
h, w = labels_patch.pop("resized_shape") |
|
|
|
|
|
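            # Place img in img4: i == 0 top-left, i == 1 top-right, i == 2 bottom-left, i == 3 bottom-right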
if i == 0: |
|
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) |
|
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc |
|
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h |
|
elif i == 1: |
|
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc |
|
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h |
|
elif i == 2: |
|
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) |
|
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) |
|
elif i == 3: |
|
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) |
|
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) |
|
|
|
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] |
|
padw = x1a - x1b |
|
padh = y1a - y1b |
|
|
|
labels_patch = self._update_labels(labels_patch, padw, padh) |
|
mosaic_labels.append(labels_patch) |
|
final_labels = self._cat_labels(mosaic_labels) |
|
final_labels["img"] = img4 |
|
return final_labels |
|
|
|
def _mosaic9(self, labels): |
|
""" |
|
Creates a 3x3 image mosaic from the input image and eight additional images. |
|
|
|
This method combines nine images into a single mosaic image. The input image is placed at the center, |
|
and eight additional images from the dataset are placed around it in a 3x3 grid pattern. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing the input image and its associated labels. It should have |
|
the following keys: |
|
- 'img' (numpy.ndarray): The input image. |
|
- 'resized_shape' (Tuple[int, int]): The shape of the resized image (height, width). |
|
- 'mix_labels' (List[Dict]): A list of dictionaries containing information for the additional |
|
eight images, each with the same structure as the input labels. |
|
|
|
Returns: |
|
(Dict): A dictionary containing the mosaic image and updated labels. It includes the following keys: |
|
- 'img' (numpy.ndarray): The final mosaic image. |
|
- Other keys from the input labels, updated to reflect the new mosaic arrangement. |
|
|
|
Examples: |
|
>>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=9) |
|
>>> input_labels = dataset[0] |
|
>>> mosaic_result = mosaic._mosaic9(input_labels) |
|
>>> mosaic_image = mosaic_result["img"] |
|
""" |
|
mosaic_labels = [] |
|
s = self.imgsz |
|
hp, wp = -1, -1 |
|
for i in range(9): |
|
labels_patch = labels if i == 0 else labels["mix_labels"][i - 1] |
|
|
|
img = labels_patch["img"] |
|
h, w = labels_patch.pop("resized_shape") |
|
|
|
|
|
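            # Place img in img9: center, top, top-right, right, bottom-right, bottom, bottom-left, left, top-left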
if i == 0: |
|
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) |
|
h0, w0 = h, w |
|
c = s, s, s + w, s + h |
|
elif i == 1: |
|
c = s, s - h, s + w, s |
|
elif i == 2: |
|
c = s + wp, s - h, s + wp + w, s |
|
elif i == 3: |
|
c = s + w0, s, s + w0 + w, s + h |
|
elif i == 4: |
|
c = s + w0, s + hp, s + w0 + w, s + hp + h |
|
elif i == 5: |
|
c = s + w0 - w, s + h0, s + w0, s + h0 + h |
|
elif i == 6: |
|
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h |
|
elif i == 7: |
|
c = s - w, s + h0 - h, s, s + h0 |
|
elif i == 8: |
|
c = s - w, s + h0 - hp - h, s, s + h0 - hp |
|
|
|
padw, padh = c[:2] |
|
x1, y1, x2, y2 = (max(x, 0) for x in c) |
|
|
|
|
|
img9[y1:y2, x1:x2] = img[y1 - padh :, x1 - padw :] |
|
hp, wp = h, w |
|
|
|
|
|
labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1]) |
|
mosaic_labels.append(labels_patch) |
|
final_labels = self._cat_labels(mosaic_labels) |
|
|
|
final_labels["img"] = img9[-self.border[0] : self.border[0], -self.border[1] : self.border[1]] |
|
return final_labels |
|
|
|
@staticmethod |
|
def _update_labels(labels, padw, padh): |
|
""" |
|
Updates label coordinates with padding values. |
|
|
|
This method adjusts the bounding box coordinates of object instances in the labels by adding padding |
|
values. It also denormalizes the coordinates if they were previously normalized. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image and instance information. |
|
padw (int): Padding width to be added to the x-coordinates. |
|
padh (int): Padding height to be added to the y-coordinates. |
|
|
|
Returns: |
|
(Dict): Updated labels dictionary with adjusted instance coordinates. |
|
|
|
Examples: |
|
>>> labels = {"img": np.zeros((100, 100, 3)), "instances": Instances(...)} |
|
>>> padw, padh = 50, 50 |
|
>>> updated_labels = Mosaic._update_labels(labels, padw, padh) |
|
""" |
|
nh, nw = labels["img"].shape[:2] |
|
labels["instances"].convert_bbox(format="xyxy") |
|
labels["instances"].denormalize(nw, nh) |
|
labels["instances"].add_padding(padw, padh) |
|
return labels |
|
|
|
def _cat_labels(self, mosaic_labels): |
|
""" |
|
Concatenates and processes labels for mosaic augmentation. |
|
|
|
This method combines labels from multiple images used in mosaic augmentation, clips instances to the |
|
mosaic border, and removes zero-area boxes. |
|
|
|
Args: |
|
mosaic_labels (List[Dict]): A list of label dictionaries for each image in the mosaic. |
|
|
|
Returns: |
|
(Dict): A dictionary containing concatenated and processed labels for the mosaic image, including: |
|
- im_file (str): File path of the first image in the mosaic. |
|
- ori_shape (Tuple[int, int]): Original shape of the first image. |
|
- resized_shape (Tuple[int, int]): Shape of the mosaic image (imgsz * 2, imgsz * 2). |
|
- cls (np.ndarray): Concatenated class labels. |
|
- instances (Instances): Concatenated instance annotations. |
|
- mosaic_border (Tuple[int, int]): Mosaic border size. |
|
- texts (List[str], optional): Text labels if present in the original labels. |
|
|
|
Examples: |
|
>>> mosaic = Mosaic(dataset, imgsz=640) |
|
>>> mosaic_labels = [{"cls": np.array([0, 1]), "instances": Instances(...)} for _ in range(4)] |
|
>>> result = mosaic._cat_labels(mosaic_labels) |
|
>>> print(result.keys()) |
|
dict_keys(['im_file', 'ori_shape', 'resized_shape', 'cls', 'instances', 'mosaic_border']) |
|
""" |
|
if len(mosaic_labels) == 0: |
|
return {} |
|
cls = [] |
|
instances = [] |
|
imgsz = self.imgsz * 2 |
|
for labels in mosaic_labels: |
|
cls.append(labels["cls"]) |
|
instances.append(labels["instances"]) |
|
|
|
final_labels = { |
|
"im_file": mosaic_labels[0]["im_file"], |
|
"ori_shape": mosaic_labels[0]["ori_shape"], |
|
"resized_shape": (imgsz, imgsz), |
|
"cls": np.concatenate(cls, 0), |
|
"instances": Instances.concatenate(instances, axis=0), |
|
"mosaic_border": self.border, |
|
} |
|
final_labels["instances"].clip(imgsz, imgsz) |
|
good = final_labels["instances"].remove_zero_area_boxes() |
|
final_labels["cls"] = final_labels["cls"][good] |
|
if "texts" in mosaic_labels[0]: |
|
final_labels["texts"] = mosaic_labels[0]["texts"] |
|
return final_labels |
|
|
|
|
|
class MixUp(BaseMixTransform): |
|
""" |
|
Applies MixUp augmentation to image datasets. |
|
|
|
This class implements the MixUp augmentation technique as described in the paper "mixup: Beyond Empirical Risk |
|
Minimization" (https://arxiv.org/abs/1710.09412). MixUp combines two images and their labels using a random weight. |
|
|
|
Attributes: |
|
dataset (Any): The dataset to which MixUp augmentation will be applied. |
|
pre_transform (Callable | None): Optional transform to apply before MixUp. |
|
p (float): Probability of applying MixUp augmentation. |
|
|
|
Methods: |
|
get_indexes: Returns a random index from the dataset. |
|
_mix_transform: Applies MixUp augmentation to the input labels. |
|
|
|
Examples: |
|
>>> from ultralytics.data.augment import MixUp |
|
>>> dataset = YourDataset(...) # Your image dataset |
|
>>> mixup = MixUp(dataset, p=0.5) |
|
>>> augmented_labels = mixup(original_labels) |
|
""" |
|
|
|
def __init__(self, dataset, pre_transform=None, p=0.0) -> None: |
|
""" |
|
Initializes the MixUp augmentation object. |
|
|
|
MixUp is an image augmentation technique that combines two images by taking a weighted sum of their pixel |
|
values and labels. This implementation is designed for use with the Ultralytics YOLO framework. |
|
|
|
Args: |
|
dataset (Any): The dataset to which MixUp augmentation will be applied. |
|
pre_transform (Callable | None): Optional transform to apply to images before MixUp. |
|
p (float): Probability of applying MixUp augmentation to an image. Must be in the range [0, 1]. |
|
|
|
Examples: |
|
>>> from ultralytics.data.dataset import YOLODataset |
|
>>> dataset = YOLODataset("path/to/data.yaml") |
|
>>> mixup = MixUp(dataset, pre_transform=None, p=0.5) |
|
""" |
|
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p) |
|
|
|
def get_indexes(self): |
|
""" |
|
Get a random index from the dataset. |
|
|
|
This method returns a single random index from the dataset, which is used to select an image for MixUp |
|
augmentation. |
|
|
|
Returns: |
|
(int): A random integer index within the range of the dataset length. |
|
|
|
Examples: |
|
>>> mixup = MixUp(dataset) |
|
>>> index = mixup.get_indexes() |
|
>>> print(index) |
|
42 |
|
""" |
|
return random.randint(0, len(self.dataset) - 1) |
|
|
|
def _mix_transform(self, labels): |
|
""" |
|
Applies MixUp augmentation to the input labels. |
|
|
|
This method implements the MixUp augmentation technique as described in the paper |
|
"mixup: Beyond Empirical Risk Minimization" (https://arxiv.org/abs/1710.09412). |
|
|
|
Args: |
|
labels (Dict): A dictionary containing the original image and label information. |
|
|
|
Returns: |
|
(Dict): A dictionary containing the mixed-up image and combined label information. |
|
|
|
Examples: |
|
>>> mixer = MixUp(dataset) |
|
>>> mixed_labels = mixer._mix_transform(labels) |
|
""" |
|
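        # Mixup ratio, alpha == beta == 32.0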
r = np.random.beta(32.0, 32.0) |
|
labels2 = labels["mix_labels"][0] |
|
labels["img"] = (labels["img"] * r + labels2["img"] * (1 - r)).astype(np.uint8) |
|
labels["instances"] = Instances.concatenate([labels["instances"], labels2["instances"]], axis=0) |
|
labels["cls"] = np.concatenate([labels["cls"], labels2["cls"]], 0) |
|
return labels |
|
|
|
|
|
class RandomPerspective: |
|
""" |
|
Implements random perspective and affine transformations on images and corresponding annotations. |
|
|
|
This class applies random rotations, translations, scaling, shearing, and perspective transformations |
|
to images and their associated bounding boxes, segments, and keypoints. It can be used as part of an |
|
augmentation pipeline for object detection and instance segmentation tasks. |
|
|
|
Attributes: |
|
degrees (float): Maximum absolute degree range for random rotations. |
|
translate (float): Maximum translation as a fraction of the image size. |
|
scale (float): Scaling factor range, e.g., scale=0.1 means 0.9-1.1. |
|
shear (float): Maximum shear angle in degrees. |
|
perspective (float): Perspective distortion factor. |
|
border (Tuple[int, int]): Mosaic border size as (x, y). |
|
pre_transform (Callable | None): Optional transform to apply before the random perspective. |
|
|
|
Methods: |
|
affine_transform: Applies affine transformations to the input image. |
|
apply_bboxes: Transforms bounding boxes using the affine matrix. |
|
apply_segments: Transforms segments and generates new bounding boxes. |
|
apply_keypoints: Transforms keypoints using the affine matrix. |
|
__call__: Applies the random perspective transformation to images and annotations. |
|
box_candidates: Filters transformed bounding boxes based on size and aspect ratio. |
|
|
|
Examples: |
|
>>> transform = RandomPerspective(degrees=10, translate=0.1, scale=0.1, shear=10) |
|
>>> image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8) |
|
>>> labels = {"img": image, "cls": np.array([0, 1]), "instances": Instances(...)} |
|
>>> result = transform(labels) |
|
>>> transformed_image = result["img"] |
|
>>> transformed_instances = result["instances"] |
|
""" |
|
|
|
def __init__( |
|
self, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, border=(0, 0), pre_transform=None |
|
): |
|
""" |
|
Initializes RandomPerspective object with transformation parameters. |
|
|
|
This class implements random perspective and affine transformations on images and corresponding bounding boxes, |
|
segments, and keypoints. Transformations include rotation, translation, scaling, and shearing. |
|
|
|
Args: |
|
degrees (float): Degree range for random rotations. |
|
translate (float): Fraction of total width and height for random translation. |
|
scale (float): Scaling factor interval, e.g., a scale factor of 0.5 allows a resize between 50%-150%. |
|
shear (float): Shear intensity (angle in degrees). |
|
perspective (float): Perspective distortion factor. |
|
border (Tuple[int, int]): Tuple specifying mosaic border (top/bottom, left/right). |
|
pre_transform (Callable | None): Function/transform to apply to the image before starting the random |
|
transformation. |
|
|
|
Examples: |
|
>>> transform = RandomPerspective(degrees=10.0, translate=0.1, scale=0.5, shear=5.0) |
|
>>> result = transform(labels) # Apply random perspective to labels |
|
""" |
|
self.degrees = degrees |
|
self.translate = translate |
|
self.scale = scale |
|
self.shear = shear |
|
self.perspective = perspective |
|
self.border = border |
|
self.pre_transform = pre_transform |
|
|
|
def affine_transform(self, img, border): |
|
""" |
|
Applies a sequence of affine transformations centered around the image center. |
|
|
|
This function performs a series of geometric transformations on the input image, including |
|
translation, perspective change, rotation, scaling, and shearing. The transformations are |
|
applied in a specific order to maintain consistency. |
|
|
|
Args: |
|
img (np.ndarray): Input image to be transformed. |
|
border (Tuple[int, int]): Border dimensions for the transformed image. |
|
|
|
Returns: |
|
(Tuple[np.ndarray, np.ndarray, float]): A tuple containing: |
|
- np.ndarray: Transformed image. |
|
- np.ndarray: 3x3 transformation matrix. |
|
- float: Scale factor applied during the transformation. |
|
|
|
Examples: |
|
>>> import numpy as np |
|
>>> img = np.random.rand(100, 100, 3) |
|
>>> border = (10, 10) |
|
>>> transformed_img, matrix, scale = affine_transform(img, border) |
|
""" |
|
|
|
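        # Center: shift the image center to the origin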
C = np.eye(3, dtype=np.float32) |
|
|
|
C[0, 2] = -img.shape[1] / 2 |
|
C[1, 2] = -img.shape[0] / 2 |
|
|
|
|
|
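        # Perspective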
P = np.eye(3, dtype=np.float32) |
|
P[2, 0] = random.uniform(-self.perspective, self.perspective) |
|
P[2, 1] = random.uniform(-self.perspective, self.perspective) |
|
|
|
|
|
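        # Rotation and Scale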
R = np.eye(3, dtype=np.float32) |
|
a = random.uniform(-self.degrees, self.degrees) |
|
|
|
s = random.uniform(1 - self.scale, 1 + self.scale) |
|
|
|
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) |
|
|
|
|
|
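        # Shear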
S = np.eye(3, dtype=np.float32) |
|
S[0, 1] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) |
|
S[1, 0] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) |
|
|
|
|
|
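        # Translation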
T = np.eye(3, dtype=np.float32) |
|
T[0, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[0] |
|
T[1, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[1] |
|
|
|
|
|
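        # Combined transformation matrix (order of operations, right to left, is IMPORTANT)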
M = T @ S @ R @ P @ C |
|
|
|
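        # Warp the image only when a border or a non-identity transform is applied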
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): |
|
if self.perspective: |
|
img = cv2.warpPerspective(img, M, dsize=self.size, borderValue=(114, 114, 114)) |
|
else: |
|
img = cv2.warpAffine(img, M[:2], dsize=self.size, borderValue=(114, 114, 114)) |
|
return img, M, s |
|
|
|
def apply_bboxes(self, bboxes, M): |
|
""" |
|
Apply affine transformation to bounding boxes. |
|
|
|
This function applies an affine transformation to a set of bounding boxes using the provided |
|
transformation matrix. |
|
|
|
Args: |
|
            bboxes (np.ndarray): Bounding boxes in xyxy format with shape (N, 4), where N is the number
|
of bounding boxes. |
|
            M (np.ndarray): Affine transformation matrix with shape (3, 3).
|
|
|
Returns: |
|
            (np.ndarray): Transformed bounding boxes in xyxy format with shape (N, 4).
|
|
|
Examples: |
|
            >>> bboxes = np.array([[10, 10, 20, 20], [30, 30, 40, 40]], dtype=np.float32)
            >>> M = np.eye(3, dtype=np.float32)
            >>> transformed_bboxes = apply_bboxes(bboxes, M)
|
""" |
|
n = len(bboxes) |
|
if n == 0: |
|
return bboxes |
|
|
|
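        # Warp the four corner points of each box through the transformation matrix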
xy = np.ones((n * 4, 3), dtype=bboxes.dtype) |
|
xy[:, :2] = bboxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) |
|
xy = xy @ M.T |
|
xy = (xy[:, :2] / xy[:, 2:3] if self.perspective else xy[:, :2]).reshape(n, 8) |
|
|
|
|
|
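        # Create new bounding boxes from the extrema of the warped corner points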
x = xy[:, [0, 2, 4, 6]] |
|
y = xy[:, [1, 3, 5, 7]] |
|
return np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1)), dtype=bboxes.dtype).reshape(4, n).T |
|
|
|
def apply_segments(self, segments, M): |
|
""" |
|
Apply affine transformations to segments and generate new bounding boxes. |
|
|
|
This function applies affine transformations to input segments and generates new bounding boxes based on |
|
the transformed segments. It clips the transformed segments to fit within the new bounding boxes. |
|
|
|
Args: |
|
segments (np.ndarray): Input segments with shape (N, M, 2), where N is the number of segments and M is the |
|
number of points in each segment. |
|
M (np.ndarray): Affine transformation matrix with shape (3, 3). |
|
|
|
Returns: |
|
(Tuple[np.ndarray, np.ndarray]): A tuple containing: |
|
- New bounding boxes with shape (N, 4) in xyxy format. |
|
- Transformed and clipped segments with shape (N, M, 2). |
|
|
|
Examples: |
|
>>> segments = np.random.rand(10, 500, 2) # 10 segments with 500 points each |
|
>>> M = np.eye(3) # Identity transformation matrix |
|
>>> new_bboxes, new_segments = apply_segments(segments, M) |
|
""" |
|
n, num = segments.shape[:2] |
|
if n == 0: |
|
return [], segments |
|
|
|
xy = np.ones((n * num, 3), dtype=segments.dtype) |
|
segments = segments.reshape(-1, 2) |
|
xy[:, :2] = segments |
|
xy = xy @ M.T |
|
xy = xy[:, :2] / xy[:, 2:3] |
|
segments = xy.reshape(n, -1, 2) |
|
bboxes = np.stack([segment2box(xy, self.size[0], self.size[1]) for xy in segments], 0) |
|
segments[..., 0] = segments[..., 0].clip(bboxes[:, 0:1], bboxes[:, 2:3]) |
|
segments[..., 1] = segments[..., 1].clip(bboxes[:, 1:2], bboxes[:, 3:4]) |
|
return bboxes, segments |
|
|
|
def apply_keypoints(self, keypoints, M): |
|
""" |
|
Applies affine transformation to keypoints. |
|
|
|
This method transforms the input keypoints using the provided affine transformation matrix. It handles |
|
perspective rescaling if necessary and updates the visibility of keypoints that fall outside the image |
|
boundaries after transformation. |
|
|
|
Args: |
|
keypoints (np.ndarray): Array of keypoints with shape (N, 17, 3), where N is the number of instances, |
|
17 is the number of keypoints per instance, and 3 represents (x, y, visibility). |
|
M (np.ndarray): 3x3 affine transformation matrix. |
|
|
|
Returns: |
|
(np.ndarray): Transformed keypoints array with the same shape as input (N, 17, 3). |
|
|
|
Examples: |
|
>>> random_perspective = RandomPerspective() |
|
>>> keypoints = np.random.rand(5, 17, 3) # 5 instances, 17 keypoints each |
|
>>> M = np.eye(3) # Identity transformation |
|
>>> transformed_keypoints = random_perspective.apply_keypoints(keypoints, M) |
|
""" |
|
n, nkpt = keypoints.shape[:2] |
|
if n == 0: |
|
return keypoints |
|
xy = np.ones((n * nkpt, 3), dtype=keypoints.dtype) |
|
visible = keypoints[..., 2].reshape(n * nkpt, 1) |
|
xy[:, :2] = keypoints[..., :2].reshape(n * nkpt, 2) |
|
xy = xy @ M.T |
|
xy = xy[:, :2] / xy[:, 2:3] |
|
out_mask = (xy[:, 0] < 0) | (xy[:, 1] < 0) | (xy[:, 0] > self.size[0]) | (xy[:, 1] > self.size[1]) |
|
visible[out_mask] = 0 |
|
return np.concatenate([xy, visible], axis=-1).reshape(n, nkpt, 3) |
|
|
|
def __call__(self, labels): |
|
""" |
|
Applies random perspective and affine transformations to an image and its associated labels. |
|
|
|
This method performs a series of transformations including rotation, translation, scaling, shearing, |
|
and perspective distortion on the input image and adjusts the corresponding bounding boxes, segments, |
|
and keypoints accordingly. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image data and annotations. |
|
Must include: |
|
'img' (ndarray): The input image. |
|
'cls' (ndarray): Class labels. |
|
'instances' (Instances): Object instances with bounding boxes, segments, and keypoints. |
|
May include: |
|
'mosaic_border' (Tuple[int, int]): Border size for mosaic augmentation. |
|
|
|
Returns: |
|
(Dict): Transformed labels dictionary containing: |
|
- 'img' (np.ndarray): The transformed image. |
|
- 'cls' (np.ndarray): Updated class labels. |
|
- 'instances' (Instances): Updated object instances. |
|
- 'resized_shape' (Tuple[int, int]): New image shape after transformation. |
|
|
|
Examples: |
|
>>> transform = RandomPerspective() |
|
>>> image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8) |
|
>>> labels = { |
|
... "img": image, |
|
... "cls": np.array([0, 1, 2]), |
|
... "instances": Instances(bboxes=np.array([[10, 10, 50, 50], [100, 100, 150, 150]])), |
|
... } |
|
>>> result = transform(labels) |
|
>>> assert result["img"].shape[:2] == result["resized_shape"] |
|
""" |
|
if self.pre_transform and "mosaic_border" not in labels: |
|
labels = self.pre_transform(labels) |
|
labels.pop("ratio_pad", None) |
|
|
|
img = labels["img"] |
|
cls = labels["cls"] |
|
instances = labels.pop("instances") |
|
|
|
instances.convert_bbox(format="xyxy") |
|
instances.denormalize(*img.shape[:2][::-1]) |
|
|
|
border = labels.pop("mosaic_border", self.border) |
|
self.size = img.shape[1] + border[1] * 2, img.shape[0] + border[0] * 2 |
|
|
|
|
|
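        # M is the 3x3 affine matrix; scale is reused below by box_candidates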
img, M, scale = self.affine_transform(img, border) |
|
|
|
bboxes = self.apply_bboxes(instances.bboxes, M) |
|
|
|
segments = instances.segments |
|
keypoints = instances.keypoints |
|
|
|
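        # Update bboxes if there are segments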
if len(segments): |
|
bboxes, segments = self.apply_segments(segments, M) |
|
|
|
if keypoints is not None: |
|
keypoints = self.apply_keypoints(keypoints, M) |
|
new_instances = Instances(bboxes, segments, keypoints, bbox_format="xyxy", normalized=False) |
|
|
|
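        # Clip instances to the transformed image size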
new_instances.clip(*self.size) |
|
|
|
|
|
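        # Filter instances: rescale the original boxes so their areas compare fairly with the warped boxes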
instances.scale(scale_w=scale, scale_h=scale, bbox_only=True) |
|
|
|
i = self.box_candidates( |
|
box1=instances.bboxes.T, box2=new_instances.bboxes.T, area_thr=0.01 if len(segments) else 0.10 |
|
) |
|
labels["instances"] = new_instances[i] |
|
labels["cls"] = cls[i] |
|
labels["img"] = img |
|
labels["resized_shape"] = img.shape[:2] |
|
return labels |
|
|
|
@staticmethod |
|
def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): |
|
""" |
|
Compute candidate boxes for further processing based on size and aspect ratio criteria. |
|
|
|
This method compares boxes before and after augmentation to determine if they meet specified |
|
thresholds for width, height, aspect ratio, and area. It's used to filter out boxes that have |
|
been overly distorted or reduced by the augmentation process. |
|
|
|
Args: |
|
            box1 (numpy.ndarray): Original boxes before augmentation, shape (4, N), where N is the
|
number of boxes. Format is [x1, y1, x2, y2] in absolute coordinates. |
|
box2 (numpy.ndarray): Augmented boxes after transformation, shape (4, N). Format is |
|
[x1, y1, x2, y2] in absolute coordinates. |
|
wh_thr (float): Width and height threshold in pixels. Boxes smaller than this in either |
|
dimension are rejected. |
|
ar_thr (float): Aspect ratio threshold. Boxes with an aspect ratio greater than this |
|
value are rejected. |
|
area_thr (float): Area ratio threshold. Boxes with an area ratio (new/old) less than |
|
this value are rejected. |
|
eps (float): Small epsilon value to prevent division by zero. |
|
|
|
Returns: |
|
            (numpy.ndarray): Boolean array of shape (N,) indicating which boxes are candidates.
|
True values correspond to boxes that meet all criteria. |
|
|
|
Examples: |
|
>>> random_perspective = RandomPerspective() |
|
>>> box1 = np.array([[0, 0, 100, 100], [0, 0, 50, 50]]).T |
|
>>> box2 = np.array([[10, 10, 90, 90], [5, 5, 45, 45]]).T |
|
>>> candidates = random_perspective.box_candidates(box1, box2) |
|
>>> print(candidates) |
|
[True True] |
|
""" |
|
w1, h1 = box1[2] - box1[0], box1[3] - box1[1] |
|
w2, h2 = box2[2] - box2[0], box2[3] - box2[1] |
|
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) |
|
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) |
|
|
|
|
|
class RandomHSV: |
|
""" |
|
Randomly adjusts the Hue, Saturation, and Value (HSV) channels of an image. |
|
|
|
This class applies random HSV augmentation to images within predefined limits set by hgain, sgain, and vgain. |
|
|
|
Attributes: |
|
hgain (float): Maximum variation for hue. Range is typically [0, 1]. |
|
sgain (float): Maximum variation for saturation. Range is typically [0, 1]. |
|
vgain (float): Maximum variation for value. Range is typically [0, 1]. |
|
|
|
Methods: |
|
__call__: Applies random HSV augmentation to an image. |
|
|
|
Examples: |
|
>>> import numpy as np |
|
>>> from ultralytics.data.augment import RandomHSV |
|
>>> augmenter = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5) |
|
>>> image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8) |
|
>>> labels = {"img": image} |
|
        >>> augmenter(labels)  # labels["img"] is modified in-place
        >>> augmented_image = labels["img"]
|
""" |
|
|
|
def __init__(self, hgain=0.5, sgain=0.5, vgain=0.5) -> None: |
|
""" |
|
Initializes the RandomHSV object for random HSV (Hue, Saturation, Value) augmentation. |
|
|
|
This class applies random adjustments to the HSV channels of an image within specified limits. |
|
|
|
Args: |
|
hgain (float): Maximum variation for hue. Should be in the range [0, 1]. |
|
sgain (float): Maximum variation for saturation. Should be in the range [0, 1]. |
|
vgain (float): Maximum variation for value. Should be in the range [0, 1]. |
|
|
|
Examples: |
|
>>> hsv_aug = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5) |
|
            >>> labels = {"img": np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)}
            >>> hsv_aug(labels)
|
""" |
|
self.hgain = hgain |
|
self.sgain = sgain |
|
self.vgain = vgain |
|
|
|
def __call__(self, labels): |
|
""" |
|
Applies random HSV augmentation to an image within predefined limits. |
|
|
|
This method modifies the input image by randomly adjusting its Hue, Saturation, and Value (HSV) channels. |
|
The adjustments are made within the limits set by hgain, sgain, and vgain during initialization. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image data and metadata. Must include an 'img' key with |
|
the image as a numpy array. |
|
|
|
Returns: |
|
(None): The function modifies the input 'labels' dictionary in-place, updating the 'img' key |
|
with the HSV-augmented image. |
|
|
|
Examples: |
|
>>> hsv_augmenter = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5) |
|
>>> labels = {"img": np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)} |
|
>>> hsv_augmenter(labels) |
|
>>> augmented_img = labels["img"] |
|
""" |
|
img = labels["img"] |
|
if self.hgain or self.sgain or self.vgain: |
|
r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1 |
|
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) |
|
dtype = img.dtype |
|
|
|
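            # Build per-channel lookup tables (LUTs) for hue, saturation, and value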
x = np.arange(0, 256, dtype=r.dtype) |
|
lut_hue = ((x * r[0]) % 180).astype(dtype) |
|
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) |
|
lut_val = np.clip(x * r[2], 0, 255).astype(dtype) |
|
|
|
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) |
|
cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=img) |
|
return labels |
|
|
|
|
|
class RandomFlip: |
|
""" |
|
Applies a random horizontal or vertical flip to an image with a given probability. |
|
|
|
This class performs random image flipping and updates corresponding instance annotations such as |
|
bounding boxes and keypoints. |
|
|
|
Attributes: |
|
p (float): Probability of applying the flip. Must be between 0 and 1. |
|
direction (str): Direction of flip, either 'horizontal' or 'vertical'. |
|
flip_idx (array-like): Index mapping for flipping keypoints, if applicable. |
|
|
|
Methods: |
|
__call__: Applies the random flip transformation to an image and its annotations. |
|
|
|
Examples: |
|
>>> transform = RandomFlip(p=0.5, direction="horizontal") |
|
>>> result = transform({"img": image, "instances": instances}) |
|
>>> flipped_image = result["img"] |
|
>>> flipped_instances = result["instances"] |
|
""" |
|
|
|
def __init__(self, p=0.5, direction="horizontal", flip_idx=None) -> None: |
|
""" |
|
Initializes the RandomFlip class with probability and direction. |
|
|
|
This class applies a random horizontal or vertical flip to an image with a given probability. |
|
It also updates any instances (bounding boxes, keypoints, etc.) accordingly. |
|
|
|
Args: |
|
p (float): The probability of applying the flip. Must be between 0 and 1. |
|
direction (str): The direction to apply the flip. Must be 'horizontal' or 'vertical'. |
|
flip_idx (List[int] | None): Index mapping for flipping keypoints, if any. |
|
|
|
Raises: |
|
AssertionError: If direction is not 'horizontal' or 'vertical', or if p is not between 0 and 1. |
|
|
|
Examples: |
|
>>> flip = RandomFlip(p=0.5, direction="horizontal") |
|
>>> flip_with_idx = RandomFlip(p=0.7, direction="vertical", flip_idx=[1, 0, 3, 2, 5, 4]) |
|
""" |
|
assert direction in {"horizontal", "vertical"}, f"Support direction `horizontal` or `vertical`, got {direction}" |
|
assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}." |
|
|
|
self.p = p |
|
self.direction = direction |
|
self.flip_idx = flip_idx |
|
|
|
def __call__(self, labels): |
|
""" |
|
Applies random flip to an image and updates any instances like bounding boxes or keypoints accordingly. |
|
|
|
This method randomly flips the input image either horizontally or vertically based on the initialized |
|
probability and direction. It also updates the corresponding instances (bounding boxes, keypoints) to |
|
match the flipped image. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing the following keys: |
|
'img' (numpy.ndarray): The image to be flipped. |
|
'instances' (ultralytics.utils.instance.Instances): An object containing bounding boxes and |
|
optionally keypoints. |
|
|
|
Returns: |
|
(Dict): The same dictionary with the flipped image and updated instances: |
|
'img' (numpy.ndarray): The flipped image. |
|
'instances' (ultralytics.utils.instance.Instances): Updated instances matching the flipped image. |
|
|
|
Examples: |
|
>>> labels = {"img": np.random.rand(640, 640, 3), "instances": Instances(...)} |
|
>>> random_flip = RandomFlip(p=0.5, direction="horizontal") |
|
>>> flipped_labels = random_flip(labels) |
|
""" |
|
img = labels["img"] |
|
instances = labels.pop("instances") |
|
instances.convert_bbox(format="xywh") |
|
h, w = img.shape[:2] |
|
h = 1 if instances.normalized else h |
|
w = 1 if instances.normalized else w |
|
|
|
|
|
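        # Flip up-down (vertical) or left-right (horizontal) with probability p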
if self.direction == "vertical" and random.random() < self.p: |
|
img = np.flipud(img) |
|
instances.flipud(h) |
|
if self.direction == "horizontal" and random.random() < self.p: |
|
img = np.fliplr(img) |
|
instances.fliplr(w) |
|
|
|
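        # For keypoints: remap to mirrored counterparts (e.g., swap left/right joints) after a flip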
if self.flip_idx is not None and instances.keypoints is not None: |
|
instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :]) |
|
labels["img"] = np.ascontiguousarray(img) |
|
labels["instances"] = instances |
|
return labels |
|
|
|
|
|
class LetterBox: |
|
""" |
|
Resize image and padding for detection, instance segmentation, pose. |
|
|
|
This class resizes and pads images to a specified shape while preserving aspect ratio. It also updates |
|
corresponding labels and bounding boxes. |
|
|
|
Attributes: |
|
new_shape (tuple): Target shape (height, width) for resizing. |
|
auto (bool): Whether to use minimum rectangle. |
|
scaleFill (bool): Whether to stretch the image to new_shape. |
|
scaleup (bool): Whether to allow scaling up. If False, only scale down. |
|
stride (int): Stride for rounding padding. |
|
center (bool): Whether to center the image or align to top-left. |
|
|
|
Methods: |
|
__call__: Resize and pad image, update labels and bounding boxes. |
|
|
|
Examples: |
|
>>> transform = LetterBox(new_shape=(640, 640)) |
|
>>> result = transform(labels) |
|
>>> resized_img = result["img"] |
|
>>> updated_instances = result["instances"] |
|
""" |
|
|
|
def __init__(self, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, center=True, stride=32): |
|
""" |
|
Initialize LetterBox object for resizing and padding images. |
|
|
|
This class is designed to resize and pad images for object detection, instance segmentation, and pose estimation |
|
tasks. It supports various resizing modes including auto-sizing, scale-fill, and letterboxing. |
|
|
|
Args: |
|
new_shape (Tuple[int, int]): Target size (height, width) for the resized image. |
|
auto (bool): If True, use minimum rectangle to resize. If False, use new_shape directly. |
|
scaleFill (bool): If True, stretch the image to new_shape without padding. |
|
scaleup (bool): If True, allow scaling up. If False, only scale down. |
|
center (bool): If True, center the placed image. If False, place image in top-left corner. |
|
stride (int): Stride of the model (e.g., 32 for YOLOv5). |
|
|
|
Attributes: |
|
new_shape (Tuple[int, int]): Target size for the resized image. |
|
auto (bool): Flag for using minimum rectangle resizing. |
|
scaleFill (bool): Flag for stretching image without padding. |
|
scaleup (bool): Flag for allowing upscaling. |
|
stride (int): Stride value for ensuring image size is divisible by stride. |
|
|
|
Examples: |
|
>>> letterbox = LetterBox(new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, stride=32) |
|
            >>> resized_img = letterbox(image=original_img)
|
""" |
|
self.new_shape = new_shape |
|
self.auto = auto |
|
self.scaleFill = scaleFill |
|
self.scaleup = scaleup |
|
self.stride = stride |
|
self.center = center |
|
|
|
def __call__(self, labels=None, image=None): |
|
""" |
|
Resizes and pads an image for object detection, instance segmentation, or pose estimation tasks. |
|
|
|
This method applies letterboxing to the input image, which involves resizing the image while maintaining its |
|
aspect ratio and adding padding to fit the new shape. It also updates any associated labels accordingly. |
|
|
|
Args: |
|
labels (Dict | None): A dictionary containing image data and associated labels, or empty dict if None. |
|
image (np.ndarray | None): The input image as a numpy array. If None, the image is taken from 'labels'. |
|
|
|
Returns: |
|
            (Dict | np.ndarray): If 'labels' is provided, returns an updated dictionary with the resized and padded
                image, updated instances, and additional metadata. If 'labels' is empty, returns only the resized
                and padded image as a numpy array.
|
|
|
Examples: |
|
>>> letterbox = LetterBox(new_shape=(640, 640)) |
|
>>> result = letterbox(labels={"img": np.zeros((480, 640, 3)), "instances": Instances(...)}) |
|
>>> resized_img = result["img"] |
|
>>> updated_instances = result["instances"] |
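            >>> # Bare-image usage (a sketch): with no labels dict, only the padded image is returned
            >>> padded = letterbox(image=np.zeros((480, 640, 3), dtype=np.uint8))
            >>> padded.shape
            (640, 640, 3)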
|
""" |
|
if labels is None: |
|
labels = {} |
|
img = labels.get("img") if image is None else image |
|
shape = img.shape[:2] |
|
new_shape = labels.pop("rect_shape", self.new_shape) |
|
if isinstance(new_shape, int): |
|
new_shape = (new_shape, new_shape) |
|
|
|
|
|
        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not self.scaleup:  # only scale down, do not scale up
            r = min(r, 1.0)

        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if self.auto:  # minimum rectangle
            dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride)
        elif self.scaleFill:  # stretch to fill new_shape exactly
            dw, dh = 0.0, 0.0
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]

        if self.center:
            dw /= 2  # divide padding between both sides
            dh /= 2
|
|
|
        if shape[::-1] != new_unpad:  # resize
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1))
        img = cv2.copyMakeBorder(
            img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114)
        )  # add gray border
|
if labels.get("ratio_pad"): |
|
labels["ratio_pad"] = (labels["ratio_pad"], (left, top)) |
|
|
|
if len(labels): |
|
labels = self._update_labels(labels, ratio, left, top) |
|
labels["img"] = img |
|
labels["resized_shape"] = new_shape |
|
return labels |
|
else: |
|
return img |
|
|
|
@staticmethod |
|
def _update_labels(labels, ratio, padw, padh): |
|
""" |
|
Updates labels after applying letterboxing to an image. |
|
|
|
This method modifies the bounding box coordinates of instances in the labels |
|
to account for resizing and padding applied during letterboxing. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image labels and instances. |
|
ratio (Tuple[float, float]): Scaling ratios (width, height) applied to the image. |
|
padw (float): Padding width added to the image. |
|
padh (float): Padding height added to the image. |
|
|
|
Returns: |
|
(Dict): Updated labels dictionary with modified instance coordinates. |
|
|
|
Examples: |
|
>>> letterbox = LetterBox(new_shape=(640, 640)) |
|
>>> labels = {"instances": Instances(...)} |
|
>>> ratio = (0.5, 0.5) |
|
>>> padw, padh = 10, 20 |
|
>>> updated_labels = letterbox._update_labels(labels, ratio, padw, padh) |
|
""" |
|
labels["instances"].convert_bbox(format="xyxy") |
|
labels["instances"].denormalize(*labels["img"].shape[:2][::-1]) |
|
labels["instances"].scale(*ratio) |
|
labels["instances"].add_padding(padw, padh) |
|
return labels |
|
|
|
|
|
class CopyPaste(BaseMixTransform): |
|
""" |
|
CopyPaste class for applying Copy-Paste augmentation to image datasets. |
|
|
|
This class implements the Copy-Paste augmentation technique as described in the paper "Simple Copy-Paste is a Strong |
|
Data Augmentation Method for Instance Segmentation" (https://arxiv.org/abs/2012.07177). It combines objects from |
|
different images to create new training samples. |
|
|
|
Attributes: |
|
dataset (Any): The dataset to which Copy-Paste augmentation will be applied. |
|
pre_transform (Callable | None): Optional transform to apply before Copy-Paste. |
|
p (float): Probability of applying Copy-Paste augmentation. |
|
|
|
Methods: |
|
get_indexes: Returns a random index from the dataset. |
|
_mix_transform: Applies Copy-Paste augmentation to the input labels. |
|
__call__: Applies the Copy-Paste transformation to images and annotations. |
|
|
|
Examples: |
|
>>> from ultralytics.data.augment import CopyPaste |
|
>>> dataset = YourDataset(...) # Your image dataset |
|
>>> copypaste = CopyPaste(dataset, p=0.5) |
|
>>> augmented_labels = copypaste(original_labels) |
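        >>> # "flip" mode (a sketch) needs no dataset; it pastes flipped copies of the image's own segments
        >>> copypaste_flip = CopyPaste(p=0.5, mode="flip")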
|
""" |
|
|
|
def __init__(self, dataset=None, pre_transform=None, p=0.5, mode="flip") -> None: |
|
"""Initializes CopyPaste object with dataset, pre_transform, and probability of applying MixUp.""" |
|
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p) |
|
assert mode in {"flip", "mixup"}, f"Expected `mode` to be `flip` or `mixup`, but got {mode}." |
|
self.mode = mode |
|
|
|
def get_indexes(self): |
|
"""Returns a list of random indexes from the dataset for CopyPaste augmentation.""" |
|
return random.randint(0, len(self.dataset) - 1) |
|
|
|
def _mix_transform(self, labels): |
|
"""Applies Copy-Paste augmentation to combine objects from another image into the current image.""" |
|
labels2 = labels["mix_labels"][0] |
|
return self._transform(labels, labels2) |
|
|
|
def __call__(self, labels): |
|
"""Applies Copy-Paste augmentation to an image and its labels.""" |
|
if len(labels["instances"].segments) == 0 or self.p == 0: |
|
return labels |
|
if self.mode == "flip": |
|
return self._transform(labels) |
|
|
|
|
|
indexes = self.get_indexes() |
|
if isinstance(indexes, int): |
|
indexes = [indexes] |
|
|
|
|
|
mix_labels = [self.dataset.get_image_and_label(i) for i in indexes] |
|
|
|
if self.pre_transform is not None: |
|
for i, data in enumerate(mix_labels): |
|
mix_labels[i] = self.pre_transform(data) |
|
labels["mix_labels"] = mix_labels |
|
|
|
|
|
labels = self._update_label_text(labels) |
|
|
|
labels = self._mix_transform(labels) |
|
labels.pop("mix_labels", None) |
|
return labels |
|
|
|
    def _transform(self, labels1, labels2=None):
        """Applies Copy-Paste augmentation to combine objects from another image into the current image."""
        labels2 = labels2 or {}  # avoid a shared mutable default argument
        im = labels1["img"]
|
cls = labels1["cls"] |
|
h, w = im.shape[:2] |
|
instances = labels1.pop("instances") |
|
instances.convert_bbox(format="xyxy") |
|
instances.denormalize(w, h) |
|
|
|
        im_new = np.zeros(im.shape, np.uint8)
        instances2 = labels2.pop("instances", None)
        if instances2 is None:  # "flip" mode: paste flipped copies of this image's own objects
            instances2 = deepcopy(instances)
            instances2.fliplr(w)
        ioa = bbox_ioa(instances2.bboxes, instances.bboxes)  # intersection over area
        indexes = np.nonzero((ioa < 0.30).all(1))[0]  # keep candidates that occlude existing boxes by < 30%
        n = len(indexes)
        sorted_idx = np.argsort(ioa.max(1)[indexes])  # prefer candidates with the least overlap
        indexes = indexes[sorted_idx]
        for j in indexes[: round(self.p * n)]:
            cls = np.concatenate((cls, labels2.get("cls", cls)[[j]]), axis=0)
            instances = Instances.concatenate((instances, instances2[[j]]), axis=0)
            cv2.drawContours(im_new, instances2.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED)
|
|
|
result = labels2.get("img", cv2.flip(im, 1)) |
|
i = im_new.astype(bool) |
|
im[i] = result[i] |
|
|
|
labels1["img"] = im |
|
labels1["cls"] = cls |
|
labels1["instances"] = instances |
|
return labels1 |
|
|
|
|
|
class Albumentations: |
|
""" |
|
Albumentations transformations for image augmentation. |
|
|
|
This class applies various image transformations using the Albumentations library. It includes operations such as |
|
Blur, Median Blur, conversion to grayscale, Contrast Limited Adaptive Histogram Equalization (CLAHE), random changes |
|
in brightness and contrast, RandomGamma, and image quality reduction through compression. |
|
|
|
Attributes: |
|
p (float): Probability of applying the transformations. |
|
transform (albumentations.Compose): Composed Albumentations transforms. |
|
contains_spatial (bool): Indicates if the transforms include spatial operations. |
|
|
|
Methods: |
|
__call__: Applies the Albumentations transformations to the input labels. |
|
|
|
Examples: |
|
>>> transform = Albumentations(p=0.5) |
|
>>> augmented_labels = transform(labels) |
|
|
|
Notes: |
|
- The Albumentations package must be installed to use this class. |
|
- If the package is not installed or an error occurs during initialization, the transform will be set to None. |
|
- Spatial transforms are handled differently and require special processing for bounding boxes. |
|
""" |
|
|
|
def __init__(self, p=1.0): |
|
""" |
|
Initialize the Albumentations transform object for YOLO bbox formatted parameters. |
|
|
|
This class applies various image augmentations using the Albumentations library, including Blur, Median Blur, |
|
conversion to grayscale, Contrast Limited Adaptive Histogram Equalization, random changes of brightness and |
|
contrast, RandomGamma, and image quality reduction through compression. |
|
|
|
Args: |
|
p (float): Probability of applying the augmentations. Must be between 0 and 1. |
|
|
|
Attributes: |
|
p (float): Probability of applying the augmentations. |
|
transform (albumentations.Compose): Composed Albumentations transforms. |
|
contains_spatial (bool): Indicates if the transforms include spatial transformations. |
|
|
|
Raises: |
|
ImportError: If the Albumentations package is not installed. |
|
Exception: For any other errors during initialization. |
|
|
|
Examples: |
|
>>> transform = Albumentations(p=0.5) |
|
>>> augmented = transform(image=image, bboxes=bboxes, class_labels=classes) |
|
>>> augmented_image = augmented["image"] |
|
>>> augmented_bboxes = augmented["bboxes"] |
|
|
|
Notes: |
|
- Requires Albumentations version 1.0.3 or higher. |
|
- Spatial transforms are handled differently to ensure bbox compatibility. |
|
- Some transforms are applied with very low probability (0.01) by default. |
|
""" |
|
self.p = p |
|
self.transform = None |
|
prefix = colorstr("albumentations: ") |
|
|
|
try: |
|
import albumentations as A |
|
|
|
check_version(A.__version__, "1.0.3", hard=True) |
|
|
|
|
|
            # Transforms that move pixels and therefore require synchronized bbox handling
            spatial_transforms = {
|
"Affine", |
|
"BBoxSafeRandomCrop", |
|
"CenterCrop", |
|
"CoarseDropout", |
|
"Crop", |
|
"CropAndPad", |
|
"CropNonEmptyMaskIfExists", |
|
"D4", |
|
"ElasticTransform", |
|
"Flip", |
|
"GridDistortion", |
|
"GridDropout", |
|
"HorizontalFlip", |
|
"Lambda", |
|
"LongestMaxSize", |
|
"MaskDropout", |
|
"MixUp", |
|
"Morphological", |
|
"NoOp", |
|
"OpticalDistortion", |
|
"PadIfNeeded", |
|
"Perspective", |
|
"PiecewiseAffine", |
|
"PixelDropout", |
|
"RandomCrop", |
|
"RandomCropFromBorders", |
|
"RandomGridShuffle", |
|
"RandomResizedCrop", |
|
"RandomRotate90", |
|
"RandomScale", |
|
"RandomSizedBBoxSafeCrop", |
|
"RandomSizedCrop", |
|
"Resize", |
|
"Rotate", |
|
"SafeRotate", |
|
"ShiftScaleRotate", |
|
"SmallestMaxSize", |
|
"Transpose", |
|
"VerticalFlip", |
|
"XYMasking", |
|
} |
|
|
|
|
|
T = [ |
|
A.Blur(p=0.01), |
|
A.MedianBlur(p=0.01), |
|
A.ToGray(p=0.01), |
|
A.CLAHE(p=0.01), |
|
A.RandomBrightnessContrast(p=0.0), |
|
A.RandomGamma(p=0.0), |
|
A.ImageCompression(quality_lower=75, p=0.0), |
|
] |
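
            # The default transforms above are all color-level (non-spatial), so composition typically
            # falls through to the plain A.Compose branch below without bbox parameters.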
|
|
|
|
|
self.contains_spatial = any(transform.__class__.__name__ in spatial_transforms for transform in T) |
|
self.transform = ( |
|
A.Compose(T, bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"])) |
|
if self.contains_spatial |
|
else A.Compose(T) |
|
) |
|
            if hasattr(self.transform, "set_random_seed"):
                # Required for deterministic transforms in newer albumentations releases
                self.transform.set_random_seed(torch.initial_seed())
|
LOGGER.info(prefix + ", ".join(f"{x}".replace("always_apply=False, ", "") for x in T if x.p)) |
|
except ImportError: |
|
pass |
|
except Exception as e: |
|
LOGGER.info(f"{prefix}{e}") |
|
|
|
def __call__(self, labels): |
|
""" |
|
Applies Albumentations transformations to input labels. |
|
|
|
This method applies a series of image augmentations using the Albumentations library. It can perform both |
|
spatial and non-spatial transformations on the input image and its corresponding labels. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image data and annotations. Expected keys are: |
|
- 'img': numpy.ndarray representing the image |
|
- 'cls': numpy.ndarray of class labels |
|
- 'instances': object containing bounding boxes and other instance information |
|
|
|
Returns: |
|
(Dict): The input dictionary with augmented image and updated annotations. |
|
|
|
Examples: |
|
>>> transform = Albumentations(p=0.5) |
|
>>> labels = { |
|
... "img": np.random.rand(640, 640, 3), |
|
... "cls": np.array([0, 1]), |
|
... "instances": Instances(bboxes=np.array([[0, 0, 1, 1], [0.5, 0.5, 0.8, 0.8]])), |
|
... } |
|
>>> augmented = transform(labels) |
|
>>> assert augmented["img"].shape == (640, 640, 3) |
|
|
|
Notes: |
|
- The method applies transformations with probability self.p. |
|
- Spatial transforms update bounding boxes, while non-spatial transforms only modify the image. |
|
- Requires the Albumentations library to be installed. |
|
""" |
|
if self.transform is None or random.random() > self.p: |
|
return labels |
|
|
|
if self.contains_spatial: |
|
cls = labels["cls"] |
|
if len(cls): |
|
im = labels["img"] |
|
labels["instances"].convert_bbox("xywh") |
|
labels["instances"].normalize(*im.shape[:2][::-1]) |
|
bboxes = labels["instances"].bboxes |
|
|
|
new = self.transform(image=im, bboxes=bboxes, class_labels=cls) |
|
if len(new["class_labels"]) > 0: |
|
labels["img"] = new["image"] |
|
labels["cls"] = np.array(new["class_labels"]) |
|
bboxes = np.array(new["bboxes"], dtype=np.float32) |
|
labels["instances"].update(bboxes=bboxes) |
|
else: |
|
labels["img"] = self.transform(image=labels["img"])["image"] |
|
|
|
return labels |
|
|
|
|
|
class Format: |
|
""" |
|
A class for formatting image annotations for object detection, instance segmentation, and pose estimation tasks. |
|
|
|
This class standardizes image and instance annotations to be used by the `collate_fn` in PyTorch DataLoader. |
|
|
|
Attributes: |
|
bbox_format (str): Format for bounding boxes. Options are 'xywh' or 'xyxy'. |
|
normalize (bool): Whether to normalize bounding boxes. |
|
return_mask (bool): Whether to return instance masks for segmentation. |
|
return_keypoint (bool): Whether to return keypoints for pose estimation. |
|
return_obb (bool): Whether to return oriented bounding boxes. |
|
mask_ratio (int): Downsample ratio for masks. |
|
mask_overlap (bool): Whether to overlap masks. |
|
batch_idx (bool): Whether to keep batch indexes. |
|
bgr (float): The probability to return BGR images. |
|
|
|
Methods: |
|
__call__: Formats labels dictionary with image, classes, bounding boxes, and optionally masks and keypoints. |
|
_format_img: Converts image from Numpy array to PyTorch tensor. |
|
_format_segments: Converts polygon points to bitmap masks. |
|
|
|
Examples: |
|
>>> formatter = Format(bbox_format="xywh", normalize=True, return_mask=True) |
|
>>> formatted_labels = formatter(labels) |
|
>>> img = formatted_labels["img"] |
|
>>> bboxes = formatted_labels["bboxes"] |
|
>>> masks = formatted_labels["masks"] |
|
""" |
|
|
|
def __init__( |
|
self, |
|
bbox_format="xywh", |
|
normalize=True, |
|
return_mask=False, |
|
return_keypoint=False, |
|
return_obb=False, |
|
mask_ratio=4, |
|
mask_overlap=True, |
|
batch_idx=True, |
|
bgr=0.0, |
|
): |
|
""" |
|
Initializes the Format class with given parameters for image and instance annotation formatting. |
|
|
|
This class standardizes image and instance annotations for object detection, instance segmentation, and pose |
|
estimation tasks, preparing them for use in PyTorch DataLoader's `collate_fn`. |
|
|
|
Args: |
|
bbox_format (str): Format for bounding boxes. Options are 'xywh', 'xyxy', etc. |
|
normalize (bool): Whether to normalize bounding boxes to [0,1]. |
|
return_mask (bool): If True, returns instance masks for segmentation tasks. |
|
return_keypoint (bool): If True, returns keypoints for pose estimation tasks. |
|
return_obb (bool): If True, returns oriented bounding boxes. |
|
mask_ratio (int): Downsample ratio for masks. |
|
mask_overlap (bool): If True, allows mask overlap. |
|
batch_idx (bool): If True, keeps batch indexes. |
|
bgr (float): Probability of returning BGR images instead of RGB. |
|
|
|
Attributes: |
|
bbox_format (str): Format for bounding boxes. |
|
normalize (bool): Whether bounding boxes are normalized. |
|
return_mask (bool): Whether to return instance masks. |
|
return_keypoint (bool): Whether to return keypoints. |
|
return_obb (bool): Whether to return oriented bounding boxes. |
|
mask_ratio (int): Downsample ratio for masks. |
|
mask_overlap (bool): Whether masks can overlap. |
|
batch_idx (bool): Whether to keep batch indexes. |
|
bgr (float): The probability to return BGR images. |
|
|
|
Examples: |
|
>>> format = Format(bbox_format="xyxy", return_mask=True, return_keypoint=False) |
|
>>> print(format.bbox_format) |
|
xyxy |
|
""" |
|
self.bbox_format = bbox_format |
|
self.normalize = normalize |
|
self.return_mask = return_mask |
|
self.return_keypoint = return_keypoint |
|
self.return_obb = return_obb |
|
self.mask_ratio = mask_ratio |
|
self.mask_overlap = mask_overlap |
|
self.batch_idx = batch_idx |
|
self.bgr = bgr |
|
|
|
def __call__(self, labels): |
|
""" |
|
Formats image annotations for object detection, instance segmentation, and pose estimation tasks. |
|
|
|
This method standardizes the image and instance annotations to be used by the `collate_fn` in PyTorch |
|
DataLoader. It processes the input labels dictionary, converting annotations to the specified format and |
|
applying normalization if required. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image and annotation data with the following keys: |
|
- 'img': The input image as a numpy array. |
|
- 'cls': Class labels for instances. |
|
- 'instances': An Instances object containing bounding boxes, segments, and keypoints. |
|
|
|
Returns: |
|
(Dict): A dictionary with formatted data, including: |
|
- 'img': Formatted image tensor. |
|
                - 'cls': Class labels tensor.
|
- 'bboxes': Bounding boxes tensor in the specified format. |
|
- 'masks': Instance masks tensor (if return_mask is True). |
|
- 'keypoints': Keypoints tensor (if return_keypoint is True). |
|
- 'batch_idx': Batch index tensor (if batch_idx is True). |
|
|
|
Examples: |
|
>>> formatter = Format(bbox_format="xywh", normalize=True, return_mask=True) |
|
>>> labels = {"img": np.random.rand(640, 640, 3), "cls": np.array([0, 1]), "instances": Instances(...)} |
|
>>> formatted_labels = formatter(labels) |
|
>>> print(formatted_labels.keys()) |
|
""" |
|
img = labels.pop("img") |
|
h, w = img.shape[:2] |
|
cls = labels.pop("cls") |
|
instances = labels.pop("instances") |
|
instances.convert_bbox(format=self.bbox_format) |
|
instances.denormalize(w, h) |
|
nl = len(instances) |
|
|
|
if self.return_mask: |
|
if nl: |
|
masks, instances, cls = self._format_segments(instances, cls, w, h) |
|
masks = torch.from_numpy(masks) |
|
else: |
|
masks = torch.zeros( |
|
1 if self.mask_overlap else nl, img.shape[0] // self.mask_ratio, img.shape[1] // self.mask_ratio |
|
) |
|
labels["masks"] = masks |
|
labels["img"] = self._format_img(img) |
|
labels["cls"] = torch.from_numpy(cls) if nl else torch.zeros(nl) |
|
labels["bboxes"] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4)) |
|
if self.return_keypoint: |
|
labels["keypoints"] = torch.from_numpy(instances.keypoints) |
|
if self.normalize: |
|
labels["keypoints"][..., 0] /= w |
|
labels["keypoints"][..., 1] /= h |
|
if self.return_obb: |
|
labels["bboxes"] = ( |
|
xyxyxyxy2xywhr(torch.from_numpy(instances.segments)) if len(instances.segments) else torch.zeros((0, 5)) |
|
) |
|
|
|
if self.normalize: |
|
labels["bboxes"][:, [0, 2]] /= w |
|
labels["bboxes"][:, [1, 3]] /= h |
|
|
|
if self.batch_idx: |
|
labels["batch_idx"] = torch.zeros(nl) |
|
return labels |
|
|
|
def _format_img(self, img): |
|
""" |
|
Formats an image for YOLO from a Numpy array to a PyTorch tensor. |
|
|
|
This function performs the following operations: |
|
1. Ensures the image has 3 dimensions (adds a channel dimension if needed). |
|
2. Transposes the image from HWC to CHW format. |
|
        3. Optionally reverses the channel order between BGR and RGB (kept unchanged with probability `bgr`).
|
4. Converts the image to a contiguous array. |
|
5. Converts the Numpy array to a PyTorch tensor. |
|
|
|
Args: |
|
img (np.ndarray): Input image as a Numpy array with shape (H, W, C) or (H, W). |
|
|
|
Returns: |
|
(torch.Tensor): Formatted image as a PyTorch tensor with shape (C, H, W). |
|
|
|
Examples: |
|
>>> import numpy as np |
|
>>> img = np.random.rand(100, 100, 3) |
|
>>> formatted_img = self._format_img(img) |
|
>>> print(formatted_img.shape) |
|
torch.Size([3, 100, 100]) |
|
""" |
|
if len(img.shape) < 3: |
|
img = np.expand_dims(img, -1) |
|
img = img.transpose(2, 0, 1) |
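        # Axis 0 is now channels; reversing it swaps BGR and RGB.
        # The original channel order is kept with probability self.bgr.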
|
img = np.ascontiguousarray(img[::-1] if random.uniform(0, 1) > self.bgr else img) |
|
img = torch.from_numpy(img) |
|
return img |
|
|
|
def _format_segments(self, instances, cls, w, h): |
|
""" |
|
Converts polygon segments to bitmap masks. |
|
|
|
Args: |
|
instances (Instances): Object containing segment information. |
|
cls (numpy.ndarray): Class labels for each instance. |
|
w (int): Width of the image. |
|
h (int): Height of the image. |
|
|
|
Returns: |
|
masks (numpy.ndarray): Bitmap masks with shape (N, H, W) or (1, H, W) if mask_overlap is True. |
|
instances (Instances): Updated instances object with sorted segments if mask_overlap is True. |
|
cls (numpy.ndarray): Updated class labels, sorted if mask_overlap is True. |
|
|
|
Notes: |
|
- If self.mask_overlap is True, masks are overlapped and sorted by area. |
|
- If self.mask_overlap is False, each mask is represented separately. |
|
- Masks are downsampled according to self.mask_ratio. |
|
""" |
|
segments = instances.segments |
|
if self.mask_overlap: |
|
masks, sorted_idx = polygons2masks_overlap((h, w), segments, downsample_ratio=self.mask_ratio) |
|
masks = masks[None] |
|
instances = instances[sorted_idx] |
|
cls = cls[sorted_idx] |
|
else: |
|
masks = polygons2masks((h, w), segments, color=1, downsample_ratio=self.mask_ratio) |
|
|
|
return masks, instances, cls |
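

# A minimal collate sketch (hypothetical helper, not part of this module): `Format` output is designed so a
# PyTorch DataLoader collate_fn can stack images and concatenate per-instance tensors via `batch_idx`, e.g.:
#   imgs = torch.stack([b["img"] for b in batch])                        # (B, C, H, W)
#   boxes = torch.cat([b["bboxes"] for b in batch])                      # (sum(n_i), 4)
#   idx = torch.cat([b["batch_idx"] + i for i, b in enumerate(batch)])   # maps each box to its image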
|
|
|
|
|
class RandomLoadText: |
|
""" |
|
Randomly samples positive and negative texts and updates class indices accordingly. |
|
|
|
This class is responsible for sampling texts from a given set of class texts, including both positive |
|
(present in the image) and negative (not present in the image) samples. It updates the class indices |
|
to reflect the sampled texts and can optionally pad the text list to a fixed length. |
|
|
|
Attributes: |
|
prompt_format (str): Format string for text prompts. |
|
neg_samples (Tuple[int, int]): Range for randomly sampling negative texts. |
|
max_samples (int): Maximum number of different text samples in one image. |
|
padding (bool): Whether to pad texts to max_samples. |
|
padding_value (str): The text used for padding when padding is True. |
|
|
|
Methods: |
|
__call__: Processes the input labels and returns updated classes and texts. |
|
|
|
Examples: |
|
>>> loader = RandomLoadText(prompt_format="Object: {}", neg_samples=(5, 10), max_samples=20) |
|
>>> labels = {"cls": [0, 1, 2], "texts": [["cat"], ["dog"], ["bird"]], "instances": [...]} |
|
>>> updated_labels = loader(labels) |
|
>>> print(updated_labels["texts"]) |
|
['Object: cat', 'Object: dog', 'Object: bird', 'Object: elephant', 'Object: car'] |
|
""" |
|
|
|
def __init__( |
|
self, |
|
prompt_format: str = "{}", |
|
neg_samples: Tuple[int, int] = (80, 80), |
|
max_samples: int = 80, |
|
padding: bool = False, |
|
padding_value: str = "", |
|
) -> None: |
|
""" |
|
Initializes the RandomLoadText class for randomly sampling positive and negative texts. |
|
|
|
This class is designed to randomly sample positive texts and negative texts, and update the class |
|
indices accordingly to the number of samples. It can be used for text-based object detection tasks. |
|
|
|
Args: |
|
prompt_format (str): Format string for the prompt. Default is '{}'. The format string should |
|
contain a single pair of curly braces {} where the text will be inserted. |
|
neg_samples (Tuple[int, int]): A range to randomly sample negative texts. The first integer |
|
specifies the minimum number of negative samples, and the second integer specifies the |
|
maximum. Default is (80, 80). |
|
max_samples (int): The maximum number of different text samples in one image. Default is 80. |
|
padding (bool): Whether to pad texts to max_samples. If True, the number of texts will always |
|
be equal to max_samples. Default is False. |
|
padding_value (str): The padding text to use when padding is True. Default is an empty string. |
|
|
|
Attributes: |
|
prompt_format (str): The format string for the prompt. |
|
neg_samples (Tuple[int, int]): The range for sampling negative texts. |
|
max_samples (int): The maximum number of text samples. |
|
padding (bool): Whether padding is enabled. |
|
padding_value (str): The value used for padding. |
|
|
|
Examples: |
|
>>> random_load_text = RandomLoadText(prompt_format="Object: {}", neg_samples=(50, 100), max_samples=120) |
|
>>> random_load_text.prompt_format |
|
'Object: {}' |
|
>>> random_load_text.neg_samples |
|
(50, 100) |
|
>>> random_load_text.max_samples |
|
120 |
|
""" |
|
self.prompt_format = prompt_format |
|
self.neg_samples = neg_samples |
|
self.max_samples = max_samples |
|
self.padding = padding |
|
self.padding_value = padding_value |
|
|
|
def __call__(self, labels: dict) -> dict: |
|
""" |
|
Randomly samples positive and negative texts and updates class indices accordingly. |
|
|
|
This method samples positive texts based on the existing class labels in the image, and randomly |
|
selects negative texts from the remaining classes. It then updates the class indices to match the |
|
new sampled text order. |
|
|
|
Args: |
|
labels (Dict): A dictionary containing image labels and metadata. Must include 'texts' and 'cls' keys. |
|
|
|
Returns: |
|
(Dict): Updated labels dictionary with new 'cls' and 'texts' entries. |
|
|
|
Examples: |
|
>>> loader = RandomLoadText(prompt_format="A photo of {}", neg_samples=(5, 10), max_samples=20) |
|
>>> labels = {"cls": np.array([[0], [1], [2]]), "texts": [["dog"], ["cat"], ["bird"]]} |
|
>>> updated_labels = loader(labels) |
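            >>> # Sketch of the outcome: all three positives are kept and re-indexed into the sampled text
            >>> # order; with only three classes available, no negatives can be added despite neg_samples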
|
""" |
|
assert "texts" in labels, "No texts found in labels." |
|
class_texts = labels["texts"] |
|
num_classes = len(class_texts) |
|
cls = np.asarray(labels.pop("cls"), dtype=int) |
|
pos_labels = np.unique(cls).tolist() |
|
|
|
if len(pos_labels) > self.max_samples: |
|
pos_labels = random.sample(pos_labels, k=self.max_samples) |
|
|
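        # Cap negatives so positives + negatives never exceed max_samples (or the total number of classes)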
|
neg_samples = min(min(num_classes, self.max_samples) - len(pos_labels), random.randint(*self.neg_samples)) |
|
neg_labels = [i for i in range(num_classes) if i not in pos_labels] |
|
neg_labels = random.sample(neg_labels, k=neg_samples) |
|
|
|
sampled_labels = pos_labels + neg_labels |
|
random.shuffle(sampled_labels) |
|
|
|
label2ids = {label: i for i, label in enumerate(sampled_labels)} |
|
valid_idx = np.zeros(len(labels["instances"]), dtype=bool) |
|
new_cls = [] |
|
for i, label in enumerate(cls.squeeze(-1).tolist()): |
|
if label not in label2ids: |
|
continue |
|
valid_idx[i] = True |
|
new_cls.append([label2ids[label]]) |
|
labels["instances"] = labels["instances"][valid_idx] |
|
labels["cls"] = np.array(new_cls) |
|
|
|
|
|
texts = [] |
|
for label in sampled_labels: |
|
prompts = class_texts[label] |
|
assert len(prompts) > 0 |
|
prompt = self.prompt_format.format(prompts[random.randrange(len(prompts))]) |
|
texts.append(prompt) |
|
|
|
if self.padding: |
|
valid_labels = len(pos_labels) + len(neg_labels) |
|
num_padding = self.max_samples - valid_labels |
|
if num_padding > 0: |
|
texts += [self.padding_value] * num_padding |
|
|
|
labels["texts"] = texts |
|
return labels |
|
|
|
|
|
def v8_transforms(dataset, imgsz, hyp, stretch=False): |
|
""" |
|
Applies a series of image transformations for training. |
|
|
|
This function creates a composition of image augmentation techniques to prepare images for YOLO training. |
|
It includes operations such as mosaic, copy-paste, random perspective, mixup, and various color adjustments. |
|
|
|
Args: |
|
dataset (Dataset): The dataset object containing image data and annotations. |
|
imgsz (int): The target image size for resizing. |
|
hyp (Namespace): A dictionary of hyperparameters controlling various aspects of the transformations. |
|
stretch (bool): If True, applies stretching to the image. If False, uses LetterBox resizing. |
|
|
|
Returns: |
|
(Compose): A composition of image transformations to be applied to the dataset. |
|
|
|
Examples: |
|
>>> from ultralytics.data.dataset import YOLODataset |
|
>>> from ultralytics.utils import IterableSimpleNamespace |
|
>>> dataset = YOLODataset(img_path="path/to/images", imgsz=640) |
|
>>> hyp = IterableSimpleNamespace(mosaic=1.0, copy_paste=0.5, degrees=10.0, translate=0.2, scale=0.9) |
|
>>> transforms = v8_transforms(dataset, imgsz=640, hyp=hyp) |
|
>>> augmented_data = transforms(dataset[0]) |
|
""" |
|
mosaic = Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic) |
|
affine = RandomPerspective( |
|
degrees=hyp.degrees, |
|
translate=hyp.translate, |
|
scale=hyp.scale, |
|
shear=hyp.shear, |
|
perspective=hyp.perspective, |
|
pre_transform=None if stretch else LetterBox(new_shape=(imgsz, imgsz)), |
|
) |
|
|
|
pre_transform = Compose([mosaic, affine]) |
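    # "flip" CopyPaste needs no extra images and slots in between Mosaic and the affine warp;
    # "mixup" CopyPaste samples another image, so it carries its own Mosaic+affine pre_transform.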
|
if hyp.copy_paste_mode == "flip": |
|
pre_transform.insert(1, CopyPaste(p=hyp.copy_paste, mode=hyp.copy_paste_mode)) |
|
else: |
|
pre_transform.append( |
|
CopyPaste( |
|
dataset, |
|
pre_transform=Compose([Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic), affine]), |
|
p=hyp.copy_paste, |
|
mode=hyp.copy_paste_mode, |
|
) |
|
) |
|
flip_idx = dataset.data.get("flip_idx", []) |
|
if dataset.use_keypoints: |
|
kpt_shape = dataset.data.get("kpt_shape", None) |
|
if len(flip_idx) == 0 and hyp.fliplr > 0.0: |
|
hyp.fliplr = 0.0 |
|
LOGGER.warning("WARNING ⚠️ No 'flip_idx' array defined in data.yaml, setting augmentation 'fliplr=0.0'") |
|
elif flip_idx and (len(flip_idx) != kpt_shape[0]): |
|
raise ValueError(f"data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}") |
|
|
|
return Compose( |
|
[ |
|
pre_transform, |
|
MixUp(dataset, pre_transform=pre_transform, p=hyp.mixup), |
|
Albumentations(p=1.0), |
|
RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v), |
|
RandomFlip(direction="vertical", p=hyp.flipud), |
|
RandomFlip(direction="horizontal", p=hyp.fliplr, flip_idx=flip_idx), |
|
] |
|
) |
|
|
|
|
|
|
|
def classify_transforms( |
|
size=224, |
|
mean=DEFAULT_MEAN, |
|
std=DEFAULT_STD, |
|
interpolation="BILINEAR", |
|
crop_fraction: float = DEFAULT_CROP_FRACTION, |
|
): |
|
""" |
|
Creates a composition of image transforms for classification tasks. |
|
|
|
This function generates a sequence of torchvision transforms suitable for preprocessing images |
|
for classification models during evaluation or inference. The transforms include resizing, |
|
center cropping, conversion to tensor, and normalization. |
|
|
|
Args: |
|
size (int | tuple): The target size for the transformed image. If an int, it defines the shortest edge. If a |
|
tuple, it defines (height, width). |
|
mean (tuple): Mean values for each RGB channel used in normalization. |
|
std (tuple): Standard deviation values for each RGB channel used in normalization. |
|
        interpolation (str): Interpolation method: one of 'NEAREST', 'BILINEAR', or 'BICUBIC'.
|
crop_fraction (float): Fraction of the image to be cropped. |
|
|
|
Returns: |
|
(torchvision.transforms.Compose): A composition of torchvision transforms. |
|
|
|
Examples: |
|
>>> transforms = classify_transforms(size=224) |
|
>>> img = Image.open("path/to/image.jpg") |
|
>>> transformed_img = transforms(img) |
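        >>> # Tuple sizes are also accepted (a sketch); non-square targets skip the shortest-edge resize path
        >>> rect_transforms = classify_transforms(size=(256, 320))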
|
""" |
|
import torchvision.transforms as T |
|
|
|
if isinstance(size, (tuple, list)): |
|
assert len(size) == 2, f"'size' tuples must be length 2, not length {len(size)}" |
|
scale_size = tuple(math.floor(x / crop_fraction) for x in size) |
|
else: |
|
scale_size = math.floor(size / crop_fraction) |
|
scale_size = (scale_size, scale_size) |
|
|
|
|
|
    # Aspect ratio is preserved; the crop centers within the image and no borders are added
    if scale_size[0] == scale_size[1]:
        # Square target: resize the shortest edge, then center-crop
        tfl = [T.Resize(scale_size[0], interpolation=getattr(T.InterpolationMode, interpolation))]
    else:
        # Non-square target: resize to the exact (h, w) before cropping
        tfl = [T.Resize(scale_size)]
|
tfl.extend( |
|
[ |
|
T.CenterCrop(size), |
|
T.ToTensor(), |
|
T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)), |
|
] |
|
) |
|
return T.Compose(tfl) |
|
|
|
|
|
|
|
def classify_augmentations( |
|
size=224, |
|
mean=DEFAULT_MEAN, |
|
std=DEFAULT_STD, |
|
scale=None, |
|
ratio=None, |
|
hflip=0.5, |
|
vflip=0.0, |
|
auto_augment=None, |
|
hsv_h=0.015, |
|
hsv_s=0.4, |
|
hsv_v=0.4, |
|
force_color_jitter=False, |
|
erasing=0.0, |
|
interpolation="BILINEAR", |
|
): |
|
""" |
|
Creates a composition of image augmentation transforms for classification tasks. |
|
|
|
This function generates a set of image transformations suitable for training classification models. It includes |
|
options for resizing, flipping, color jittering, auto augmentation, and random erasing. |
|
|
|
Args: |
|
size (int): Target size for the image after transformations. |
|
mean (tuple): Mean values for normalization, one per channel. |
|
std (tuple): Standard deviation values for normalization, one per channel. |
|
scale (tuple | None): Range of size of the origin size cropped. |
|
ratio (tuple | None): Range of aspect ratio of the origin aspect ratio cropped. |
|
hflip (float): Probability of horizontal flip. |
|
vflip (float): Probability of vertical flip. |
|
auto_augment (str | None): Auto augmentation policy. Can be 'randaugment', 'augmix', 'autoaugment' or None. |
|
hsv_h (float): Image HSV-Hue augmentation factor. |
|
hsv_s (float): Image HSV-Saturation augmentation factor. |
|
hsv_v (float): Image HSV-Value augmentation factor. |
|
force_color_jitter (bool): Whether to apply color jitter even if auto augment is enabled. |
|
erasing (float): Probability of random erasing. |
|
        interpolation (str): Interpolation method: one of 'NEAREST', 'BILINEAR', or 'BICUBIC'.
|
|
|
Returns: |
|
(torchvision.transforms.Compose): A composition of image augmentation transforms. |
|
|
|
Examples: |
|
>>> transforms = classify_augmentations(size=224, auto_augment="randaugment") |
|
>>> augmented_image = transforms(original_image) |
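        >>> # A lighter policy sketch: random crop and flips plus HSV-style color jitter, with random erasing
        >>> basic = classify_augmentations(size=224, auto_augment=None, erasing=0.25)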
|
""" |
|
|
|
import torchvision.transforms as T |
|
|
|
if not isinstance(size, int): |
|
raise TypeError(f"classify_transforms() size {size} must be integer, not (list, tuple)") |
|
scale = tuple(scale or (0.08, 1.0)) |
|
ratio = tuple(ratio or (3.0 / 4.0, 4.0 / 3.0)) |
|
interpolation = getattr(T.InterpolationMode, interpolation) |
|
primary_tfl = [T.RandomResizedCrop(size, scale=scale, ratio=ratio, interpolation=interpolation)] |
|
if hflip > 0.0: |
|
primary_tfl.append(T.RandomHorizontalFlip(p=hflip)) |
|
if vflip > 0.0: |
|
primary_tfl.append(T.RandomVerticalFlip(p=vflip)) |
|
|
|
secondary_tfl = [] |
|
disable_color_jitter = False |
|
    if auto_augment:
        assert isinstance(auto_augment, str), f"Provided argument should be string, but got type {type(auto_augment)}"
        # Color jitter is usually disabled when auto augment is enabled; force_color_jitter overrides this
        disable_color_jitter = not force_color_jitter
|
|
|
if auto_augment == "randaugment": |
|
if TORCHVISION_0_11: |
|
secondary_tfl.append(T.RandAugment(interpolation=interpolation)) |
|
else: |
|
LOGGER.warning('"auto_augment=randaugment" requires torchvision >= 0.11.0. Disabling it.') |
|
|
|
elif auto_augment == "augmix": |
|
if TORCHVISION_0_13: |
|
secondary_tfl.append(T.AugMix(interpolation=interpolation)) |
|
else: |
|
LOGGER.warning('"auto_augment=augmix" requires torchvision >= 0.13.0. Disabling it.') |
|
|
|
elif auto_augment == "autoaugment": |
|
if TORCHVISION_0_10: |
|
secondary_tfl.append(T.AutoAugment(interpolation=interpolation)) |
|
else: |
|
LOGGER.warning('"auto_augment=autoaugment" requires torchvision >= 0.10.0. Disabling it.') |
|
|
|
else: |
|
raise ValueError( |
|
f'Invalid auto_augment policy: {auto_augment}. Should be one of "randaugment", ' |
|
f'"augmix", "autoaugment" or None' |
|
) |
|
|
|
if not disable_color_jitter: |
|
secondary_tfl.append(T.ColorJitter(brightness=hsv_v, contrast=hsv_v, saturation=hsv_s, hue=hsv_h)) |
|
|
|
final_tfl = [ |
|
T.ToTensor(), |
|
T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)), |
|
T.RandomErasing(p=erasing, inplace=True), |
|
] |
|
|
|
return T.Compose(primary_tfl + secondary_tfl + final_tfl) |
|
|
|
|
|
|
|
class ClassifyLetterBox: |
|
""" |
|
A class for resizing and padding images for classification tasks. |
|
|
|
    This class is designed to be part of a transformation pipeline, e.g., T.Compose([ClassifyLetterBox(size), ToTensor()]).
|
It resizes and pads images to a specified size while maintaining the original aspect ratio. |
|
|
|
Attributes: |
|
h (int): Target height of the image. |
|
w (int): Target width of the image. |
|
auto (bool): If True, automatically calculates the short side using stride. |
|
stride (int): The stride value, used when 'auto' is True. |
|
|
|
Methods: |
|
__call__: Applies the letterbox transformation to an input image. |
|
|
|
Examples: |
|
>>> transform = ClassifyLetterBox(size=(640, 640), auto=False, stride=32) |
|
>>> img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8) |
|
>>> result = transform(img) |
|
>>> print(result.shape) |
|
(640, 640, 3) |
|
""" |
|
|
|
def __init__(self, size=(640, 640), auto=False, stride=32): |
|
""" |
|
Initializes the ClassifyLetterBox object for image preprocessing. |
|
|
|
This class is designed to be part of a transformation pipeline for image classification tasks. It resizes and |
|
pads images to a specified size while maintaining the original aspect ratio. |
|
|
|
Args: |
|
size (int | Tuple[int, int]): Target size for the letterboxed image. If an int, a square image of |
|
(size, size) is created. If a tuple, it should be (height, width). |
|
auto (bool): If True, automatically calculates the short side based on stride. Default is False. |
|
stride (int): The stride value, used when 'auto' is True. Default is 32. |
|
|
|
Attributes: |
|
h (int): Target height of the letterboxed image. |
|
w (int): Target width of the letterboxed image. |
|
auto (bool): Flag indicating whether to automatically calculate short side. |
|
stride (int): Stride value for automatic short side calculation. |
|
|
|
Examples: |
|
>>> transform = ClassifyLetterBox(size=224) |
|
>>> img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8) |
|
>>> result = transform(img) |
|
>>> print(result.shape) |
|
(224, 224, 3) |
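            >>> # auto=True (a sketch) pads only to the next multiple of `stride`, so output can be rectangular
            >>> auto_transform = ClassifyLetterBox(size=224, auto=True, stride=32)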
|
""" |
|
super().__init__() |
|
self.h, self.w = (size, size) if isinstance(size, int) else size |
|
self.auto = auto |
|
self.stride = stride |
|
|
|
def __call__(self, im): |
|
""" |
|
Resizes and pads an image using the letterbox method. |
|
|
|
This method resizes the input image to fit within the specified dimensions while maintaining its aspect ratio, |
|
then pads the resized image to match the target size. |
|
|
|
Args: |
|
im (numpy.ndarray): Input image as a numpy array with shape (H, W, C). |
|
|
|
Returns: |
|
(numpy.ndarray): Resized and padded image as a numpy array with shape (hs, ws, 3), where hs and ws are |
|
the target height and width respectively. |
|
|
|
Examples: |
|
>>> letterbox = ClassifyLetterBox(size=(640, 640)) |
|
>>> image = np.random.randint(0, 255, (720, 1280, 3), dtype=np.uint8) |
|
>>> resized_image = letterbox(image) |
|
>>> print(resized_image.shape) |
|
(640, 640, 3) |
|
""" |
|
        imh, imw = im.shape[:2]
        r = min(self.h / imh, self.w / imw)  # ratio of new/old dimensions
        h, w = round(imh * r), round(imw * r)  # resized dimensions

        # Calculate padded output dimensions (rounded up to a stride multiple when auto=True)
        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)

        # Paste the resized image onto a gray (114) canvas
        im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
        im_out[top : top + h, left : left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
|
return im_out |
|
|
|
|
|
|
|
class CenterCrop: |
|
""" |
|
Applies center cropping to images for classification tasks. |
|
|
|
This class performs center cropping on input images, resizing them to a specified size while maintaining the aspect |
|
ratio. It is designed to be part of a transformation pipeline, e.g., T.Compose([CenterCrop(size), ToTensor()]). |
|
|
|
Attributes: |
|
h (int): Target height of the cropped image. |
|
w (int): Target width of the cropped image. |
|
|
|
Methods: |
|
__call__: Applies the center crop transformation to an input image. |
|
|
|
Examples: |
|
>>> transform = CenterCrop(640) |
|
>>> image = np.random.randint(0, 255, (1080, 1920, 3), dtype=np.uint8) |
|
>>> cropped_image = transform(image) |
|
>>> print(cropped_image.shape) |
|
(640, 640, 3) |
|
""" |
|
|
|
def __init__(self, size=640): |
|
""" |
|
Initializes the CenterCrop object for image preprocessing. |
|
|
|
This class is designed to be part of a transformation pipeline, e.g., T.Compose([CenterCrop(size), ToTensor()]). |
|
It performs a center crop on input images to a specified size. |
|
|
|
Args: |
|
size (int | Tuple[int, int]): The desired output size of the crop. If size is an int, a square crop |
|
(size, size) is made. If size is a sequence like (h, w), it is used as the output size. |
|
|
|
Returns: |
|
(None): This method initializes the object and does not return anything. |
|
|
|
Examples: |
|
>>> transform = CenterCrop(224) |
|
>>> img = np.random.rand(300, 300, 3) |
|
>>> cropped_img = transform(img) |
|
>>> print(cropped_img.shape) |
|
(224, 224, 3) |
|
""" |
|
super().__init__() |
|
self.h, self.w = (size, size) if isinstance(size, int) else size |
|
|
|
def __call__(self, im): |
|
""" |
|
Applies center cropping to an input image. |
|
|
|
This method resizes and crops the center of the image using a letterbox method. It maintains the aspect |
|
ratio of the original image while fitting it into the specified dimensions. |
|
|
|
Args: |
|
im (numpy.ndarray | PIL.Image.Image): The input image as a numpy array of shape (H, W, C) or a |
|
PIL Image object. |
|
|
|
Returns: |
|
(numpy.ndarray): The center-cropped and resized image as a numpy array of shape (self.h, self.w, C). |
|
|
|
Examples: |
|
>>> transform = CenterCrop(size=224) |
|
>>> image = np.random.randint(0, 255, (640, 480, 3), dtype=np.uint8) |
|
>>> cropped_image = transform(image) |
|
>>> assert cropped_image.shape == (224, 224, 3) |
|
""" |
|
if isinstance(im, Image.Image): |
|
im = np.asarray(im) |
|
        imh, imw = im.shape[:2]
        m = min(imh, imw)  # length of the shortest side
        top, left = (imh - m) // 2, (imw - m) // 2  # top-left corner of the centered square crop
        return cv2.resize(im[top : top + m, left : left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
|
|
|
|
|
|
|
class ToTensor: |
|
""" |
|
Converts an image from a numpy array to a PyTorch tensor. |
|
|
|
    This class is designed to be part of a transformation pipeline, e.g., T.Compose([ClassifyLetterBox(size), ToTensor()]).
|
|
|
Attributes: |
|
half (bool): If True, converts the image to half precision (float16). |
|
|
|
Methods: |
|
__call__: Applies the tensor conversion to an input image. |
|
|
|
Examples: |
|
>>> transform = ToTensor(half=True) |
|
>>> img = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8) |
|
>>> tensor_img = transform(img) |
|
>>> print(tensor_img.shape, tensor_img.dtype) |
|
torch.Size([3, 640, 640]) torch.float16 |
|
|
|
Notes: |
|
The input image is expected to be in BGR format with shape (H, W, C). |
|
The output tensor will be in RGB format with shape (C, H, W), normalized to [0, 1]. |
|
""" |
|
|
|
def __init__(self, half=False): |
|
""" |
|
Initializes the ToTensor object for converting images to PyTorch tensors. |
|
|
|
This class is designed to be used as part of a transformation pipeline for image preprocessing in the |
|
Ultralytics YOLO framework. It converts numpy arrays or PIL Images to PyTorch tensors, with an option |
|
for half-precision (float16) conversion. |
|
|
|
Args: |
|
half (bool): If True, converts the tensor to half precision (float16). Default is False. |
|
|
|
Examples: |
|
>>> transform = ToTensor(half=True) |
|
>>> img = np.random.rand(640, 640, 3) |
|
>>> tensor_img = transform(img) |
|
>>> print(tensor_img.dtype) |
|
torch.float16 |
|
""" |
|
super().__init__() |
|
self.half = half |
|
|
|
def __call__(self, im): |
|
""" |
|
Transforms an image from a numpy array to a PyTorch tensor. |
|
|
|
This method converts the input image from a numpy array to a PyTorch tensor, applying optional |
|
half-precision conversion and normalization. The image is transposed from HWC to CHW format and |
|
the color channels are reversed from BGR to RGB. |
|
|
|
Args: |
|
im (numpy.ndarray): Input image as a numpy array with shape (H, W, C) in BGR order. |
|
|
|
Returns: |
|
(torch.Tensor): The transformed image as a PyTorch tensor in float32 or float16, normalized |
|
to [0, 1] with shape (C, H, W) in RGB order. |
|
|
|
Examples: |
|
>>> transform = ToTensor(half=True) |
|
>>> img = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8) |
|
>>> tensor_img = transform(img) |
|
>>> print(tensor_img.shape, tensor_img.dtype) |
|
torch.Size([3, 640, 640]) torch.float16 |
|
""" |
|
        im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC to CHW, BGR to RGB, contiguous
        im = torch.from_numpy(im)  # to torch
        im = im.half() if self.half else im.float()  # uint8 to fp16/32
        im /= 255.0  # 0-255 to 0.0-1.0
|
return im |
|
|