
Reference for ultralytics/data/converter.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/converter.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!



ultralytics.data.converter.coco91_to_coco80_class()

Converts 91-index COCO class IDs to 80-index COCO class IDs.

Returns:

(list): A list of 91 class IDs where the index represents the 80-index class ID and the value is the corresponding 91-index class ID.

Source code in ultralytics/data/converter.py
def coco91_to_coco80_class():
    """
    Converts 91-index COCO class IDs to 80-index COCO class IDs.

    Returns:
        (list): A list of 91 class IDs where the index represents the 80-index class ID and the value is the
            corresponding 91-index class ID.
    """
    return [
        0,
        1,
        2,
        3,
        4,
        5,
        6,
        7,
        8,
        9,
        10,
        None,
        11,
        12,
        13,
        14,
        15,
        16,
        17,
        18,
        19,
        20,
        21,
        22,
        23,
        None,
        24,
        25,
        None,
        None,
        26,
        27,
        28,
        29,
        30,
        31,
        32,
        33,
        34,
        35,
        36,
        37,
        38,
        39,
        None,
        40,
        41,
        42,
        43,
        44,
        45,
        46,
        47,
        48,
        49,
        50,
        51,
        52,
        53,
        54,
        55,
        56,
        57,
        58,
        59,
        None,
        60,
        None,
        None,
        61,
        None,
        62,
        63,
        64,
        65,
        66,
        67,
        68,
        69,
        70,
        71,
        72,
        None,
        73,
        74,
        75,
        76,
        77,
        78,
        79,
        None,
    ]
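
As a quick usage illustration (not part of the upstream docs), the returned list can be used to remap a raw COCO `category_id`; COCO category IDs are 1-based, so the lookup index is `category_id - 1`, exactly as `convert_coco` does below:

```python
from ultralytics.data.converter import coco91_to_coco80_class

coco80 = coco91_to_coco80_class()
category_id = 27  # hypothetical 91-index COCO category ID (1-based)
cls = coco80[category_id - 1]  # 0-based 80-index class ID, or None if the category is unused
print(cls)  # -> 24
```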



ultralytics.data.converter.coco80_to_coco91_class()

Converts 80-index (val2014) to 91-index (paper).
For details see https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/.

Example:
    ```python
    import numpy as np

    a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    ```

Source code in ultralytics/data/converter.py
def coco80_to_coco91_class():
    """
    Converts 80-index (val2014) to 91-index (paper).
    For details see https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/.

    Example:
        ```python
        import numpy as np

        a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
        b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
        x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
        x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
        ```
    """
    return [
        1,
        2,
        3,
        4,
        5,
        6,
        7,
        8,
        9,
        10,
        11,
        13,
        14,
        15,
        16,
        17,
        18,
        19,
        20,
        21,
        22,
        23,
        24,
        25,
        27,
        28,
        31,
        32,
        33,
        34,
        35,
        36,
        37,
        38,
        39,
        40,
        41,
        42,
        43,
        44,
        46,
        47,
        48,
        49,
        50,
        51,
        52,
        53,
        54,
        55,
        56,
        57,
        58,
        59,
        60,
        61,
        62,
        63,
        64,
        65,
        67,
        70,
        72,
        73,
        74,
        75,
        76,
        77,
        78,
        79,
        80,
        81,
        82,
        84,
        85,
        86,
        87,
        88,
        89,
        90,
    ]
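
A minimal usage sketch (assuming the standard 80-class ordering): this is the direction typically needed when exporting YOLO predictions to COCO-style JSON, where `category_id` uses the 91-index paper numbering.

```python
from ultralytics.data.converter import coco80_to_coco91_class

coco91 = coco80_to_coco91_class()
yolo_cls = 0  # 0-based 80-index class ID predicted by a YOLO model
category_id = coco91[yolo_cls]  # 1-based 91-index COCO category ID
print(category_id)  # -> 1
```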



ultralytics.data.converter.convert_coco(labels_dir='../coco/annotations/', save_dir='coco_converted/', use_segments=False, use_keypoints=False, cls91to80=True, lvis=False)

Converts COCO dataset annotations to a YOLO annotation format suitable for training YOLO models.

Parameters:

labels_dir (str): Path to the directory containing COCO dataset annotation files. Default: '../coco/annotations/'.
save_dir (str): Path to the directory where the results are saved. Default: 'coco_converted/'.
use_segments (bool): Whether to include segmentation masks in the output. Default: False.
use_keypoints (bool): Whether to include keypoint annotations in the output. Default: False.
cls91to80 (bool): Whether to map the 91 COCO class IDs to the corresponding 80 COCO class IDs. Default: True.
lvis (bool): Whether to convert the data in LVIS dataset style. Default: False.
Example
from ultralytics.data.converter import convert_coco

convert_coco('../datasets/coco/annotations/', use_segments=True, use_keypoints=False, cls91to80=True)
convert_coco('../datasets/lvis/annotations/', use_segments=True, use_keypoints=False, cls91to80=False, lvis=True)
Output

Generates output files in the specified output directory.

Source code in ultralytics/data/converter.py
def convert_coco(
    labels_dir="../coco/annotations/",
    save_dir="coco_converted/",
    use_segments=False,
    use_keypoints=False,
    cls91to80=True,
    lvis=False,
):
    """
    Converts COCO dataset annotations to a YOLO annotation format  suitable for training YOLO models.

    Args:
        labels_dir (str, optional): Path to directory containing COCO dataset annotation files.
        save_dir (str, optional): Path to directory to save results to.
        use_segments (bool, optional): Whether to include segmentation masks in the output.
        use_keypoints (bool, optional): Whether to include keypoint annotations in the output.
        cls91to80 (bool, optional): Whether to map 91 COCO class IDs to the corresponding 80 COCO class IDs.
        lvis (bool, optional): Whether to convert data in lvis dataset way.

    Example:
        ```python
        from ultralytics.data.converter import convert_coco

        convert_coco('../datasets/coco/annotations/', use_segments=True, use_keypoints=False, cls91to80=True)
        convert_coco('../datasets/lvis/annotations/', use_segments=True, use_keypoints=False, cls91to80=False, lvis=True)
        ```

    Output:
        Generates output files in the specified output directory.
    """

    # Create dataset directory
    save_dir = increment_path(save_dir)  # increment if save directory already exists
    for p in save_dir / "labels", save_dir / "images":
        p.mkdir(parents=True, exist_ok=True)  # make dir

    # Convert classes
    coco80 = coco91_to_coco80_class()

    # Import json
    for json_file in sorted(Path(labels_dir).resolve().glob("*.json")):
        lname = "" if lvis else json_file.stem.replace("instances_", "")
        fn = Path(save_dir) / "labels" / lname  # folder name
        fn.mkdir(parents=True, exist_ok=True)
        if lvis:
            # NOTE: create folders for both train and val in advance,
            # since LVIS val set contains images from COCO 2017 train in addition to the COCO 2017 val split.
            (fn / "train2017").mkdir(parents=True, exist_ok=True)
            (fn / "val2017").mkdir(parents=True, exist_ok=True)
        with open(json_file) as f:
            data = json.load(f)

        # Create image dict
        images = {f'{x["id"]:d}': x for x in data["images"]}
        # Create image-annotations dict
        imgToAnns = defaultdict(list)
        for ann in data["annotations"]:
            imgToAnns[ann["image_id"]].append(ann)

        image_txt = []
        # Write labels file
        for img_id, anns in TQDM(imgToAnns.items(), desc=f"Annotations {json_file}"):
            img = images[f"{img_id:d}"]
            h, w = img["height"], img["width"]
            f = str(Path(img["coco_url"]).relative_to("http://images.cocodataset.org")) if lvis else img["file_name"]
            if lvis:
                image_txt.append(str(Path("./images") / f))

            bboxes = []
            segments = []
            keypoints = []
            for ann in anns:
                if ann.get("iscrowd", False):
                    continue
                # The COCO box format is [top left x, top left y, width, height]
                box = np.array(ann["bbox"], dtype=np.float64)
                box[:2] += box[2:] / 2  # xy top-left corner to center
                box[[0, 2]] /= w  # normalize x
                box[[1, 3]] /= h  # normalize y
                if box[2] <= 0 or box[3] <= 0:  # if w <= 0 and h <= 0
                    continue

                cls = coco80[ann["category_id"] - 1] if cls91to80 else ann["category_id"] - 1  # class
                box = [cls] + box.tolist()
                if box not in bboxes:
                    bboxes.append(box)
                    if use_segments and ann.get("segmentation") is not None:
                        if len(ann["segmentation"]) == 0:
                            segments.append([])
                            continue
                        elif len(ann["segmentation"]) > 1:
                            s = merge_multi_segment(ann["segmentation"])
                            s = (np.concatenate(s, axis=0) / np.array([w, h])).reshape(-1).tolist()
                        else:
                            s = [j for i in ann["segmentation"] for j in i]  # all segments concatenated
                            s = (np.array(s).reshape(-1, 2) / np.array([w, h])).reshape(-1).tolist()
                        s = [cls] + s
                        segments.append(s)
                    if use_keypoints and ann.get("keypoints") is not None:
                        keypoints.append(
                            box + (np.array(ann["keypoints"]).reshape(-1, 3) / np.array([w, h, 1])).reshape(-1).tolist()
                        )

            # Write
            with open((fn / f).with_suffix(".txt"), "a") as file:
                for i in range(len(bboxes)):
                    if use_keypoints:
                        line = (*(keypoints[i]),)  # cls, box, keypoints
                    else:
                        line = (
                            *(segments[i] if use_segments and len(segments[i]) > 0 else bboxes[i]),
                        )  # cls, box or segments
                    file.write(("%g " * len(line)).rstrip() % line + "\n")

        if lvis:
            with open((Path(save_dir) / json_file.name.replace("lvis_v1_", "").replace(".json", ".txt")), "a") as f:
                for l in image_txt:
                    f.write(f"{l}\n")

    LOGGER.info(f"{'LVIS' if lvis else 'COCO'} data converted successfully.\nResults saved to {save_dir.resolve()}")
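
For reference, the core box conversion performed inside the annotation loop above can be reproduced in isolation. The numbers below are hypothetical; the sketch shows how a COCO `[top-left x, top-left y, width, height]` box becomes a normalized YOLO `class x_center y_center width height` line.

```python
import numpy as np

w, h = 640, 480  # hypothetical image width and height
ann = {"category_id": 1, "bbox": [100.0, 200.0, 50.0, 80.0]}  # hypothetical COCO annotation

box = np.array(ann["bbox"], dtype=np.float64)
box[:2] += box[2:] / 2        # top-left xy -> center xy
box[[0, 2]] /= w              # normalize x and width
box[[1, 3]] /= h              # normalize y and height
cls = ann["category_id"] - 1  # 0-based class ID (before the optional 91->80 remap)
print(cls, *box)              # -> 0 0.1953125 0.5 0.078125 0.166666...
```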



ultralytics.data.converter.convert_dota_to_yolo_obb(dota_root_path)

Converts DOTA dataset annotations to YOLO OBB (Oriented Bounding Box) format.

The function processes images in the 'train' and 'val' folders of the DOTA dataset. For each image, it reads the associated label from the original labels directory and writes new labels in YOLO OBB format to a new directory.

Parameters:

dota_root_path (str): The root directory path of the DOTA dataset. Required.
Example
from ultralytics.data.converter import convert_dota_to_yolo_obb

convert_dota_to_yolo_obb('path/to/DOTA')
Notes

The directory structure assumed for the DOTA dataset:

- DOTA
    ├─ images
    │   ├─ train
    │   └─ val
    └─ labels
        ├─ train_original
        └─ val_original

After execution, the function will organize the labels into:

- DOTA
    └─ labels
        ├─ train
        └─ val
Source code in ultralytics/data/converter.py
def convert_dota_to_yolo_obb(dota_root_path: str):
    """
    Converts DOTA dataset annotations to YOLO OBB (Oriented Bounding Box) format.

    The function processes images in the 'train' and 'val' folders of the DOTA dataset. For each image, it reads the
    associated label from the original labels directory and writes new labels in YOLO OBB format to a new directory.

    Args:
        dota_root_path (str): The root directory path of the DOTA dataset.

    Example:
        ```python
        from ultralytics.data.converter import convert_dota_to_yolo_obb

        convert_dota_to_yolo_obb('path/to/DOTA')
        ```

    Notes:
        The directory structure assumed for the DOTA dataset:

            - DOTA
                ├─ images
                │   ├─ train
                │   └─ val
                └─ labels
                    ├─ train_original
                    └─ val_original

        After execution, the function will organize the labels into:

            - DOTA
                └─ labels
                    ├─ train
                    └─ val
    """
    dota_root_path = Path(dota_root_path)

    # Class names to indices mapping
    class_mapping = {
        "plane": 0,
        "ship": 1,
        "storage-tank": 2,
        "baseball-diamond": 3,
        "tennis-court": 4,
        "basketball-court": 5,
        "ground-track-field": 6,
        "harbor": 7,
        "bridge": 8,
        "large-vehicle": 9,
        "small-vehicle": 10,
        "helicopter": 11,
        "roundabout": 12,
        "soccer-ball-field": 13,
        "swimming-pool": 14,
        "container-crane": 15,
        "airport": 16,
        "helipad": 17,
    }

    def convert_label(image_name, image_width, image_height, orig_label_dir, save_dir):
        """Converts a single image's DOTA annotation to YOLO OBB format and saves it to a specified directory."""
        orig_label_path = orig_label_dir / f"{image_name}.txt"
        save_path = save_dir / f"{image_name}.txt"

        with orig_label_path.open("r") as f, save_path.open("w") as g:
            lines = f.readlines()
            for line in lines:
                parts = line.strip().split()
                if len(parts) < 9:
                    continue
                class_name = parts[8]
                class_idx = class_mapping[class_name]
                coords = [float(p) for p in parts[:8]]
                normalized_coords = [
                    coords[i] / image_width if i % 2 == 0 else coords[i] / image_height for i in range(8)
                ]
                formatted_coords = ["{:.6g}".format(coord) for coord in normalized_coords]
                g.write(f"{class_idx} {' '.join(formatted_coords)}\n")

    for phase in ["train", "val"]:
        image_dir = dota_root_path / "images" / phase
        orig_label_dir = dota_root_path / "labels" / f"{phase}_original"
        save_dir = dota_root_path / "labels" / phase

        save_dir.mkdir(parents=True, exist_ok=True)

        image_paths = list(image_dir.iterdir())
        for image_path in TQDM(image_paths, desc=f"Processing {phase} images"):
            if image_path.suffix != ".png":
                continue
            image_name_without_ext = image_path.stem
            img = cv2.imread(str(image_path))
            h, w = img.shape[:2]
            convert_label(image_name_without_ext, w, h, orig_label_dir, save_dir)
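
As a concrete illustration (hypothetical values, not from the DOTA docs), each DOTA label line holds eight corner coordinates followed by the class name and a difficulty flag, and each output line written by `convert_label` becomes `class_idx x1 y1 ... x4 y4` with the coordinates normalized by the image size:

```python
# Hypothetical DOTA annotation line: 8 corner coords, class name, difficulty flag
line = "2753 2408 2861 2385 2888 2468 2805 2502 plane 0"
parts = line.split()
image_width, image_height = 4096, 4096  # hypothetical image size

class_idx = 0  # class_mapping["plane"]
coords = [float(p) for p in parts[:8]]
normalized = [c / image_width if i % 2 == 0 else c / image_height for i, c in enumerate(coords)]
print(class_idx, " ".join(f"{c:.6g}" for c in normalized))
# -> 0 0.672119 0.587891 0.698486 0.582275 0.705078 0.602539 0.684814 0.61084
```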



ultralytics.data.converter.min_index(arr1, arr2)

Finds a pair of indexes with the shortest distance between two arrays of 2D points.

Parameters:

arr1 (np.ndarray): A NumPy array of shape (N, 2) representing N 2D points. Required.
arr2 (np.ndarray): A NumPy array of shape (M, 2) representing M 2D points. Required.

Returns:

(tuple): A tuple containing the indexes of the points with the shortest distance in arr1 and arr2, respectively.

Source code in ultralytics/data/converter.py
def min_index(arr1, arr2):
    """
    Find a pair of indexes with the shortest distance between two arrays of 2D points.

    Args:
        arr1 (np.ndarray): A NumPy array of shape (N, 2) representing N 2D points.
        arr2 (np.ndarray): A NumPy array of shape (M, 2) representing M 2D points.

    Returns:
        (tuple): A tuple containing the indexes of the points with the shortest distance in arr1 and arr2 respectively.
    """
    dis = ((arr1[:, None, :] - arr2[None, :, :]) ** 2).sum(-1)
    return np.unravel_index(np.argmin(dis, axis=None), dis.shape)
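
A small sketch with hypothetical points: the pairwise squared distances are computed by broadcasting, and `np.unravel_index` turns the flat argmin into an `(i, j)` index pair.

```python
import numpy as np
from ultralytics.data.converter import min_index

arr1 = np.array([[0.0, 0.0], [10.0, 10.0]])
arr2 = np.array([[9.0, 9.0], [100.0, 100.0]])
i, j = min_index(arr1, arr2)
print(i, j)  # -> 1 0, i.e. arr1[1] and arr2[0] are the closest pair
```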



ultralytics.data.converter.merge_multi_segment(segments)

Merges multiple segments into one list by connecting the coordinates with the minimum distance between each segment. This function connects these coordinates with a thin line to merge all segments into one.

Parameters:

segments (List[List]): Original segmentations in COCO's JSON file. Each element is a list of coordinates, like [segmentation1, segmentation2, ...]. Required.

Returns:

s (List[np.ndarray]): A list of connected segments represented as NumPy arrays.

Source code in ultralytics/data/converter.py
def merge_multi_segment(segments):
    """
    Merge multiple segments into one list by connecting the coordinates with the minimum distance between each segment.
    This function connects these coordinates with a thin line to merge all segments into one.

    Args:
        segments (List[List]): Original segmentations in COCO's JSON file.
                               Each element is a list of coordinates, like [segmentation1, segmentation2,...].

    Returns:
        s (List[np.ndarray]): A list of connected segments represented as NumPy arrays.
    """
    s = []
    segments = [np.array(i).reshape(-1, 2) for i in segments]
    idx_list = [[] for _ in range(len(segments))]

    # Record the indexes with min distance between each segment
    for i in range(1, len(segments)):
        idx1, idx2 = min_index(segments[i - 1], segments[i])
        idx_list[i - 1].append(idx1)
        idx_list[i].append(idx2)

    # Use two round to connect all the segments
    for k in range(2):
        # Forward connection
        if k == 0:
            for i, idx in enumerate(idx_list):
                # Middle segments have two indexes, reverse the index of middle segments
                if len(idx) == 2 and idx[0] > idx[1]:
                    idx = idx[::-1]
                    segments[i] = segments[i][::-1, :]

                segments[i] = np.roll(segments[i], -idx[0], axis=0)
                segments[i] = np.concatenate([segments[i], segments[i][:1]])
                # Deal with the first segment and the last one
                if i in {0, len(idx_list) - 1}:
                    s.append(segments[i])
                else:
                    idx = [0, idx[1] - idx[0]]
                    s.append(segments[i][idx[0] : idx[1] + 1])

        else:
            for i in range(len(idx_list) - 1, -1, -1):
                if i not in {0, len(idx_list) - 1}:
                    idx = idx_list[i]
                    nidx = abs(idx[1] - idx[0])
                    s.append(segments[i][nidx:])
    return s
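
A minimal sketch with two hypothetical square polygons (flat `[x1, y1, x2, y2, ...]` lists, as stored in COCO JSON); the returned pieces can be concatenated into a single polygon, which is exactly how `convert_coco` uses this function above.

```python
import numpy as np
from ultralytics.data.converter import merge_multi_segment

segmentation = [
    [0, 0, 10, 0, 10, 10, 0, 10],      # first square
    [20, 20, 30, 20, 30, 30, 20, 30],  # second square
]
merged = np.concatenate(merge_multi_segment(segmentation), axis=0)
print(merged.shape)  # a single connected polygon of shape (K, 2)
```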



ultralytics.data.converter.yolo_bbox2segment(im_dir, save_dir=None, sam_model='sam_b.pt')

Converts an existing object detection dataset (bounding boxes) to a segmentation dataset or oriented bounding box (OBB) dataset in YOLO format. Generates segmentation data using the SAM auto-annotator as needed.

Parameters:

im_dir (str | Path): Path to the image directory to convert. Required.
save_dir (str | Path): Path to save the generated labels; labels will be saved into `labels-segment` at the same directory level as `im_dir` if save_dir is None. Default: None.
sam_model (str): Segmentation model to use for intermediate segmentation data; optional. Default: 'sam_b.pt'.
Notes

The input directory structure assumed for the dataset:

- im_dir
    ├─ 001.jpg
    ├─ ..
    └─ NNN.jpg
- labels
    ├─ 001.txt
    ├─ ..
    └─ NNN.txt
Source code in ultralytics/data/converter.py
def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
    """
    Converts existing object detection dataset (bounding boxes) to segmentation dataset or oriented bounding box (OBB)
    in YOLO format. Generates segmentation data using SAM auto-annotator as needed.

    Args:
        im_dir (str | Path): Path to image directory to convert.
        save_dir (str | Path): Path to save the generated labels, labels will be saved
            into `labels-segment` in the same directory level of `im_dir` if save_dir is None. Default: None.
        sam_model (str): Segmentation model to use for intermediate segmentation data; optional.

    Notes:
        The input directory structure assumed for dataset:

            - im_dir
                ├─ 001.jpg
                ├─ ..
                └─ NNN.jpg
            - labels
                ├─ 001.txt
                ├─ ..
                └─ NNN.txt
    """
    from tqdm import tqdm

    from ultralytics import SAM
    from ultralytics.data import YOLODataset
    from ultralytics.utils import LOGGER
    from ultralytics.utils.ops import xywh2xyxy

    # NOTE: add placeholder to pass class index check
    dataset = YOLODataset(im_dir, data=dict(names=list(range(1000))))
    if len(dataset.labels[0]["segments"]) > 0:  # if it's segment data
        LOGGER.info("Segmentation labels detected, no need to generate new ones!")
        return

    LOGGER.info("Detection labels detected, generating segment labels by SAM model!")
    sam_model = SAM(sam_model)
    for l in tqdm(dataset.labels, total=len(dataset.labels), desc="Generating segment labels"):
        h, w = l["shape"]
        boxes = l["bboxes"]
        if len(boxes) == 0:  # skip empty labels
            continue
        boxes[:, [0, 2]] *= w
        boxes[:, [1, 3]] *= h
        im = cv2.imread(l["im_file"])
        sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False)
        l["segments"] = sam_results[0].masks.xyn

    save_dir = Path(save_dir) if save_dir else Path(im_dir).parent / "labels-segment"
    save_dir.mkdir(parents=True, exist_ok=True)
    for l in dataset.labels:
        texts = []
        lb_name = Path(l["im_file"]).with_suffix(".txt").name
        txt_file = save_dir / lb_name
        cls = l["cls"]
        for i, s in enumerate(l["segments"]):
            line = (int(cls[i]), *s.reshape(-1))
            texts.append(("%g " * len(line)).rstrip() % line)
        if texts:
            with open(txt_file, "a") as f:
                f.writelines(text + "\n" for text in texts)
    LOGGER.info(f"Generated segment labels saved in {save_dir}")
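
A minimal call sketch (the path is hypothetical). The function expects YOLO-format detection labels in a sibling `labels` directory, loads the given SAM weights, and, with `save_dir=None`, writes the generated polygon labels to a `labels-segment` folder next to `im_dir`.

```python
from ultralytics.data.converter import yolo_bbox2segment

yolo_bbox2segment("path/to/dataset/images", save_dir=None, sam_model="sam_b.pt")
# Segment labels are written to path/to/dataset/labels-segment
```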





Created 2023-11-12, Updated 2024-05-18
Authors: glenn-jocher (5), Burhan-Q (1), Laughing-q (1)