Reference for ultralytics/data/split_dota.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/split_dota.py. If you spot a problem please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!


ultralytics.data.split_dota.bbox_iof

bbox_iof(polygon1, bbox2, eps=1e-06)

Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `polygon1` | `ndarray` | Polygon coordinates with shape (n, 8). | required |
| `bbox2` | `ndarray` | Bounding boxes with shape (n, 4). | required |
| `eps` | `float` | Small value to prevent division by zero. | `1e-06` |

Returns:

| Type | Description |
| --- | --- |
| `ndarray` | IoF scores with shape (n, 1) or (n, m) if bbox2 is (m, 4). |

Notes

Polygon format: [x1, y1, x2, y2, x3, y3, x4, y4]. Bounding box format: [x_min, y_min, x_max, y_max].

Source code in ultralytics/data/split_dota.py
def bbox_iof(polygon1, bbox2, eps=1e-6):
    """
    Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.

    Args:
        polygon1 (np.ndarray): Polygon coordinates with shape (n, 8).
        bbox2 (np.ndarray): Bounding boxes with shape (n, 4).
        eps (float, optional): Small value to prevent division by zero.

    Returns:
        (np.ndarray): IoF scores with shape (n, 1) or (n, m) if bbox2 is (m, 4).

    Notes:
        Polygon format: [x1, y1, x2, y2, x3, y3, x4, y4].
        Bounding box format: [x_min, y_min, x_max, y_max].
    """
    check_requirements("shapely")
    from shapely.geometry import Polygon

    polygon1 = polygon1.reshape(-1, 4, 2)
    lt_point = np.min(polygon1, axis=-2)  # left-top
    rb_point = np.max(polygon1, axis=-2)  # right-bottom
    bbox1 = np.concatenate([lt_point, rb_point], axis=-1)

    lt = np.maximum(bbox1[:, None, :2], bbox2[..., :2])
    rb = np.minimum(bbox1[:, None, 2:], bbox2[..., 2:])
    wh = np.clip(rb - lt, 0, np.inf)
    h_overlaps = wh[..., 0] * wh[..., 1]

    left, top, right, bottom = (bbox2[..., i] for i in range(4))
    polygon2 = np.stack([left, top, right, top, right, bottom, left, bottom], axis=-1).reshape(-1, 4, 2)

    sg_polys1 = [Polygon(p) for p in polygon1]
    sg_polys2 = [Polygon(p) for p in polygon2]
    overlaps = np.zeros(h_overlaps.shape)
    for p in zip(*np.nonzero(h_overlaps)):
        overlaps[p] = sg_polys1[p[0]].intersection(sg_polys2[p[-1]]).area
    unions = np.array([p.area for p in sg_polys1], dtype=np.float32)
    unions = unions[..., None]

    unions = np.clip(unions, eps, np.inf)
    outputs = overlaps / unions
    if outputs.ndim == 1:
        outputs = outputs[..., None]
    return outputs
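
A minimal usage sketch (the values below are illustrative, and shapely must be installed):

```python
import numpy as np

from ultralytics.data.split_dota import bbox_iof

# One axis-aligned square polygon with corners (0, 0), (100, 0), (100, 100), (0, 100), shape (1, 8)
polygon = np.array([[0, 0, 100, 0, 100, 100, 0, 100]], dtype=np.float32)

# Two candidate windows in [x_min, y_min, x_max, y_max] format, shape (2, 4)
windows = np.array([[0, 0, 50, 50], [200, 200, 300, 300]], dtype=np.float32)

iof = bbox_iof(polygon, windows)
print(iof)  # approximately [[0.25, 0.0]]: a quarter of the polygon lies inside the first window, none in the second
```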





ultralytics.data.split_dota.load_yolo_dota

load_yolo_dota(data_root, split='train')

Load DOTA dataset.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `data_root` | `str` | Data root directory. | required |
| `split` | `str` | The dataset split, either `train` or `val`. | `'train'` |

Returns:

| Type | Description |
| --- | --- |
| `List[Dict]` | List of annotation dictionaries containing image information. |

Notes

The directory structure assumed for the DOTA dataset:

    - data_root
        - images
            - train
            - val
        - labels
            - train
            - val

Source code in ultralytics/data/split_dota.py
def load_yolo_dota(data_root, split="train"):
    """
    Load DOTA dataset.

    Args:
        data_root (str): Data root directory.
        split (str): The split data set, could be `train` or `val`.

    Returns:
        (List[Dict]): List of annotation dictionaries containing image information.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - train
                    - val
                - labels
                    - train
                    - val
    """
    assert split in {"train", "val"}, f"Split must be 'train' or 'val', not {split}."
    im_dir = Path(data_root) / "images" / split
    assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
    im_files = glob(str(Path(data_root) / "images" / split / "*"))
    lb_files = img2label_paths(im_files)
    annos = []
    for im_file, lb_file in zip(im_files, lb_files):
        w, h = exif_size(Image.open(im_file))
        with open(lb_file, encoding="utf-8") as f:
            lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
            lb = np.array(lb, dtype=np.float32)
        annos.append(dict(ori_size=(h, w), label=lb, filepath=im_file))
    return annos
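
A usage sketch, assuming a DOTA-style dataset under the placeholder root path/to/DOTAv1 laid out as described in the Notes above:

```python
from ultralytics.data.split_dota import load_yolo_dota

annos = load_yolo_dota("path/to/DOTAv1", split="train")  # placeholder dataset root
print(len(annos))            # number of training images
anno = annos[0]
print(anno["ori_size"])      # (height, width) of the first image
print(anno["label"].shape)   # (num_instances, 9): class index followed by 8 normalized polygon coordinates
print(anno["filepath"])      # path to the source image
```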





ultralytics.data.split_dota.get_windows

get_windows(
    im_size, crop_sizes=(1024,), gaps=(200,), im_rate_thr=0.6, eps=0.01
)

Get the coordinates of windows.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `im_size` | `tuple` | Original image size, (h, w). | required |
| `crop_sizes` | `List[int]` | Crop size of windows. | `(1024,)` |
| `gaps` | `List[int]` | Gap between crops. | `(200,)` |
| `im_rate_thr` | `float` | Threshold on the ratio of the image area inside a window to the window area. | `0.6` |
| `eps` | `float` | Epsilon value for math operations. | `0.01` |

Returns:

| Type | Description |
| --- | --- |
| `ndarray` | Array of window coordinates with shape (n, 4) where each row is [x_start, y_start, x_stop, y_stop]. |

Source code in ultralytics/data/split_dota.py
def get_windows(im_size, crop_sizes=(1024,), gaps=(200,), im_rate_thr=0.6, eps=0.01):
    """
    Get the coordinates of windows.

    Args:
        im_size (tuple): Original image size, (h, w).
        crop_sizes (List[int]): Crop size of windows.
        gaps (List[int]): Gap between crops.
        im_rate_thr (float): Threshold of windows areas divided by image areas.
        eps (float): Epsilon value for math operations.

    Returns:
        (np.ndarray): Array of window coordinates with shape (n, 4) where each row is [x_start, y_start, x_stop, y_stop].
    """
    h, w = im_size
    windows = []
    for crop_size, gap in zip(crop_sizes, gaps):
        assert crop_size > gap, f"invalid crop_size gap pair [{crop_size} {gap}]"
        step = crop_size - gap

        xn = 1 if w <= crop_size else ceil((w - crop_size) / step + 1)
        xs = [step * i for i in range(xn)]
        if len(xs) > 1 and xs[-1] + crop_size > w:
            xs[-1] = w - crop_size

        yn = 1 if h <= crop_size else ceil((h - crop_size) / step + 1)
        ys = [step * i for i in range(yn)]
        if len(ys) > 1 and ys[-1] + crop_size > h:
            ys[-1] = h - crop_size

        start = np.array(list(itertools.product(xs, ys)), dtype=np.int64)
        stop = start + crop_size
        windows.append(np.concatenate([start, stop], axis=1))
    windows = np.concatenate(windows, axis=0)

    im_in_wins = windows.copy()
    im_in_wins[:, 0::2] = np.clip(im_in_wins[:, 0::2], 0, w)
    im_in_wins[:, 1::2] = np.clip(im_in_wins[:, 1::2], 0, h)
    im_areas = (im_in_wins[:, 2] - im_in_wins[:, 0]) * (im_in_wins[:, 3] - im_in_wins[:, 1])
    win_areas = (windows[:, 2] - windows[:, 0]) * (windows[:, 3] - windows[:, 1])
    im_rates = im_areas / win_areas
    if not (im_rates > im_rate_thr).any():
        max_rate = im_rates.max()
        im_rates[abs(im_rates - max_rate) < eps] = 1
    return windows[im_rates > im_rate_thr]
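
A quick sketch of how the tiling behaves for a hypothetical 1500x2000 (h, w) image with the default crop size and gap:

```python
from ultralytics.data.split_dota import get_windows

windows = get_windows((1500, 2000), crop_sizes=(1024,), gaps=(200,))
print(windows.shape)  # (6, 4): six 1024x1024 windows tile this image
print(windows[0])     # [   0    0 1024 1024] -> [x_start, y_start, x_stop, y_stop]
```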





ultralytics.data.split_dota.get_window_obj

get_window_obj(anno, windows, iof_thr=0.7)

Get objects for each window.

Source code in ultralytics/data/split_dota.py
def get_window_obj(anno, windows, iof_thr=0.7):
    """Get objects for each window."""
    h, w = anno["ori_size"]
    label = anno["label"]
    if len(label):
        label[:, 1::2] *= w
        label[:, 2::2] *= h
        iofs = bbox_iof(label[:, 1:], windows)
        # Unnormalized and misaligned coordinates
        return [(label[iofs[:, i] >= iof_thr]) for i in range(len(windows))]  # window_anns
    else:
        return [np.zeros((0, 9), dtype=np.float32) for _ in range(len(windows))]  # window_anns
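
A usage sketch chaining this with load_yolo_dota and get_windows (the dataset root is a placeholder):

```python
from ultralytics.data.split_dota import get_window_obj, get_windows, load_yolo_dota

anno = load_yolo_dota("path/to/DOTAv1", split="train")[0]  # placeholder dataset root
windows = get_windows(anno["ori_size"])
window_objs = get_window_obj(anno, windows, iof_thr=0.7)

print(len(window_objs) == len(windows))  # True: one label array per window
print(window_objs[0].shape)              # (num_objects_in_first_window, 9)
# Note: anno["label"] coordinates are scaled from normalized units to pixels in place by this call.
```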





ultralytics.data.split_dota.crop_and_save

crop_and_save(
    anno, windows, window_objs, im_dir, lb_dir, allow_background_images=True
)

Crop images and save new labels.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `anno` | `dict` | Annotation dict with `filepath`, `label`, and `ori_size` keys. | required |
| `windows` | `ndarray` | Array of window coordinates with shape (n, 4). | required |
| `window_objs` | `list` | A list of labels inside each window. | required |
| `im_dir` | `str` | The output directory path for images. | required |
| `lb_dir` | `str` | The output directory path for labels. | required |
| `allow_background_images` | `bool` | Whether to include background images without labels. | `True` |
Notes

The directory structure assumed for the DOTA dataset:

    - data_root
        - images
            - train
            - val
        - labels
            - train
            - val

Source code in ultralytics/data/split_dota.py
def crop_and_save(anno, windows, window_objs, im_dir, lb_dir, allow_background_images=True):
    """
    Crop images and save new labels.

    Args:
        anno (dict): Annotation dict, including `filepath`, `label`, `ori_size` as its keys.
        windows (np.ndarray): Array of windows coordinates with shape (n, 4).
        window_objs (list): A list of labels inside each window.
        im_dir (str): The output directory path of images.
        lb_dir (str): The output directory path of labels.
        allow_background_images (bool): Whether to include background images without labels.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - train
                    - val
                - labels
                    - train
                    - val
    """
    im = cv2.imread(anno["filepath"])
    name = Path(anno["filepath"]).stem
    for i, window in enumerate(windows):
        x_start, y_start, x_stop, y_stop = window.tolist()
        new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"
        patch_im = im[y_start:y_stop, x_start:x_stop]
        ph, pw = patch_im.shape[:2]

        label = window_objs[i]
        if len(label) or allow_background_images:
            cv2.imwrite(str(Path(im_dir) / f"{new_name}.jpg"), patch_im)
        if len(label):
            label[:, 1::2] -= x_start
            label[:, 2::2] -= y_start
            label[:, 1::2] /= pw
            label[:, 2::2] /= ph

            with open(Path(lb_dir) / f"{new_name}.txt", "w", encoding="utf-8") as f:
                for lb in label:
                    formatted_coords = [f"{coord:.6g}" for coord in lb[1:]]
                    f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")
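
A sketch of cropping a single image end to end, using placeholder input and output paths:

```python
from pathlib import Path

from ultralytics.data.split_dota import crop_and_save, get_window_obj, get_windows, load_yolo_dota

# Placeholder output paths; the directories must exist before calling crop_and_save
im_dir, lb_dir = Path("out/images/train"), Path("out/labels/train")
im_dir.mkdir(parents=True, exist_ok=True)
lb_dir.mkdir(parents=True, exist_ok=True)

anno = load_yolo_dota("path/to/DOTAv1", split="train")[0]  # placeholder dataset root
windows = get_windows(anno["ori_size"])
window_objs = get_window_obj(anno, windows)
crop_and_save(anno, windows, window_objs, str(im_dir), str(lb_dir))
# Writes one .jpg patch per window to im_dir and a .txt label file for each non-empty window to lb_dir
```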





ultralytics.data.split_dota.split_images_and_labels

split_images_and_labels(
    data_root, save_dir, split="train", crop_sizes=(1024,), gaps=(200,)
)

Split both images and labels.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `data_root` | `str` | Root directory of the dataset. | required |
| `save_dir` | `str` | Directory to save the split dataset. | required |
| `split` | `str` | The dataset split, either `train` or `val`. | `'train'` |
| `crop_sizes` | `tuple` | Tuple of crop sizes. | `(1024,)` |
| `gaps` | `tuple` | Tuple of gaps between crops. | `(200,)` |
Notes

The directory structure assumed for the DOTA dataset:

    - data_root
        - images
            - split
        - labels
            - split

and the output directory structure is:

    - save_dir
        - images
            - split
        - labels
            - split

Source code in ultralytics/data/split_dota.py
def split_images_and_labels(data_root, save_dir, split="train", crop_sizes=(1024,), gaps=(200,)):
    """
    Split both images and labels.

    Args:
        data_root (str): Root directory of the dataset.
        save_dir (str): Directory to save the split dataset.
        split (str): The split data set, could be `train` or `val`.
        crop_sizes (tuple): Tuple of crop sizes.
        gaps (tuple): Tuple of gaps between crops.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - split
                - labels
                    - split
        and the output directory structure is:
            - save_dir
                - images
                    - split
                - labels
                    - split
    """
    im_dir = Path(save_dir) / "images" / split
    im_dir.mkdir(parents=True, exist_ok=True)
    lb_dir = Path(save_dir) / "labels" / split
    lb_dir.mkdir(parents=True, exist_ok=True)

    annos = load_yolo_dota(data_root, split=split)
    for anno in TQDM(annos, total=len(annos), desc=split):
        windows = get_windows(anno["ori_size"], crop_sizes, gaps)
        window_objs = get_window_obj(anno, windows)
        crop_and_save(anno, windows, window_objs, str(im_dir), str(lb_dir))
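
A usage sketch with placeholder paths, assuming data_root contains images/train and labels/train:

```python
from ultralytics.data.split_dota import split_images_and_labels

split_images_and_labels(
    data_root="path/to/DOTAv1",        # placeholder dataset root
    save_dir="path/to/DOTAv1-split",   # placeholder output directory
    split="train",
    crop_sizes=(1024,),
    gaps=(200,),
)
```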





ultralytics.data.split_dota.split_trainval

split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,))

Split the train and val sets of DOTA.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `data_root` | `str` | Root directory of the dataset. | required |
| `save_dir` | `str` | Directory to save the split dataset. | required |
| `crop_size` | `int` | Base crop size. | `1024` |
| `gap` | `int` | Base gap between crops. | `200` |
| `rates` | `tuple` | Scaling rates for crop_size and gap. | `(1.0,)` |
Notes

The directory structure assumed for the DOTA dataset:

    - data_root
        - images
            - train
            - val
        - labels
            - train
            - val

and the output directory structure is:

    - save_dir
        - images
            - train
            - val
        - labels
            - train
            - val

Source code in ultralytics/data/split_dota.py
def split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,)):
    """
    Split train and val set of DOTA.

    Args:
        data_root (str): Root directory of the dataset.
        save_dir (str): Directory to save the split dataset.
        crop_size (int): Base crop size.
        gap (int): Base gap between crops.
        rates (tuple): Scaling rates for crop_size and gap.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - train
                    - val
                - labels
                    - train
                    - val
        and the output directory structure is:
            - save_dir
                - images
                    - train
                    - val
                - labels
                    - train
                    - val
    """
    crop_sizes, gaps = [], []
    for r in rates:
        crop_sizes.append(int(crop_size / r))
        gaps.append(int(gap / r))
    for split in ["train", "val"]:
        split_images_and_labels(data_root, save_dir, split, crop_sizes, gaps)
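
A usage sketch with placeholder paths; the comment shows how rates maps to crop sizes:

```python
from ultralytics.data.split_dota import split_trainval

# Placeholder paths. With rates=(0.5, 1.0), crops of 2048 and 1024 pixels (gaps 400 and 200)
# would be generated, since each size is computed as int(crop_size / rate).
split_trainval(
    data_root="path/to/DOTAv1",
    save_dir="path/to/DOTAv1-split",
    crop_size=1024,
    gap=200,
    rates=(1.0,),
)
```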





ultralytics.data.split_dota.split_test

split_test(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,))

Split the test set of DOTA; labels are not included in this set.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `data_root` | `str` | Root directory of the dataset. | required |
| `save_dir` | `str` | Directory to save the split dataset. | required |
| `crop_size` | `int` | Base crop size. | `1024` |
| `gap` | `int` | Base gap between crops. | `200` |
| `rates` | `tuple` | Scaling rates for crop_size and gap. | `(1.0,)` |
Notes

The directory structure assumed for the DOTA dataset:

    - data_root
        - images
            - test

and the output directory structure is:

    - save_dir
        - images
            - test

Source code in ultralytics/data/split_dota.py
def split_test(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,)):
    """
    Split test set of DOTA, labels are not included within this set.

    Args:
        data_root (str): Root directory of the dataset.
        save_dir (str): Directory to save the split dataset.
        crop_size (int): Base crop size.
        gap (int): Base gap between crops.
        rates (tuple): Scaling rates for crop_size and gap.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - test
        and the output directory structure is:
            - save_dir
                - images
                    - test
    """
    crop_sizes, gaps = [], []
    for r in rates:
        crop_sizes.append(int(crop_size / r))
        gaps.append(int(gap / r))
    save_dir = Path(save_dir) / "images" / "test"
    save_dir.mkdir(parents=True, exist_ok=True)

    im_dir = Path(data_root) / "images" / "test"
    assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
    im_files = glob(str(im_dir / "*"))
    for im_file in TQDM(im_files, total=len(im_files), desc="test"):
        w, h = exif_size(Image.open(im_file))
        windows = get_windows((h, w), crop_sizes=crop_sizes, gaps=gaps)
        im = cv2.imread(im_file)
        name = Path(im_file).stem
        for window in windows:
            x_start, y_start, x_stop, y_stop = window.tolist()
            new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"
            patch_im = im[y_start:y_stop, x_start:x_stop]
            cv2.imwrite(str(save_dir / f"{new_name}.jpg"), patch_im)
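
A usage sketch with placeholder paths, assuming data_root contains an images/test directory:

```python
from ultralytics.data.split_dota import split_test

# Placeholder paths; data_root only needs images/test, since no labels exist for this split
split_test(
    data_root="path/to/DOTAv1",
    save_dir="path/to/DOTAv1-split",
    crop_size=1024,
    gap=200,
)
```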




