
Reference for ultralytics/data/split_dota.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/data/split_dota.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!


ultralytics.data.split_dota.bbox_iof

bbox_iof(polygon1: ndarray, bbox2: ndarray, eps: float = 1e-06) -> np.ndarray

Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| polygon1 | ndarray | Polygon coordinates with shape (N, 8). | required |
| bbox2 | ndarray | Bounding boxes with shape (N, 4). | required |
| eps | float | Small value to prevent division by zero. | 1e-06 |

Returns:

| Type | Description |
| --- | --- |
| ndarray | IoF scores with shape (N, 1) or (N, M) if bbox2 is (M, 4). |

Notes

Polygon format: [x1, y1, x2, y2, x3, y3, x4, y4]. Bounding box format: [x_min, y_min, x_max, y_max].

Source code in ultralytics/data/split_dota.py, lines 18-62
def bbox_iof(polygon1: np.ndarray, bbox2: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    """
    Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.

    Args:
        polygon1 (np.ndarray): Polygon coordinates with shape (N, 8).
        bbox2 (np.ndarray): Bounding boxes with shape (N, 4).
        eps (float, optional): Small value to prevent division by zero.

    Returns:
        (np.ndarray): IoF scores with shape (N, 1) or (N, M) if bbox2 is (M, 4).

    Notes:
        Polygon format: [x1, y1, x2, y2, x3, y3, x4, y4].
        Bounding box format: [x_min, y_min, x_max, y_max].
    """
    check_requirements("shapely>=2.0.0")
    from shapely.geometry import Polygon

    polygon1 = polygon1.reshape(-1, 4, 2)
    lt_point = np.min(polygon1, axis=-2)  # left-top
    rb_point = np.max(polygon1, axis=-2)  # right-bottom
    bbox1 = np.concatenate([lt_point, rb_point], axis=-1)

    lt = np.maximum(bbox1[:, None, :2], bbox2[..., :2])
    rb = np.minimum(bbox1[:, None, 2:], bbox2[..., 2:])
    wh = np.clip(rb - lt, 0, np.inf)
    h_overlaps = wh[..., 0] * wh[..., 1]

    left, top, right, bottom = (bbox2[..., i] for i in range(4))
    polygon2 = np.stack([left, top, right, top, right, bottom, left, bottom], axis=-1).reshape(-1, 4, 2)

    sg_polys1 = [Polygon(p) for p in polygon1]
    sg_polys2 = [Polygon(p) for p in polygon2]
    overlaps = np.zeros(h_overlaps.shape)
    for p in zip(*np.nonzero(h_overlaps)):
        overlaps[p] = sg_polys1[p[0]].intersection(sg_polys2[p[-1]]).area
    unions = np.array([p.area for p in sg_polys1], dtype=np.float32)
    unions = unions[..., None]

    unions = np.clip(unions, eps, np.inf)
    outputs = overlaps / unions
    if outputs.ndim == 1:
        outputs = outputs[..., None]
    return outputs
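
A minimal usage sketch (illustrative values, not from the source) showing how bbox_iof reports the fraction of each polygon's area covered by each box; it assumes shapely is installed, as checked by the function itself.

import numpy as np

from ultralytics.data.split_dota import bbox_iof

# One square polygon (x1, y1, ..., x4, y4) and two candidate windows (x_min, y_min, x_max, y_max)
polygon = np.array([[10, 10, 50, 10, 50, 50, 10, 50]], dtype=np.float32)
windows = np.array([[0, 0, 40, 40], [100, 100, 200, 200]], dtype=np.float32)

iof = bbox_iof(polygon, windows)  # shape (1, 2): fraction of the polygon's area inside each window
print(iof)  # approximately [[0.5625, 0.0]]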





ultralytics.data.split_dota.load_yolo_dota

load_yolo_dota(data_root: str, split: str = 'train') -> List[Dict[str, Any]]

Load DOTA dataset annotations and image information.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| data_root | str | Data root directory. | required |
| split | str | Dataset split, either 'train' or 'val'. | 'train' |

Returns:

| Type | Description |
| --- | --- |
| List[Dict[str, Any]] | List of annotation dictionaries containing image information. |

Notes

The directory structure assumed for the DOTA dataset:

- data_root
    - images
        - train
        - val
    - labels
        - train
        - val

Source code in ultralytics/data/split_dota.py, lines 65-98
def load_yolo_dota(data_root: str, split: str = "train") -> List[Dict[str, Any]]:
    """
    Load DOTA dataset annotations and image information.

    Args:
        data_root (str): Data root directory.
        split (str, optional): The split data set, could be 'train' or 'val'.

    Returns:
        (List[Dict[str, Any]]): List of annotation dictionaries containing image information.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - train
                    - val
                - labels
                    - train
                    - val
    """
    assert split in {"train", "val"}, f"Split must be 'train' or 'val', not {split}."
    im_dir = Path(data_root) / "images" / split
    assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
    im_files = glob(str(Path(data_root) / "images" / split / "*"))
    lb_files = img2label_paths(im_files)
    annos = []
    for im_file, lb_file in zip(im_files, lb_files):
        w, h = exif_size(Image.open(im_file))
        with open(lb_file, encoding="utf-8") as f:
            lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
            lb = np.array(lb, dtype=np.float32)
        annos.append(dict(ori_size=(h, w), label=lb, filepath=im_file))
    return annos
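
A minimal usage sketch; "path/to/DOTAv1" is a placeholder for an actual DOTA-format dataset root containing images/train and labels/train subdirectories.

from ultralytics.data.split_dota import load_yolo_dota

annos = load_yolo_dota("path/to/DOTAv1", split="train")
print(len(annos))               # number of images in the split
print(annos[0]["ori_size"])     # (height, width) of the first image
print(annos[0]["label"].shape)  # (num_objects, 9): class index + 8 normalized polygon coordinates
print(annos[0]["filepath"])     # path to the image file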





ultralytics.data.split_dota.get_windows

get_windows(
    im_size: Tuple[int, int],
    crop_sizes: Tuple[int, ...] = (1024,),
    gaps: Tuple[int, ...] = (200,),
    im_rate_thr: float = 0.6,
    eps: float = 0.01,
) -> np.ndarray

Get the coordinates of sliding windows for image cropping.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| im_size | Tuple[int, int] | Original image size, (H, W). | required |
| crop_sizes | Tuple[int, ...] | Crop sizes of the windows. | (1024,) |
| gaps | Tuple[int, ...] | Gaps between crops. | (200,) |
| im_rate_thr | float | Threshold on the fraction of each window area that lies within the image. | 0.6 |
| eps | float | Epsilon value for math operations. | 0.01 |

Returns:

| Type | Description |
| --- | --- |
| ndarray | Array of window coordinates with shape (N, 4), where each row is [x_start, y_start, x_stop, y_stop]. |

Source code in ultralytics/data/split_dota.py, lines 101-151
def get_windows(
    im_size: Tuple[int, int],
    crop_sizes: Tuple[int, ...] = (1024,),
    gaps: Tuple[int, ...] = (200,),
    im_rate_thr: float = 0.6,
    eps: float = 0.01,
) -> np.ndarray:
    """
    Get the coordinates of sliding windows for image cropping.

    Args:
        im_size (Tuple[int, int]): Original image size, (H, W).
        crop_sizes (Tuple[int, ...], optional): Crop size of windows.
        gaps (Tuple[int, ...], optional): Gap between crops.
        im_rate_thr (float, optional): Threshold of windows areas divided by image areas.
        eps (float, optional): Epsilon value for math operations.

    Returns:
        (np.ndarray): Array of window coordinates with shape (N, 4) where each row is [x_start, y_start, x_stop, y_stop].
    """
    h, w = im_size
    windows = []
    for crop_size, gap in zip(crop_sizes, gaps):
        assert crop_size > gap, f"invalid crop_size gap pair [{crop_size} {gap}]"
        step = crop_size - gap

        xn = 1 if w <= crop_size else ceil((w - crop_size) / step + 1)
        xs = [step * i for i in range(xn)]
        if len(xs) > 1 and xs[-1] + crop_size > w:
            xs[-1] = w - crop_size

        yn = 1 if h <= crop_size else ceil((h - crop_size) / step + 1)
        ys = [step * i for i in range(yn)]
        if len(ys) > 1 and ys[-1] + crop_size > h:
            ys[-1] = h - crop_size

        start = np.array(list(itertools.product(xs, ys)), dtype=np.int64)
        stop = start + crop_size
        windows.append(np.concatenate([start, stop], axis=1))
    windows = np.concatenate(windows, axis=0)

    im_in_wins = windows.copy()
    im_in_wins[:, 0::2] = np.clip(im_in_wins[:, 0::2], 0, w)
    im_in_wins[:, 1::2] = np.clip(im_in_wins[:, 1::2], 0, h)
    im_areas = (im_in_wins[:, 2] - im_in_wins[:, 0]) * (im_in_wins[:, 3] - im_in_wins[:, 1])
    win_areas = (windows[:, 2] - windows[:, 0]) * (windows[:, 3] - windows[:, 1])
    im_rates = im_areas / win_areas
    if not (im_rates > im_rate_thr).any():
        max_rate = im_rates.max()
        im_rates[abs(im_rates - max_rate) < eps] = 1
    return windows[im_rates > im_rate_thr]
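
A quick sketch of the sliding-window grid for a hypothetical 2000x3000 image using the default crop size and gap; the printed values follow from the logic above.

from ultralytics.data.split_dota import get_windows

windows = get_windows((2000, 3000), crop_sizes=(1024,), gaps=(200,))
print(windows.shape)  # (12, 4): 4 x-offsets times 3 y-offsets for this image size
print(windows[0])     # [0 0 1024 1024], i.e. [x_start, y_start, x_stop, y_stop]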





ultralytics.data.split_dota.get_window_obj

get_window_obj(
    anno: Dict[str, Any], windows: ndarray, iof_thr: float = 0.7
) -> List[np.ndarray]

Get objects for each window based on IoF threshold.

Source code in ultralytics/data/split_dota.py, lines 154-165
def get_window_obj(anno: Dict[str, Any], windows: np.ndarray, iof_thr: float = 0.7) -> List[np.ndarray]:
    """Get objects for each window based on IoF threshold."""
    h, w = anno["ori_size"]
    label = anno["label"]
    if len(label):
        label[:, 1::2] *= w
        label[:, 2::2] *= h
        iofs = bbox_iof(label[:, 1:], windows)
        # Unnormalized and misaligned coordinates
        return [(label[iofs[:, i] >= iof_thr]) for i in range(len(windows))]  # window_anns
    else:
        return [np.zeros((0, 9), dtype=np.float32) for _ in range(len(windows))]  # window_anns
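
A sketch that pairs the windows of a single image with the labels whose IoF meets the default 0.7 threshold; the dataset path is a placeholder.

from ultralytics.data.split_dota import get_window_obj, get_windows, load_yolo_dota

anno = load_yolo_dota("path/to/DOTAv1", split="train")[0]
windows = get_windows(anno["ori_size"])
window_objs = get_window_obj(anno, windows)  # one label array per window
print(len(window_objs) == len(windows))      # True
print(window_objs[0].shape)                  # (num_objects_in_first_window, 9)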





ultralytics.data.split_dota.crop_and_save

crop_and_save(
    anno: Dict[str, Any],
    windows: ndarray,
    window_objs: List[ndarray],
    im_dir: str,
    lb_dir: str,
    allow_background_images: bool = True,
) -> None

Crop images and save new labels for each window.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| anno | Dict[str, Any] | Annotation dict with 'filepath', 'label', and 'ori_size' keys. | required |
| windows | ndarray | Array of window coordinates with shape (N, 4). | required |
| window_objs | List[ndarray] | A list of labels inside each window. | required |
| im_dir | str | Output directory path for images. | required |
| lb_dir | str | Output directory path for labels. | required |
| allow_background_images | bool | Whether to include background images without labels. | True |
Notes

The directory structure assumed for the DOTA dataset:

- data_root
    - images
        - train
        - val
    - labels
        - train
        - val

Source code in ultralytics/data/split_dota.py, lines 168-217
def crop_and_save(
    anno: Dict[str, Any],
    windows: np.ndarray,
    window_objs: List[np.ndarray],
    im_dir: str,
    lb_dir: str,
    allow_background_images: bool = True,
) -> None:
    """
    Crop images and save new labels for each window.

    Args:
        anno (Dict[str, Any]): Annotation dict, including 'filepath', 'label', 'ori_size' as its keys.
        windows (np.ndarray): Array of windows coordinates with shape (N, 4).
        window_objs (List[np.ndarray]): A list of labels inside each window.
        im_dir (str): The output directory path of images.
        lb_dir (str): The output directory path of labels.
        allow_background_images (bool, optional): Whether to include background images without labels.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - train
                    - val
                - labels
                    - train
                    - val
    """
    im = cv2.imread(anno["filepath"])
    name = Path(anno["filepath"]).stem
    for i, window in enumerate(windows):
        x_start, y_start, x_stop, y_stop = window.tolist()
        new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"
        patch_im = im[y_start:y_stop, x_start:x_stop]
        ph, pw = patch_im.shape[:2]

        label = window_objs[i]
        if len(label) or allow_background_images:
            cv2.imwrite(str(Path(im_dir) / f"{new_name}.jpg"), patch_im)
        if len(label):
            label[:, 1::2] -= x_start
            label[:, 2::2] -= y_start
            label[:, 1::2] /= pw
            label[:, 2::2] /= ph

            with open(Path(lb_dir) / f"{new_name}.txt", "w", encoding="utf-8") as f:
                for lb in label:
                    formatted_coords = [f"{coord:.6g}" for coord in lb[1:]]
                    f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")





ultralytics.data.split_dota.split_images_and_labels

split_images_and_labels(
    data_root: str,
    save_dir: str,
    split: str = "train",
    crop_sizes: Tuple[int, ...] = (1024,),
    gaps: Tuple[int, ...] = (200,),
) -> None

Split both images and labels for a given dataset split.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| data_root | str | Root directory of the dataset. | required |
| save_dir | str | Directory to save the split dataset. | required |
| split | str | Dataset split, either 'train' or 'val'. | 'train' |
| crop_sizes | Tuple[int, ...] | Tuple of crop sizes. | (1024,) |
| gaps | Tuple[int, ...] | Tuple of gaps between crops. | (200,) |
Notes

The directory structure assumed for the DOTA dataset:

- data_root
    - images
        - split
    - labels
        - split

and the output directory structure is:

- save_dir
    - images
        - split
    - labels
        - split

Source code in ultralytics/data/split_dota.py, lines 220-260
def split_images_and_labels(
    data_root: str,
    save_dir: str,
    split: str = "train",
    crop_sizes: Tuple[int, ...] = (1024,),
    gaps: Tuple[int, ...] = (200,),
) -> None:
    """
    Split both images and labels for a given dataset split.

    Args:
        data_root (str): Root directory of the dataset.
        save_dir (str): Directory to save the split dataset.
        split (str, optional): The split data set, could be 'train' or 'val'.
        crop_sizes (Tuple[int, ...], optional): Tuple of crop sizes.
        gaps (Tuple[int, ...], optional): Tuple of gaps between crops.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - split
                - labels
                    - split
        and the output directory structure is:
            - save_dir
                - images
                    - split
                - labels
                    - split
    """
    im_dir = Path(save_dir) / "images" / split
    im_dir.mkdir(parents=True, exist_ok=True)
    lb_dir = Path(save_dir) / "labels" / split
    lb_dir.mkdir(parents=True, exist_ok=True)

    annos = load_yolo_dota(data_root, split=split)
    for anno in TQDM(annos, total=len(annos), desc=split):
        windows = get_windows(anno["ori_size"], crop_sizes, gaps)
        window_objs = get_window_obj(anno, windows)
        crop_and_save(anno, windows, window_objs, str(im_dir), str(lb_dir))
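
A sketch that splits only the 'val' set; both paths are placeholders.

from ultralytics.data.split_dota import split_images_and_labels

split_images_and_labels(
    data_root="path/to/DOTAv1",
    save_dir="path/to/DOTAv1-split",
    split="val",
    crop_sizes=(1024,),
    gaps=(200,),
)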





ultralytics.data.split_dota.split_trainval

split_trainval(
    data_root: str,
    save_dir: str,
    crop_size: int = 1024,
    gap: int = 200,
    rates: Tuple[float, ...] = (1.0,),
) -> None

Split train and val sets of DOTA dataset with multiple scaling rates.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| data_root | str | Root directory of the dataset. | required |
| save_dir | str | Directory to save the split dataset. | required |
| crop_size | int | Base crop size. | 1024 |
| gap | int | Base gap between crops. | 200 |
| rates | Tuple[float, ...] | Scaling rates for crop_size and gap. | (1.0,) |
Notes

The directory structure assumed for the DOTA dataset:

- data_root
    - images
        - train
        - val
    - labels
        - train
        - val

and the output directory structure is:

- save_dir
    - images
        - train
        - val
    - labels
        - train
        - val

Source code in ultralytics/data/split_dota.py, lines 263-299
def split_trainval(
    data_root: str, save_dir: str, crop_size: int = 1024, gap: int = 200, rates: Tuple[float, ...] = (1.0,)
) -> None:
    """
    Split train and val sets of DOTA dataset with multiple scaling rates.

    Args:
        data_root (str): Root directory of the dataset.
        save_dir (str): Directory to save the split dataset.
        crop_size (int, optional): Base crop size.
        gap (int, optional): Base gap between crops.
        rates (Tuple[float, ...], optional): Scaling rates for crop_size and gap.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - train
                    - val
                - labels
                    - train
                    - val
        and the output directory structure is:
            - save_dir
                - images
                    - train
                    - val
                - labels
                    - train
                    - val
    """
    crop_sizes, gaps = [], []
    for r in rates:
        crop_sizes.append(int(crop_size / r))
        gaps.append(int(gap / r))
    for split in ["train", "val"]:
        split_images_and_labels(data_root, save_dir, split, crop_sizes, gaps)
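
A sketch splitting train and val at two scales; following the crop_size / rate logic above, rate 0.5 yields 2048-pixel crops with a 400-pixel gap and rate 1.0 the base 1024/200. Paths are placeholders.

from ultralytics.data.split_dota import split_trainval

split_trainval(
    data_root="path/to/DOTAv1",
    save_dir="path/to/DOTAv1-split",
    crop_size=1024,
    gap=200,
    rates=(0.5, 1.0),
)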





ultralytics.data.split_dota.split_test

split_test(
    data_root: str,
    save_dir: str,
    crop_size: int = 1024,
    gap: int = 200,
    rates: Tuple[float, ...] = (1.0,),
) -> None

Split the test set of the DOTA dataset; labels are not included in this set.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| data_root | str | Root directory of the dataset. | required |
| save_dir | str | Directory to save the split dataset. | required |
| crop_size | int | Base crop size. | 1024 |
| gap | int | Base gap between crops. | 200 |
| rates | Tuple[float, ...] | Scaling rates for crop_size and gap. | (1.0,) |
Notes

The directory structure assumed for the DOTA dataset:

- data_root
    - images
        - test

and the output directory structure is:

- save_dir
    - images
        - test

Source code in ultralytics/data/split_dota.py, lines 302-344
def split_test(
    data_root: str, save_dir: str, crop_size: int = 1024, gap: int = 200, rates: Tuple[float, ...] = (1.0,)
) -> None:
    """
    Split test set of DOTA dataset, labels are not included within this set.

    Args:
        data_root (str): Root directory of the dataset.
        save_dir (str): Directory to save the split dataset.
        crop_size (int, optional): Base crop size.
        gap (int, optional): Base gap between crops.
        rates (Tuple[float, ...], optional): Scaling rates for crop_size and gap.

    Notes:
        The directory structure assumed for the DOTA dataset:
            - data_root
                - images
                    - test
        and the output directory structure is:
            - save_dir
                - images
                    - test
    """
    crop_sizes, gaps = [], []
    for r in rates:
        crop_sizes.append(int(crop_size / r))
        gaps.append(int(gap / r))
    save_dir = Path(save_dir) / "images" / "test"
    save_dir.mkdir(parents=True, exist_ok=True)

    im_dir = Path(data_root) / "images" / "test"
    assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
    im_files = glob(str(im_dir / "*"))
    for im_file in TQDM(im_files, total=len(im_files), desc="test"):
        w, h = exif_size(Image.open(im_file))
        windows = get_windows((h, w), crop_sizes=crop_sizes, gaps=gaps)
        im = cv2.imread(im_file)
        name = Path(im_file).stem
        for window in windows:
            x_start, y_start, x_stop, y_stop = window.tolist()
            new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"
            patch_im = im[y_start:y_stop, x_start:x_stop]
            cv2.imwrite(str(save_dir / f"{new_name}.jpg"), patch_im)
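
A sketch for the unlabeled test images only; paths are placeholders.

from ultralytics.data.split_dota import split_test

split_test(
    data_root="path/to/DOTAv1",
    save_dir="path/to/DOTAv1-split",
    crop_size=1024,
    gap=200,
)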




