
Reference for ultralytics/models/yolo/segment/val.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/val.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!


ultralytics.models.yolo.segment.val.SegmentationValidator

SegmentationValidator(
    dataloader=None, save_dir=None, args=None, _callbacks=None
)

Bases: DetectionValidator

A class extending the DetectionValidator class for validation based on a segmentation model.

This validator handles the evaluation of segmentation models, processing both bounding box and mask predictions to compute metrics such as mAP for both detection and segmentation tasks.

Attributes:

| Name | Type | Description |
| ---- | ---- | ----------- |
| plot_masks | list | List to store masks for plotting. |
| process | callable | Function to process masks based on save_json and save_txt flags. |
| args | namespace | Arguments for the validator. |
| metrics | SegmentMetrics | Metrics calculator for segmentation tasks. |
| stats | dict | Dictionary to store statistics during validation. |

Examples:

>>> from ultralytics.models.yolo.segment import SegmentationValidator
>>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
>>> validator = SegmentationValidator(args=args)
>>> validator()

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| dataloader | DataLoader | Dataloader to use for validation. | None |
| save_dir | Path | Directory to save results. | None |
| args | namespace | Arguments for the validator. | None |
| _callbacks | list | List of callback functions. | None |

Source code in ultralytics/models/yolo/segment/val.py
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
    """
    Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.

    Args:
        dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
        save_dir (Path, optional): Directory to save results.
        args (namespace, optional): Arguments for the validator.
        _callbacks (list, optional): List of callback functions.
    """
    super().__init__(dataloader, save_dir, args, _callbacks)
    self.plot_masks = None
    self.process = None
    self.args.task = "segment"
    self.metrics = SegmentMetrics(save_dir=self.save_dir)

eval_json

eval_json(stats: Dict[str, Any]) -> Dict[str, Any]

Return COCO-style instance segmentation evaluation metrics.

Source code in ultralytics/models/yolo/segment/val.py
def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
    """Return COCO-style instance segmentation evaluation metrics."""
    if self.args.save_json and (self.is_lvis or self.is_coco) and len(self.jdict):
        pred_json = self.save_dir / "predictions.json"  # predictions

        anno_json = (
            self.data["path"]
            / "annotations"
            / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
        )  # annotations

        pkg = "pycocotools" if self.is_coco else "lvis"
        LOGGER.info(f"\nEvaluating {pkg} mAP using {pred_json} and {anno_json}...")
        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            for x in anno_json, pred_json:
                assert x.is_file(), f"{x} file not found"
            check_requirements("pycocotools>=2.0.6" if self.is_coco else "lvis>=0.5.3")
            if self.is_coco:
                from pycocotools.coco import COCO  # noqa
                from pycocotools.cocoeval import COCOeval  # noqa

                anno = COCO(str(anno_json))  # init annotations api
                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                vals = [COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "segm")]
            else:
                from lvis import LVIS, LVISEval

                anno = LVIS(str(anno_json))
                pred = anno._load_json(str(pred_json))
                vals = [LVISEval(anno, pred, "bbox"), LVISEval(anno, pred, "segm")]

            for i, eval in enumerate(vals):
                eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                eval.evaluate()
                eval.accumulate()
                eval.summarize()
                if self.is_lvis:
                    eval.print_results()
                idx = i * 4 + 2
                # update mAP50-95 and mAP50
                stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = (
                    eval.stats[:2] if self.is_coco else [eval.results["AP"], eval.results["AP50"]]
                )
                if self.is_lvis:
                    tag = "B" if i == 0 else "M"
                    stats[f"metrics/APr({tag})"] = eval.results["APr"]
                    stats[f"metrics/APc({tag})"] = eval.results["APc"]
                    stats[f"metrics/APf({tag})"] = eval.results["APf"]

            if self.is_lvis:
                stats["fitness"] = stats["metrics/mAP50-95(B)"]

        except Exception as e:
            LOGGER.warning(f"{pkg} unable to run: {e}")
    return stats
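
A hedged usage sketch: eval_json runs automatically at the end of validation when save_json is enabled and the dataset is recognized as COCO or LVIS. The snippet below assumes the full COCO val data and the pretrained weights are available locally.

from ultralytics.models.yolo.segment import SegmentationValidator

# save_json writes predictions.json; on COCO/LVIS data, eval_json() then merges
# pycocotools/lvis bbox and segm mAP values into the returned stats dict.
args = dict(model="yolo11n-seg.pt", data="coco.yaml", save_json=True)
validator = SegmentationValidator(args=args)
stats = validator()  # gains metrics/mAP50(B), mAP50-95(B), mAP50(M), mAP50-95(M)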

get_desc

get_desc() -> str

Return a formatted description of evaluation metrics.

Source code in ultralytics/models/yolo/segment/val.py
def get_desc(self) -> str:
    """Return a formatted description of evaluation metrics."""
    return ("%22s" + "%11s" * 10) % (
        "Class",
        "Images",
        "Instances",
        "Box(P",
        "R",
        "mAP50",
        "mAP50-95)",
        "Mask(P",
        "R",
        "mAP50",
        "mAP50-95)",
    )
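
Illustrative only: evaluating the same format string outside the class shows the header printed at the start of validation, a 22-character class column followed by ten 11-character metric columns split into Box and Mask groups.

print(("%22s" + "%11s" * 10) % (
    "Class", "Images", "Instances",
    "Box(P", "R", "mAP50", "mAP50-95)",
    "Mask(P", "R", "mAP50", "mAP50-95)",
))  # right-aligned columns matching the per-class results table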

init_metrics

init_metrics(model: Module) -> None

Initialize metrics and select mask processing function based on save_json flag.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| model | Module | Model to validate. | required |

Source code in ultralytics/models/yolo/segment/val.py
def init_metrics(self, model: torch.nn.Module) -> None:
    """
    Initialize metrics and select mask processing function based on save_json flag.

    Args:
        model (torch.nn.Module): Model to validate.
    """
    super().init_metrics(model)
    self.plot_masks = []
    if self.args.save_json:
        check_requirements("pycocotools>=2.0.6")
    # More accurate vs faster
    self.process = ops.process_mask_native if self.args.save_json or self.args.save_txt else ops.process_mask
    self.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
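
The "More accurate vs faster" comment refers to the two mask decoders in ultralytics.utils.ops. A minimal sketch of the same selection, with the flags as plain booleans:

from ultralytics.utils import ops

# process_mask_native upsamples prototype masks to full image size before
# cropping (more accurate; used when masks are exported via save_json/save_txt),
# while process_mask crops at prototype resolution and is faster for metrics only.
save_json, save_txt = False, False
process = ops.process_mask_native if save_json or save_txt else ops.process_mask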

plot_predictions

plot_predictions(batch: Dict[str, Any], preds: List[Tensor], ni: int) -> None

Plot batch predictions with masks and bounding boxes.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| batch | Dict[str, Any] | Batch containing images and annotations. | required |
| preds | List[Tensor] | List of predictions from the model. | required |
| ni | int | Batch index. | required |

Source code in ultralytics/models/yolo/segment/val.py
def plot_predictions(self, batch: Dict[str, Any], preds: List[torch.Tensor], ni: int) -> None:
    """
    Plot batch predictions with masks and bounding boxes.

    Args:
        batch (Dict[str, Any]): Batch containing images and annotations.
        preds (List[torch.Tensor]): List of predictions from the model.
        ni (int): Batch index.
    """
    plot_images(
        batch["img"],
        *output_to_target(preds[0], max_det=50),  # not set to self.args.max_det due to slow plotting speed
        torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks,
        paths=batch["im_file"],
        fname=self.save_dir / f"val_batch{ni}_pred.jpg",
        names=self.names,
        on_plot=self.on_plot,
    )  # pred
    self.plot_masks.clear()

plot_val_samples

plot_val_samples(batch: Dict[str, Any], ni: int) -> None

Plot validation samples with bounding box labels and masks.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| batch | Dict[str, Any] | Batch containing images and annotations. | required |
| ni | int | Batch index. | required |

Source code in ultralytics/models/yolo/segment/val.py
def plot_val_samples(self, batch: Dict[str, Any], ni: int) -> None:
    """
    Plot validation samples with bounding box labels and masks.

    Args:
        batch (Dict[str, Any]): Batch containing images and annotations.
        ni (int): Batch index.
    """
    plot_images(
        batch["img"],
        batch["batch_idx"],
        batch["cls"].squeeze(-1),
        batch["bboxes"],
        masks=batch["masks"],
        paths=batch["im_file"],
        fname=self.save_dir / f"val_batch{ni}_labels.jpg",
        names=self.names,
        on_plot=self.on_plot,
    )
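
For reference, a sketch of the batch keys this method consumes, with dummy shapes (the real values come from the validation dataloader; mask resolution depends on overlap_mask and image size):

import torch

batch = {
    "img": torch.zeros(2, 3, 640, 640, dtype=torch.uint8),  # BCHW images
    "batch_idx": torch.tensor([0.0, 0.0, 1.0]),             # image index of each label
    "cls": torch.zeros(3, 1),                                # class id per label
    "bboxes": torch.rand(3, 4),                              # normalized xywh boxes
    "masks": torch.zeros(3, 160, 160, dtype=torch.uint8),    # instance masks
    "im_file": ["im0.jpg", "im1.jpg"],
}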

postprocess

postprocess(preds: List[Tensor]) -> Tuple[List[torch.Tensor], torch.Tensor]

Post-process YOLO predictions and return output detections with proto.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| preds | List[Tensor] | Raw predictions from the model. | required |

Returns:

| Name | Type | Description |
| ---- | ---- | ----------- |
| p | List[Tensor] | Processed detection predictions. |
| proto | Tensor | Prototype masks for segmentation. |

Source code in ultralytics/models/yolo/segment/val.py
def postprocess(self, preds: List[torch.Tensor]) -> Tuple[List[torch.Tensor], torch.Tensor]:
    """
    Post-process YOLO predictions and return output detections with proto.

    Args:
        preds (List[torch.Tensor]): Raw predictions from the model.

    Returns:
        p (List[torch.Tensor]): Processed detection predictions.
        proto (torch.Tensor): Prototype masks for segmentation.
    """
    p = super().postprocess(preds[0])
    proto = preds[1][-1] if len(preds[1]) == 3 else preds[1]  # second output is len 3 if pt, but only 1 if exported
    return p, proto
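
A minimal shape sketch of the proto-selection rule, using dummy tensors (shapes are illustrative; the assumed 3-tuple layout mirrors the comment above, with proto last):

import torch

second_out_pt = (torch.zeros(1), torch.zeros(1), torch.zeros(1, 32, 160, 160))
second_out_exported = torch.zeros(1, 32, 160, 160)

for second in (second_out_pt, second_out_exported):
    proto = second[-1] if len(second) == 3 else second
    print(tuple(proto.shape))  # (1, 32, 160, 160) in both cases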

pred_to_json

pred_to_json(predn: Tensor, filename: str, pred_masks: Tensor) -> None

Save one JSON result for COCO evaluation.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| predn | Tensor | Predictions in the format [x1, y1, x2, y2, conf, cls]. | required |
| filename | str | Image filename. | required |
| pred_masks | ndarray | Predicted masks. | required |

Examples:

>>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
Source code in ultralytics/models/yolo/segment/val.py
def pred_to_json(self, predn: torch.Tensor, filename: str, pred_masks: torch.Tensor) -> None:
    """
    Save one JSON result for COCO evaluation.

    Args:
        predn (torch.Tensor): Predictions in the format [x1, y1, x2, y2, conf, cls].
        filename (str): Image filename.
        pred_masks (numpy.ndarray): Predicted masks.

    Examples:
         >>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    """
    from pycocotools.mask import encode  # noqa

    def single_encode(x):
        """Encode predicted masks as RLE and append results to jdict."""
        rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
        rle["counts"] = rle["counts"].decode("utf-8")
        return rle

    stem = Path(filename).stem
    image_id = int(stem) if stem.isnumeric() else stem
    box = ops.xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    pred_masks = np.transpose(pred_masks, (2, 0, 1))
    with ThreadPool(NUM_THREADS) as pool:
        rles = pool.map(single_encode, pred_masks)
    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
        self.jdict.append(
            {
                "image_id": image_id,
                "category_id": self.class_map[int(p[5])],
                "bbox": [round(x, 3) for x in b],
                "score": round(p[4], 5),
                "segmentation": rles[i],
            }
        )
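
The RLE step above can be exercised in isolation. A minimal round-trip with pycocotools (the only assumption is that pycocotools is installed):

import numpy as np
from pycocotools.mask import encode, decode

mask = np.zeros((160, 160), dtype=np.uint8)
mask[40:100, 30:90] = 1
rle = encode(np.asarray(mask[:, :, None], order="F", dtype="uint8"))[0]  # Fortran order required
assert (decode(rle) == mask).all()             # lossless round trip
rle["counts"] = rle["counts"].decode("utf-8")  # bytes -> str so json.dumps works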

preprocess

preprocess(batch: Dict[str, Any]) -> Dict[str, Any]

Preprocess batch of images for YOLO segmentation validation.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| batch | Dict[str, Any] | Batch containing images and annotations. | required |

Returns:

| Type | Description |
| ---- | ----------- |
| Dict[str, Any] | Preprocessed batch. |

Source code in ultralytics/models/yolo/segment/val.py
def preprocess(self, batch: Dict[str, Any]) -> Dict[str, Any]:
    """
    Preprocess batch of images for YOLO segmentation validation.

    Args:
        batch (Dict[str, Any]): Batch containing images and annotations.

    Returns:
        (Dict[str, Any]): Preprocessed batch.
    """
    batch = super().preprocess(batch)
    batch["masks"] = batch["masks"].to(self.device).float()
    return batch
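
A minimal sketch of the segmentation-specific step layered on top of DetectionValidator.preprocess (device string chosen for illustration):

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
batch = {"masks": torch.randint(0, 2, (8, 160, 160), dtype=torch.uint8)}
batch["masks"] = batch["masks"].to(device).float()  # uint8 -> float32 on device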

save_one_txt

save_one_txt(
    predn: Tensor,
    pred_masks: Tensor,
    save_conf: bool,
    shape: Tuple[int, int],
    file: Path,
) -> None

Save YOLO detections to a txt file in normalized coordinates in a specific format.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| predn | Tensor | Predictions in the format (x1, y1, x2, y2, conf, class). | required |
| pred_masks | Tensor | Predicted masks. | required |
| save_conf | bool | Whether to save confidence scores. | required |
| shape | Tuple[int, int] | Shape of the original image. | required |
| file | Path | File path to save the detections. | required |

Source code in ultralytics/models/yolo/segment/val.py
def save_one_txt(
    self, predn: torch.Tensor, pred_masks: torch.Tensor, save_conf: bool, shape: Tuple[int, int], file: Path
) -> None:
    """
    Save YOLO detections to a txt file in normalized coordinates in a specific format.

    Args:
        predn (torch.Tensor): Predictions in the format (x1, y1, x2, y2, conf, class).
        pred_masks (torch.Tensor): Predicted masks.
        save_conf (bool): Whether to save confidence scores.
        shape (Tuple[int, int]): Shape of the original image.
        file (Path): File path to save the detections.
    """
    from ultralytics.engine.results import Results

    Results(
        np.zeros((shape[0], shape[1]), dtype=np.uint8),
        path=None,
        names=self.names,
        boxes=predn[:, :6],
        masks=pred_masks,
    ).save_txt(file, save_conf=save_conf)
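
A hedged standalone sketch of the same Results-based export, using dummy predictions (the file name and class map here are made up for illustration):

import numpy as np
import torch
from ultralytics.engine.results import Results

h, w = 480, 640  # original image shape
predn = torch.tensor([[10.0, 20.0, 110.0, 220.0, 0.9, 0.0]])  # x1,y1,x2,y2,conf,cls
pred_masks = torch.zeros(1, h, w, dtype=torch.uint8)
pred_masks[0, 20:220, 10:110] = 1
Results(
    np.zeros((h, w), dtype=np.uint8),  # placeholder image; only its shape matters
    path=None,
    names={0: "person"},
    boxes=predn,
    masks=pred_masks,
).save_txt("example_labels.txt", save_conf=True)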

update_metrics

update_metrics(
    preds: Tuple[List[Tensor], Tensor], batch: Dict[str, Any]
) -> None

Update metrics with the current batch predictions and targets.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| preds | Tuple[List[Tensor], Tensor] | Tuple of detection predictions and prototype masks from the model. | required |
| batch | Dict[str, Any] | Batch data containing ground truth. | required |
Source code in ultralytics/models/yolo/segment/val.py
def update_metrics(self, preds: Tuple[List[torch.Tensor], torch.Tensor], batch: Dict[str, Any]) -> None:
    """
    Update metrics with the current batch predictions and targets.

    Args:
        preds (Tuple[List[torch.Tensor], torch.Tensor]): Tuple of detection predictions and prototype masks.
        batch (Dict[str, Any]): Batch data containing ground truth.
    """
    for si, (pred, proto) in enumerate(zip(preds[0], preds[1])):
        self.seen += 1
        npr = len(pred)
        stat = dict(
            conf=torch.zeros(0, device=self.device),
            pred_cls=torch.zeros(0, device=self.device),
            tp=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
            tp_m=torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device),
        )
        pbatch = self._prepare_batch(si, batch)
        cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
        nl = len(cls)
        stat["target_cls"] = cls
        stat["target_img"] = cls.unique()
        if npr == 0:
            if nl:
                for k in self.stats.keys():
                    self.stats[k].append(stat[k])
                if self.args.plots:
                    self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
            continue

        # Masks
        gt_masks = pbatch.pop("masks")
        # Predictions
        if self.args.single_cls:
            pred[:, 5] = 0
        predn, pred_masks = self._prepare_pred(pred, pbatch, proto)
        stat["conf"] = predn[:, 4]
        stat["pred_cls"] = predn[:, 5]

        # Evaluate
        if nl:
            stat["tp"] = self._process_batch(predn, bbox, cls)
            stat["tp_m"] = self._process_batch(
                predn, bbox, cls, pred_masks, gt_masks, self.args.overlap_mask, masks=True
            )
        if self.args.plots:
            self.confusion_matrix.process_batch(predn, bbox, cls)

        for k in self.stats.keys():
            self.stats[k].append(stat[k])

        pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
        if self.args.plots and self.batch_i < 3:
            self.plot_masks.append(pred_masks[:50].cpu())  # Limit plotted items for speed
            if pred_masks.shape[0] > 50:
                LOGGER.warning("Limiting validation plots to first 50 items per image for speed...")

        # Save
        if self.args.save_json:
            self.pred_to_json(
                predn,
                batch["im_file"][si],
                ops.scale_image(
                    pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
                    pbatch["ori_shape"],
                    ratio_pad=batch["ratio_pad"][si],
                ),
            )
        if self.args.save_txt:
            self.save_one_txt(
                predn,
                pred_masks,
                self.args.save_conf,
                pbatch["ori_shape"],
                self.save_dir / "labels" / f"{Path(batch['im_file'][si]).stem}.txt",
            )
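
For orientation, a shape sketch of the per-image stat dict built above, assuming the usual 10 IoU thresholds from 0.50 to 0.95:

import torch

npr, niou = 25, 10  # predictions in this image, IoU thresholds
stat = dict(
    conf=torch.zeros(0),                           # later filled from predn[:, 4]
    pred_cls=torch.zeros(0),                       # later filled from predn[:, 5]
    tp=torch.zeros(npr, niou, dtype=torch.bool),   # box true positives per threshold
    tp_m=torch.zeros(npr, niou, dtype=torch.bool), # mask true positives per threshold
)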




