Skip to content

Reference for ultralytics/utils/callbacks/comet.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/comet.py. If you spot a problem please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!


ultralytics.utils.callbacks.comet._get_comet_mode

_get_comet_mode() -> str

Returns the mode of comet set in the environment variables, defaults to 'online' if not set.

Source code in ultralytics/utils/callbacks/comet.py
39
40
41
42
43
44
45
46
47
48
49
50
51
def _get_comet_mode() -> str:
    """Returns the mode of comet set in the environment variables, defaults to 'online' if not set."""
    comet_mode = os.getenv("COMET_MODE")
    if comet_mode is not None:
        LOGGER.warning(
            "The COMET_MODE environment variable is deprecated. "
            "Please use COMET_START_ONLINE to set the Comet experiment mode. "
            "To start an offline Comet experiment, use 'export COMET_START_ONLINE=0'. "
            "If COMET_START_ONLINE is not set or is set to '1', an online Comet experiment will be created."
        )
        return comet_mode

    return "online"





ultralytics.utils.callbacks.comet._get_comet_model_name

_get_comet_model_name() -> str

Returns the model name for Comet from the environment variable COMET_MODEL_NAME or defaults to 'Ultralytics'.

Source code in ultralytics/utils/callbacks/comet.py
54
55
56
def _get_comet_model_name() -> str:
    """Returns the model name for Comet from the environment variable COMET_MODEL_NAME or defaults to 'Ultralytics'."""
    return os.getenv("COMET_MODEL_NAME", "Ultralytics")





ultralytics.utils.callbacks.comet._get_eval_batch_logging_interval

_get_eval_batch_logging_interval() -> int

Get the evaluation batch logging interval from environment variable or use default value 1.

Source code in ultralytics/utils/callbacks/comet.py
59
60
61
def _get_eval_batch_logging_interval() -> int:
    """Get the evaluation batch logging interval from environment variable or use default value 1."""
    return int(os.getenv("COMET_EVAL_BATCH_LOGGING_INTERVAL", 1))





ultralytics.utils.callbacks.comet._get_max_image_predictions_to_log

_get_max_image_predictions_to_log() -> int

Get the maximum number of image predictions to log from the environment variables.

Source code in ultralytics/utils/callbacks/comet.py
64
65
66
def _get_max_image_predictions_to_log() -> int:
    """Get the maximum number of image predictions to log from the environment variables."""
    return int(os.getenv("COMET_MAX_IMAGE_PREDICTIONS", 100))





ultralytics.utils.callbacks.comet._scale_confidence_score

_scale_confidence_score(score: float) -> float

Scales the given confidence score by a factor specified in an environment variable.

Source code in ultralytics/utils/callbacks/comet.py
69
70
71
72
def _scale_confidence_score(score: float) -> float:
    """Scales the given confidence score by a factor specified in an environment variable."""
    scale = float(os.getenv("COMET_MAX_CONFIDENCE_SCORE", 100.0))
    return score * scale





ultralytics.utils.callbacks.comet._should_log_confusion_matrix

_should_log_confusion_matrix() -> bool

Determines if the confusion matrix should be logged based on the environment variable settings.

Source code in ultralytics/utils/callbacks/comet.py
75
76
77
def _should_log_confusion_matrix() -> bool:
    """Determines if the confusion matrix should be logged based on the environment variable settings."""
    return os.getenv("COMET_EVAL_LOG_CONFUSION_MATRIX", "false").lower() == "true"





ultralytics.utils.callbacks.comet._should_log_image_predictions

_should_log_image_predictions() -> bool

Determines whether to log image predictions based on a specified environment variable.

Source code in ultralytics/utils/callbacks/comet.py
80
81
82
def _should_log_image_predictions() -> bool:
    """Determines whether to log image predictions based on a specified environment variable."""
    return os.getenv("COMET_EVAL_LOG_IMAGE_PREDICTIONS", "true").lower() == "true"





ultralytics.utils.callbacks.comet._resume_or_create_experiment

_resume_or_create_experiment(args: SimpleNamespace) -> None

Resumes CometML experiment or creates a new experiment based on args.

Ensures that the experiment object is only created in a single process during distributed training.

Source code in ultralytics/utils/callbacks/comet.py
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
def _resume_or_create_experiment(args: SimpleNamespace) -> None:
    """
    Resume an existing CometML experiment or create a new one from the given args.

    Only the main process (RANK -1 or 0) creates the experiment during distributed training.
    """
    if RANK not in {-1, 0}:
        return

    # Derive COMET_START_ONLINE from the (deprecated) COMET_MODE unless the user set it explicitly;
    # a user-provided COMET_START_ONLINE always overrides COMET_MODE.
    if os.getenv("COMET_START_ONLINE") is None:
        os.environ["COMET_START_ONLINE"] = "0" if _get_comet_mode() == "offline" else "1"

    try:
        experiment = comet_ml.start(project_name=os.getenv("COMET_PROJECT_NAME", args.project))
        experiment.log_parameters(vars(args))
        # Record the Comet-specific logging configuration for this run.
        experiment.log_others(
            {
                "eval_batch_logging_interval": _get_eval_batch_logging_interval(),
                "log_confusion_matrix_on_eval": _should_log_confusion_matrix(),
                "log_image_predictions": _should_log_image_predictions(),
                "max_image_predictions": _get_max_image_predictions_to_log(),
            }
        )
        experiment.log_other("Created from", "ultralytics")

    except Exception as e:
        LOGGER.warning(f"Comet installed but not initialized correctly, not logging this run. {e}")





ultralytics.utils.callbacks.comet._fetch_trainer_metadata

_fetch_trainer_metadata(trainer) -> dict

Returns metadata for YOLO training including epoch and asset saving status.

Source code in ultralytics/utils/callbacks/comet.py
118
119
120
121
122
123
124
125
126
127
128
129
130
131
def _fetch_trainer_metadata(trainer) -> dict:
    """Returns metadata for YOLO training including epoch and asset saving status."""
    curr_epoch = trainer.epoch + 1

    train_num_steps_per_epoch = len(trainer.train_loader.dataset) // trainer.batch_size
    curr_step = curr_epoch * train_num_steps_per_epoch
    final_epoch = curr_epoch == trainer.epochs

    save = trainer.args.save
    save_period = trainer.args.save_period
    save_interval = curr_epoch % save_period == 0
    save_assets = save and save_period > 0 and save_interval and not final_epoch

    return dict(curr_epoch=curr_epoch, curr_step=curr_step, save_assets=save_assets, final_epoch=final_epoch)





ultralytics.utils.callbacks.comet._scale_bounding_box_to_original_image_shape

_scale_bounding_box_to_original_image_shape(
    box, resized_image_shape, original_image_shape, ratio_pad
) -> List[float]

YOLO resizes images during training and the label values are normalized based on this resized shape.

This function rescales the bounding box labels to the original image shape.

Source code in ultralytics/utils/callbacks/comet.py
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
def _scale_bounding_box_to_original_image_shape(
    box, resized_image_shape, original_image_shape, ratio_pad
) -> List[float]:
    """
    Rescale one bounding box from the resized training image back to the original image shape.

    YOLO trains on resized images with normalized labels; this converts a normalized xywh box to
    absolute [x, y, w, h] coordinates (top-left origin) in the original image for Comet logging.
    """
    height, width = resized_image_shape

    # normalized xywh -> absolute xyxy at the resized scale
    box = ops.xywhn2xyxy(box, h=height, w=width)
    # absolute xyxy at the resized scale -> absolute xyxy at the original scale
    box = ops.scale_boxes(resized_image_shape, box, original_image_shape, ratio_pad)
    # xyxy -> xywh, then shift the xy center to the top-left corner
    box = ops.xyxy2xywh(box)
    box[:2] -= box[2:] / 2

    return box.tolist()





ultralytics.utils.callbacks.comet._format_ground_truth_annotations_for_detection

_format_ground_truth_annotations_for_detection(
    img_idx, image_path, batch, class_name_map=None
) -> Optional[dict]

Format ground truth annotations for object detection.

This function processes ground truth annotations from a batch of images for object detection tasks. It extracts bounding boxes, class labels, and other metadata for a specific image in the batch, and formats them for visualization or evaluation.

Parameters:

Name Type Description Default
img_idx int

Index of the image in the batch to process.

required
image_path str | Path

Path to the image file.

required
batch dict

Batch dictionary containing detection data with keys: - 'batch_idx': Tensor of batch indices - 'bboxes': Tensor of bounding boxes in normalized xywh format - 'cls': Tensor of class labels - 'ori_shape': Original image shapes - 'resized_shape': Resized image shapes - 'ratio_pad': Ratio and padding information

required
class_name_map dict | None

Mapping from class indices to class names.

None

Returns:

Type Description
dict | None

Formatted ground truth annotations with the following structure: - 'boxes': List of box coordinates [x, y, width, height] - 'label': Label string with format "gt_{class_name}" - 'score': Confidence score (always 1.0, scaled by _scale_confidence_score) Returns None if no bounding boxes are found for the image.

Source code in ultralytics/utils/callbacks/comet.py
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None) -> Optional[dict]:
    """
    Format ground truth annotations for object detection.

    Select the bounding boxes and class labels belonging to one image of a batch and rescale the
    boxes to the original image shape for Comet visualization.

    Args:
        img_idx (int): Index of the image in the batch to process.
        image_path (str | Path): Path to the image file.
        batch (dict): Batch dictionary containing detection data with keys:
            - 'batch_idx': Tensor of batch indices
            - 'bboxes': Tensor of bounding boxes in normalized xywh format
            - 'cls': Tensor of class labels
            - 'ori_shape': Original image shapes
            - 'resized_shape': Resized image shapes
            - 'ratio_pad': Ratio and padding information
        class_name_map (dict | None, optional): Mapping from class indices to class names.

    Returns:
        (dict | None): {'name': 'ground_truth', 'data': [...]}, where each entry contains
            'boxes' ([x, y, width, height]), 'label' ("gt_{class_name}"), and 'score'
            (1.0 scaled by _scale_confidence_score). None when the image has no boxes.
    """
    mask = batch["batch_idx"] == img_idx
    boxes_for_image = batch["bboxes"][mask]
    if len(boxes_for_image) == 0:
        LOGGER.debug(f"Comet Image: {image_path} has no bounding boxes labels")
        return None

    labels = batch["cls"][mask].squeeze(1).tolist()
    if class_name_map:
        labels = [str(class_name_map[label]) for label in labels]

    original_shape = batch["ori_shape"][img_idx]
    resized_shape = batch["resized_shape"][img_idx]
    ratio_pad = batch["ratio_pad"][img_idx]

    data = [
        {
            "boxes": [_scale_bounding_box_to_original_image_shape(box, resized_shape, original_shape, ratio_pad)],
            "label": f"gt_{label}",
            "score": _scale_confidence_score(1.0),
        }
        for box, label in zip(boxes_for_image, labels)
    ]

    return {"name": "ground_truth", "data": data}





ultralytics.utils.callbacks.comet._format_prediction_annotations

_format_prediction_annotations(
    image_path, metadata, class_label_map=None, class_map=None
) -> Optional[dict]

Format YOLO predictions for object detection visualization.

Source code in ultralytics/utils/callbacks/comet.py
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
def _format_prediction_annotations(image_path, metadata, class_label_map=None, class_map=None) -> Optional[dict]:
    """Format YOLO predictions for object detection visualization in Comet."""
    stem = image_path.stem
    # Prediction JSON keys numeric image stems as ints, so mirror that here.
    image_id = int(stem) if stem.isnumeric() else stem

    predictions = metadata.get(image_id)
    if not predictions:
        LOGGER.debug(f"Comet Image: {image_path} has no bounding boxes predictions")
        return None

    # Re-apply the class mapping that was used when the prediction JSON was created.
    if class_label_map and class_map:
        class_label_map = {class_map[k]: v for k, v in class_label_map.items()}

    try:
        # pycocotools is optional; it is only needed to decompress segmentation masks.
        from pycocotools.mask import decode  # noqa
    except ImportError:
        decode = None

    data = []
    for prediction in predictions:
        label = prediction["category_id"]
        if class_label_map:
            label = str(class_label_map[label])

        annotation = {
            "boxes": [prediction["bbox"]],
            "label": label,
            "score": _scale_confidence_score(prediction["score"]),
        }

        if decode is not None:
            # Attach polygon points only when a mask is present and extraction succeeds.
            segments = prediction.get("segmentation", None)
            if segments is not None:
                segments = _extract_segmentation_annotation(segments, decode)
            if segments is not None:
                annotation["points"] = segments

        data.append(annotation)

    return {"name": "prediction", "data": data}





ultralytics.utils.callbacks.comet._extract_segmentation_annotation

_extract_segmentation_annotation(
    segmentation_raw: str, decode: Callable
) -> Optional[List[List[Any]]]

Extracts segmentation annotation from compressed segmentations as list of polygons.

Parameters:

Name Type Description Default
segmentation_raw str

Raw segmentation data in compressed format.

required
decode Callable

Function to decode the compressed segmentation data.

required

Returns:

Type Description
Optional[List[List[Any]]]

List of polygon points or None if extraction fails.

Source code in ultralytics/utils/callbacks/comet.py
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) -> Optional[List[List[Any]]]:
    """
    Extract a list of flattened polygons from a compressed segmentation mask.

    Args:
        segmentation_raw: Raw segmentation data in compressed format.
        decode: Function that decodes the compressed segmentation into a binary mask.

    Returns:
        (Optional[List[List[Any]]]): Flattened polygon point lists, or None if extraction fails.
    """
    try:
        contours, _ = cv2.findContours(decode(segmentation_raw), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        # Keep only contours with at least 3 points (a valid polygon), flattened to [x0, y0, x1, y1, ...].
        return [np.array(contour).squeeze().ravel().tolist() for contour in contours if len(contour) >= 3]
    except Exception as e:
        LOGGER.warning(f"Comet Failed to extract segmentation annotation: {e}")
    return None





ultralytics.utils.callbacks.comet._fetch_annotations

_fetch_annotations(
    img_idx,
    image_path,
    batch,
    prediction_metadata_map,
    class_label_map,
    class_map,
) -> Optional[List]

Join the ground truth and prediction annotations if they exist.

Source code in ultralytics/utils/callbacks/comet.py
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
def _fetch_annotations(
    img_idx, image_path, batch, prediction_metadata_map, class_label_map, class_map
) -> Optional[List]:
    """Combine ground-truth and prediction annotations for one image, or return None if neither exists."""
    ground_truth = _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_label_map)
    predictions = _format_prediction_annotations(image_path, prediction_metadata_map, class_label_map, class_map)

    annotations = [candidate for candidate in (ground_truth, predictions) if candidate is not None]
    return [annotations] if annotations else None





ultralytics.utils.callbacks.comet._create_prediction_metadata_map

_create_prediction_metadata_map(model_predictions) -> dict

Create a metadata map for model predictions by grouping them based on image ID.

Source code in ultralytics/utils/callbacks/comet.py
292
293
294
295
296
297
298
299
def _create_prediction_metadata_map(model_predictions) -> dict:
    """Create metadata map for model predictions by groupings them based on image ID."""
    pred_metadata_map = {}
    for prediction in model_predictions:
        pred_metadata_map.setdefault(prediction["image_id"], [])
        pred_metadata_map[prediction["image_id"]].append(prediction)

    return pred_metadata_map





ultralytics.utils.callbacks.comet._log_confusion_matrix

_log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) -> None

Log the confusion matrix to Comet experiment.

Source code in ultralytics/utils/callbacks/comet.py
302
303
304
305
306
307
308
def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) -> None:
    """Log the confusion matrix to Comet experiment."""
    conf_mat = trainer.validator.confusion_matrix.matrix
    names = list(trainer.data["names"].values()) + ["background"]
    experiment.log_confusion_matrix(
        matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
    )





ultralytics.utils.callbacks.comet._log_images

_log_images(experiment, image_paths, curr_step, annotations=None) -> None

Log images to the experiment with optional annotations.

This function logs images to a Comet ML experiment, optionally including annotation data for visualization such as bounding boxes or segmentation masks.

Parameters:

Name Type Description Default
experiment Experiment

The Comet ML experiment to log images to.

required
image_paths List[Path]

List of paths to images that will be logged.

required
curr_step int

Current training step/iteration for tracking in the experiment timeline.

required
annotations List[List[dict]]

Nested list of annotation dictionaries for each image. Each annotation contains visualization data like bounding boxes, labels, and confidence scores.

None

Returns:

Type Description
None

None

Source code in ultralytics/utils/callbacks/comet.py
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
def _log_images(experiment, image_paths, curr_step, annotations=None) -> None:
    """
    Log images to the experiment with optional annotations.

    This function logs images to a Comet ML experiment, optionally including annotation data for visualization
    such as bounding boxes or segmentation masks.

    Args:
        experiment (comet_ml.Experiment): The Comet ML experiment to log images to.
        image_paths (List[Path]): List of paths to images that will be logged.
        curr_step (int): Current training step/iteration for tracking in the experiment timeline.
        annotations (List[List[dict]], optional): Nested list of annotation dictionaries for each image. Each annotation
            contains visualization data like bounding boxes, labels, and confidence scores.

    Returns:
        None
    """
    if annotations:
        for image_path, annotation in zip(image_paths, annotations):
            experiment.log_image(image_path, name=image_path.stem, step=curr_step, annotations=annotation)

    else:
        for image_path in image_paths:
            experiment.log_image(image_path, name=image_path.stem, step=curr_step)





ultralytics.utils.callbacks.comet._log_image_predictions

_log_image_predictions(experiment, validator, curr_step) -> None

Log predicted boxes for validation images during training.

This function logs image predictions to a Comet ML experiment during model validation. It processes validation data and formats both ground truth and prediction annotations for visualization in the Comet dashboard. The function respects configured limits on the number of images to log.

Parameters:

Name Type Description Default
experiment Experiment

The Comet ML experiment to log to.

required
validator BaseValidator

The validator instance containing validation data and predictions.

required
curr_step int

The current training step for logging timeline.

required
Notes

This function uses global state to track the number of logged predictions across calls. It only logs predictions for supported tasks defined in COMET_SUPPORTED_TASKS. The number of logged images is limited by the COMET_MAX_IMAGE_PREDICTIONS environment variable.

Source code in ultralytics/utils/callbacks/comet.py
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
def _log_image_predictions(experiment, validator, curr_step) -> None:
    """
    Log validation images with their predicted boxes to the Comet experiment.

    Walks the validator's dataloader, pairing ground-truth and prediction annotations for each
    image, and logs them until the configured image cap is reached.

    Args:
        experiment (comet_ml.Experiment): The Comet ML experiment to log to.
        validator (BaseValidator): Validator holding the dataloader and accumulated predictions.
        curr_step (int): Current training step for the logging timeline.

    Notes:
        Uses the module-level _comet_image_prediction_count to cap logging across calls. Only
        tasks in COMET_SUPPORTED_TASKS are logged, and the total number of logged images is
        limited by the COMET_MAX_IMAGE_PREDICTIONS environment variable.
    """
    global _comet_image_prediction_count

    if validator.args.task not in COMET_SUPPORTED_TASKS:
        return

    jdict = validator.jdict
    if not jdict:
        return

    predictions_metadata_map = _create_prediction_metadata_map(jdict)
    class_label_map = validator.names
    class_map = getattr(validator, "class_map", None)

    interval = _get_eval_batch_logging_interval()
    max_predictions = _get_max_image_predictions_to_log()

    for batch_idx, batch in enumerate(validator.dataloader):
        # Only every `interval`-th batch is logged.
        if (batch_idx + 1) % interval != 0:
            continue

        for img_idx, image_path in enumerate(batch["im_file"]):
            if _comet_image_prediction_count >= max_predictions:
                return

            image_path = Path(image_path)
            annotations = _fetch_annotations(
                img_idx,
                image_path,
                batch,
                predictions_metadata_map,
                class_label_map,
                class_map=class_map,
            )
            _log_images(experiment, [image_path], curr_step, annotations=annotations)
            _comet_image_prediction_count += 1





ultralytics.utils.callbacks.comet._log_plots

_log_plots(experiment, trainer) -> None

Log evaluation plots and label plots for the experiment.

This function logs various evaluation plots and confusion matrices to the experiment tracking system. It handles different types of metrics (SegmentMetrics, PoseMetrics, DetMetrics, OBBMetrics) and logs the appropriate plots for each type.

Parameters:

Name Type Description Default
experiment Experiment

The Comet ML experiment to log plots to.

required
trainer BaseTrainer

The trainer object containing validation metrics and save directory information.

required

Examples:

>>> from ultralytics.utils.callbacks.comet import _log_plots
>>> _log_plots(experiment, trainer)
Source code in ultralytics/utils/callbacks/comet.py
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
def _log_plots(experiment, trainer) -> None:
    """
    Log evaluation plots, confusion matrices, and label plots for the experiment.

    Chooses the evaluation plot filenames according to the validator's metrics type
    (SegmentMetrics, PoseMetrics, DetMetrics, or OBBMetrics), then logs those plots, the
    confusion matrix images, and — for non-classification tasks — the label plots.

    Args:
        experiment (comet_ml.Experiment): The Comet ML experiment to log plots to.
        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing validation
            metrics and save directory information.

    Examples:
        >>> from ultralytics.utils.callbacks.comet import _log_plots
        >>> _log_plots(experiment, trainer)
    """
    metrics = trainer.validator.metrics
    save_dir = trainer.save_dir

    plot_filenames = None
    if isinstance(metrics, SegmentMetrics) and metrics.task == "segment":
        plot_filenames = [
            save_dir / f"{prefix}{plots}.png"
            for plots in EVALUATION_PLOT_NAMES
            for prefix in SEGMENT_METRICS_PLOT_PREFIX
        ]
    elif isinstance(metrics, PoseMetrics):
        plot_filenames = [
            save_dir / f"{prefix}{plots}.png"
            for plots in EVALUATION_PLOT_NAMES
            for prefix in POSE_METRICS_PLOT_PREFIX
        ]
    elif isinstance(metrics, (DetMetrics, OBBMetrics)):
        plot_filenames = [save_dir / f"{plots}.png" for plots in EVALUATION_PLOT_NAMES]

    if plot_filenames is not None:
        _log_images(experiment, plot_filenames, None)

    _log_images(experiment, [save_dir / f"{plots}.png" for plots in CONFUSION_MATRIX_PLOT_NAMES], None)

    if not isinstance(metrics, ClassifyMetrics):
        _log_images(experiment, [save_dir / f"{labels}.jpg" for labels in LABEL_PLOT_NAMES], None)





ultralytics.utils.callbacks.comet._log_model

_log_model(experiment, trainer) -> None

Log the best-trained model to Comet.ml.

Source code in ultralytics/utils/callbacks/comet.py
444
445
446
447
def _log_model(experiment, trainer) -> None:
    """Upload the best checkpoint produced by training to Comet.ml."""
    experiment.log_model(
        _get_comet_model_name(), file_or_folder=str(trainer.best), file_name="best.pt", overwrite=True
    )





ultralytics.utils.callbacks.comet._log_image_batches

_log_image_batches(experiment, trainer, curr_step: int) -> None

Log samples of images batches for train, validation, and test.

Source code in ultralytics/utils/callbacks/comet.py
450
451
452
453
def _log_image_batches(experiment, trainer, curr_step: int) -> None:
    """Log sample training and validation batch mosaics saved in the run directory."""
    for pattern in ("train_batch*.jpg", "val_batch*.jpg"):
        _log_images(experiment, trainer.save_dir.glob(pattern), curr_step)





ultralytics.utils.callbacks.comet.on_pretrain_routine_start

on_pretrain_routine_start(trainer) -> None

Creates or resumes a CometML experiment at the start of a YOLO pre-training routine.

Source code in ultralytics/utils/callbacks/comet.py
456
457
458
def on_pretrain_routine_start(trainer) -> None:
    """Create or resume a CometML experiment when the YOLO pre-training routine starts."""
    # Thin wrapper; the helper itself restricts experiment creation to the main process.
    _resume_or_create_experiment(trainer.args)





ultralytics.utils.callbacks.comet.on_train_epoch_end

on_train_epoch_end(trainer) -> None

Log metrics and save batch images at the end of training epochs.

Source code in ultralytics/utils/callbacks/comet.py
461
462
463
464
465
466
467
468
469
470
471
def on_train_epoch_end(trainer) -> None:
    """Log training losses to Comet at the end of each training epoch."""
    experiment = comet_ml.get_running_experiment()
    if not experiment:
        return

    metadata = _fetch_trainer_metadata(trainer)
    experiment.log_metrics(
        trainer.label_loss_items(trainer.tloss, prefix="train"),
        step=metadata["curr_step"],
        epoch=metadata["curr_epoch"],
    )





ultralytics.utils.callbacks.comet.on_fit_epoch_end

on_fit_epoch_end(trainer) -> None

Log model assets at the end of each epoch during training.

This function is called at the end of each training epoch to log metrics, learning rates, and model information to a Comet ML experiment. It also logs model assets, confusion matrices, and image predictions based on configuration settings.

The function retrieves the current Comet ML experiment and logs various training metrics. If it's the first epoch, it also logs model information. On specified save intervals, it logs the model, confusion matrix (if enabled), and image predictions (if enabled).

Parameters:

Name Type Description Default
trainer BaseTrainer

The YOLO trainer object containing training state, metrics, and configuration.

required

Examples:

>>> # Inside a training loop
>>> on_fit_epoch_end(trainer)  # Log metrics and assets to Comet ML
Source code in ultralytics/utils/callbacks/comet.py
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
def on_fit_epoch_end(trainer) -> None:
    """
    Log metrics and model assets at the end of each fit epoch.

    Logs training metrics and learning rates to the running Comet ML experiment, adds model
    information on the first epoch, and — on save intervals — uploads the model and, depending on
    environment configuration, the confusion matrix and image predictions.

    Args:
        trainer (BaseTrainer): The YOLO trainer object containing training state, metrics, and configuration.

    Examples:
        >>> # Inside a training loop
        >>> on_fit_epoch_end(trainer)  # Log metrics and assets to Comet ML
    """
    experiment = comet_ml.get_running_experiment()
    if not experiment:
        return

    metadata = _fetch_trainer_metadata(trainer)
    curr_epoch = metadata["curr_epoch"]
    curr_step = metadata["curr_step"]

    experiment.log_metrics(trainer.metrics, step=curr_step, epoch=curr_epoch)
    experiment.log_metrics(trainer.lr, step=curr_step, epoch=curr_epoch)
    if curr_epoch == 1:
        # The model summary is only logged once, on the first epoch.
        from ultralytics.utils.torch_utils import model_info_for_loggers

        experiment.log_metrics(model_info_for_loggers(trainer), step=curr_step, epoch=curr_epoch)

    if not metadata["save_assets"]:
        return

    _log_model(experiment, trainer)
    if _should_log_confusion_matrix():
        _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch)
    if _should_log_image_predictions():
        _log_image_predictions(experiment, trainer.validator, curr_step)





ultralytics.utils.callbacks.comet.on_train_end

on_train_end(trainer) -> None

Perform operations at the end of training.

Source code in ultralytics/utils/callbacks/comet.py
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
def on_train_end(trainer) -> None:
    """Log final artifacts and close the Comet experiment when training completes."""
    experiment = comet_ml.get_running_experiment()
    if not experiment:
        return

    metadata = _fetch_trainer_metadata(trainer)
    curr_epoch = metadata["curr_epoch"]
    curr_step = metadata["curr_step"]

    _log_model(experiment, trainer)
    if trainer.args.plots:
        _log_plots(experiment, trainer)

    _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch)
    _log_image_predictions(experiment, trainer.validator, curr_step)
    _log_image_batches(experiment, trainer, curr_step)
    experiment.end()

    # Reset the cross-call prediction counter so a subsequent run starts fresh.
    global _comet_image_prediction_count
    _comet_image_prediction_count = 0





📅 Created 1 year ago ✏️ Updated 2 months ago