Reference for ultralytics/utils/callbacks/tensorboard.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/tensorboard.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!


ultralytics.utils.callbacks.tensorboard._log_scalars

_log_scalars(scalars: dict, step: int = 0) -> None

Log scalar values to TensorBoard.

Parameters:

| Name    | Type | Description                                                                                                               | Default  |
| ------- | ---- | ------------------------------------------------------------------------------------------------------------------------- | -------- |
| scalars | dict | Dictionary of scalar values to log to TensorBoard. Keys are scalar names and values are the corresponding scalar values.   | required |
| step    | int  | Global step value to record with the scalar values. Used for x-axis in TensorBoard graphs.                                  | 0        |

Examples:

>>> # Log training metrics
>>> metrics = {"loss": 0.5, "accuracy": 0.95}
>>> _log_scalars(metrics, step=100)
Source code in ultralytics/utils/callbacks/tensorboard.py
def _log_scalars(scalars: dict, step: int = 0) -> None:
    """
    Log scalar values to TensorBoard.

    Args:
        scalars (dict): Dictionary of scalar values to log to TensorBoard. Keys are scalar names and values are the
            corresponding scalar values.
        step (int): Global step value to record with the scalar values. Used for x-axis in TensorBoard graphs.

    Examples:
        >>> # Log training metrics
        >>> metrics = {"loss": 0.5, "accuracy": 0.95}
        >>> _log_scalars(metrics, step=100)
    """
    if WRITER:
        for k, v in scalars.items():
            WRITER.add_scalar(k, v, step)
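
The guard means this helper is a no-op until the global WRITER has been created in on_pretrain_routine_start below. A minimal standalone sketch of the same pattern, using a local SummaryWriter and a hypothetical log directory rather than the module-level global:

```python
# Standalone sketch (assumes torch with TensorBoard support is installed).
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/scalar_example")  # hypothetical log directory
for step, loss in enumerate([0.9, 0.7, 0.5, 0.4]):
    # One add_scalar call per key/value pair, as _log_scalars does for each dict item
    writer.add_scalar("train/loss", loss, step)
writer.close()
```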





ultralytics.utils.callbacks.tensorboard._log_tensorboard_graph

_log_tensorboard_graph(trainer) -> None

Log model graph to TensorBoard.

This function attempts to visualize the model architecture in TensorBoard by tracing the model with a dummy input tensor. It first tries a simple method suitable for YOLO models, and if that fails, falls back to a more complex approach for models like RTDETR that may require special handling.

Parameters:

| Name    | Type        | Description                                                                                                                                                                        | Default  |
| ------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- |
| trainer | BaseTrainer | The trainer object containing the model to visualize. Must have attributes: model (the PyTorch model to visualize) and args (configuration arguments with an 'imgsz' attribute).      | required |

Notes

This function requires TensorBoard integration to be enabled and the global WRITER to be initialized. It handles potential warnings from the PyTorch JIT tracer and attempts to gracefully handle different model architectures.

Source code in ultralytics/utils/callbacks/tensorboard.py
def _log_tensorboard_graph(trainer) -> None:
    """
    Log model graph to TensorBoard.

    This function attempts to visualize the model architecture in TensorBoard by tracing the model with a dummy input
    tensor. It first tries a simple method suitable for YOLO models, and if that fails, falls back to a more complex
    approach for models like RTDETR that may require special handling.

    Args:
        trainer (BaseTrainer): The trainer object containing the model to visualize. Must have attributes:
            - model: PyTorch model to visualize
            - args: Configuration arguments with 'imgsz' attribute

    Notes:
        This function requires TensorBoard integration to be enabled and the global WRITER to be initialized.
        It handles potential warnings from the PyTorch JIT tracer and attempts to gracefully handle different
        model architectures.
    """
    # Input image
    imgsz = trainer.args.imgsz
    imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
    p = next(trainer.model.parameters())  # for device, type
    im = torch.zeros((1, 3, *imgsz), device=p.device, dtype=p.dtype)  # input image (must be zeros, not empty)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)  # suppress jit trace warning
        warnings.simplefilter("ignore", category=torch.jit.TracerWarning)  # suppress jit trace warning

        # Try simple method first (YOLO)
        try:
            trainer.model.eval()  # place in .eval() mode to avoid BatchNorm statistics changes
            WRITER.add_graph(torch.jit.trace(torch_utils.de_parallel(trainer.model), im, strict=False), [])
            LOGGER.info(f"{PREFIX}model graph visualization added ✅")
            return

        except Exception:
            # Fallback to TorchScript export steps (RTDETR)
            try:
                model = deepcopy(torch_utils.de_parallel(trainer.model))
                model.eval()
                model = model.fuse(verbose=False)
                for m in model.modules():
                    if hasattr(m, "export"):  # Detect, RTDETRDecoder (Segment and Pose use Detect base class)
                        m.export = True
                        m.format = "torchscript"
                model(im)  # dry run
                WRITER.add_graph(torch.jit.trace(model, im, strict=False), [])
                LOGGER.info(f"{PREFIX}model graph visualization added ✅")
            except Exception as e:
                LOGGER.warning(f"{PREFIX}TensorBoard graph visualization failure {e}")





ultralytics.utils.callbacks.tensorboard.on_pretrain_routine_start

on_pretrain_routine_start(trainer) -> None

Initialize TensorBoard logging with SummaryWriter.

Source code in ultralytics/utils/callbacks/tensorboard.py
def on_pretrain_routine_start(trainer) -> None:
    """Initialize TensorBoard logging with SummaryWriter."""
    if SummaryWriter:
        try:
            global WRITER
            WRITER = SummaryWriter(str(trainer.save_dir))
            LOGGER.info(f"{PREFIX}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
        except Exception as e:
            LOGGER.warning(f"{PREFIX}TensorBoard not initialized correctly, not logging this run. {e}")





ultralytics.utils.callbacks.tensorboard.on_train_start

on_train_start(trainer) -> None

Log TensorBoard graph.

Source code in ultralytics/utils/callbacks/tensorboard.py
def on_train_start(trainer) -> None:
    """Log TensorBoard graph."""
    if WRITER:
        _log_tensorboard_graph(trainer)





ultralytics.utils.callbacks.tensorboard.on_train_epoch_end

on_train_epoch_end(trainer) -> None

Logs scalar statistics at the end of a training epoch.

Source code in ultralytics/utils/callbacks/tensorboard.py
def on_train_epoch_end(trainer) -> None:
    """Logs scalar statistics at the end of a training epoch."""
    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), trainer.epoch + 1)
    _log_scalars(trainer.lr, trainer.epoch + 1)
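
For illustration, the two dictionaries passed here have shapes roughly like the sketch below (the exact key names depend on the task and model; the detection-style names shown are assumptions):

```python
# Illustrative sketch only; keys and values are made up for a detection run.
from ultralytics.utils.callbacks.tensorboard import _log_scalars

epoch = 0
train_losses = {"train/box_loss": 1.23, "train/cls_loss": 0.98, "train/dfl_loss": 1.05}  # label_loss_items(...)
lrs = {"lr/pg0": 0.01, "lr/pg1": 0.01, "lr/pg2": 0.01}  # trainer.lr, one entry per optimizer param group

_log_scalars(train_losses, step=epoch + 1)  # no-op if the global WRITER was never initialized
_log_scalars(lrs, step=epoch + 1)
```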





ultralytics.utils.callbacks.tensorboard.on_fit_epoch_end

on_fit_epoch_end(trainer) -> None

Logs epoch metrics at the end of each training epoch.

Source code in ultralytics/utils/callbacks/tensorboard.py
def on_fit_epoch_end(trainer) -> None:
    """Logs epoch metrics at end of training epoch."""
    _log_scalars(trainer.metrics, trainer.epoch + 1)
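
Here trainer.metrics is the dictionary produced by validation at the end of each fit epoch; for a detection run it typically contains entries along these lines (illustrative names and values, not guaranteed keys):

```python
# Illustrative sketch only; key names vary by task. This dict is what gets
# passed to _log_scalars with step = trainer.epoch + 1.
metrics = {
    "metrics/precision(B)": 0.71,
    "metrics/recall(B)": 0.63,
    "metrics/mAP50(B)": 0.68,
    "metrics/mAP50-95(B)": 0.45,
    "val/box_loss": 1.10,
}
```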




