
Reference for ultralytics/utils/callbacks/tensorboard.py

Improvements

This page is sourced from https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/tensorboard.py. Have an improvement or example to add? Open a Pull Request — thank you! 🙏


function ultralytics.utils.callbacks.tensorboard._log_scalars

def _log_scalars(scalars: dict, step: int = 0) -> None

Log scalar values to TensorBoard.

Args

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| scalars | dict | Dictionary of scalar values to log to TensorBoard. Keys are scalar names and values are the corresponding scalar values. | required |
| step | int | Global step value to record with the scalar values. Used for x-axis in TensorBoard graphs. | 0 |

Examples

Log training metrics
>>> metrics = {"loss": 0.5, "accuracy": 0.95}
>>> _log_scalars(metrics, step=100)
Source code in ultralytics/utils/callbacks/tensorboard.py
def _log_scalars(scalars: dict, step: int = 0) -> None:
    """Log scalar values to TensorBoard.

    Args:
        scalars (dict): Dictionary of scalar values to log to TensorBoard. Keys are scalar names and values are the
            corresponding scalar values.
        step (int): Global step value to record with the scalar values. Used for x-axis in TensorBoard graphs.

    Examples:
        Log training metrics
        >>> metrics = {"loss": 0.5, "accuracy": 0.95}
        >>> _log_scalars(metrics, step=100)
    """
    if WRITER:
        for k, v in scalars.items():
            WRITER.add_scalar(k, v, step)





function ultralytics.utils.callbacks.tensorboard._log_tensorboard_graph

def _log_tensorboard_graph(trainer) -> None

Log model graph to TensorBoard.

This function attempts to visualize the model architecture in TensorBoard by tracing the model with a dummy input tensor. It first tries a simple method suitable for YOLO models, and if that fails, falls back to a more complex approach for models like RTDETR that may require special handling.

Args

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| trainer | ultralytics.engine.trainer.BaseTrainer | The trainer object containing the model to visualize. Must have attributes model and args with imgsz. | required |

Notes

This function requires TensorBoard integration to be enabled and the global WRITER to be initialized. It handles potential warnings from the PyTorch JIT tracer and attempts to gracefully handle different model architectures.

Source code in ultralytics/utils/callbacks/tensorboard.py
def _log_tensorboard_graph(trainer) -> None:
    """Log model graph to TensorBoard.

    This function attempts to visualize the model architecture in TensorBoard by tracing the model with a dummy input
    tensor. It first tries a simple method suitable for YOLO models, and if that fails, falls back to a more complex
    approach for models like RTDETR that may require special handling.

    Args:
        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing the model to visualize. Must
            have attributes model and args with imgsz.

    Notes:
        This function requires TensorBoard integration to be enabled and the global WRITER to be initialized.
        It handles potential warnings from the PyTorch JIT tracer and attempts to gracefully handle different
        model architectures.
    """
    # Input image
    imgsz = trainer.args.imgsz
    imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
    p = next(trainer.model.parameters())  # for device, type
    im = torch.zeros((1, 3, *imgsz), device=p.device, dtype=p.dtype)  # input image (must be zeros, not empty)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)  # suppress jit trace warning
        warnings.simplefilter("ignore", category=torch.jit.TracerWarning)  # suppress jit trace warning

        # Try simple method first (YOLO)
        try:
            trainer.model.eval()  # place in .eval() mode to avoid BatchNorm statistics changes
            WRITER.add_graph(torch.jit.trace(torch_utils.unwrap_model(trainer.model), im, strict=False), [])
            LOGGER.info(f"{PREFIX}model graph visualization added ✅")
            return

        except Exception:
            # Fallback to TorchScript export steps (RTDETR)
            try:
                model = deepcopy(torch_utils.unwrap_model(trainer.model))
                model.eval()
                model = model.fuse(verbose=False)
                for m in model.modules():
                    if hasattr(m, "export"):  # Detect, RTDETRDecoder (Segment and Pose use Detect base class)
                        m.export = True
                        m.format = "torchscript"
                model(im)  # dry run
                WRITER.add_graph(torch.jit.trace(model, im, strict=False), [])
                LOGGER.info(f"{PREFIX}model graph visualization added ✅")
            except Exception as e:
                LOGGER.warning(f"{PREFIX}TensorBoard graph visualization failure {e}")
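The same trace-then-add_graph idea can be exercised outside the trainer. A minimal sketch, assuming a toy torch.nn module stands in for trainer.model and a local SummaryWriter stands in for the global WRITER (the log directory name is hypothetical):
>>> import torch
>>> from torch.utils.tensorboard import SummaryWriter
>>> model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
>>> im = torch.zeros((1, 3, 64, 64))  # dummy input, zeros as in the function above
>>> writer = SummaryWriter("runs/graph_demo")  # assumed demo log directory
>>> writer.add_graph(torch.jit.trace(model, im, strict=False), [])
>>> writer.close()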





function ultralytics.utils.callbacks.tensorboard.on_pretrain_routine_start

def on_pretrain_routine_start(trainer) -> None

Initialize TensorBoard logging with SummaryWriter.

Args

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| trainer | | The trainer object; its save_dir is used as the TensorBoard log directory. | required |
Source code in ultralytics/utils/callbacks/tensorboard.py
def on_pretrain_routine_start(trainer) -> None:
    """Initialize TensorBoard logging with SummaryWriter."""
    if SummaryWriter:
        try:
            global WRITER
            WRITER = SummaryWriter(str(trainer.save_dir))
            LOGGER.info(f"{PREFIX}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
        except Exception as e:
            LOGGER.warning(f"{PREFIX}TensorBoard not initialized correctly, not logging this run. {e}")
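In normal use this callback fires automatically once TensorBoard logging is enabled in the Ultralytics settings. A minimal sketch, assuming the standard tensorboard settings key and the coco8 demo dataset:
>>> from ultralytics import YOLO, settings
>>> settings.update({"tensorboard": True})  # enable the TensorBoard integration (assumed settings key)
>>> model = YOLO("yolo11n.pt")
>>> model.train(data="coco8.yaml", epochs=1)  # SummaryWriter is created in trainer.save_dir
The run can then be viewed with the tensorboard --logdir command printed in the log message above.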





function ultralytics.utils.callbacks.tensorboard.on_train_start

def on_train_start(trainer) -> None

Log TensorBoard graph.

Args

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| trainer | | The trainer object containing the model whose graph is logged. | required |
Source code in ultralytics/utils/callbacks/tensorboard.py
def on_train_start(trainer) -> None:
    """Log TensorBoard graph."""
    if WRITER:
        _log_tensorboard_graph(trainer)





function ultralytics.utils.callbacks.tensorboard.on_train_epoch_end

def on_train_epoch_end(trainer) -> None

Log scalar statistics at the end of a training epoch.

Args

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| trainer | | The trainer object providing training loss items, learning rates, and the current epoch. | required |
Source code in ultralytics/utils/callbacks/tensorboard.py
def on_train_epoch_end(trainer) -> None:
    """Log scalar statistics at the end of a training epoch."""
    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), trainer.epoch + 1)
    _log_scalars(trainer.lr, trainer.epoch + 1)
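The two calls above log the smoothed training losses and the per-group learning rates under the same global step. A minimal sketch of the kinds of dictionaries involved; the exact keys depend on the task and the names below are illustrative:
>>> _log_scalars({"train/box_loss": 1.23, "train/cls_loss": 0.87, "train/dfl_loss": 1.05}, step=1)
>>> _log_scalars({"lr/pg0": 0.01, "lr/pg1": 0.01, "lr/pg2": 0.01}, step=1)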





function ultralytics.utils.callbacks.tensorboard.on_fit_epoch_end

def on_fit_epoch_end(trainer) -> None

Log epoch metrics at end of training epoch.

Args

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| trainer | | The trainer object providing validation metrics and the current epoch. | required |
Source code in ultralytics/utils/callbacks/tensorboard.py
def on_fit_epoch_end(trainer) -> None:
    """Log epoch metrics at end of training epoch."""
    _log_scalars(trainer.metrics, trainer.epoch + 1)
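At module level these functions are registered in a callbacks dictionary keyed by event name, so the trainer can invoke each one at the matching point in the training loop. A sketch of that pattern, guarded on SummaryWriter availability as in the source module:
>>> callbacks = (
...     {
...         "on_pretrain_routine_start": on_pretrain_routine_start,
...         "on_train_start": on_train_start,
...         "on_fit_epoch_end": on_fit_epoch_end,
...         "on_train_epoch_end": on_train_epoch_end,
...     }
...     if SummaryWriter
...     else {}
... )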




