
Reference for hub_sdk/base/server_clients.py

Note

This file is available at https://github.com/ultralytics/hub-sdk/blob/main/hub_sdk/base/server_clients.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!


hub_sdk.base.server_clients.ModelUpload

ModelUpload(headers)

Bases: APIClient

Manages uploading and exporting of model files and metrics to Ultralytics HUB, as well as sending heartbeat updates.

This class handles communication with the Ultralytics HUB API for model-related operations, including uploading model checkpoints and metrics, exporting models to different formats, and maintaining heartbeat connections to track model training status.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `name` | `str` | Identifier for the model upload instance. |
| `alive` | `bool` | Flag indicating if the heartbeat thread should continue running. |
| `agent_id` | `str` | Unique identifier for the agent sending heartbeats. |
| `rate_limits` | `Dict` | Dictionary containing rate limits, in seconds, for different API operations. |

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `headers` | `Dict` | HTTP headers to use for API requests. | required |
Source code in hub_sdk/base/server_clients.py
def __init__(self, headers):
    """
    Initialize ModelUpload with API client configuration.

    Args:
        headers (Dict): HTTP headers to use for API requests.
    """
    super().__init__(f"{HUB_API_ROOT}/v1/models", headers)
    self.name = "model"
    self.alive = True
    self.agent_id = None
    self.rate_limits = {"metrics": 3.0, "ckpt": 900.0, "heartbeat": 300.0}
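
A minimal construction sketch is shown below. The `x-api-key` header and key value are placeholders for whatever authentication headers your Ultralytics HUB account requires.

```python
from hub_sdk.base.server_clients import ModelUpload

# Hypothetical authentication header; substitute your real HUB credentials
headers = {"x-api-key": "YOUR_API_KEY"}

model_client = ModelUpload(headers)
print(model_client.rate_limits)  # {'metrics': 3.0, 'ckpt': 900.0, 'heartbeat': 300.0}
```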

export

export(id: str, format: str) -> Optional[Response]

Export a model to a specific format.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `id` | `str` | The unique identifier of the model to be exported. | required |
| `format` | `str` | The format to export the model to. | required |

Returns:

| Type | Description |
| --- | --- |
| `Optional[Response]` | Response object from the export request, or None if it fails. |

Source code in hub_sdk/base/server_clients.py
def export(self, id: str, format: str) -> Optional[Response]:
    """
    Export a model to a specific format.

    Args:
        id (str): The unique identifier of the model to be exported.
        format (str): The format to export the model to.

    Returns:
        (Optional[Response]): Response object from the export request, or None if it fails.
    """
    try:
        payload = {"format": format}
        endpoint = f"/{id}/export"
        return self.post(endpoint, json=payload)
    except Exception as e:
        self.logger.error(f"Failed to export file for Model({id}): {e}")

predict

predict(id: str, image: str, config: Dict[str, Any]) -> Optional[Response]

Perform a prediction using the specified image and configuration.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `id` | `str` | Unique identifier for the model to use for prediction. | required |
| `image` | `str` | Image path for prediction. | required |
| `config` | `Dict[str, Any]` | Configuration parameters for the prediction. | required |

Returns:

| Type | Description |
| --- | --- |
| `Optional[Response]` | Response object from the predict request, or None if the request fails. |

Source code in hub_sdk/base/server_clients.py
def predict(self, id: str, image: str, config: Dict[str, Any]) -> Optional[Response]:
    """
    Perform a prediction using the specified image and configuration.

    Args:
        id (str): Unique identifier for the model to use for prediction.
        image (str): Image path for prediction.
        config (Dict[str, Any]): Configuration parameters for the prediction.

    Returns:
        (Optional[Response]): Response object from the predict request, or None if the request fails.
    """
    try:
        base_path = os.getcwd()
        image_path = os.path.join(base_path, image)

        if not os.path.isfile(image_path):
            raise FileNotFoundError(f"Image file not found: {image_path}")

        with open(image_path, "rb") as f:
            image_file = f.read()

        files = {"image": image_file}
        endpoint = f"{HUB_API_ROOT}/v1/predict/{id}"
        return self.post(endpoint, files=files, data=config)

    except Exception as e:
        self.logger.error(f"Failed to predict for Model({id}): {e}")

upload_metrics

upload_metrics(id: str, data: dict) -> Optional[Response]

Upload metrics data for a specific model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `id` | `str` | The unique identifier of the model to which the metrics are being uploaded. | required |
| `data` | `dict` | The metrics data to upload. | required |

Returns:

| Type | Description |
| --- | --- |
| `Optional[Response]` | Response object from the upload_metrics request, or None if it fails. |

Source code in hub_sdk/base/server_clients.py
def upload_metrics(self, id: str, data: dict) -> Optional[Response]:
    """
    Upload metrics data for a specific model.

    Args:
        id (str): The unique identifier of the model to which the metrics are being uploaded.
        data (dict): The metrics data to upload.

    Returns:
        (Optional[Response]): Response object from the upload_metrics request, or None if it fails.
    """
    try:
        payload = {"metrics": data, "type": "metrics"}
        endpoint = f"{HUB_API_ROOT}/v1/models/{id}"
        r = self.post(endpoint, json=payload)
        self.logger.debug("Model metrics uploaded.")
        return r
    except Exception as e:
        self.logger.error(f"Failed to upload metrics for Model({id}): {e}")

upload_model

upload_model(id, epoch, weights, is_best=False, map=0.0, final=False)

Upload a model checkpoint to Ultralytics HUB.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `id` | `str` | The unique identifier of the model. | required |
| `epoch` | `int` | The current training epoch. | required |
| `weights` | `str` | Path to the model weights file. | required |
| `is_best` | `bool` | Indicates if the current model is the best one so far. | `False` |
| `map` | `float` | Mean average precision of the model. | `0.0` |
| `final` | `bool` | Indicates if the model is the final model after training. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `Optional[Response]` | Response object from the upload request, or None if it fails. |

Source code in hub_sdk/base/server_clients.py
def upload_model(self, id, epoch, weights, is_best=False, map=0.0, final=False):
    """
    Upload a model checkpoint to Ultralytics HUB.

    Args:
        id (str): The unique identifier of the model.
        epoch (int): The current training epoch.
        weights (str): Path to the model weights file.
        is_best (bool): Indicates if the current model is the best one so far.
        map (float): Mean average precision of the model.
        final (bool): Indicates if the model is the final model after training.

    Returns:
        (Optional[Response]): Response object from the upload request, or None if it fails.
    """
    try:
        # Determine the correct file path
        weights_path = weights if os.path.isabs(weights) else os.path.join(os.getcwd(), weights)

        # Check if the file exists
        if not Path(weights_path).is_file():
            raise FileNotFoundError(f"File not found: {weights_path}")

        with open(weights_path, "rb") as f:
            file = f.read()

        # Prepare the endpoint and data
        endpoint = f"/{id}/upload"
        data = {"epoch": epoch, "type": "final" if final else "epoch"}
        files = {"best.pt": file} if final else {"last.pt": file}
        if final:
            data["map"] = map
        else:
            data["isBest"] = bool(is_best)

        # Perform the POST request
        response = self.post(endpoint, data=data, files=files, stream=True)

        # Log the appropriate message
        msg = "Model optimized weights uploaded." if final else "Model checkpoint weights uploaded."
        self.logger.debug(msg)
        return response
    except Exception as e:
        self.logger.error(f"Failed to upload file for {self.name}: {e}")





hub_sdk.base.server_clients.ProjectUpload

ProjectUpload(headers: dict)

Bases: APIClient

Handle project file uploads to Ultralytics HUB via API requests.

This class manages the uploading of project-related files to Ultralytics HUB, providing methods to handle image uploads for projects.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `name` | `str` | Identifier for the project upload instance. |

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `headers` | `dict` | The headers to use for API requests. | required |
Source code in hub_sdk/base/server_clients.py
def __init__(self, headers: dict):
    """
    Initialize the class with the specified headers.

    Args:
        headers (dict): The headers to use for API requests.
    """
    super().__init__(f"{HUB_API_ROOT}/v1/projects", headers)
    self.name = "project"

upload_image

upload_image(id: str, file: str) -> Optional[Response]

Upload a project image to the hub.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `id` | `str` | The ID of the project to upload the image to. | required |
| `file` | `str` | The path to the image file to upload. | required |

Returns:

| Type | Description |
| --- | --- |
| `Optional[Response]` | Response object from the upload image request, or None if it fails. |

Source code in hub_sdk/base/server_clients.py
def upload_image(self, id: str, file: str) -> Optional[Response]:
    """
    Upload a project image to the hub.

    Args:
        id (str): The ID of the project to upload the image to.
        file (str): The path to the image file to upload.

    Returns:
        (Optional[Response]): Response object from the upload image request, or None if it fails.
    """
    base_path = os.getcwd()
    file_path = os.path.join(base_path, file)
    file_name = os.path.basename(file_path)

    with open(file_path, "rb") as image_file:
        project_image = image_file.read()
    try:
        files = {"file": (file_name, project_image)}
        endpoint = f"/{id}/upload"
        r = self.post(endpoint, files=files)
        self.logger.debug("Project Image uploaded successfully.")
        return r
    except Exception as e:
        self.logger.error(f"Failed to upload image for {self.name}({id}): {str(e)}")





hub_sdk.base.server_clients.DatasetUpload

DatasetUpload(headers: dict)

Bases: APIClient

Manages uploading dataset files to Ultralytics HUB via API requests.

This class handles the uploading of dataset files to Ultralytics HUB, providing methods to manage dataset uploads.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `name` | `str` | Identifier for the dataset upload instance. |

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `headers` | `dict` | The headers to use for API requests. | required |
Source code in hub_sdk/base/server_clients.py
def __init__(self, headers: dict):
    """
    Initialize the class with the specified headers.

    Args:
        headers (dict): The headers to use for API requests.
    """
    super().__init__(f"{HUB_API_ROOT}/v1/datasets", headers)
    self.name = "dataset"

upload_dataset

upload_dataset(id, file) -> Optional[Response]

Upload a dataset file to the hub.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `id` | `str` | The ID of the dataset to upload the file to. | required |
| `file` | `str` | The path to the dataset file to upload. | required |

Returns:

| Type | Description |
| --- | --- |
| `Optional[Response]` | Response object from the upload dataset request, or None if it fails. |

Source code in hub_sdk/base/server_clients.py
def upload_dataset(self, id, file) -> Optional[Response]:
    """
    Upload a dataset file to the hub.

    Args:
        id (str): The ID of the dataset to upload the file to.
        file (str): The path to the dataset file to upload.

    Returns:
        (Optional[Response]): Response object from the upload dataset request, or None if it fails.
    """
    try:
        if Path(f"{file}").is_file():
            with open(file, "rb") as f:
                dataset_file = f.read()
            endpoint = f"/{id}/upload"
            filename = file.split("/")[-1]
            files = {filename: dataset_file}
            r = self.post(endpoint, files=files, stream=True)
            self.logger.debug("Dataset uploaded successfully.")
            return r
    except Exception as e:
        self.logger.error(f"Failed to upload dataset for {self.name}({id}): {str(e)}")





hub_sdk.base.server_clients.is_colab

is_colab()

Check if the current script is running inside a Google Colab notebook.

Returns:

| Type | Description |
| --- | --- |
| `bool` | True if running inside a Colab notebook, False otherwise. |

Source code in hub_sdk/base/server_clients.py
def is_colab():
    """
    Check if the current script is running inside a Google Colab notebook.

    Returns:
        (bool): True if running inside a Colab notebook, False otherwise.
    """
    return "COLAB_RELEASE_TAG" in os.environ or "COLAB_BACKEND_VERSION" in os.environ


