Reference for ultralytics/models/fastsam/model.py
This page is sourced from https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/fastsam/model.py.
class ultralytics.models.fastsam.model.FastSAM
FastSAM(self, model: str | Path = "FastSAM-x.pt")
Bases: Model
FastSAM model interface for segment anything tasks.
This class extends the base Model class to provide specific functionality for the FastSAM (Fast Segment Anything Model) implementation, allowing for efficient and accurate image segmentation with optional prompting support.
Args
| Name | Type | Description | Default |
|---|---|---|---|
| model | str \| Path | Path to the pre-trained FastSAM model file. | "FastSAM-x.pt" |
Attributes
| Name | Type | Description |
|---|---|---|
| model | str | Path to the pre-trained FastSAM model file. |
| task | str | The task type, set to "segment" for FastSAM models. |
Methods
| Name | Description |
|---|---|
| task_map | Returns a dictionary mapping segment task to corresponding predictor and validator classes. |
| predict | Perform segmentation prediction on image or video source. |
Examples
Initialize FastSAM model and run prediction
>>> from ultralytics import FastSAM
>>> model = FastSAM("FastSAM-x.pt")
>>> results = model.predict("ultralytics/assets/bus.jpg")
Run prediction with bounding box prompts
>>> results = model.predict("image.jpg", bboxes=[[100, 100, 200, 200]])
Source code in ultralytics/models/fastsam/model.py
class FastSAM(Model):
"""FastSAM model interface for segment anything tasks.
This class extends the base Model class to provide specific functionality for the FastSAM (Fast Segment Anything
Model) implementation, allowing for efficient and accurate image segmentation with optional prompting support.
Attributes:
model (str): Path to the pre-trained FastSAM model file.
task (str): The task type, set to "segment" for FastSAM models.
Methods:
predict: Perform segmentation prediction on image or video source with optional prompts.
task_map: Returns mapping of segment task to predictor and validator classes.
Examples:
Initialize FastSAM model and run prediction
>>> from ultralytics import FastSAM
>>> model = FastSAM("FastSAM-x.pt")
>>> results = model.predict("ultralytics/assets/bus.jpg")
Run prediction with bounding box prompts
>>> results = model.predict("image.jpg", bboxes=[[100, 100, 200, 200]])
"""
def __init__(self, model: str | Path = "FastSAM-x.pt"):
"""Initialize the FastSAM model with the specified pre-trained weights."""
if str(model) == "FastSAM.pt":
model = "FastSAM-x.pt"
assert Path(model).suffix not in {".yaml", ".yml"}, "FastSAM models only support pre-trained models."
super().__init__(model=model, task="segment")
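As the constructor shows, the generic "FastSAM.pt" name is remapped to the "FastSAM-x.pt" weights, and YAML configuration files are rejected with an AssertionError. A quick sketch of both behaviors:
>>> from ultralytics import FastSAM
>>> model = FastSAM("FastSAM.pt")  # resolved internally to FastSAM-x.pt
>>> FastSAM("fastsam.yaml")  # AssertionError: FastSAM models only support pre-trained models.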
property ultralytics.models.fastsam.model.FastSAM.task_map
def task_map(self) -> dict[str, dict[str, Any]]
Returns a dictionary mapping segment task to corresponding predictor and validator classes.
Source code in ultralytics/models/fastsam/model.py
@property
def task_map(self) -> dict[str, dict[str, Any]]:
"""Returns a dictionary mapping segment task to corresponding predictor and validator classes."""
return {"segment": {"predictor": FastSAMPredictor, "validator": FastSAMValidator}}
method ultralytics.models.fastsam.model.FastSAM.predict
def predict(
self,
source,
stream: bool = False,
bboxes: list | None = None,
points: list | None = None,
labels: list | None = None,
texts: list | None = None,
**kwargs: Any,
)
Perform segmentation prediction on image or video source.
Supports prompted segmentation with bounding boxes, points, labels, and texts. The method packages these prompts and passes them to the parent class predict method for processing.
Args
| Name | Type | Description | Default |
|---|---|---|---|
| source | str \| PIL.Image \| np.ndarray | Input source for prediction, can be a file path, URL, PIL image, or numpy array. | required |
| stream | bool | Whether to enable real-time streaming mode for video inputs. | False |
| bboxes | list, optional | Bounding box coordinates for prompted segmentation in format [[x1, y1, x2, y2]]. | None |
| points | list, optional | Point coordinates for prompted segmentation in format [[x, y]]. | None |
| labels | list, optional | Class labels for prompted segmentation. | None |
| texts | list, optional | Text prompts for segmentation guidance. | None |
| **kwargs | Any | Additional keyword arguments passed to the predictor. | {} |
Returns
| Type | Description |
|---|---|
| list | List of Results objects containing the prediction results. |
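With stream=True the parent predict method yields Results one frame at a time instead of materializing the full list, which keeps memory bounded for long videos (a minimal sketch; video.mp4 is a placeholder path):
>>> for r in model.predict("video.mp4", stream=True):
...     print(r.boxes)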
Source code in ultralytics/models/fastsam/model.py
def predict(
    self,
    source,
    stream: bool = False,
    bboxes: list | None = None,
    points: list | None = None,
    labels: list | None = None,
    texts: list | None = None,
    **kwargs: Any,
):
    """Perform segmentation prediction on image or video source.

    Supports prompted segmentation with bounding boxes, points, labels, and texts. The method packages these prompts
    and passes them to the parent class predict method for processing.

    Args:
        source (str | PIL.Image | np.ndarray): Input source for prediction, can be a file path, URL, PIL image, or
            numpy array.
        stream (bool): Whether to enable real-time streaming mode for video inputs.
        bboxes (list, optional): Bounding box coordinates for prompted segmentation in format [[x1, y1, x2, y2]].
        points (list, optional): Point coordinates for prompted segmentation in format [[x, y]].
        labels (list, optional): Class labels for prompted segmentation.
        texts (list, optional): Text prompts for segmentation guidance.
        **kwargs (Any): Additional keyword arguments passed to the predictor.

    Returns:
        (list): List of Results objects containing the prediction results.
    """
    prompts = dict(bboxes=bboxes, points=points, labels=labels, texts=texts)
    return super().predict(source, stream, prompts=prompts, **kwargs)
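All four prompt arguments are packaged into a single prompts dict before being forwarded, so an unprompted call simply passes None for each. A minimal end-to-end sketch, assuming the bundled bus.jpg asset and the standard Results.masks attribute:
>>> from ultralytics import FastSAM
>>> model = FastSAM("FastSAM-x.pt")
>>> results = model.predict("ultralytics/assets/bus.jpg", bboxes=[[100, 100, 200, 200]])
>>> for r in results:
...     print(len(r.masks))  # number of segmented instances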