Reference for ultralytics/nn/modules/head.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/head.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!



ultralytics.nn.modules.head.Detect

Bases: Module

YOLOv8 Detect head for detection models.

Source code in ultralytics/nn/modules/head.py
class Detect(nn.Module):
    """YOLOv8 Detect head for detection models."""

    dynamic = False  # force grid reconstruction
    export = False  # export mode
    shape = None
    anchors = torch.empty(0)  # init
    strides = torch.empty(0)  # init

    def __init__(self, nc=80, ch=()):
        """Initializes the YOLOv8 detection layer with specified number of classes and channels."""
        super().__init__()
        self.nc = nc  # number of classes
        self.nl = len(ch)  # number of detection layers
        self.reg_max = 16  # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x)
        self.no = nc + self.reg_max * 4  # number of outputs per anchor
        self.stride = torch.zeros(self.nl)  # strides computed during build
        c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100))  # channels
        self.cv2 = nn.ModuleList(
            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch
        )
        self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
        self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()

    def forward(self, x):
        """Concatenates and returns predicted bounding boxes and class probabilities."""
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
        if self.training:  # Training path
            return x

        # Inference path
        shape = x[0].shape  # BCHW
        x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
        if self.dynamic or self.shape != shape:
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape

        if self.export and self.format in ("saved_model", "pb", "tflite", "edgetpu", "tfjs"):  # avoid TF FlexSplitV ops
            box = x_cat[:, : self.reg_max * 4]
            cls = x_cat[:, self.reg_max * 4 :]
        else:
            box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)

        if self.export and self.format in ("tflite", "edgetpu"):
            # Precompute normalization factor to increase numerical stability
            # See https://github.com/ultralytics/ultralytics/issues/7371
            grid_h = shape[2]
            grid_w = shape[3]
            grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
            norm = self.strides / (self.stride[0] * grid_size)
            dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
        else:
            dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides

        y = torch.cat((dbox, cls.sigmoid()), 1)
        return y if self.export else (y, x)

    def bias_init(self):
        """Initialize Detect() biases, WARNING: requires stride availability."""
        m = self  # self.model[-1]  # Detect() module
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
            a[-1].bias.data[:] = 1.0  # box
            b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)

    def decode_bboxes(self, bboxes, anchors):
        """Decode bounding boxes."""
        return dist2bbox(bboxes, anchors, xywh=True, dim=1)
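
A minimal usage sketch, not taken from the source: the channel sizes and strides below are illustrative assumptions (real values come from the model YAML and are set during model build). In training mode the head returns the raw per-level maps with nc + reg_max * 4 channels each.

import torch
from ultralytics.nn.modules.head import Detect

# Assumed P3/P4/P5 channels and strides for a 640-pixel input
head = Detect(nc=80, ch=(128, 256, 512))
head.stride = torch.tensor([8.0, 16.0, 32.0])  # normally computed during build
head.train()
feats = [torch.randn(1, c, s, s) for c, s in zip((128, 256, 512), (80, 40, 20))]
out = head(feats)
print([o.shape for o in out])  # each (1, 144, h, w): no = nc + reg_max*4 = 144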

__init__(nc=80, ch=())

Initializes the YOLOv8 detection layer with the specified number of classes and channels.

Source code in ultralytics/nn/modules/head.py
def __init__(self, nc=80, ch=()):
    """Initializes the YOLOv8 detection layer with specified number of classes and channels."""
    super().__init__()
    self.nc = nc  # number of classes
    self.nl = len(ch)  # number of detection layers
    self.reg_max = 16  # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x)
    self.no = nc + self.reg_max * 4  # number of outputs per anchor
    self.stride = torch.zeros(self.nl)  # strides computed during build
    c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100))  # channels
    self.cv2 = nn.ModuleList(
        nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch
    )
    self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
    self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()

bias_init()

Initialize Detect() biases. WARNING: requires stride availability.

Source code in ultralytics/nn/modules/head.py
def bias_init(self):
    """Initialize Detect() biases, WARNING: requires stride availability."""
    m = self  # self.model[-1]  # Detect() module
    # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
    # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
    for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
        a[-1].bias.data[:] = 1.0  # box
        b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)
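
A worked example of the class-bias formula above, assuming nc=80 and 640-pixel images: the initial logit encodes a prior of roughly 5/nc objects per class over a (640/s)² grid, so coarser strides (fewer cells) get a less negative bias.

import math

for s in (8, 16, 32):  # typical P3/P4/P5 strides
    print(f"stride {s}: cls bias = {math.log(5 / 80 / (640 / s) ** 2):.2f}")
# stride 8: -11.54, stride 16: -10.15, stride 32: -8.76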

decode_bboxes(bboxes, anchors)

Decode bounding boxes.

Source code in ultralytics/nn/modules/head.py
def decode_bboxes(self, bboxes, anchors):
    """Decode bounding boxes."""
    return dist2bbox(bboxes, anchors, xywh=True, dim=1)

forward(x)

Concatenates and returns predicted bounding boxes and class probabilities.

Source code in ultralytics/nn/modules/head.py
def forward(self, x):
    """Concatenates and returns predicted bounding boxes and class probabilities."""
    for i in range(self.nl):
        x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
    if self.training:  # Training path
        return x

    # Inference path
    shape = x[0].shape  # BCHW
    x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
    if self.dynamic or self.shape != shape:
        self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
        self.shape = shape

    if self.export and self.format in ("saved_model", "pb", "tflite", "edgetpu", "tfjs"):  # avoid TF FlexSplitV ops
        box = x_cat[:, : self.reg_max * 4]
        cls = x_cat[:, self.reg_max * 4 :]
    else:
        box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)

    if self.export and self.format in ("tflite", "edgetpu"):
        # Precompute normalization factor to increase numerical stability
        # See https://github.com/ultralytics/ultralytics/issues/7371
        grid_h = shape[2]
        grid_w = shape[3]
        grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
        norm = self.strides / (self.stride[0] * grid_size)
        dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
    else:
        dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides

    y = torch.cat((dbox, cls.sigmoid()), 1)
    return y if self.export else (y, x)
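
An inference-mode sketch under the same illustrative assumptions (channels and strides are not from the source): the concatenated output carries 4 decoded xywh coordinates plus nc class scores for every anchor point.

import torch
from ultralytics.nn.modules.head import Detect

head = Detect(nc=80, ch=(128, 256, 512)).eval()
head.stride = torch.tensor([8.0, 16.0, 32.0])  # assumed; set by the parser in practice
feats = [torch.randn(1, c, s, s) for c, s in zip((128, 256, 512), (80, 40, 20))]
with torch.no_grad():
    y, raw = head(feats)
print(y.shape)  # (1, 84, 8400): 80*80 + 40*40 + 20*20 = 8400 anchor points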



ultralytics.nn.modules.head.Segment

Bases: Detect

YOLOv8 Segment head for segmentation models.

Source code in ultralytics/nn/modules/head.py
class Segment(Detect):
    """YOLOv8 Segment head for segmentation models."""

    def __init__(self, nc=80, nm=32, npr=256, ch=()):
        """Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers."""
        super().__init__(nc, ch)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.proto = Proto(ch[0], self.npr, self.nm)  # protos
        self.detect = Detect.forward

        c4 = max(ch[0] // 4, self.nm)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)

    def forward(self, x):
        """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
        p = self.proto(x[0])  # mask protos
        bs = p.shape[0]  # batch size

        mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)  # mask coefficients
        x = self.detect(self, x)
        if self.training:
            return x, mc, p
        return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))
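
A hedged usage sketch with assumed dummy channels: during training, Segment returns the Detect outputs plus mask coefficients and prototype masks. The prototypes come from the highest-resolution (P3) level and are upsampled 2x by the Proto module.

import torch
from ultralytics.nn.modules.head import Segment

head = Segment(nc=80, nm=32, npr=256, ch=(128, 256, 512)).train()
feats = [torch.randn(1, c, s, s) for c, s in zip((128, 256, 512), (80, 40, 20))]
x, mc, p = head(feats)
print(mc.shape, p.shape)  # (1, 32, 8400) coefficients, (1, 32, 160, 160) protos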

__init__(nc=80, nm=32, npr=256, ch=())

Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers.

Source code in ultralytics/nn/modules/head.py
def __init__(self, nc=80, nm=32, npr=256, ch=()):
    """Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers."""
    super().__init__(nc, ch)
    self.nm = nm  # number of masks
    self.npr = npr  # number of protos
    self.proto = Proto(ch[0], self.npr, self.nm)  # protos
    self.detect = Detect.forward

    c4 = max(ch[0] // 4, self.nm)
    self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)

forward(x)

Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients.

Source code in ultralytics/nn/modules/head.py
def forward(self, x):
    """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
    p = self.proto(x[0])  # mask protos
    bs = p.shape[0]  # batch size

    mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)  # mask coefficients
    x = self.detect(self, x)
    if self.training:
        return x, mc, p
    return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))



ultralytics.nn.modules.head.OBB

Bases: Detect

YOLOv8 OBB detection head for detection with rotation models.

Source code in ultralytics/nn/modules/head.py
class OBB(Detect):
    """YOLOv8 OBB detection head for detection with rotation models."""

    def __init__(self, nc=80, ne=1, ch=()):
        """Initialize OBB with number of classes `nc` and layer channels `ch`."""
        super().__init__(nc, ch)
        self.ne = ne  # number of extra parameters
        self.detect = Detect.forward

        c4 = max(ch[0] // 4, self.ne)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.ne, 1)) for x in ch)

    def forward(self, x):
        """Concatenates and returns predicted bounding boxes and class probabilities."""
        bs = x[0].shape[0]  # batch size
        angle = torch.cat([self.cv4[i](x[i]).view(bs, self.ne, -1) for i in range(self.nl)], 2)  # OBB theta logits
        # NOTE: set `angle` as an attribute so that `decode_bboxes` could use it.
        angle = (angle.sigmoid() - 0.25) * math.pi  # [-pi/4, 3pi/4]
        # angle = angle.sigmoid() * math.pi / 2  # [0, pi/2]
        if not self.training:
            self.angle = angle
        x = self.detect(self, x)
        if self.training:
            return x, angle
        return torch.cat([x, angle], 1) if self.export else (torch.cat([x[0], angle], 1), (x[1], angle))

    def decode_bboxes(self, bboxes, anchors):
        """Decode rotated bounding boxes."""
        return dist2rbox(bboxes, self.angle, anchors, dim=1)
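
A small numeric check of the angle activation used above, (sigmoid(t) - 0.25) * pi, which confines predicted angles to [-pi/4, 3pi/4):

import math
import torch

t = torch.tensor([-10.0, 0.0, 10.0])  # raw theta logits
print((t.sigmoid() - 0.25) * math.pi)  # ~ [-0.785, 0.785, 2.356] rad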

__init__(nc=80, ne=1, ch=())

Initialize OBB with number of classes nc and layer channels ch.

Source code in ultralytics/nn/modules/head.py
def __init__(self, nc=80, ne=1, ch=()):
    """Initialize OBB with number of classes `nc` and layer channels `ch`."""
    super().__init__(nc, ch)
    self.ne = ne  # number of extra parameters
    self.detect = Detect.forward

    c4 = max(ch[0] // 4, self.ne)
    self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.ne, 1)) for x in ch)

decode_bboxes(bboxes, anchors)

Decode rotated bounding boxes.

Source code in ultralytics/nn/modules/head.py
def decode_bboxes(self, bboxes, anchors):
    """Decode rotated bounding boxes."""
    return dist2rbox(bboxes, self.angle, anchors, dim=1)

forward(x)

Concatenates and returns predicted bounding boxes and class probabilities.

Source code in ultralytics/nn/modules/head.py
def forward(self, x):
    """Concatenates and returns predicted bounding boxes and class probabilities."""
    bs = x[0].shape[0]  # batch size
    angle = torch.cat([self.cv4[i](x[i]).view(bs, self.ne, -1) for i in range(self.nl)], 2)  # OBB theta logits
    # NOTE: set `angle` as an attribute so that `decode_bboxes` could use it.
    angle = (angle.sigmoid() - 0.25) * math.pi  # [-pi/4, 3pi/4]
    # angle = angle.sigmoid() * math.pi / 2  # [0, pi/2]
    if not self.training:
        self.angle = angle
    x = self.detect(self, x)
    if self.training:
        return x, angle
    return torch.cat([x, angle], 1) if self.export else (torch.cat([x[0], angle], 1), (x[1], angle))



ultralytics.nn.modules.head.Pose

Bases: Detect

YOLOv8 Pose head for keypoints models.

Source code in ultralytics/nn/modules/head.py
class Pose(Detect):
    """YOLOv8 Pose head for keypoints models."""

    def __init__(self, nc=80, kpt_shape=(17, 3), ch=()):
        """Initialize YOLO network with default parameters and Convolutional Layers."""
        super().__init__(nc, ch)
        self.kpt_shape = kpt_shape  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
        self.nk = kpt_shape[0] * kpt_shape[1]  # number of keypoints total
        self.detect = Detect.forward

        c4 = max(ch[0] // 4, self.nk)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nk, 1)) for x in ch)

    def forward(self, x):
        """Perform forward pass through YOLO model and return predictions."""
        bs = x[0].shape[0]  # batch size
        kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1)  # (bs, 17*3, h*w)
        x = self.detect(self, x)
        if self.training:
            return x, kpt
        pred_kpt = self.kpts_decode(bs, kpt)
        return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt))

    def kpts_decode(self, bs, kpts):
        """Decodes keypoints."""
        ndim = self.kpt_shape[1]
        if self.export:  # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug
            y = kpts.view(bs, *self.kpt_shape, -1)
            a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides
            if ndim == 3:
                a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
            return a.view(bs, self.nk, -1)
        else:
            y = kpts.clone()
            if ndim == 3:
                y[:, 2::3] = y[:, 2::3].sigmoid()  # sigmoid (WARNING: inplace .sigmoid_() Apple MPS bug)
            y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides
            y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides
            return y
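
A hedged usage sketch with assumed values: a single "person" class and COCO-style 17 keypoints with (x, y, visibility) per point.

import torch
from ultralytics.nn.modules.head import Pose

head = Pose(nc=1, kpt_shape=(17, 3), ch=(128, 256, 512)).train()
feats = [torch.randn(1, c, s, s) for c, s in zip((128, 256, 512), (80, 40, 20))]
x, kpt = head(feats)
print(kpt.shape)  # (1, 51, 8400): nk = 17 * 3 raw values per anchor point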

__init__(nc=80, kpt_shape=(17, 3), ch=())

Initialize YOLO network with default parameters and Convolutional Layers.

Source code in ultralytics/nn/modules/head.py
def __init__(self, nc=80, kpt_shape=(17, 3), ch=()):
    """Initialize YOLO network with default parameters and Convolutional Layers."""
    super().__init__(nc, ch)
    self.kpt_shape = kpt_shape  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
    self.nk = kpt_shape[0] * kpt_shape[1]  # number of keypoints total
    self.detect = Detect.forward

    c4 = max(ch[0] // 4, self.nk)
    self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nk, 1)) for x in ch)

forward(x)

Perform forward pass through YOLO model and return predictions.

Source code in ultralytics/nn/modules/head.py
def forward(self, x):
    """Perform forward pass through YOLO model and return predictions."""
    bs = x[0].shape[0]  # batch size
    kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1)  # (bs, 17*3, h*w)
    x = self.detect(self, x)
    if self.training:
        return x, kpt
    pred_kpt = self.kpts_decode(bs, kpt)
    return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt))

kpts_decode(bs, kpts)

Decodes keypoints.

Source code in ultralytics/nn/modules/head.py
def kpts_decode(self, bs, kpts):
    """Decodes keypoints."""
    ndim = self.kpt_shape[1]
    if self.export:  # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug
        y = kpts.view(bs, *self.kpt_shape, -1)
        a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides
        if ndim == 3:
            a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
        return a.view(bs, self.nk, -1)
    else:
        y = kpts.clone()
        if ndim == 3:
            y[:, 2::3] = y[:, 2::3].sigmoid()  # sigmoid (WARNING: inplace .sigmoid_() Apple MPS bug)
        y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides
        y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides
        return y
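
A worked example of the non-export decode path with assumed numbers: a raw x-value of 0.3 at an anchor whose grid-x center is 10.5 on the stride-8 level decodes to

(0.3 * 2.0 + (10.5 - 0.5)) * 8 = 84.8 pixels

i.e. keypoints are predicted as offsets around the anchor center, doubled, then scaled by the level stride.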



ultralytics.nn.modules.head.Classify

Bases: Module

YOLOv8 classification head, i.e. x(b,c1,20,20) to x(b,c2).

Source code in ultralytics/nn/modules/head.py
class Classify(nn.Module):
    """YOLOv8 classification head, i.e. x(b,c1,20,20) to x(b,c2)."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        """Initializes YOLOv8 classification head with specified input and output channels, kernel size, stride,
        padding, and groups.
        """
        super().__init__()
        c_ = 1280  # efficientnet_b0 size
        self.conv = Conv(c1, c_, k, s, p, g)
        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
        self.drop = nn.Dropout(p=0.0, inplace=True)
        self.linear = nn.Linear(c_, c2)  # to x(b,c2)

    def forward(self, x):
        """Performs a forward pass of the YOLO model on input image data."""
        if isinstance(x, list):
            x = torch.cat(x, 1)
        x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
        return x if self.training else x.softmax(1)
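
A hedged usage sketch (512 input channels and 1000 classes are assumptions): in eval mode the head returns softmax probabilities, and the adaptive pooling makes it independent of the input spatial size.

import torch
from ultralytics.nn.modules.head import Classify

head = Classify(c1=512, c2=1000).eval()
with torch.no_grad():
    probs = head(torch.randn(2, 512, 20, 20))
print(probs.shape, probs.sum(1))  # (2, 1000); each row sums to 1.0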

__init__(c1, c2, k=1, s=1, p=None, g=1)

Initializes YOLOv8 classification head with specified input and output channels, kernel size, stride, padding, and groups.

Source code in ultralytics/nn/modules/head.py
def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
    """Initializes YOLOv8 classification head with specified input and output channels, kernel size, stride,
    padding, and groups.
    """
    super().__init__()
    c_ = 1280  # efficientnet_b0 size
    self.conv = Conv(c1, c_, k, s, p, g)
    self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
    self.drop = nn.Dropout(p=0.0, inplace=True)
    self.linear = nn.Linear(c_, c2)  # to x(b,c2)

forward(x)

Performs a forward pass of the YOLO model on input image data.

Source code in ultralytics/nn/modules/head.py
def forward(self, x):
    """Performs a forward pass of the YOLO model on input image data."""
    if isinstance(x, list):
        x = torch.cat(x, 1)
    x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
    return x if self.training else x.softmax(1)



ultralytics.nn.modules.head.RTDETRDecoder

Bases: Module

Real-Time Deformable Transformer Decoder (RTDETRDecoder) module for object detection.

This decoder module utilizes Transformer architecture along with deformable convolutions to predict bounding boxes and class labels for objects in an image. It integrates features from multiple layers and runs through a series of Transformer decoder layers to output the final predictions.

Source code in ultralytics/nn/modules/head.py
class RTDETRDecoder(nn.Module):
    """
    Real-Time Deformable Transformer Decoder (RTDETRDecoder) module for object detection.

    This decoder module utilizes Transformer architecture along with deformable convolutions to predict bounding boxes
    and class labels for objects in an image. It integrates features from multiple layers and runs through a series of
    Transformer decoder layers to output the final predictions.
    """

    export = False  # export mode

    def __init__(
        self,
        nc=80,
        ch=(512, 1024, 2048),
        hd=256,  # hidden dim
        nq=300,  # num queries
        ndp=4,  # num decoder points
        nh=8,  # num head
        ndl=6,  # num decoder layers
        d_ffn=1024,  # dim of feedforward
        dropout=0.0,
        act=nn.ReLU(),
        eval_idx=-1,
        # Training args
        nd=100,  # num denoising
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learnt_init_query=False,
    ):
        """
        Initializes the RTDETRDecoder module with the given parameters.

        Args:
            nc (int): Number of classes. Default is 80.
            ch (tuple): Channels in the backbone feature maps. Default is (512, 1024, 2048).
            hd (int): Dimension of hidden layers. Default is 256.
            nq (int): Number of query points. Default is 300.
            ndp (int): Number of decoder points. Default is 4.
            nh (int): Number of heads in multi-head attention. Default is 8.
            ndl (int): Number of decoder layers. Default is 6.
            d_ffn (int): Dimension of the feed-forward networks. Default is 1024.
            dropout (float): Dropout rate. Default is 0.
            act (nn.Module): Activation function. Default is nn.ReLU.
            eval_idx (int): Evaluation index. Default is -1.
            nd (int): Number of denoising. Default is 100.
            label_noise_ratio (float): Label noise ratio. Default is 0.5.
            box_noise_scale (float): Box noise scale. Default is 1.0.
            learnt_init_query (bool): Whether to learn initial query embeddings. Default is False.
        """
        super().__init__()
        self.hidden_dim = hd
        self.nhead = nh
        self.nl = len(ch)  # num level
        self.nc = nc
        self.num_queries = nq
        self.num_decoder_layers = ndl

        # Backbone feature projection
        self.input_proj = nn.ModuleList(nn.Sequential(nn.Conv2d(x, hd, 1, bias=False), nn.BatchNorm2d(hd)) for x in ch)
        # NOTE: simplified version but it's not consistent with .pt weights.
        # self.input_proj = nn.ModuleList(Conv(x, hd, act=False) for x in ch)

        # Transformer module
        decoder_layer = DeformableTransformerDecoderLayer(hd, nh, d_ffn, dropout, act, self.nl, ndp)
        self.decoder = DeformableTransformerDecoder(hd, decoder_layer, ndl, eval_idx)

        # Denoising part
        self.denoising_class_embed = nn.Embedding(nc, hd)
        self.num_denoising = nd
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale

        # Decoder embedding
        self.learnt_init_query = learnt_init_query
        if learnt_init_query:
            self.tgt_embed = nn.Embedding(nq, hd)
        self.query_pos_head = MLP(4, 2 * hd, hd, num_layers=2)

        # Encoder head
        self.enc_output = nn.Sequential(nn.Linear(hd, hd), nn.LayerNorm(hd))
        self.enc_score_head = nn.Linear(hd, nc)
        self.enc_bbox_head = MLP(hd, hd, 4, num_layers=3)

        # Decoder head
        self.dec_score_head = nn.ModuleList([nn.Linear(hd, nc) for _ in range(ndl)])
        self.dec_bbox_head = nn.ModuleList([MLP(hd, hd, 4, num_layers=3) for _ in range(ndl)])

        self._reset_parameters()

    def forward(self, x, batch=None):
        """Runs the forward pass of the module, returning bounding box and classification scores for the input."""
        from ultralytics.models.utils.ops import get_cdn_group

        # Input projection and embedding
        feats, shapes = self._get_encoder_input(x)

        # Prepare denoising training
        dn_embed, dn_bbox, attn_mask, dn_meta = get_cdn_group(
            batch,
            self.nc,
            self.num_queries,
            self.denoising_class_embed.weight,
            self.num_denoising,
            self.label_noise_ratio,
            self.box_noise_scale,
            self.training,
        )

        embed, refer_bbox, enc_bboxes, enc_scores = self._get_decoder_input(feats, shapes, dn_embed, dn_bbox)

        # Decoder
        dec_bboxes, dec_scores = self.decoder(
            embed,
            refer_bbox,
            feats,
            shapes,
            self.dec_bbox_head,
            self.dec_score_head,
            self.query_pos_head,
            attn_mask=attn_mask,
        )
        x = dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta
        if self.training:
            return x
        # (bs, 300, 4+nc)
        y = torch.cat((dec_bboxes.squeeze(0), dec_scores.squeeze(0).sigmoid()), -1)
        return y if self.export else (y, x)

    def _generate_anchors(self, shapes, grid_size=0.05, dtype=torch.float32, device="cpu", eps=1e-2):
        """Generates anchor bounding boxes for given shapes with specific grid size and validates them."""
        anchors = []
        for i, (h, w) in enumerate(shapes):
            sy = torch.arange(end=h, dtype=dtype, device=device)
            sx = torch.arange(end=w, dtype=dtype, device=device)
            grid_y, grid_x = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_10 else torch.meshgrid(sy, sx)
            grid_xy = torch.stack([grid_x, grid_y], -1)  # (h, w, 2)

            valid_WH = torch.tensor([w, h], dtype=dtype, device=device)
            grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_WH  # (1, h, w, 2)
            wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0**i)
            anchors.append(torch.cat([grid_xy, wh], -1).view(-1, h * w, 4))  # (1, h*w, 4)

        anchors = torch.cat(anchors, 1)  # (1, h*w*nl, 4)
        valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True)  # 1, h*w*nl, 1
        anchors = torch.log(anchors / (1 - anchors))
        anchors = anchors.masked_fill(~valid_mask, float("inf"))
        return anchors, valid_mask

    def _get_encoder_input(self, x):
        """Processes and returns encoder inputs by getting projection features from input and concatenating them."""
        # Get projection features
        x = [self.input_proj[i](feat) for i, feat in enumerate(x)]
        # Get encoder inputs
        feats = []
        shapes = []
        for feat in x:
            h, w = feat.shape[2:]
            # [b, c, h, w] -> [b, h*w, c]
            feats.append(feat.flatten(2).permute(0, 2, 1))
            # [nl, 2]
            shapes.append([h, w])

        # [b, h*w, c]
        feats = torch.cat(feats, 1)
        return feats, shapes

    def _get_decoder_input(self, feats, shapes, dn_embed=None, dn_bbox=None):
        """Generates and prepares the input required for the decoder from the provided features and shapes."""
        bs = feats.shape[0]
        # Prepare input for decoder
        anchors, valid_mask = self._generate_anchors(shapes, dtype=feats.dtype, device=feats.device)
        features = self.enc_output(valid_mask * feats)  # bs, h*w, 256

        enc_outputs_scores = self.enc_score_head(features)  # (bs, h*w, nc)

        # Query selection
        # (bs, num_queries)
        topk_ind = torch.topk(enc_outputs_scores.max(-1).values, self.num_queries, dim=1).indices.view(-1)
        # (bs, num_queries)
        batch_ind = torch.arange(end=bs, dtype=topk_ind.dtype).unsqueeze(-1).repeat(1, self.num_queries).view(-1)

        # (bs, num_queries, 256)
        top_k_features = features[batch_ind, topk_ind].view(bs, self.num_queries, -1)
        # (bs, num_queries, 4)
        top_k_anchors = anchors[:, topk_ind].view(bs, self.num_queries, -1)

        # Dynamic anchors + static content
        refer_bbox = self.enc_bbox_head(top_k_features) + top_k_anchors

        enc_bboxes = refer_bbox.sigmoid()
        if dn_bbox is not None:
            refer_bbox = torch.cat([dn_bbox, refer_bbox], 1)
        enc_scores = enc_outputs_scores[batch_ind, topk_ind].view(bs, self.num_queries, -1)

        embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(bs, 1, 1) if self.learnt_init_query else top_k_features
        if self.training:
            refer_bbox = refer_bbox.detach()
            if not self.learnt_init_query:
                embeddings = embeddings.detach()
        if dn_embed is not None:
            embeddings = torch.cat([dn_embed, embeddings], 1)

        return embeddings, refer_bbox, enc_bboxes, enc_scores

    # TODO
    def _reset_parameters(self):
        """Initializes or resets the parameters of the model's various components with predefined weights and biases."""
        # Class and bbox head init
        bias_cls = bias_init_with_prob(0.01) / 80 * self.nc
        # NOTE: the weight initialization in `linear_init` would cause NaN when training with custom datasets.
        # linear_init(self.enc_score_head)
        constant_(self.enc_score_head.bias, bias_cls)
        constant_(self.enc_bbox_head.layers[-1].weight, 0.0)
        constant_(self.enc_bbox_head.layers[-1].bias, 0.0)
        for cls_, reg_ in zip(self.dec_score_head, self.dec_bbox_head):
            # linear_init(cls_)
            constant_(cls_.bias, bias_cls)
            constant_(reg_.layers[-1].weight, 0.0)
            constant_(reg_.layers[-1].bias, 0.0)

        linear_init(self.enc_output[0])
        xavier_uniform_(self.enc_output[0].weight)
        if self.learnt_init_query:
            xavier_uniform_(self.tgt_embed.weight)
        xavier_uniform_(self.query_pos_head.layers[0].weight)
        xavier_uniform_(self.query_pos_head.layers[1].weight)
        for layer in self.input_proj:
            xavier_uniform_(layer[0].weight)

__init__(nc=80, ch=(512, 1024, 2048), hd=256, nq=300, ndp=4, nh=8, ndl=6, d_ffn=1024, dropout=0.0, act=nn.ReLU(), eval_idx=-1, nd=100, label_noise_ratio=0.5, box_noise_scale=1.0, learnt_init_query=False)

Initializes the RTDETRDecoder module with the given parameters.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| nc | int | Number of classes. | 80 |
| ch | tuple | Channels in the backbone feature maps. | (512, 1024, 2048) |
| hd | int | Dimension of hidden layers. | 256 |
| nq | int | Number of query points. | 300 |
| ndp | int | Number of decoder points. | 4 |
| nh | int | Number of heads in multi-head attention. | 8 |
| ndl | int | Number of decoder layers. | 6 |
| d_ffn | int | Dimension of the feed-forward networks. | 1024 |
| dropout | float | Dropout rate. | 0.0 |
| act | Module | Activation function. | ReLU() |
| eval_idx | int | Evaluation index. | -1 |
| nd | int | Number of denoising queries. | 100 |
| label_noise_ratio | float | Label noise ratio. | 0.5 |
| box_noise_scale | float | Box noise scale. | 1.0 |
| learnt_init_query | bool | Whether to learn initial query embeddings. | False |
Source code in ultralytics/nn/modules/head.py
def __init__(
    self,
    nc=80,
    ch=(512, 1024, 2048),
    hd=256,  # hidden dim
    nq=300,  # num queries
    ndp=4,  # num decoder points
    nh=8,  # num head
    ndl=6,  # num decoder layers
    d_ffn=1024,  # dim of feedforward
    dropout=0.0,
    act=nn.ReLU(),
    eval_idx=-1,
    # Training args
    nd=100,  # num denoising
    label_noise_ratio=0.5,
    box_noise_scale=1.0,
    learnt_init_query=False,
):
    """
    Initializes the RTDETRDecoder module with the given parameters.

    Args:
        nc (int): Number of classes. Default is 80.
        ch (tuple): Channels in the backbone feature maps. Default is (512, 1024, 2048).
        hd (int): Dimension of hidden layers. Default is 256.
        nq (int): Number of query points. Default is 300.
        ndp (int): Number of decoder points. Default is 4.
        nh (int): Number of heads in multi-head attention. Default is 8.
        ndl (int): Number of decoder layers. Default is 6.
        d_ffn (int): Dimension of the feed-forward networks. Default is 1024.
        dropout (float): Dropout rate. Default is 0.
        act (nn.Module): Activation function. Default is nn.ReLU.
        eval_idx (int): Evaluation index. Default is -1.
        nd (int): Number of denoising. Default is 100.
        label_noise_ratio (float): Label noise ratio. Default is 0.5.
        box_noise_scale (float): Box noise scale. Default is 1.0.
        learnt_init_query (bool): Whether to learn initial query embeddings. Default is False.
    """
    super().__init__()
    self.hidden_dim = hd
    self.nhead = nh
    self.nl = len(ch)  # num level
    self.nc = nc
    self.num_queries = nq
    self.num_decoder_layers = ndl

    # Backbone feature projection
    self.input_proj = nn.ModuleList(nn.Sequential(nn.Conv2d(x, hd, 1, bias=False), nn.BatchNorm2d(hd)) for x in ch)
    # NOTE: simplified version but it's not consistent with .pt weights.
    # self.input_proj = nn.ModuleList(Conv(x, hd, act=False) for x in ch)

    # Transformer module
    decoder_layer = DeformableTransformerDecoderLayer(hd, nh, d_ffn, dropout, act, self.nl, ndp)
    self.decoder = DeformableTransformerDecoder(hd, decoder_layer, ndl, eval_idx)

    # Denoising part
    self.denoising_class_embed = nn.Embedding(nc, hd)
    self.num_denoising = nd
    self.label_noise_ratio = label_noise_ratio
    self.box_noise_scale = box_noise_scale

    # Decoder embedding
    self.learnt_init_query = learnt_init_query
    if learnt_init_query:
        self.tgt_embed = nn.Embedding(nq, hd)
    self.query_pos_head = MLP(4, 2 * hd, hd, num_layers=2)

    # Encoder head
    self.enc_output = nn.Sequential(nn.Linear(hd, hd), nn.LayerNorm(hd))
    self.enc_score_head = nn.Linear(hd, nc)
    self.enc_bbox_head = MLP(hd, hd, 4, num_layers=3)

    # Decoder head
    self.dec_score_head = nn.ModuleList([nn.Linear(hd, nc) for _ in range(ndl)])
    self.dec_bbox_head = nn.ModuleList([MLP(hd, hd, 4, num_layers=3) for _ in range(ndl)])

    self._reset_parameters()

forward(x, batch=None)

Runs the forward pass of the module, returning bounding box and classification scores for the input.

Source code in ultralytics/nn/modules/head.py
def forward(self, x, batch=None):
    """Runs the forward pass of the module, returning bounding box and classification scores for the input."""
    from ultralytics.models.utils.ops import get_cdn_group

    # Input projection and embedding
    feats, shapes = self._get_encoder_input(x)

    # Prepare denoising training
    dn_embed, dn_bbox, attn_mask, dn_meta = get_cdn_group(
        batch,
        self.nc,
        self.num_queries,
        self.denoising_class_embed.weight,
        self.num_denoising,
        self.label_noise_ratio,
        self.box_noise_scale,
        self.training,
    )

    embed, refer_bbox, enc_bboxes, enc_scores = self._get_decoder_input(feats, shapes, dn_embed, dn_bbox)

    # Decoder
    dec_bboxes, dec_scores = self.decoder(
        embed,
        refer_bbox,
        feats,
        shapes,
        self.dec_bbox_head,
        self.dec_score_head,
        self.query_pos_head,
        attn_mask=attn_mask,
    )
    x = dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta
    if self.training:
        return x
    # (bs, 300, 4+nc)
    y = torch.cat((dec_bboxes.squeeze(0), dec_scores.squeeze(0).sigmoid()), -1)
    return y if self.export else (y, x)





Created 2023-11-12, Updated 2024-01-05
Authors: glenn-jocher (4)