
Reference for ultralytics/nn/modules/block.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/block.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!



ultralytics.nn.modules.block.DFL

Bases: Module

Integral module of Distribution Focal Loss (DFL).

Proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391

Source code in ultralytics/nn/modules/block.py
class DFL(nn.Module):
    """
    Integral module of Distribution Focal Loss (DFL).

    Proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391
    """

    def __init__(self, c1=16):
        """Initialize a convolutional layer with a given number of input channels."""
        super().__init__()
        self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
        x = torch.arange(c1, dtype=torch.float)
        self.conv.weight.data[:] = nn.Parameter(x.view(1, c1, 1, 1))
        self.c1 = c1

    def forward(self, x):
        """Applies a transformer layer on input tensor 'x' and returns a tensor."""
        b, _, a = x.shape  # batch, channels, anchors
        return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a)

__init__(c1=16)

Initialize a convolutional layer with a given number of input channels.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1=16):
    """Initialize a convolutional layer with a given number of input channels."""
    super().__init__()
    self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
    x = torch.arange(c1, dtype=torch.float)
    self.conv.weight.data[:] = nn.Parameter(x.view(1, c1, 1, 1))
    self.c1 = c1

forward(x)

Applies a transformer layer on input tensor 'x' and returns a tensor.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Applies a transformer layer on input tensor 'x' and returns a tensor."""
    b, _, a = x.shape  # batch, channels, anchors
    return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a)
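
Example (not part of the upstream reference): a minimal shape-level sketch, assuming the ultralytics package is installed; the input must carry 4 * c1 channels of box-distribution logits per anchor, and the sizes below are only illustrative.

import torch
from ultralytics.nn.modules.block import DFL

dfl = DFL(c1=16)
x = torch.randn(2, 4 * 16, 8400)  # (batch, 4 * c1, anchors) distribution logits
out = dfl(x)                      # (2, 4, 8400): softmax-weighted expectation for each of the 4 box sides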



ultralytics.nn.modules.block.Proto

Bases: Module

YOLOv8 mask Proto module for segmentation models.

Source code in ultralytics/nn/modules/block.py
class Proto(nn.Module):
    """YOLOv8 mask Proto module for segmentation models."""

    def __init__(self, c1, c_=256, c2=32):
        """
        Initializes the YOLOv8 mask Proto module with specified number of protos and masks.

        Input arguments are ch_in, number of protos, number of masks.
        """
        super().__init__()
        self.cv1 = Conv(c1, c_, k=3)
        self.upsample = nn.ConvTranspose2d(c_, c_, 2, 2, 0, bias=True)  # nn.Upsample(scale_factor=2, mode='nearest')
        self.cv2 = Conv(c_, c_, k=3)
        self.cv3 = Conv(c_, c2)

    def forward(self, x):
        """Performs a forward pass through layers using an upsampled input image."""
        return self.cv3(self.cv2(self.upsample(self.cv1(x))))

__init__(c1, c_=256, c2=32)

Initializes the YOLOv8 mask Proto module with specified number of protos and masks.

Input arguments are ch_in, number of protos, number of masks.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c_=256, c2=32):
    """
    Initializes the YOLOv8 mask Proto module with specified number of protos and masks.

    Input arguments are ch_in, number of protos, number of masks.
    """
    super().__init__()
    self.cv1 = Conv(c1, c_, k=3)
    self.upsample = nn.ConvTranspose2d(c_, c_, 2, 2, 0, bias=True)  # nn.Upsample(scale_factor=2, mode='nearest')
    self.cv2 = Conv(c_, c_, k=3)
    self.cv3 = Conv(c_, c2)

forward(x)

Performs a forward pass through layers using an upsampled input image.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Performs a forward pass through layers using an upsampled input image."""
    return self.cv3(self.cv2(self.upsample(self.cv1(x))))
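
Example (illustrative usage sketch, not from the reference): the ConvTranspose2d doubles the spatial resolution, so a c1-channel feature map comes out as c2 prototype masks at 2x size; sizes here are assumptions.

import torch
from ultralytics.nn.modules.block import Proto

proto = Proto(c1=256, c_=256, c2=32)
x = torch.randn(1, 256, 20, 20)   # feature map from the segmentation head
masks = proto(x)                  # (1, 32, 40, 40): 32 prototype masks at double resolution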



ultralytics.nn.modules.block.HGStem

Bases: Module

StemBlock of PPHGNetV2 with 5 convolutions and one maxpool2d.

https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py

Source code in ultralytics/nn/modules/block.py
class HGStem(nn.Module):
    """
    StemBlock of PPHGNetV2 with 5 convolutions and one maxpool2d.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
    """

    def __init__(self, c1, cm, c2):
        """Initialize the SPP layer with input/output channels and specified kernel sizes for max pooling."""
        super().__init__()
        self.stem1 = Conv(c1, cm, 3, 2, act=nn.ReLU())
        self.stem2a = Conv(cm, cm // 2, 2, 1, 0, act=nn.ReLU())
        self.stem2b = Conv(cm // 2, cm, 2, 1, 0, act=nn.ReLU())
        self.stem3 = Conv(cm * 2, cm, 3, 2, act=nn.ReLU())
        self.stem4 = Conv(cm, c2, 1, 1, act=nn.ReLU())
        self.pool = nn.MaxPool2d(kernel_size=2, stride=1, padding=0, ceil_mode=True)

    def forward(self, x):
        """Forward pass of a PPHGNetV2 backbone layer."""
        x = self.stem1(x)
        x = F.pad(x, [0, 1, 0, 1])
        x2 = self.stem2a(x)
        x2 = F.pad(x2, [0, 1, 0, 1])
        x2 = self.stem2b(x2)
        x1 = self.pool(x)
        x = torch.cat([x1, x2], dim=1)
        x = self.stem3(x)
        x = self.stem4(x)
        return x

__init__(c1, cm, c2)

Initialize the SPP layer with input/output channels and specified kernel sizes for max pooling.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, cm, c2):
    """Initialize the SPP layer with input/output channels and specified kernel sizes for max pooling."""
    super().__init__()
    self.stem1 = Conv(c1, cm, 3, 2, act=nn.ReLU())
    self.stem2a = Conv(cm, cm // 2, 2, 1, 0, act=nn.ReLU())
    self.stem2b = Conv(cm // 2, cm, 2, 1, 0, act=nn.ReLU())
    self.stem3 = Conv(cm * 2, cm, 3, 2, act=nn.ReLU())
    self.stem4 = Conv(cm, c2, 1, 1, act=nn.ReLU())
    self.pool = nn.MaxPool2d(kernel_size=2, stride=1, padding=0, ceil_mode=True)

forward(x)

Forward pass of a PPHGNetV2 backbone layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass of a PPHGNetV2 backbone layer."""
    x = self.stem1(x)
    x = F.pad(x, [0, 1, 0, 1])
    x2 = self.stem2a(x)
    x2 = F.pad(x2, [0, 1, 0, 1])
    x2 = self.stem2b(x2)
    x1 = self.pool(x)
    x = torch.cat([x1, x2], dim=1)
    x = self.stem3(x)
    x = self.stem4(x)
    return x
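
Example (illustrative, assuming an RGB input): the two stride-2 convolutions in the stem reduce the spatial size by a factor of 4 overall.

import torch
from ultralytics.nn.modules.block import HGStem

stem = HGStem(c1=3, cm=32, c2=64)
x = torch.randn(1, 3, 640, 640)   # RGB image tensor (sizes are illustrative)
y = stem(x)                       # (1, 64, 160, 160): 4x spatial downsampling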



ultralytics.nn.modules.block.HGBlock

Bases: Module

HG_Block of PPHGNetV2 with 2 convolutions and LightConv.

https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py

Source code in ultralytics/nn/modules/block.py
class HGBlock(nn.Module):
    """
    HG_Block of PPHGNetV2 with 2 convolutions and LightConv.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
    """

    def __init__(self, c1, cm, c2, k=3, n=6, lightconv=False, shortcut=False, act=nn.ReLU()):
        """Initializes a CSP Bottleneck with 1 convolution using specified input and output channels."""
        super().__init__()
        block = LightConv if lightconv else Conv
        self.m = nn.ModuleList(block(c1 if i == 0 else cm, cm, k=k, act=act) for i in range(n))
        self.sc = Conv(c1 + n * cm, c2 // 2, 1, 1, act=act)  # squeeze conv
        self.ec = Conv(c2 // 2, c2, 1, 1, act=act)  # excitation conv
        self.add = shortcut and c1 == c2

    def forward(self, x):
        """Forward pass of a PPHGNetV2 backbone layer."""
        y = [x]
        y.extend(m(y[-1]) for m in self.m)
        y = self.ec(self.sc(torch.cat(y, 1)))
        return y + x if self.add else y

__init__(c1, cm, c2, k=3, n=6, lightconv=False, shortcut=False, act=nn.ReLU())

Initializes a CSP Bottleneck with 1 convolution using specified input and output channels.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, cm, c2, k=3, n=6, lightconv=False, shortcut=False, act=nn.ReLU()):
    """Initializes a CSP Bottleneck with 1 convolution using specified input and output channels."""
    super().__init__()
    block = LightConv if lightconv else Conv
    self.m = nn.ModuleList(block(c1 if i == 0 else cm, cm, k=k, act=act) for i in range(n))
    self.sc = Conv(c1 + n * cm, c2 // 2, 1, 1, act=act)  # squeeze conv
    self.ec = Conv(c2 // 2, c2, 1, 1, act=act)  # excitation conv
    self.add = shortcut and c1 == c2

forward(x)

Forward pass of a PPHGNetV2 backbone layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass of a PPHGNetV2 backbone layer."""
    y = [x]
    y.extend(m(y[-1]) for m in self.m)
    y = self.ec(self.sc(torch.cat(y, 1)))
    return y + x if self.add else y



ultralytics.nn.modules.block.SPP

Bases: Module

Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729.

Source code in ultralytics/nn/modules/block.py
class SPP(nn.Module):
    """Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        """Initialize the SPP layer with input/output channels and pooling kernel sizes."""
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        """Forward pass of the SPP layer, performing spatial pyramid pooling."""
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))

__init__(c1, c2, k=(5, 9, 13))

Initialize the SPP layer with input/output channels and pooling kernel sizes.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, k=(5, 9, 13)):
    """Initialize the SPP layer with input/output channels and pooling kernel sizes."""
    super().__init__()
    c_ = c1 // 2  # hidden channels
    self.cv1 = Conv(c1, c_, 1, 1)
    self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
    self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

forward(x)

Forward pass of the SPP layer, performing spatial pyramid pooling.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass of the SPP layer, performing spatial pyramid pooling."""
    x = self.cv1(x)
    return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))



ultralytics.nn.modules.block.SPPF

Basis: Module

Spatial Pyramid Pooling - Fast (SPPF) laag voor YOLOv5 door Glenn Jocher.

Broncode in ultralytics/nn/modules/block.py
class SPPF(nn.Module):
    """Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher."""

    def __init__(self, c1, c2, k=5):
        """
        Initializes the SPPF layer with given input/output channels and kernel size.

        This module is equivalent to SPP(k=(5, 9, 13)).
        """
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * 4, c2, 1, 1)
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        """Forward pass through Ghost Convolution block."""
        y = [self.cv1(x)]
        y.extend(self.m(y[-1]) for _ in range(3))
        return self.cv2(torch.cat(y, 1))

__init__(c1, c2, k=5)

Initializes the SPPF layer with given input/output channels and kernel size.

This module is equivalent to SPP(k=(5, 9, 13)).

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, k=5):
    """
    Initializes the SPPF layer with given input/output channels and kernel size.

    This module is equivalent to SPP(k=(5, 9, 13)).
    """
    super().__init__()
    c_ = c1 // 2  # hidden channels
    self.cv1 = Conv(c1, c_, 1, 1)
    self.cv2 = Conv(c_ * 4, c2, 1, 1)
    self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

forward(x)

Forward pass through Ghost Convolution block.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through Ghost Convolution block."""
    y = [self.cv1(x)]
    y.extend(self.m(y[-1]) for _ in range(3))
    return self.cv2(torch.cat(y, 1))
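
Example (illustrative usage sketch): three chained 5x5 max-pools give the same receptive fields as SPP(k=(5, 9, 13)) while reusing intermediate results; spatial size is preserved.

import torch
from ultralytics.nn.modules.block import SPPF

x = torch.randn(1, 256, 20, 20)
sppf = SPPF(256, 256, k=5)
y = sppf(x)                       # (1, 256, 20, 20): same spatial size, c2 output channels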



ultralytics.nn.modules.block.C1

Bases: Module

CSP Bottleneck with 1 convolution.

Source code in ultralytics/nn/modules/block.py
class C1(nn.Module):
    """CSP Bottleneck with 1 convolution."""

    def __init__(self, c1, c2, n=1):
        """Initializes the CSP Bottleneck with configurations for 1 convolution with arguments ch_in, ch_out, number."""
        super().__init__()
        self.cv1 = Conv(c1, c2, 1, 1)
        self.m = nn.Sequential(*(Conv(c2, c2, 3) for _ in range(n)))

    def forward(self, x):
        """Applies cross-convolutions to input in the C3 module."""
        y = self.cv1(x)
        return self.m(y) + y

__init__(c1, c2, n=1)

Initializes the CSP Bottleneck with configurations for 1 convolution with arguments ch_in, ch_out, number.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1):
    """Initializes the CSP Bottleneck with configurations for 1 convolution with arguments ch_in, ch_out, number."""
    super().__init__()
    self.cv1 = Conv(c1, c2, 1, 1)
    self.m = nn.Sequential(*(Conv(c2, c2, 3) for _ in range(n)))

forward(x)

Applies cross-convolutions to input in the C3 module.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Applies cross-convolutions to input in the C3 module."""
    y = self.cv1(x)
    return self.m(y) + y



ultralytics.nn.modules.block.C2

Bases: Module

CSP Bottleneck with 2 convolutions.

Source code in ultralytics/nn/modules/block.py
class C2(nn.Module):
    """CSP Bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initializes the CSP Bottleneck with 2 convolutions module with arguments ch_in, ch_out, number, shortcut,
        groups, expansion.
        """
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv(2 * self.c, c2, 1)  # optional act=FReLU(c2)
        # self.attention = ChannelAttention(2 * self.c)  # or SpatialAttention()
        self.m = nn.Sequential(*(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)))

    def forward(self, x):
        """Forward pass through the CSP bottleneck with 2 convolutions."""
        a, b = self.cv1(x).chunk(2, 1)
        return self.cv2(torch.cat((self.m(a), b), 1))

__init__(c1, c2, n=1, shortcut=True, g=1, e=0.5)

Initializes the CSP Bottleneck with 2 convolutions module with arguments ch_in, ch_out, number, shortcut, groups, expansion.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
    """Initializes the CSP Bottleneck with 2 convolutions module with arguments ch_in, ch_out, number, shortcut,
    groups, expansion.
    """
    super().__init__()
    self.c = int(c2 * e)  # hidden channels
    self.cv1 = Conv(c1, 2 * self.c, 1, 1)
    self.cv2 = Conv(2 * self.c, c2, 1)  # optional act=FReLU(c2)
    # self.attention = ChannelAttention(2 * self.c)  # or SpatialAttention()
    self.m = nn.Sequential(*(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)))

forward(x)

Forward pass through the CSP bottleneck with 2 convolutions.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through the CSP bottleneck with 2 convolutions."""
    a, b = self.cv1(x).chunk(2, 1)
    return self.cv2(torch.cat((self.m(a), b), 1))



ultralytics.nn.modules.block.C2f

Bases: Module

Faster Implementation of CSP Bottleneck with 2 convolutions.

Source code in ultralytics/nn/modules/block.py
class C2f(nn.Module):
    """Faster Implementation of CSP Bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        """Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
        expansion.
        """
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))

    def forward(self, x):
        """Forward pass through C2f layer."""
        y = list(self.cv1(x).chunk(2, 1))
        y.extend(m(y[-1]) for m in self.m)
        return self.cv2(torch.cat(y, 1))

    def forward_split(self, x):
        """Forward pass using split() instead of chunk()."""
        y = list(self.cv1(x).split((self.c, self.c), 1))
        y.extend(m(y[-1]) for m in self.m)
        return self.cv2(torch.cat(y, 1))

__init__(c1, c2, n=1, shortcut=False, g=1, e=0.5)

Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups, expansion.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
    """Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
    expansion.
    """
    super().__init__()
    self.c = int(c2 * e)  # hidden channels
    self.cv1 = Conv(c1, 2 * self.c, 1, 1)
    self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
    self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))

forward(x)

Forward pass through C2f layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through C2f layer."""
    y = list(self.cv1(x).chunk(2, 1))
    y.extend(m(y[-1]) for m in self.m)
    return self.cv2(torch.cat(y, 1))

forward_split(x)

Forward pass using split() instead of chunk().

Source code in ultralytics/nn/modules/block.py
def forward_split(self, x):
    """Forward pass using split() instead of chunk()."""
    y = list(self.cv1(x).split((self.c, self.c), 1))
    y.extend(m(y[-1]) for m in self.m)
    return self.cv2(torch.cat(y, 1))
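
Example (illustrative usage sketch): output channels follow c2 while the spatial size is unchanged; n controls how many Bottleneck blocks feed the concatenation.

import torch
from ultralytics.nn.modules.block import C2f

m = C2f(c1=64, c2=128, n=2, shortcut=True)
x = torch.randn(1, 64, 80, 80)
y = m(x)                          # (1, 128, 80, 80)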



ultralytics.nn.modules.block.C3

Bases: Module

CSP Bottleneck with 3 convolutions.

Source code in ultralytics/nn/modules/block.py
class C3(nn.Module):
    """CSP Bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize the CSP Bottleneck with given channels, number, shortcut, groups, and expansion values."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=((1, 1), (3, 3)), e=1.0) for _ in range(n)))

    def forward(self, x):
        """Forward pass through the CSP bottleneck with 2 convolutions."""
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))

__init__(c1, c2, n=1, shortcut=True, g=1, e=0.5)

Initialize the CSP Bottleneck with given channels, number, shortcut, groups, and expansion values.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
    """Initialize the CSP Bottleneck with given channels, number, shortcut, groups, and expansion values."""
    super().__init__()
    c_ = int(c2 * e)  # hidden channels
    self.cv1 = Conv(c1, c_, 1, 1)
    self.cv2 = Conv(c1, c_, 1, 1)
    self.cv3 = Conv(2 * c_, c2, 1)  # optional act=FReLU(c2)
    self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=((1, 1), (3, 3)), e=1.0) for _ in range(n)))

forward(x)

Forward pass through the CSP bottleneck with 2 convolutions.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through the CSP bottleneck with 2 convolutions."""
    return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))



ultralytics.nn.modules.block.C3x

Bases: C3

C3 module with cross-convolutions.

Source code in ultralytics/nn/modules/block.py
class C3x(C3):
    """C3 module with cross-convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize C3TR instance and set default parameters."""
        super().__init__(c1, c2, n, shortcut, g, e)
        self.c_ = int(c2 * e)
        self.m = nn.Sequential(*(Bottleneck(self.c_, self.c_, shortcut, g, k=((1, 3), (3, 1)), e=1) for _ in range(n)))

__init__(c1, c2, n=1, shortcut=True, g=1, e=0.5)

Initialize C3TR instance and set default parameters.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
    """Initialize C3TR instance and set default parameters."""
    super().__init__(c1, c2, n, shortcut, g, e)
    self.c_ = int(c2 * e)
    self.m = nn.Sequential(*(Bottleneck(self.c_, self.c_, shortcut, g, k=((1, 3), (3, 1)), e=1) for _ in range(n)))



ultralytics.nn.modules.block.RepC3

Bases: Module

Rep C3.

Source code in ultralytics/nn/modules/block.py
class RepC3(nn.Module):
    """Rep C3."""

    def __init__(self, c1, c2, n=3, e=1.0):
        """Initialize CSP Bottleneck with a single convolution using input channels, output channels, and number."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c2, 1, 1)
        self.cv2 = Conv(c1, c2, 1, 1)
        self.m = nn.Sequential(*[RepConv(c_, c_) for _ in range(n)])
        self.cv3 = Conv(c_, c2, 1, 1) if c_ != c2 else nn.Identity()

    def forward(self, x):
        """Forward pass of RT-DETR neck layer."""
        return self.cv3(self.m(self.cv1(x)) + self.cv2(x))

__init__(c1, c2, n=3, e=1.0)

Initialize CSP Bottleneck with a single convolution using input channels, output channels, and number.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=3, e=1.0):
    """Initialize CSP Bottleneck with a single convolution using input channels, output channels, and number."""
    super().__init__()
    c_ = int(c2 * e)  # hidden channels
    self.cv1 = Conv(c1, c2, 1, 1)
    self.cv2 = Conv(c1, c2, 1, 1)
    self.m = nn.Sequential(*[RepConv(c_, c_) for _ in range(n)])
    self.cv3 = Conv(c_, c2, 1, 1) if c_ != c2 else nn.Identity()

forward(x)

Forward pass of RT-DETR neck layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass of RT-DETR neck layer."""
    return self.cv3(self.m(self.cv1(x)) + self.cv2(x))



ultralytics.nn.modules.block.C3TR

Bases: C3

C3 module with TransformerBlock().

Source code in ultralytics/nn/modules/block.py
class C3TR(C3):
    """C3 module with TransformerBlock()."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize C3Ghost module with GhostBottleneck()."""
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = TransformerBlock(c_, c_, 4, n)

__init__(c1, c2, n=1, shortcut=True, g=1, e=0.5)

Initialize C3Ghost module with GhostBottleneck().

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
    """Initialize C3Ghost module with GhostBottleneck()."""
    super().__init__(c1, c2, n, shortcut, g, e)
    c_ = int(c2 * e)
    self.m = TransformerBlock(c_, c_, 4, n)



ultralytics.nn.modules.block.C3Ghost

Bases: C3

C3 module with GhostBottleneck().

Source code in ultralytics/nn/modules/block.py
class C3Ghost(C3):
    """C3 module with GhostBottleneck()."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize 'SPP' module with various pooling sizes for spatial pyramid pooling."""
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # hidden channels
        self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))

__init__(c1, c2, n=1, shortcut=True, g=1, e=0.5)

Initialize 'SPP' module with various pooling sizes for spatial pyramid pooling.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
    """Initialize 'SPP' module with various pooling sizes for spatial pyramid pooling."""
    super().__init__(c1, c2, n, shortcut, g, e)
    c_ = int(c2 * e)  # hidden channels
    self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))



ultralytics.nn.modules.block.GhostBottleneck

Bases: Module

Ghost Bottleneck https://github.com/huawei-noah/ghostnet.

Source code in ultralytics/nn/modules/block.py
class GhostBottleneck(nn.Module):
    """Ghost Bottleneck https://github.com/huawei-noah/ghostnet."""

    def __init__(self, c1, c2, k=3, s=1):
        """Initializes GhostBottleneck module with arguments ch_in, ch_out, kernel, stride."""
        super().__init__()
        c_ = c2 // 2
        self.conv = nn.Sequential(
            GhostConv(c1, c_, 1, 1),  # pw
            DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
            GhostConv(c_, c2, 1, 1, act=False),  # pw-linear
        )
        self.shortcut = (
            nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
        )

    def forward(self, x):
        """Applies skip connection and concatenation to input tensor."""
        return self.conv(x) + self.shortcut(x)

__init__(c1, c2, k=3, s=1)

Initializes GhostBottleneck module with arguments ch_in, ch_out, kernel, stride.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, k=3, s=1):
    """Initializes GhostBottleneck module with arguments ch_in, ch_out, kernel, stride."""
    super().__init__()
    c_ = c2 // 2
    self.conv = nn.Sequential(
        GhostConv(c1, c_, 1, 1),  # pw
        DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
        GhostConv(c_, c2, 1, 1, act=False),  # pw-linear
    )
    self.shortcut = (
        nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
    )

forward(x)

Applies skip connection and concatenation to input tensor.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Applies skip connection and concatenation to input tensor."""
    return self.conv(x) + self.shortcut(x)



ultralytics.nn.modules.block.Bottleneck

Bases: Module

Standard bottleneck.

Source code in ultralytics/nn/modules/block.py
class Bottleneck(nn.Module):
    """Standard bottleneck."""

    def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
        """Initializes a bottleneck module with given input/output channels, shortcut option, group, kernels, and
        expansion.
        """
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, k[0], 1)
        self.cv2 = Conv(c_, c2, k[1], 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        """'forward()' applies the YOLO FPN to input data."""
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))

__init__(c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5)

Initializes a bottleneck module with given input/output channels, shortcut option, group, kernels, and expansion.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
    """Initializes a bottleneck module with given input/output channels, shortcut option, group, kernels, and
    expansion.
    """
    super().__init__()
    c_ = int(c2 * e)  # hidden channels
    self.cv1 = Conv(c1, c_, k[0], 1)
    self.cv2 = Conv(c_, c2, k[1], 1, g=g)
    self.add = shortcut and c1 == c2

forward(x)

'forward()' applies the YOLO FPN to input data.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """'forward()' applies the YOLO FPN to input data."""
    return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))



ultralytics.nn.modules.block.BottleneckCSP

Bases: Module

CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks.

Source code in ultralytics/nn/modules/block.py
class BottleneckCSP(nn.Module):
    """CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initializes the CSP Bottleneck given arguments for ch_in, ch_out, number, shortcut, groups, expansion."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        """Applies a CSP bottleneck with 3 convolutions."""
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))

__init__(c1, c2, n=1, shortcut=True, g=1, e=0.5)

Initializes the CSP Bottleneck given arguments for ch_in, ch_out, number, shortcut, groups, expansion.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
    """Initializes the CSP Bottleneck given arguments for ch_in, ch_out, number, shortcut, groups, expansion."""
    super().__init__()
    c_ = int(c2 * e)  # hidden channels
    self.cv1 = Conv(c1, c_, 1, 1)
    self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
    self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
    self.cv4 = Conv(2 * c_, c2, 1, 1)
    self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
    self.act = nn.SiLU()
    self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

forward(x)

Applies a CSP bottleneck with 3 convolutions.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Applies a CSP bottleneck with 3 convolutions."""
    y1 = self.cv3(self.m(self.cv1(x)))
    y2 = self.cv2(x)
    return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))



ultralytics.nn.modules.block.ResNetBlock

Bases: Module

ResNet block with standard convolution layers.

Source code in ultralytics/nn/modules/block.py
class ResNetBlock(nn.Module):
    """ResNet block with standard convolution layers."""

    def __init__(self, c1, c2, s=1, e=4):
        """Initialize convolution with given parameters."""
        super().__init__()
        c3 = e * c2
        self.cv1 = Conv(c1, c2, k=1, s=1, act=True)
        self.cv2 = Conv(c2, c2, k=3, s=s, p=1, act=True)
        self.cv3 = Conv(c2, c3, k=1, act=False)
        self.shortcut = nn.Sequential(Conv(c1, c3, k=1, s=s, act=False)) if s != 1 or c1 != c3 else nn.Identity()

    def forward(self, x):
        """Forward pass through the ResNet block."""
        return F.relu(self.cv3(self.cv2(self.cv1(x))) + self.shortcut(x))

__init__(c1, c2, s=1, e=4)

Initialize convolution with given parameters.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, s=1, e=4):
    """Initialize convolution with given parameters."""
    super().__init__()
    c3 = e * c2
    self.cv1 = Conv(c1, c2, k=1, s=1, act=True)
    self.cv2 = Conv(c2, c2, k=3, s=s, p=1, act=True)
    self.cv3 = Conv(c2, c3, k=1, act=False)
    self.shortcut = nn.Sequential(Conv(c1, c3, k=1, s=s, act=False)) if s != 1 or c1 != c3 else nn.Identity()

forward(x)

Forward pass through the ResNet block.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through the ResNet block."""
    return F.relu(self.cv3(self.cv2(self.cv1(x))) + self.shortcut(x))



ultralytics.nn.modules.block.ResNetLayer

Bases: Module

ResNet layer with multiple ResNet blocks.

Source code in ultralytics/nn/modules/block.py
class ResNetLayer(nn.Module):
    """ResNet layer with multiple ResNet blocks."""

    def __init__(self, c1, c2, s=1, is_first=False, n=1, e=4):
        """Initializes the ResNetLayer given arguments."""
        super().__init__()
        self.is_first = is_first

        if self.is_first:
            self.layer = nn.Sequential(
                Conv(c1, c2, k=7, s=2, p=3, act=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            )
        else:
            blocks = [ResNetBlock(c1, c2, s, e=e)]
            blocks.extend([ResNetBlock(e * c2, c2, 1, e=e) for _ in range(n - 1)])
            self.layer = nn.Sequential(*blocks)

    def forward(self, x):
        """Forward pass through the ResNet layer."""
        return self.layer(x)

__init__(c1, c2, s=1, is_first=False, n=1, e=4)

Initializes the ResNetLayer given arguments.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, s=1, is_first=False, n=1, e=4):
    """Initializes the ResNetLayer given arguments."""
    super().__init__()
    self.is_first = is_first

    if self.is_first:
        self.layer = nn.Sequential(
            Conv(c1, c2, k=7, s=2, p=3, act=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
    else:
        blocks = [ResNetBlock(c1, c2, s, e=e)]
        blocks.extend([ResNetBlock(e * c2, c2, 1, e=e) for _ in range(n - 1)])
        self.layer = nn.Sequential(*blocks)

forward(x)

Forward pass through the ResNet layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through the ResNet layer."""
    return self.layer(x)



ultralytics.nn.modules.block.MaxSigmoidAttnBlock

Bases: Module

Max Sigmoid attention block.

Source code in ultralytics/nn/modules/block.py
class MaxSigmoidAttnBlock(nn.Module):
    """Max Sigmoid attention block."""

    def __init__(self, c1, c2, nh=1, ec=128, gc=512, scale=False):
        """Initializes MaxSigmoidAttnBlock with specified arguments."""
        super().__init__()
        self.nh = nh
        self.hc = c2 // nh
        self.ec = Conv(c1, ec, k=1, act=False) if c1 != ec else None
        self.gl = nn.Linear(gc, ec)
        self.bias = nn.Parameter(torch.zeros(nh))
        self.proj_conv = Conv(c1, c2, k=3, s=1, act=False)
        self.scale = nn.Parameter(torch.ones(1, nh, 1, 1)) if scale else 1.0

    def forward(self, x, guide):
        """Forward process."""
        bs, _, h, w = x.shape

        guide = self.gl(guide)
        guide = guide.view(bs, -1, self.nh, self.hc)
        embed = self.ec(x) if self.ec is not None else x
        embed = embed.view(bs, self.nh, self.hc, h, w)

        aw = torch.einsum("bmchw,bnmc->bmhwn", embed, guide)
        aw = aw.max(dim=-1)[0]
        aw = aw / (self.hc**0.5)
        aw = aw + self.bias[None, :, None, None]
        aw = aw.sigmoid() * self.scale

        x = self.proj_conv(x)
        x = x.view(bs, self.nh, -1, h, w)
        x = x * aw.unsqueeze(2)
        return x.view(bs, -1, h, w)

__init__(c1, c2, nh=1, ec=128, gc=512, scale=False)

Initializes MaxSigmoidAttnBlock with specified arguments.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, nh=1, ec=128, gc=512, scale=False):
    """Initializes MaxSigmoidAttnBlock with specified arguments."""
    super().__init__()
    self.nh = nh
    self.hc = c2 // nh
    self.ec = Conv(c1, ec, k=1, act=False) if c1 != ec else None
    self.gl = nn.Linear(gc, ec)
    self.bias = nn.Parameter(torch.zeros(nh))
    self.proj_conv = Conv(c1, c2, k=3, s=1, act=False)
    self.scale = nn.Parameter(torch.ones(1, nh, 1, 1)) if scale else 1.0

forward(x, guide)

Forward process.

Source code in ultralytics/nn/modules/block.py
def forward(self, x, guide):
    """Forward process."""
    bs, _, h, w = x.shape

    guide = self.gl(guide)
    guide = guide.view(bs, -1, self.nh, self.hc)
    embed = self.ec(x) if self.ec is not None else x
    embed = embed.view(bs, self.nh, self.hc, h, w)

    aw = torch.einsum("bmchw,bnmc->bmhwn", embed, guide)
    aw = aw.max(dim=-1)[0]
    aw = aw / (self.hc**0.5)
    aw = aw + self.bias[None, :, None, None]
    aw = aw.sigmoid() * self.scale

    x = self.proj_conv(x)
    x = x.view(bs, self.nh, -1, h, w)
    x = x * aw.unsqueeze(2)
    return x.view(bs, -1, h, w)
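
Example (illustrative, not from the reference): the channel choices below are assumptions picked so the shapes line up; with ec equal to c1 the embedding conv is skipped, and the guide tensor must carry gc features in its last dimension.

import torch
from ultralytics.nn.modules.block import MaxSigmoidAttnBlock

attn = MaxSigmoidAttnBlock(c1=256, c2=256, nh=8, ec=256, gc=512)
x = torch.randn(2, 256, 20, 20)   # image features
guide = torch.randn(2, 32, 512)   # 32 guide (e.g. text) embeddings of width gc=512
y = attn(x, guide)                # (2, 256, 20, 20): features scaled by max-sigmoid attention weights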



ultralytics.nn.modules.block.C2fAttn

Bases: Module

C2f module with an additional attn module.

Source code in ultralytics/nn/modules/block.py
class C2fAttn(nn.Module):
    """C2f module with an additional attn module."""

    def __init__(self, c1, c2, n=1, ec=128, nh=1, gc=512, shortcut=False, g=1, e=0.5):
        """Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
        expansion.
        """
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((3 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))
        self.attn = MaxSigmoidAttnBlock(self.c, self.c, gc=gc, ec=ec, nh=nh)

    def forward(self, x, guide):
        """Forward pass through C2f layer."""
        y = list(self.cv1(x).chunk(2, 1))
        y.extend(m(y[-1]) for m in self.m)
        y.append(self.attn(y[-1], guide))
        return self.cv2(torch.cat(y, 1))

    def forward_split(self, x, guide):
        """Forward pass using split() instead of chunk()."""
        y = list(self.cv1(x).split((self.c, self.c), 1))
        y.extend(m(y[-1]) for m in self.m)
        y.append(self.attn(y[-1], guide))
        return self.cv2(torch.cat(y, 1))

__init__(c1, c2, n=1, ec=128, nh=1, gc=512, shortcut=False, g=1, e=0.5)

Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups, expansion.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, ec=128, nh=1, gc=512, shortcut=False, g=1, e=0.5):
    """Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
    expansion.
    """
    super().__init__()
    self.c = int(c2 * e)  # hidden channels
    self.cv1 = Conv(c1, 2 * self.c, 1, 1)
    self.cv2 = Conv((3 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
    self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))
    self.attn = MaxSigmoidAttnBlock(self.c, self.c, gc=gc, ec=ec, nh=nh)

forward(x, guide)

Forward pass through C2f layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x, guide):
    """Forward pass through C2f layer."""
    y = list(self.cv1(x).chunk(2, 1))
    y.extend(m(y[-1]) for m in self.m)
    y.append(self.attn(y[-1], guide))
    return self.cv2(torch.cat(y, 1))

forward_split(x, guide)

Forward pass using split() instead of chunk().

Source code in ultralytics/nn/modules/block.py
def forward_split(self, x, guide):
    """Forward pass using split() instead of chunk()."""
    y = list(self.cv1(x).split((self.c, self.c), 1))
    y.extend(m(y[-1]) for m in self.m)
    y.append(self.attn(y[-1], guide))
    return self.cv2(torch.cat(y, 1))
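
Example (illustrative assumption): choosing c2=256 makes the hidden width (c2 * e = 128) match the default ec=128, so the attention branch's shapes are consistent; the guide tensor must have gc features in its last dimension.

import torch
from ultralytics.nn.modules.block import C2fAttn

m = C2fAttn(c1=256, c2=256, n=1, ec=128, nh=1, gc=512)
x = torch.randn(2, 256, 20, 20)   # image features
guide = torch.randn(2, 32, 512)   # text/guide embeddings of width gc=512
y = m(x, guide)                   # (2, 256, 20, 20)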



ultralytics.nn.modules.block.ImagePoolingAttn

Bases: Module

ImagePoolingAttn: Enhance the text embeddings with image-aware information.

Source code in ultralytics/nn/modules/block.py
class ImagePoolingAttn(nn.Module):
    """ImagePoolingAttn: Enhance the text embeddings with image-aware information."""

    def __init__(self, ec=256, ch=(), ct=512, nh=8, k=3, scale=False):
        """Initializes ImagePoolingAttn with specified arguments."""
        super().__init__()

        nf = len(ch)
        self.query = nn.Sequential(nn.LayerNorm(ct), nn.Linear(ct, ec))
        self.key = nn.Sequential(nn.LayerNorm(ec), nn.Linear(ec, ec))
        self.value = nn.Sequential(nn.LayerNorm(ec), nn.Linear(ec, ec))
        self.proj = nn.Linear(ec, ct)
        self.scale = nn.Parameter(torch.tensor([0.0]), requires_grad=True) if scale else 1.0
        self.projections = nn.ModuleList([nn.Conv2d(in_channels, ec, kernel_size=1) for in_channels in ch])
        self.im_pools = nn.ModuleList([nn.AdaptiveMaxPool2d((k, k)) for _ in range(nf)])
        self.ec = ec
        self.nh = nh
        self.nf = nf
        self.hc = ec // nh
        self.k = k

    def forward(self, x, text):
        """Executes attention mechanism on input tensor x and guide tensor."""
        bs = x[0].shape[0]
        assert len(x) == self.nf
        num_patches = self.k**2
        x = [pool(proj(x)).view(bs, -1, num_patches) for (x, proj, pool) in zip(x, self.projections, self.im_pools)]
        x = torch.cat(x, dim=-1).transpose(1, 2)
        q = self.query(text)
        k = self.key(x)
        v = self.value(x)

        # q = q.reshape(1, text.shape[1], self.nh, self.hc).repeat(bs, 1, 1, 1)
        q = q.reshape(bs, -1, self.nh, self.hc)
        k = k.reshape(bs, -1, self.nh, self.hc)
        v = v.reshape(bs, -1, self.nh, self.hc)

        aw = torch.einsum("bnmc,bkmc->bmnk", q, k)
        aw = aw / (self.hc**0.5)
        aw = F.softmax(aw, dim=-1)

        x = torch.einsum("bmnk,bkmc->bnmc", aw, v)
        x = self.proj(x.reshape(bs, -1, self.ec))
        return x * self.scale + text

__init__(ec=256, ch=(), ct=512, nh=8, k=3, scale=False)

Initializes ImagePoolingAttn with specified arguments.

Source code in ultralytics/nn/modules/block.py
def __init__(self, ec=256, ch=(), ct=512, nh=8, k=3, scale=False):
    """Initializes ImagePoolingAttn with specified arguments."""
    super().__init__()

    nf = len(ch)
    self.query = nn.Sequential(nn.LayerNorm(ct), nn.Linear(ct, ec))
    self.key = nn.Sequential(nn.LayerNorm(ec), nn.Linear(ec, ec))
    self.value = nn.Sequential(nn.LayerNorm(ec), nn.Linear(ec, ec))
    self.proj = nn.Linear(ec, ct)
    self.scale = nn.Parameter(torch.tensor([0.0]), requires_grad=True) if scale else 1.0
    self.projections = nn.ModuleList([nn.Conv2d(in_channels, ec, kernel_size=1) for in_channels in ch])
    self.im_pools = nn.ModuleList([nn.AdaptiveMaxPool2d((k, k)) for _ in range(nf)])
    self.ec = ec
    self.nh = nh
    self.nf = nf
    self.hc = ec // nh
    self.k = k

forward(x, text)

Executes attention mechanism on input tensor x and guide tensor.

Source code in ultralytics/nn/modules/block.py
def forward(self, x, text):
    """Executes attention mechanism on input tensor x and guide tensor."""
    bs = x[0].shape[0]
    assert len(x) == self.nf
    num_patches = self.k**2
    x = [pool(proj(x)).view(bs, -1, num_patches) for (x, proj, pool) in zip(x, self.projections, self.im_pools)]
    x = torch.cat(x, dim=-1).transpose(1, 2)
    q = self.query(text)
    k = self.key(x)
    v = self.value(x)

    # q = q.reshape(1, text.shape[1], self.nh, self.hc).repeat(bs, 1, 1, 1)
    q = q.reshape(bs, -1, self.nh, self.hc)
    k = k.reshape(bs, -1, self.nh, self.hc)
    v = v.reshape(bs, -1, self.nh, self.hc)

    aw = torch.einsum("bnmc,bkmc->bmnk", q, k)
    aw = aw / (self.hc**0.5)
    aw = F.softmax(aw, dim=-1)

    x = torch.einsum("bmnk,bkmc->bnmc", aw, v)
    x = self.proj(x.reshape(bs, -1, self.ec))
    return x * self.scale + text
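
Example (illustrative sketch): the channel tuple ch and the tensor sizes are assumptions; the list of feature maps must have len(ch) entries whose channel counts match ch, and the text tensor's last dimension must equal ct.

import torch
from ultralytics.nn.modules.block import ImagePoolingAttn

attn = ImagePoolingAttn(ec=256, ch=(128, 256, 512), ct=512, nh=8, k=3)
feats = [torch.randn(2, 128, 80, 80), torch.randn(2, 256, 40, 40), torch.randn(2, 512, 20, 20)]
text = torch.randn(2, 32, 512)    # 32 text embeddings of width ct=512
out = attn(feats, text)           # (2, 32, 512): text embeddings enriched with pooled image features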



ultralytics.nn.modules.block.ContrastiveHead

Bases: Module

Contrastive Head for YOLO-World computes the region-text scores according to the similarity between image and text features.

Source code in ultralytics/nn/modules/block.py
class ContrastiveHead(nn.Module):
    """Contrastive Head for YOLO-World compute the region-text scores according to the similarity between image and text
    features.
    """

    def __init__(self):
        """Initializes ContrastiveHead with specified region-text similarity parameters."""
        super().__init__()
        # NOTE: use -10.0 to keep the init cls loss consistency with other losses
        self.bias = nn.Parameter(torch.tensor([-10.0]))
        self.logit_scale = nn.Parameter(torch.ones([]) * torch.tensor(1 / 0.07).log())

    def forward(self, x, w):
        """Forward function of contrastive learning."""
        x = F.normalize(x, dim=1, p=2)
        w = F.normalize(w, dim=-1, p=2)
        x = torch.einsum("bchw,bkc->bkhw", x, w)
        return x * self.logit_scale.exp() + self.bias

__init__()

Initializes ContrastiveHead with specified region-text similarity parameters.

Source code in ultralytics/nn/modules/block.py
def __init__(self):
    """Initializes ContrastiveHead with specified region-text similarity parameters."""
    super().__init__()
    # NOTE: use -10.0 to keep the init cls loss consistency with other losses
    self.bias = nn.Parameter(torch.tensor([-10.0]))
    self.logit_scale = nn.Parameter(torch.ones([]) * torch.tensor(1 / 0.07).log())

forward(x, w)

Forward function of contrastive learning.

Source code in ultralytics/nn/modules/block.py
def forward(self, x, w):
    """Forward function of contrastive learning."""
    x = F.normalize(x, dim=1, p=2)
    w = F.normalize(w, dim=-1, p=2)
    x = torch.einsum("bchw,bkc->bkhw", x, w)
    return x * self.logit_scale.exp() + self.bias
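
Example (illustrative usage sketch): the image feature channel count must equal the text embedding width; sizes below are assumptions.

import torch
from ultralytics.nn.modules.block import ContrastiveHead

head = ContrastiveHead()
x = torch.randn(2, 256, 20, 20)   # image features
w = torch.randn(2, 80, 256)       # 80 text embeddings of width 256
scores = head(x, w)               # (2, 80, 20, 20): per-location region-text logits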



ultralytics.nn.modules.block.BNContrastiveHead

Bases: Module

Batch Norm Contrastive Head for YOLO-World using batch norm instead of l2-normalization.

Parameters:

Name         Type    Description                                      Default
embed_dims   int     Embed dimensions of text and image features.     required

Source code in ultralytics/nn/modules/block.py
class BNContrastiveHead(nn.Module):
    """
    Batch Norm Contrastive Head for YOLO-World using batch norm instead of l2-normalization.

    Args:
        embed_dims (int): Embed dimensions of text and image features.
    """

    def __init__(self, embed_dims: int):
        """Initialize ContrastiveHead with region-text similarity parameters."""
        super().__init__()
        self.norm = nn.BatchNorm2d(embed_dims)
        # NOTE: use -10.0 to keep the init cls loss consistency with other losses
        self.bias = nn.Parameter(torch.tensor([-10.0]))
        # use -1.0 is more stable
        self.logit_scale = nn.Parameter(-1.0 * torch.ones([]))

    def forward(self, x, w):
        """Forward function of contrastive learning."""
        x = self.norm(x)
        w = F.normalize(w, dim=-1, p=2)
        x = torch.einsum("bchw,bkc->bkhw", x, w)
        return x * self.logit_scale.exp() + self.bias

__init__(embed_dims)

Initialize ContrastiveHead with region-text similarity parameters.

Source code in ultralytics/nn/modules/block.py
def __init__(self, embed_dims: int):
    """Initialize ContrastiveHead with region-text similarity parameters."""
    super().__init__()
    self.norm = nn.BatchNorm2d(embed_dims)
    # NOTE: use -10.0 to keep the init cls loss consistency with other losses
    self.bias = nn.Parameter(torch.tensor([-10.0]))
    # use -1.0 is more stable
    self.logit_scale = nn.Parameter(-1.0 * torch.ones([]))

forward(x, w)

Forward function of contrastive learning.

Source code in ultralytics/nn/modules/block.py
def forward(self, x, w):
    """Forward function of contrastive learning."""
    x = self.norm(x)
    w = F.normalize(w, dim=-1, p=2)
    x = torch.einsum("bchw,bkc->bkhw", x, w)
    return x * self.logit_scale.exp() + self.bias
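
Example (illustrative): same interface as ContrastiveHead, but embed_dims must match the image feature channels since those are batch-normalized instead of L2-normalized.

import torch
from ultralytics.nn.modules.block import BNContrastiveHead

head = BNContrastiveHead(embed_dims=256)
x = torch.randn(2, 256, 20, 20)   # image features
w = torch.randn(2, 80, 256)       # text embeddings of width embed_dims
scores = head(x, w)               # (2, 80, 20, 20)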



ultralytics.nn.modules.block.RepBottleneck

Bases: Bottleneck

Rep bottleneck.

Source code in ultralytics/nn/modules/block.py
class RepBottleneck(Bottleneck):
    """Rep bottleneck."""

    def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
        """Initializes a RepBottleneck module with customizable in/out channels, shortcut option, groups and expansion
        ratio.
        """
        super().__init__(c1, c2, shortcut, g, k, e)
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = RepConv(c1, c_, k[0], 1)

__init__(c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5)

Initializes a RepBottleneck module with customizable in/out channels, shortcut option, groups and expansion ratio.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
    """Initializes a RepBottleneck module with customizable in/out channels, shortcut option, groups and expansion
    ratio.
    """
    super().__init__(c1, c2, shortcut, g, k, e)
    c_ = int(c2 * e)  # hidden channels
    self.cv1 = RepConv(c1, c_, k[0], 1)



ultralytics.nn.modules.block.RepCSP

Bases: C3

Rep CSP Bottleneck with 3 convolutions.

Source code in ultralytics/nn/modules/block.py
class RepCSP(C3):
    """Rep CSP Bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initializes RepCSP layer with given channels, repetitions, shortcut, groups and expansion ratio."""
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # hidden channels
        self.m = nn.Sequential(*(RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

__init__(c1, c2, n=1, shortcut=True, g=1, e=0.5)

Initializes RepCSP layer with given channels, repetitions, shortcut, groups and expansion ratio.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
    """Initializes RepCSP layer with given channels, repetitions, shortcut, groups and expansion ratio."""
    super().__init__(c1, c2, n, shortcut, g, e)
    c_ = int(c2 * e)  # hidden channels
    self.m = nn.Sequential(*(RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))



ultralytics.nn.modules.block.RepNCSPELAN4

Bases: Module

CSP-ELAN.

Source code in ultralytics/nn/modules/block.py
class RepNCSPELAN4(nn.Module):
    """CSP-ELAN."""

    def __init__(self, c1, c2, c3, c4, n=1):
        """Initializes CSP-ELAN layer with specified channel sizes, repetitions, and convolutions."""
        super().__init__()
        self.c = c3 // 2
        self.cv1 = Conv(c1, c3, 1, 1)
        self.cv2 = nn.Sequential(RepCSP(c3 // 2, c4, n), Conv(c4, c4, 3, 1))
        self.cv3 = nn.Sequential(RepCSP(c4, c4, n), Conv(c4, c4, 3, 1))
        self.cv4 = Conv(c3 + (2 * c4), c2, 1, 1)

    def forward(self, x):
        """Forward pass through RepNCSPELAN4 layer."""
        y = list(self.cv1(x).chunk(2, 1))
        y.extend((m(y[-1])) for m in [self.cv2, self.cv3])
        return self.cv4(torch.cat(y, 1))

    def forward_split(self, x):
        """Forward pass using split() instead of chunk()."""
        y = list(self.cv1(x).split((self.c, self.c), 1))
        y.extend(m(y[-1]) for m in [self.cv2, self.cv3])
        return self.cv4(torch.cat(y, 1))

__init__(c1, c2, c3, c4, n=1)

Initializes CSP-ELAN layer with specified channel sizes, repetitions, and convolutions.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, c3, c4, n=1):
    """Initializes CSP-ELAN layer with specified channel sizes, repetitions, and convolutions."""
    super().__init__()
    self.c = c3 // 2
    self.cv1 = Conv(c1, c3, 1, 1)
    self.cv2 = nn.Sequential(RepCSP(c3 // 2, c4, n), Conv(c4, c4, 3, 1))
    self.cv3 = nn.Sequential(RepCSP(c4, c4, n), Conv(c4, c4, 3, 1))
    self.cv4 = Conv(c3 + (2 * c4), c2, 1, 1)

forward(x)

Forward pass through RepNCSPELAN4 layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through RepNCSPELAN4 layer."""
    y = list(self.cv1(x).chunk(2, 1))
    y.extend((m(y[-1])) for m in [self.cv2, self.cv3])
    return self.cv4(torch.cat(y, 1))

forward_split(x)

Forward pass using split() instead of chunk().

Source code in ultralytics/nn/modules/block.py
def forward_split(self, x):
    """Forward pass using split() instead of chunk()."""
    y = list(self.cv1(x).split((self.c, self.c), 1))
    y.extend(m(y[-1]) for m in [self.cv2, self.cv3])
    return self.cv4(torch.cat(y, 1))
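
Example (illustrative channel choices): c3 sets the split width of the first conv and c4 the width of the two RepCSP branches, so the concatenation sees c3 + 2*c4 channels before the final 1x1 conv.

import torch
from ultralytics.nn.modules.block import RepNCSPELAN4

m = RepNCSPELAN4(c1=256, c2=256, c3=128, c4=64, n=1)
x = torch.randn(1, 256, 40, 40)
y = m(x)                          # (1, 256, 40, 40): spatial size unchanged, output channels follow c2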



ultralytics.nn.modules.block.ELAN1

Bases: RepNCSPELAN4

ELAN1 module with 4 convolutions.

Source code in ultralytics/nn/modules/block.py
class ELAN1(RepNCSPELAN4):
    """ELAN1 module with 4 convolutions."""

    def __init__(self, c1, c2, c3, c4):
        """Initializes ELAN1 layer with specified channel sizes."""
        super().__init__(c1, c2, c3, c4)
        self.c = c3 // 2
        self.cv1 = Conv(c1, c3, 1, 1)
        self.cv2 = Conv(c3 // 2, c4, 3, 1)
        self.cv3 = Conv(c4, c4, 3, 1)
        self.cv4 = Conv(c3 + (2 * c4), c2, 1, 1)

__init__(c1, c2, c3, c4)

Initializes ELAN1 layer with specified channel sizes.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, c3, c4):
    """Initializes ELAN1 layer with specified channel sizes."""
    super().__init__(c1, c2, c3, c4)
    self.c = c3 // 2
    self.cv1 = Conv(c1, c3, 1, 1)
    self.cv2 = Conv(c3 // 2, c4, 3, 1)
    self.cv3 = Conv(c4, c4, 3, 1)
    self.cv4 = Conv(c3 + (2 * c4), c2, 1, 1)



ultralytics.nn.modules.block.AConv

Bases: Module

AConv.

Source code in ultralytics/nn/modules/block.py
class AConv(nn.Module):
    """AConv."""

    def __init__(self, c1, c2):
        """Initializes AConv module with convolution layers."""
        super().__init__()
        self.cv1 = Conv(c1, c2, 3, 2, 1)

    def forward(self, x):
        """Forward pass through AConv layer."""
        x = torch.nn.functional.avg_pool2d(x, 2, 1, 0, False, True)
        return self.cv1(x)

__init__(c1, c2)

Initializes AConv module with convolution layers.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2):
    """Initializes AConv module with convolution layers."""
    super().__init__()
    self.cv1 = Conv(c1, c2, 3, 2, 1)

forward(x)

Forward pass through AConv layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through AConv layer."""
    x = torch.nn.functional.avg_pool2d(x, 2, 1, 0, False, True)
    return self.cv1(x)



ultralytics.nn.modules.block.ADown

Bases: Module

ADown.

Source code in ultralytics/nn/modules/block.py
class ADown(nn.Module):
    """ADown."""

    def __init__(self, c1, c2):
        """Initializes ADown module with convolution layers to downsample input from channels c1 to c2."""
        super().__init__()
        self.c = c2 // 2
        self.cv1 = Conv(c1 // 2, self.c, 3, 2, 1)
        self.cv2 = Conv(c1 // 2, self.c, 1, 1, 0)

    def forward(self, x):
        """Forward pass through ADown layer."""
        x = torch.nn.functional.avg_pool2d(x, 2, 1, 0, False, True)
        x1, x2 = x.chunk(2, 1)
        x1 = self.cv1(x1)
        x2 = torch.nn.functional.max_pool2d(x2, 3, 2, 1)
        x2 = self.cv2(x2)
        return torch.cat((x1, x2), 1)

__init__(c1, c2)

Initializes ADown module with convolution layers to downsample input from channels c1 to c2.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2):
    """Initializes ADown module with convolution layers to downsample input from channels c1 to c2."""
    super().__init__()
    self.c = c2 // 2
    self.cv1 = Conv(c1 // 2, self.c, 3, 2, 1)
    self.cv2 = Conv(c1 // 2, self.c, 1, 1, 0)

forward(x)

Forward pass through ADown layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through ADown layer."""
    x = torch.nn.functional.avg_pool2d(x, 2, 1, 0, False, True)
    x1, x2 = x.chunk(2, 1)
    x1 = self.cv1(x1)
    x2 = torch.nn.functional.max_pool2d(x2, 3, 2, 1)
    x2 = self.cv2(x2)
    return torch.cat((x1, x2), 1)
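
Example (illustrative): the input is split channel-wise into an avg-pool/strided-conv branch and a max-pool/1x1-conv branch, each producing c2 // 2 channels at half resolution; sizes below are assumptions.

import torch
from ultralytics.nn.modules.block import ADown

down = ADown(c1=64, c2=128)
x = torch.randn(1, 64, 40, 40)
y = down(x)                       # (1, 128, 20, 20): 2x spatial downsampling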



ultralytics.nn.modules.block.SPPELAN

Bases: Module

SPP-ELAN.

Source code in ultralytics/nn/modules/block.py
class SPPELAN(nn.Module):
    """SPP-ELAN."""

    def __init__(self, c1, c2, c3, k=5):
        """Initializes SPP-ELAN block with convolution and max pooling layers for spatial pyramid pooling."""
        super().__init__()
        self.c = c3
        self.cv1 = Conv(c1, c3, 1, 1)
        self.cv2 = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
        self.cv3 = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
        self.cv4 = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
        self.cv5 = Conv(4 * c3, c2, 1, 1)

    def forward(self, x):
        """Forward pass through SPPELAN layer."""
        y = [self.cv1(x)]
        y.extend(m(y[-1]) for m in [self.cv2, self.cv3, self.cv4])
        return self.cv5(torch.cat(y, 1))

__init__(c1, c2, c3, k=5)

Initializes SPP-ELAN block with convolution and max pooling layers for spatial pyramid pooling.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, c3, k=5):
    """Initializes SPP-ELAN block with convolution and max pooling layers for spatial pyramid pooling."""
    super().__init__()
    self.c = c3
    self.cv1 = Conv(c1, c3, 1, 1)
    self.cv2 = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
    self.cv3 = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
    self.cv4 = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
    self.cv5 = Conv(4 * c3, c2, 1, 1)

forward(x)

Forward pass through SPPELAN layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through SPPELAN layer."""
    y = [self.cv1(x)]
    y.extend(m(y[-1]) for m in [self.cv2, self.cv3, self.cv4])
    return self.cv5(torch.cat(y, 1))
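
Example

A minimal usage sketch (not from the upstream documentation), assuming SPPELAN is importable as documented here. The three max-pool layers use stride 1 with matching padding, so only the channel count changes, from c1 to c2 via the hidden width c3:

import torch
from ultralytics.nn.modules.block import SPPELAN

m = SPPELAN(256, 256, 128, k=5)    # c1=256, c2=256, hidden width c3=128
x = torch.randn(1, 256, 20, 20)
y = m(x)                           # cv1 output plus three chained max-pools -> 4*c3 channels -> cv5
print(y.shape)                     # expected: torch.Size([1, 256, 20, 20])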



ultralytics.nn.modules.block.CBLinear

Bases: Module

CBLinear.

Source code in ultralytics/nn/modules/block.py
class CBLinear(nn.Module):
    """CBLinear."""

    def __init__(self, c1, c2s, k=1, s=1, p=None, g=1):
        """Initializes the CBLinear module, passing inputs unchanged."""
        super(CBLinear, self).__init__()
        self.c2s = c2s
        self.conv = nn.Conv2d(c1, sum(c2s), k, s, autopad(k, p), groups=g, bias=True)

    def forward(self, x):
        """Forward pass through CBLinear layer."""
        return self.conv(x).split(self.c2s, dim=1)

__init__(c1, c2s, k=1, s=1, p=None, g=1)

Initializes the CBLinear module, passing inputs unchanged.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2s, k=1, s=1, p=None, g=1):
    """Initializes the CBLinear module, passing inputs unchanged."""
    super(CBLinear, self).__init__()
    self.c2s = c2s
    self.conv = nn.Conv2d(c1, sum(c2s), k, s, autopad(k, p), groups=g, bias=True)

forward(x)

Forward pass through CBLinear layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """Forward pass through CBLinear layer."""
    return self.conv(x).split(self.c2s, dim=1)
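
Example

A minimal usage sketch (not from the upstream documentation), assuming CBLinear is importable as documented here. A single convolution produces sum(c2s) channels, which are then split into one tensor per entry of c2s:

import torch
from ultralytics.nn.modules.block import CBLinear

m = CBLinear(256, [64, 128, 256])  # one conv to 448 channels, split into 64 / 128 / 256
x = torch.randn(1, 256, 40, 40)
outs = m(x)                        # tuple of three tensors, same spatial size as the input
print([o.shape[1] for o in outs])  # expected: [64, 128, 256]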



ultralytics.nn.modules.block.CBFuse

Bases: Module

CBFuse.

Source code in ultralytics/nn/modules/block.py
class CBFuse(nn.Module):
    """CBFuse."""

    def __init__(self, idx):
        """Initializes CBFuse module with layer index for selective feature fusion."""
        super(CBFuse, self).__init__()
        self.idx = idx

    def forward(self, xs):
        """Forward pass through CBFuse layer."""
        target_size = xs[-1].shape[2:]
        res = [F.interpolate(x[self.idx[i]], size=target_size, mode="nearest") for i, x in enumerate(xs[:-1])]
        return torch.sum(torch.stack(res + xs[-1:]), dim=0)

__init__(idx)

Initializes CBFuse module with layer index for selective feature fusion.

Source code in ultralytics/nn/modules/block.py
def __init__(self, idx):
    """Initializes CBFuse module with layer index for selective feature fusion."""
    super(CBFuse, self).__init__()
    self.idx = idx

forward(xs)

Forward pass through CBFuse layer.

Source code in ultralytics/nn/modules/block.py
def forward(self, xs):
    """Forward pass through CBFuse layer."""
    target_size = xs[-1].shape[2:]
    res = [F.interpolate(x[self.idx[i]], size=target_size, mode="nearest") for i, x in enumerate(xs[:-1])]
    return torch.sum(torch.stack(res + xs[-1:]), dim=0)
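
Example

A minimal sketch of how CBFuse is typically fed with CBLinear outputs (channel sizes here are hypothetical; both blocks are assumed importable as documented here). idx[i] selects which split of the i-th input to use; each selected tensor must match the channel count of the last input and is resized to its spatial size before the element-wise sum:

import torch
from ultralytics.nn.modules.block import CBLinear, CBFuse

la = CBLinear(128, [64, 128])           # splits a 128-channel map into 64 + 128 channels
lb = CBLinear(256, [64, 128, 256])      # splits a 256-channel map into 64 + 128 + 256 channels
fa = la(torch.randn(1, 128, 80, 80))
fb = lb(torch.randn(1, 256, 40, 40))
target = torch.randn(1, 64, 20, 20)     # last element sets the output channels and spatial size

fuse = CBFuse(idx=[0, 0])               # take the 64-channel split from both fa and fb
y = fuse([fa, fb, target])              # nearest-interpolate to 20x20, then sum all three maps
print(y.shape)                          # expected: torch.Size([1, 64, 20, 20])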



ultralytics.nn.modules.block.RepVGGDW

Basis: Module

RepVGGDW is een klasse die een dieptegewijs scheidbaar convolutioneel blok vertegenwoordigt in de RepVGG-architectuur.

Broncode in ultralytics/nn/modules/block.py
class RepVGGDW(torch.nn.Module):
    """RepVGGDW is a class that represents a depth wise separable convolutional block in RepVGG architecture."""

    def __init__(self, ed) -> None:
        """Initializes RepVGGDW with depthwise separable convolutional layers for efficient processing."""
        super().__init__()
        self.conv = Conv(ed, ed, 7, 1, 3, g=ed, act=False)
        self.conv1 = Conv(ed, ed, 3, 1, 1, g=ed, act=False)
        self.dim = ed
        self.act = nn.SiLU()

    def forward(self, x):
        """
        Performs a forward pass of the RepVGGDW block.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor after applying the depth wise separable convolution.
        """
        return self.act(self.conv(x) + self.conv1(x))

    def forward_fuse(self, x):
        """
        Performs a forward pass of the RepVGGDW block without fusing the convolutions.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor after applying the depth wise separable convolution.
        """
        return self.act(self.conv(x))

    @torch.no_grad()
    def fuse(self):
        """
        Fuses the convolutional layers in the RepVGGDW block.

        This method fuses the convolutional layers and updates the weights and biases accordingly.
        """
        conv = fuse_conv_and_bn(self.conv.conv, self.conv.bn)
        conv1 = fuse_conv_and_bn(self.conv1.conv, self.conv1.bn)

        conv_w = conv.weight
        conv_b = conv.bias
        conv1_w = conv1.weight
        conv1_b = conv1.bias

        conv1_w = torch.nn.functional.pad(conv1_w, [2, 2, 2, 2])

        final_conv_w = conv_w + conv1_w
        final_conv_b = conv_b + conv1_b

        conv.weight.data.copy_(final_conv_w)
        conv.bias.data.copy_(final_conv_b)

        self.conv = conv
        del self.conv1

__init__(ed)

Initializes RepVGGDW with depthwise separable convolutional layers for efficient processing.

Source code in ultralytics/nn/modules/block.py
def __init__(self, ed) -> None:
    """Initializes RepVGGDW with depthwise separable convolutional layers for efficient processing."""
    super().__init__()
    self.conv = Conv(ed, ed, 7, 1, 3, g=ed, act=False)
    self.conv1 = Conv(ed, ed, 3, 1, 1, g=ed, act=False)
    self.dim = ed
    self.act = nn.SiLU()

forward(x)

Performs a forward pass of the RepVGGDW block.

Parameters:

    x (Tensor): Input tensor. (required)

Returns:

    (Tensor): Output tensor after applying the depthwise separable convolution.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """
    Performs a forward pass of the RepVGGDW block.

    Args:
        x (torch.Tensor): Input tensor.

    Returns:
        (torch.Tensor): Output tensor after applying the depth wise separable convolution.
    """
    return self.act(self.conv(x) + self.conv1(x))

forward_fuse(x)

Performs a forward pass of the RepVGGDW block without fusing the convolutions.

Parameters:

    x (Tensor): Input tensor. (required)

Returns:

    (Tensor): Output tensor after applying the depthwise separable convolution.

Source code in ultralytics/nn/modules/block.py
def forward_fuse(self, x):
    """
    Performs a forward pass of the RepVGGDW block without fusing the convolutions.

    Args:
        x (torch.Tensor): Input tensor.

    Returns:
        (torch.Tensor): Output tensor after applying the depth wise separable convolution.
    """
    return self.act(self.conv(x))

fuse()

Fuses the convolutional layers in the RepVGGDW block.

This method fuses the convolutional layers and updates the weights and biases accordingly.

Source code in ultralytics/nn/modules/block.py
@torch.no_grad()
def fuse(self):
    """
    Fuses the convolutional layers in the RepVGGDW block.

    This method fuses the convolutional layers and updates the weights and biases accordingly.
    """
    conv = fuse_conv_and_bn(self.conv.conv, self.conv.bn)
    conv1 = fuse_conv_and_bn(self.conv1.conv, self.conv1.bn)

    conv_w = conv.weight
    conv_b = conv.bias
    conv1_w = conv1.weight
    conv1_b = conv1.bias

    conv1_w = torch.nn.functional.pad(conv1_w, [2, 2, 2, 2])

    final_conv_w = conv_w + conv1_w
    final_conv_b = conv_b + conv1_b

    conv.weight.data.copy_(final_conv_w)
    conv.bias.data.copy_(final_conv_b)

    self.conv = conv
    del self.conv1
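
Example

A minimal sketch of the train-time and fused inference paths (not from the upstream documentation; RepVGGDW is assumed importable as documented here). After fuse(), the 3x3 depthwise branch is padded into the 7x7 depthwise kernel and BatchNorm is folded in, so forward_fuse should reproduce the two-branch output up to floating-point tolerance:

import torch
from ultralytics.nn.modules.block import RepVGGDW

m = RepVGGDW(64).eval()            # eval mode so BatchNorm uses its running statistics
x = torch.randn(1, 64, 32, 32)
y_train = m(x)                     # 7x7 depthwise + 3x3 depthwise branches, then SiLU

m.fuse()                           # fold BN and merge the 3x3 kernel into the 7x7 kernel
y_fused = m.forward_fuse(x)        # single depthwise convolution, then SiLU
print(torch.allclose(y_train, y_fused, atol=1e-5))  # expected: True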



ultralytics.nn.modules.block.CIB

Bases: Module

Conditional Identity Block (CIB) module.

Parameters:

    c1 (int): Number of input channels. (required)
    c2 (int): Number of output channels. (required)
    shortcut (bool): Whether to add a shortcut connection. Defaults to True.
    e (float): Scaling factor for the hidden channels. Defaults to 0.5.
    lk (bool): Whether to use RepVGGDW for the third convolutional layer. Defaults to False.

Source code in ultralytics/nn/modules/block.py
class CIB(nn.Module):
    """
    Conditional Identity Block (CIB) module.

    Args:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        shortcut (bool, optional): Whether to add a shortcut connection. Defaults to True.
        e (float, optional): Scaling factor for the hidden channels. Defaults to 0.5.
        lk (bool, optional): Whether to use RepVGGDW for the third convolutional layer. Defaults to False.
    """

    def __init__(self, c1, c2, shortcut=True, e=0.5, lk=False):
        """Initializes the custom model with optional shortcut, scaling factor, and RepVGGDW layer."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = nn.Sequential(
            Conv(c1, c1, 3, g=c1),
            Conv(c1, 2 * c_, 1),
            RepVGGDW(2 * c_) if lk else Conv(2 * c_, 2 * c_, 3, g=2 * c_),
            Conv(2 * c_, c2, 1),
            Conv(c2, c2, 3, g=c2),
        )

        self.add = shortcut and c1 == c2

    def forward(self, x):
        """
        Forward pass of the CIB module.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return x + self.cv1(x) if self.add else self.cv1(x)

__init__(c1, c2, shortcut=True, e=0.5, lk=False)

Initializes the custom model with optional shortcut, scaling factor, and RepVGGDW layer.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, shortcut=True, e=0.5, lk=False):
    """Initializes the custom model with optional shortcut, scaling factor, and RepVGGDW layer."""
    super().__init__()
    c_ = int(c2 * e)  # hidden channels
    self.cv1 = nn.Sequential(
        Conv(c1, c1, 3, g=c1),
        Conv(c1, 2 * c_, 1),
        RepVGGDW(2 * c_) if lk else Conv(2 * c_, 2 * c_, 3, g=2 * c_),
        Conv(2 * c_, c2, 1),
        Conv(c2, c2, 3, g=c2),
    )

    self.add = shortcut and c1 == c2

forward(x)

Forward pass of the CIB module.

Parameters:

    x (Tensor): Input tensor. (required)

Returns:

    (Tensor): Output tensor.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """
    Forward pass of the CIB module.

    Args:
        x (torch.Tensor): Input tensor.

    Returns:
        (torch.Tensor): Output tensor.
    """
    return x + self.cv1(x) if self.add else self.cv1(x)
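
Example

A minimal usage sketch (not from the upstream documentation), assuming CIB is importable as documented here. The residual connection is only active when shortcut=True and c1 == c2:

import torch
from ultralytics.nn.modules.block import CIB

m = CIB(128, 128, shortcut=True, lk=False)   # self.add is True because c1 == c2
x = torch.randn(1, 128, 40, 40)
y = m(x)                                     # x + cv1(x)
print(y.shape)                               # expected: torch.Size([1, 128, 40, 40])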



ultralytics.nn.modules.block.C2fCIB

Bases: C2f

C2fCIB class represents a convolutional block with C2f and CIB modules.

Parameters:

    c1 (int): Number of input channels. (required)
    c2 (int): Number of output channels. (required)
    n (int): Number of CIB modules to stack. Defaults to 1.
    shortcut (bool): Whether to use a shortcut connection. Defaults to False.
    lk (bool): Whether to use a local key connection. Defaults to False.
    g (int): Number of groups for grouped convolution. Defaults to 1.
    e (float): Expansion ratio for CIB modules. Defaults to 0.5.

Source code in ultralytics/nn/modules/block.py
class C2fCIB(C2f):
    """
    C2fCIB class represents a convolutional block with C2f and CIB modules.

    Args:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        n (int, optional): Number of CIB modules to stack. Defaults to 1.
        shortcut (bool, optional): Whether to use shortcut connection. Defaults to False.
        lk (bool, optional): Whether to use local key connection. Defaults to False.
        g (int, optional): Number of groups for grouped convolution. Defaults to 1.
        e (float, optional): Expansion ratio for CIB modules. Defaults to 0.5.
    """

    def __init__(self, c1, c2, n=1, shortcut=False, lk=False, g=1, e=0.5):
        """Initializes the module with specified parameters for channel, shortcut, local key, groups, and expansion."""
        super().__init__(c1, c2, n, shortcut, g, e)
        self.m = nn.ModuleList(CIB(self.c, self.c, shortcut, e=1.0, lk=lk) for _ in range(n))

__init__(c1, c2, n=1, shortcut=False, lk=False, g=1, e=0.5)

Initializes the module with specified parameters for channel, shortcut, local key, groups, and expansion.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, n=1, shortcut=False, lk=False, g=1, e=0.5):
    """Initializes the module with specified parameters for channel, shortcut, local key, groups, and expansion."""
    super().__init__(c1, c2, n, shortcut, g, e)
    self.m = nn.ModuleList(CIB(self.c, self.c, shortcut, e=1.0, lk=lk) for _ in range(n))
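
Example

A minimal usage sketch (not from the upstream documentation), assuming C2fCIB is importable as documented here. It behaves like C2f but stacks n CIB bottlenecks instead of the standard ones:

import torch
from ultralytics.nn.modules.block import C2fCIB

m = C2fCIB(256, 256, n=2, shortcut=True, lk=False)
x = torch.randn(1, 256, 40, 40)
y = m(x)                           # C2f split/concat structure with two stacked CIB blocks
print(y.shape)                     # expected: torch.Size([1, 256, 40, 40])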



ultralytics.nn.modules.block.Attention

Bases: Module

Attention module that performs self-attention on the input tensor.

Parameters:

    dim (int): The input tensor dimension. (required)
    num_heads (int): The number of attention heads. Defaults to 8.
    attn_ratio (float): The ratio of the attention key dimension to the head dimension. Defaults to 0.5.

Attributes:

    num_heads (int): The number of attention heads.
    head_dim (int): The dimension of each attention head.
    key_dim (int): The dimension of the attention key.
    scale (float): The scaling factor for the attention scores.
    qkv (Conv): Convolutional layer for computing the query, key, and value.
    proj (Conv): Convolutional layer for projecting the attended values.
    pe (Conv): Convolutional layer for positional encoding.

Source code in ultralytics/nn/modules/block.py
class Attention(nn.Module):
    """
    Attention module that performs self-attention on the input tensor.

    Args:
        dim (int): The input tensor dimension.
        num_heads (int): The number of attention heads.
        attn_ratio (float): The ratio of the attention key dimension to the head dimension.

    Attributes:
        num_heads (int): The number of attention heads.
        head_dim (int): The dimension of each attention head.
        key_dim (int): The dimension of the attention key.
        scale (float): The scaling factor for the attention scores.
        qkv (Conv): Convolutional layer for computing the query, key, and value.
        proj (Conv): Convolutional layer for projecting the attended values.
        pe (Conv): Convolutional layer for positional encoding.
    """

    def __init__(self, dim, num_heads=8, attn_ratio=0.5):
        """Initializes multi-head attention module with query, key, and value convolutions and positional encoding."""
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.key_dim = int(self.head_dim * attn_ratio)
        self.scale = self.key_dim**-0.5
        nh_kd = nh_kd = self.key_dim * num_heads
        h = dim + nh_kd * 2
        self.qkv = Conv(dim, h, 1, act=False)
        self.proj = Conv(dim, dim, 1, act=False)
        self.pe = Conv(dim, dim, 3, 1, g=dim, act=False)

    def forward(self, x):
        """
        Forward pass of the Attention module.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            (torch.Tensor): The output tensor after self-attention.
        """
        B, C, H, W = x.shape
        N = H * W
        qkv = self.qkv(x)
        q, k, v = qkv.view(B, self.num_heads, self.key_dim * 2 + self.head_dim, N).split(
            [self.key_dim, self.key_dim, self.head_dim], dim=2
        )

        attn = (q.transpose(-2, -1) @ k) * self.scale
        attn = attn.softmax(dim=-1)
        x = (v @ attn.transpose(-2, -1)).view(B, C, H, W) + self.pe(v.reshape(B, C, H, W))
        x = self.proj(x)
        return x

__init__(dim, num_heads=8, attn_ratio=0.5)

Initializes the multi-head attention module with query, key, and value convolutions and positional encoding.

Source code in ultralytics/nn/modules/block.py
def __init__(self, dim, num_heads=8, attn_ratio=0.5):
    """Initializes multi-head attention module with query, key, and value convolutions and positional encoding."""
    super().__init__()
    self.num_heads = num_heads
    self.head_dim = dim // num_heads
    self.key_dim = int(self.head_dim * attn_ratio)
    self.scale = self.key_dim**-0.5
    nh_kd = nh_kd = self.key_dim * num_heads
    h = dim + nh_kd * 2
    self.qkv = Conv(dim, h, 1, act=False)
    self.proj = Conv(dim, dim, 1, act=False)
    self.pe = Conv(dim, dim, 3, 1, g=dim, act=False)

forward(x)

Forward pass of the Attention module.

Parameters:

    x (Tensor): The input tensor. (required)

Returns:

    (Tensor): The output tensor after self-attention.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """
    Forward pass of the Attention module.

    Args:
        x (torch.Tensor): The input tensor.

    Returns:
        (torch.Tensor): The output tensor after self-attention.
    """
    B, C, H, W = x.shape
    N = H * W
    qkv = self.qkv(x)
    q, k, v = qkv.view(B, self.num_heads, self.key_dim * 2 + self.head_dim, N).split(
        [self.key_dim, self.key_dim, self.head_dim], dim=2
    )

    attn = (q.transpose(-2, -1) @ k) * self.scale
    attn = attn.softmax(dim=-1)
    x = (v @ attn.transpose(-2, -1)).view(B, C, H, W) + self.pe(v.reshape(B, C, H, W))
    x = self.proj(x)
    return x
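
Example

A minimal usage sketch (not from the upstream documentation), assuming Attention is importable as documented here. dim must be divisible by num_heads; the spatial map is flattened to H*W tokens for the attention computation, so the output shape matches the input:

import torch
from ultralytics.nn.modules.block import Attention

m = Attention(dim=256, num_heads=8, attn_ratio=0.5)  # head_dim=32, key_dim=16
x = torch.randn(1, 256, 20, 20)
y = m(x)                            # self-attention over 400 positions plus positional encoding
print(y.shape)                      # expected: torch.Size([1, 256, 20, 20])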



ultralytics.nn.modules.block.PSA

Bases: Module

Position-wise Spatial Attention module.

Parameters:

    c1 (int): Number of input channels. (required)
    c2 (int): Number of output channels. (required)
    e (float): Expansion factor for the intermediate channels. Defaults to 0.5.

Attributes:

    c (int): Number of intermediate channels.
    cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c.
    cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c.
    attn (Attention): Attention module for spatial attention.
    ffn (nn.Sequential): Feed-forward network module.

Source code in ultralytics/nn/modules/block.py
class PSA(nn.Module):
    """
    Position-wise Spatial Attention module.

    Args:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        e (float): Expansion factor for the intermediate channels. Default is 0.5.

    Attributes:
        c (int): Number of intermediate channels.
        cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c.
        cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c.
        attn (Attention): Attention module for spatial attention.
        ffn (nn.Sequential): Feed-forward network module.
    """

    def __init__(self, c1, c2, e=0.5):
        """Initializes convolution layers, attention module, and feed-forward network with channel reduction."""
        super().__init__()
        assert c1 == c2
        self.c = int(c1 * e)
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv(2 * self.c, c1, 1)

        self.attn = Attention(self.c, attn_ratio=0.5, num_heads=self.c // 64)
        self.ffn = nn.Sequential(Conv(self.c, self.c * 2, 1), Conv(self.c * 2, self.c, 1, act=False))

    def forward(self, x):
        """
        Forward pass of the PSA module.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        a, b = self.cv1(x).split((self.c, self.c), dim=1)
        b = b + self.attn(b)
        b = b + self.ffn(b)
        return self.cv2(torch.cat((a, b), 1))

__init__(c1, c2, e=0.5)

Initializes convolution layers, attention module, and feed-forward network with channel reduction.

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, e=0.5):
    """Initializes convolution layers, attention module, and feed-forward network with channel reduction."""
    super().__init__()
    assert c1 == c2
    self.c = int(c1 * e)
    self.cv1 = Conv(c1, 2 * self.c, 1, 1)
    self.cv2 = Conv(2 * self.c, c1, 1)

    self.attn = Attention(self.c, attn_ratio=0.5, num_heads=self.c // 64)
    self.ffn = nn.Sequential(Conv(self.c, self.c * 2, 1), Conv(self.c * 2, self.c, 1, act=False))

forward(x)

Forward pass of the PSA module.

Parameters:

    x (Tensor): Input tensor. (required)

Returns:

    (Tensor): Output tensor.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """
    Forward pass of the PSA module.

    Args:
        x (torch.Tensor): Input tensor.

    Returns:
        (torch.Tensor): Output tensor.
    """
    a, b = self.cv1(x).split((self.c, self.c), dim=1)
    b = b + self.attn(b)
    b = b + self.ffn(b)
    return self.cv2(torch.cat((a, b), 1))
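
Example

A minimal usage sketch (not from the upstream documentation), assuming PSA is importable as documented here. Note that c1 must equal c2, and the intermediate width int(c1 * e) needs to be at least 64 because the attention head count is computed as self.c // 64:

import torch
from ultralytics.nn.modules.block import PSA

m = PSA(256, 256, e=0.5)           # intermediate width c = 128 -> 2 attention heads
x = torch.randn(1, 256, 20, 20)
y = m(x)                           # one half passes through, the other gets attention + FFN
print(y.shape)                     # expected: torch.Size([1, 256, 20, 20])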



ultralytics.nn.modules.block.SCDown

Bases: Module

Source code in ultralytics/nn/modules/block.py
class SCDown(nn.Module):
    def __init__(self, c1, c2, k, s):
        """
        Spatial Channel Downsample (SCDown) module.

        Args:
            c1 (int): Number of input channels.
            c2 (int): Number of output channels.
            k (int): Kernel size for the convolutional layer.
            s (int): Stride for the convolutional layer.
        """
        super().__init__()
        self.cv1 = Conv(c1, c2, 1, 1)
        self.cv2 = Conv(c2, c2, k=k, s=s, g=c2, act=False)

    def forward(self, x):
        """
        Forward pass of the SCDown module.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor after applying the SCDown module.
        """
        return self.cv2(self.cv1(x))

__init__(c1, c2, k, s)

Spatial Channel Downsample (SCDown) module.

Parameters:

    c1 (int): Number of input channels. (required)
    c2 (int): Number of output channels. (required)
    k (int): Kernel size for the convolutional layer. (required)
    s (int): Stride for the convolutional layer. (required)

Source code in ultralytics/nn/modules/block.py
def __init__(self, c1, c2, k, s):
    """
    Spatial Channel Downsample (SCDown) module.

    Args:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        k (int): Kernel size for the convolutional layer.
        s (int): Stride for the convolutional layer.
    """
    super().__init__()
    self.cv1 = Conv(c1, c2, 1, 1)
    self.cv2 = Conv(c2, c2, k=k, s=s, g=c2, act=False)

forward(x)

Forward pass of the SCDown module.

Parameters:

    x (Tensor): Input tensor. (required)

Returns:

    (Tensor): Output tensor after applying the SCDown module.

Source code in ultralytics/nn/modules/block.py
def forward(self, x):
    """
    Forward pass of the SCDown module.

    Args:
        x (torch.Tensor): Input tensor.

    Returns:
        (torch.Tensor): Output tensor after applying the SCDown module.
    """
    return self.cv2(self.cv1(x))
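
Example

A minimal usage sketch (not from the upstream documentation), assuming SCDown is importable as documented here. A 1x1 convolution changes the channel count, then a depthwise k x k convolution with stride s downsamples spatially:

import torch
from ultralytics.nn.modules.block import SCDown

m = SCDown(256, 128, k=3, s=2)     # pointwise 256->128, then depthwise 3x3 with stride 2
x = torch.randn(1, 256, 40, 40)
y = m(x)
print(y.shape)                     # expected: torch.Size([1, 128, 20, 20])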





Created 2023-11-12, Updated 2024-06-20
Authors: Burhan-Q (2), Laughing-q (3), glenn-jocher (7)