
Reference for ultralytics/nn/modules/transformer.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/nn/modules/transformer.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!



ultralytics.nn.modules.transformer.TransformerEncoderLayer

Bases: Module

Defines a single layer of the transformer encoder.

Source code in ultralytics/nn/modules/transformer.py
class TransformerEncoderLayer(nn.Module):
    """Defines a single layer of the transformer encoder."""

    def __init__(self, c1, cm=2048, num_heads=8, dropout=0.0, act=nn.GELU(), normalize_before=False):
        """Initialize the TransformerEncoderLayer with specified parameters."""
        super().__init__()
        from ...utils.torch_utils import TORCH_1_9

        if not TORCH_1_9:
            raise ModuleNotFoundError(
                "TransformerEncoderLayer() requires torch>=1.9 to use nn.MultiheadAttention(batch_first=True)."
            )
        self.ma = nn.MultiheadAttention(c1, num_heads, dropout=dropout, batch_first=True)
        # Implementation of Feedforward model
        self.fc1 = nn.Linear(c1, cm)
        self.fc2 = nn.Linear(cm, c1)

        self.norm1 = nn.LayerNorm(c1)
        self.norm2 = nn.LayerNorm(c1)
        self.dropout = nn.Dropout(dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.act = act
        self.normalize_before = normalize_before

    @staticmethod
    def with_pos_embed(tensor, pos=None):
        """Add position embeddings to the tensor if provided."""
        return tensor if pos is None else tensor + pos

    def forward_post(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Performs forward pass with post-normalization."""
        q = k = self.with_pos_embed(src, pos)
        src2 = self.ma(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.fc2(self.dropout(self.act(self.fc1(src))))
        src = src + self.dropout2(src2)
        return self.norm2(src)

    def forward_pre(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Performs forward pass with pre-normalization."""
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.ma(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.fc2(self.dropout(self.act(self.fc1(src2))))
        return src + self.dropout2(src2)

    def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Forward propagates the input through the encoder module."""
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
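
A minimal usage sketch (not from the source; all sizes are illustrative), assuming the class is imported from ultralytics.nn.modules.transformer and torch>=1.9 is installed. Because the layer uses nn.MultiheadAttention(batch_first=True), inputs follow the [batch, sequence, channels] layout:

```python
import torch

from ultralytics.nn.modules.transformer import TransformerEncoderLayer

layer = TransformerEncoderLayer(c1=256, cm=1024, num_heads=8)  # c1 must be divisible by num_heads
src = torch.rand(2, 100, 256)  # [batch, sequence, c1]
out = layer(src)  # output shape matches the input
print(out.shape)  # torch.Size([2, 100, 256])
```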

__init__(c1, cm=2048, num_heads=8, dropout=0.0, act=nn.GELU(), normalize_before=False)

Initialize the TransformerEncoderLayer with specified parameters.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, c1, cm=2048, num_heads=8, dropout=0.0, act=nn.GELU(), normalize_before=False):
    """Initialize the TransformerEncoderLayer with specified parameters."""
    super().__init__()
    from ...utils.torch_utils import TORCH_1_9

    if not TORCH_1_9:
        raise ModuleNotFoundError(
            "TransformerEncoderLayer() requires torch>=1.9 to use nn.MultiheadAttention(batch_first=True)."
        )
    self.ma = nn.MultiheadAttention(c1, num_heads, dropout=dropout, batch_first=True)
    # Implementation of Feedforward model
    self.fc1 = nn.Linear(c1, cm)
    self.fc2 = nn.Linear(cm, c1)

    self.norm1 = nn.LayerNorm(c1)
    self.norm2 = nn.LayerNorm(c1)
    self.dropout = nn.Dropout(dropout)
    self.dropout1 = nn.Dropout(dropout)
    self.dropout2 = nn.Dropout(dropout)

    self.act = act
    self.normalize_before = normalize_before

forward(src, src_mask=None, src_key_padding_mask=None, pos=None)

Forward propagates the input through the encoder module.

Source code in ultralytics/nn/modules/transformer.py
def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
    """Forward propagates the input through the encoder module."""
    if self.normalize_before:
        return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
    return self.forward_post(src, src_mask, src_key_padding_mask, pos)

forward_post(src, src_mask=None, src_key_padding_mask=None, pos=None)

Performs forward pass with post-normalization.

Source code in ultralytics/nn/modules/transformer.py
def forward_post(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
    """Performs forward pass with post-normalization."""
    q = k = self.with_pos_embed(src, pos)
    src2 = self.ma(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
    src = src + self.dropout1(src2)
    src = self.norm1(src)
    src2 = self.fc2(self.dropout(self.act(self.fc1(src))))
    src = src + self.dropout2(src2)
    return self.norm2(src)

forward_pre(src, src_mask=None, src_key_padding_mask=None, pos=None)

Performs forward pass with pre-normalization.

Source code in ultralytics/nn/modules/transformer.py
def forward_pre(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
    """Performs forward pass with pre-normalization."""
    src2 = self.norm1(src)
    q = k = self.with_pos_embed(src2, pos)
    src2 = self.ma(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
    src = src + self.dropout1(src2)
    src2 = self.norm2(src)
    src2 = self.fc2(self.dropout(self.act(self.fc1(src2))))
    return src + self.dropout2(src2)

with_pos_embed(tensor, pos=None) staticmethod

Add position embeddings to the tensor if provided.

Source code in ultralytics/nn/modules/transformer.py
@staticmethod
def with_pos_embed(tensor, pos=None):
    """Add position embeddings to the tensor if provided."""
    return tensor if pos is None else tensor + pos



ultralytics.nn.modules.transformer.AIFI

Bases: TransformerEncoderLayer

Defines the AIFI transformer layer.

Source code in ultralytics/nn/modules/transformer.py
class AIFI(TransformerEncoderLayer):
    """Defines the AIFI transformer layer."""

    def __init__(self, c1, cm=2048, num_heads=8, dropout=0, act=nn.GELU(), normalize_before=False):
        """Initialize the AIFI instance with specified parameters."""
        super().__init__(c1, cm, num_heads, dropout, act, normalize_before)

    def forward(self, x):
        """Forward pass for the AIFI transformer layer."""
        c, h, w = x.shape[1:]
        pos_embed = self.build_2d_sincos_position_embedding(w, h, c)
        # Flatten [B, C, H, W] to [B, HxW, C]
        x = super().forward(x.flatten(2).permute(0, 2, 1), pos=pos_embed.to(device=x.device, dtype=x.dtype))
        return x.permute(0, 2, 1).view([-1, c, h, w]).contiguous()

    @staticmethod
    def build_2d_sincos_position_embedding(w, h, embed_dim=256, temperature=10000.0):
        """Builds 2D sine-cosine position embedding."""
        assert embed_dim % 4 == 0, "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
        grid_w = torch.arange(w, dtype=torch.float32)
        grid_h = torch.arange(h, dtype=torch.float32)
        grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij")
        pos_dim = embed_dim // 4
        omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
        omega = 1.0 / (temperature**omega)

        out_w = grid_w.flatten()[..., None] @ omega[None]
        out_h = grid_h.flatten()[..., None] @ omega[None]

        return torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], 1)[None]
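
A hedged usage sketch (sizes illustrative): AIFI consumes a [B, C, H, W] feature map, internally flattens it to [B, HxW, C] with a 2D sin-cos position embedding, and restores the spatial shape on output. C must be divisible by 4 (for the embedding) and by num_heads:

```python
import torch

from ultralytics.nn.modules.transformer import AIFI

aifi = AIFI(c1=256, cm=1024, num_heads=8)
x = torch.rand(2, 256, 20, 20)  # e.g. the deepest backbone feature map
y = aifi(x)
print(y.shape)  # torch.Size([2, 256, 20, 20])
```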

__init__(c1, cm=2048, num_heads=8, dropout=0, act=nn.GELU(), normalize_before=False)

Initialize the AIFI instance with specified parameters.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, c1, cm=2048, num_heads=8, dropout=0, act=nn.GELU(), normalize_before=False):
    """Initialize the AIFI instance with specified parameters."""
    super().__init__(c1, cm, num_heads, dropout, act, normalize_before)

build_2d_sincos_position_embedding(w, h, embed_dim=256, temperature=10000.0) staticmethod

Builds 2D sine-cosine position embedding.

Source code in ultralytics/nn/modules/transformer.py
@staticmethod
def build_2d_sincos_position_embedding(w, h, embed_dim=256, temperature=10000.0):
    """Builds 2D sine-cosine position embedding."""
    assert embed_dim % 4 == 0, "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
    grid_w = torch.arange(w, dtype=torch.float32)
    grid_h = torch.arange(h, dtype=torch.float32)
    grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing="ij")
    pos_dim = embed_dim // 4
    omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim
    omega = 1.0 / (temperature**omega)

    out_w = grid_w.flatten()[..., None] @ omega[None]
    out_h = grid_h.flatten()[..., None] @ omega[None]

    return torch.cat([torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], 1)[None]
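
A small shape check (illustrative values): the returned embedding is [1, w*h, embed_dim], with the sin/cos of the width frequencies in the first half of the channels and the height frequencies in the second half:

```python
from ultralytics.nn.modules.transformer import AIFI

pe = AIFI.build_2d_sincos_position_embedding(w=20, h=20, embed_dim=256)
print(pe.shape)  # torch.Size([1, 400, 256])
```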

forward(x)

Forward pass for the AIFI transformer layer.

Source code in ultralytics/nn/modules/transformer.py
def forward(self, x):
    """Forward pass for the AIFI transformer layer."""
    c, h, w = x.shape[1:]
    pos_embed = self.build_2d_sincos_position_embedding(w, h, c)
    # Flatten [B, C, H, W] to [B, HxW, C]
    x = super().forward(x.flatten(2).permute(0, 2, 1), pos=pos_embed.to(device=x.device, dtype=x.dtype))
    return x.permute(0, 2, 1).view([-1, c, h, w]).contiguous()



ultralytics.nn.modules.transformer.TransformerLayer

Bases: Module

Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance).

Source code in ultralytics/nn/modules/transformer.py
class TransformerLayer(nn.Module):
    """Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)."""

    def __init__(self, c, num_heads):
        """Initializes a self-attention mechanism using linear transformations and multi-head attention."""
        super().__init__()
        self.q = nn.Linear(c, c, bias=False)
        self.k = nn.Linear(c, c, bias=False)
        self.v = nn.Linear(c, c, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
        self.fc1 = nn.Linear(c, c, bias=False)
        self.fc2 = nn.Linear(c, c, bias=False)

    def forward(self, x):
        """Apply a transformer block to the input x and return the output."""
        x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
        return self.fc2(self.fc1(x)) + x
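
A minimal sketch (sizes illustrative): unlike TransformerEncoderLayer above, this layer leaves nn.MultiheadAttention at its default sequence-first layout, so inputs are [sequence, batch, channels]:

```python
import torch

from ultralytics.nn.modules.transformer import TransformerLayer

layer = TransformerLayer(c=256, num_heads=8)
x = torch.rand(100, 2, 256)  # [sequence, batch, c]
print(layer(x).shape)  # torch.Size([100, 2, 256])
```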

__init__(c, num_heads)

Initializes a self-attention mechanism using linear transformations and multi-head attention.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, c, num_heads):
    """Initializes a self-attention mechanism using linear transformations and multi-head attention."""
    super().__init__()
    self.q = nn.Linear(c, c, bias=False)
    self.k = nn.Linear(c, c, bias=False)
    self.v = nn.Linear(c, c, bias=False)
    self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
    self.fc1 = nn.Linear(c, c, bias=False)
    self.fc2 = nn.Linear(c, c, bias=False)

forward(x)

Apply a transformer block to the input x and return the output.

Source code in ultralytics/nn/modules/transformer.py
def forward(self, x):
    """Apply a transformer block to the input x and return the output."""
    x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
    return self.fc2(self.fc1(x)) + x



ultralytics.nn.modules.transformer.TransformerBlock

Bases: Module

Vision Transformer https://arxiv.org/abs/2010.11929.

Source code in ultralytics/nn/modules/transformer.py
class TransformerBlock(nn.Module):
    """Vision Transformer https://arxiv.org/abs/2010.11929."""

    def __init__(self, c1, c2, num_heads, num_layers):
        """Initialize a Transformer module with position embedding and specified number of heads and layers."""
        super().__init__()
        self.conv = None
        if c1 != c2:
            self.conv = Conv(c1, c2)
        self.linear = nn.Linear(c2, c2)  # learnable position embedding
        self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
        self.c2 = c2

    def forward(self, x):
        """Forward propagates the input through the bottleneck module."""
        if self.conv is not None:
            x = self.conv(x)
        b, _, w, h = x.shape
        p = x.flatten(2).permute(2, 0, 1)
        return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
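
A hedged usage sketch (sizes illustrative): the block maps [B, c1, H, W] to [B, c2, H, W], inserting a Conv projection only when c1 != c2, and handles the flatten/permute bookkeeping around its stacked TransformerLayers internally:

```python
import torch

from ultralytics.nn.modules.transformer import TransformerBlock

block = TransformerBlock(c1=128, c2=256, num_heads=8, num_layers=2)
x = torch.rand(2, 128, 20, 20)
print(block(x).shape)  # torch.Size([2, 256, 20, 20])
```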

__init__(c1, c2, num_heads, num_layers)

Initialize a Transformer module with position embedding and specified number of heads and layers.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, c1, c2, num_heads, num_layers):
    """Initialize a Transformer module with position embedding and specified number of heads and layers."""
    super().__init__()
    self.conv = None
    if c1 != c2:
        self.conv = Conv(c1, c2)
    self.linear = nn.Linear(c2, c2)  # learnable position embedding
    self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
    self.c2 = c2

forward(x)

Forward propagates the input through the bottleneck module.

Source code in ultralytics/nn/modules/transformer.py
def forward(self, x):
    """Forward propagates the input through the bottleneck module."""
    if self.conv is not None:
        x = self.conv(x)
    b, _, w, h = x.shape
    p = x.flatten(2).permute(2, 0, 1)
    return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)



ultralytics.nn.modules.transformer.MLPBlock

Bases: Module

Implements a single block of a multi-layer perceptron.

Source code in ultralytics/nn/modules/transformer.py
class MLPBlock(nn.Module):
    """Implements a single block of a multi-layer perceptron."""

    def __init__(self, embedding_dim, mlp_dim, act=nn.GELU):
        """Initialize the MLPBlock with specified embedding dimension, MLP dimension, and activation function."""
        super().__init__()
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass for the MLPBlock."""
        return self.lin2(self.act(self.lin1(x)))
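
A minimal sketch (sizes illustrative): a Linear -> activation -> Linear block that expands to mlp_dim internally and preserves embedding_dim:

```python
import torch

from ultralytics.nn.modules.transformer import MLPBlock

mlp = MLPBlock(embedding_dim=256, mlp_dim=1024)
x = torch.rand(2, 100, 256)
print(mlp(x).shape)  # torch.Size([2, 100, 256])
```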

__init__(embedding_dim, mlp_dim, act=nn.GELU)

Initialize the MLPBlock with specified embedding dimension, MLP dimension, and activation function.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, embedding_dim, mlp_dim, act=nn.GELU):
    """Initialize the MLPBlock with specified embedding dimension, MLP dimension, and activation function."""
    super().__init__()
    self.lin1 = nn.Linear(embedding_dim, mlp_dim)
    self.lin2 = nn.Linear(mlp_dim, embedding_dim)
    self.act = act()

forward(x)

Forward pass for the MLPBlock.

Source code in ultralytics/nn/modules/transformer.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Forward pass for the MLPBlock."""
    return self.lin2(self.act(self.lin1(x)))



ultralytics.nn.modules.transformer.MLP

Bases: Module

Implements a simple multi-layer perceptron (also called FFN).

Source code in ultralytics/nn/modules/transformer.py
class MLP(nn.Module):
    """Implements a simple multi-layer perceptron (also called FFN)."""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        """Initialize the MLP with specified input, hidden, output dimensions and number of layers."""
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        """Forward pass for the entire MLP."""
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x
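
A minimal sketch (sizes illustrative): with num_layers=3 this builds input_dim -> hidden_dim -> hidden_dim -> output_dim, applying ReLU after every layer except the last:

```python
import torch

from ultralytics.nn.modules.transformer import MLP

mlp = MLP(input_dim=256, hidden_dim=512, output_dim=4, num_layers=3)  # 256 -> 512 -> 512 -> 4
x = torch.rand(2, 300, 256)
print(mlp(x).shape)  # torch.Size([2, 300, 4])
```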

__init__(input_dim, hidden_dim, output_dim, num_layers)

Initialize the MLP with specified input, hidden, and output dimensions and number of layers.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
    """Initialize the MLP with specified input, hidden, output dimensions and number of layers."""
    super().__init__()
    self.num_layers = num_layers
    h = [hidden_dim] * (num_layers - 1)
    self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

forward(x)

Forward pass for the entire MLP.

Source code in ultralytics/nn/modules/transformer.py
def forward(self, x):
    """Forward pass for the entire MLP."""
    for i, layer in enumerate(self.layers):
        x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
    return x



ultralytics.nn.modules.transformer.LayerNorm2d

Bases: Module

2D Layer Normalization module inspired by Detectron2 and ConvNeXt implementations.

Original implementations in https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py and https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py.

Source code in ultralytics/nn/modules/transformer.py
class LayerNorm2d(nn.Module):
    """
    2D Layer Normalization module inspired by Detectron2 and ConvNeXt implementations.

    Original implementations in
    https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py
    and
    https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py.
    """

    def __init__(self, num_channels, eps=1e-6):
        """Initialize LayerNorm2d with the given parameters."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x):
        """Perform forward pass for 2D layer normalization."""
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        return self.weight[:, None, None] * x + self.bias[:, None, None]
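
A minimal sketch (sizes illustrative): the module normalizes across the channel dimension of a channels-first [B, C, H, W] tensor, whereas nn.LayerNorm expects the normalized dimension to come last:

```python
import torch

from ultralytics.nn.modules.transformer import LayerNorm2d

ln = LayerNorm2d(num_channels=64)
x = torch.rand(2, 64, 32, 32)
print(ln(x).shape)  # torch.Size([2, 64, 32, 32])
```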

__init__(num_channels, eps=1e-06)

Initialize LayerNorm2d with the given parameters.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, num_channels, eps=1e-6):
    """Initialize LayerNorm2d with the given parameters."""
    super().__init__()
    self.weight = nn.Parameter(torch.ones(num_channels))
    self.bias = nn.Parameter(torch.zeros(num_channels))
    self.eps = eps

forward(x)

Perform forward pass for 2D layer normalization.

Source code in ultralytics/nn/modules/transformer.py
def forward(self, x):
    """Perform forward pass for 2D layer normalization."""
    u = x.mean(1, keepdim=True)
    s = (x - u).pow(2).mean(1, keepdim=True)
    x = (x - u) / torch.sqrt(s + self.eps)
    return self.weight[:, None, None] * x + self.bias[:, None, None]



ultralytics.nn.modules.transformer.MSDeformAttn

Bases: Module

Multiscale deformable attention module based on Deformable-DETR and PaddleDetection implementations.

https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py

Source code in ultralytics/nn/modules/transformer.py
class MSDeformAttn(nn.Module):
    """
    Multiscale Deformable Attention Module based on Deformable-DETR and PaddleDetection implementations.

    https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
    """

    def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
        """Initialize MSDeformAttn with the given parameters."""
        super().__init__()
        if d_model % n_heads != 0:
            raise ValueError(f"d_model must be divisible by n_heads, but got {d_model} and {n_heads}")
        _d_per_head = d_model // n_heads
        # Better to set _d_per_head to a power of 2 which is more efficient in a CUDA implementation
        assert _d_per_head * n_heads == d_model, "`d_model` must be divisible by `n_heads`"

        self.im2col_step = 64

        self.d_model = d_model
        self.n_levels = n_levels
        self.n_heads = n_heads
        self.n_points = n_points

        self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
        self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
        self.value_proj = nn.Linear(d_model, d_model)
        self.output_proj = nn.Linear(d_model, d_model)

        self._reset_parameters()

    def _reset_parameters(self):
        """Reset module parameters."""
        constant_(self.sampling_offsets.weight.data, 0.0)
        thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (
            (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
            .view(self.n_heads, 1, 1, 2)
            .repeat(1, self.n_levels, self.n_points, 1)
        )
        for i in range(self.n_points):
            grid_init[:, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        constant_(self.attention_weights.weight.data, 0.0)
        constant_(self.attention_weights.bias.data, 0.0)
        xavier_uniform_(self.value_proj.weight.data)
        constant_(self.value_proj.bias.data, 0.0)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.0)

    def forward(self, query, refer_bbox, value, value_shapes, value_mask=None):
        """
        Perform forward pass for multiscale deformable attention.

        https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py

        Args:
            query (torch.Tensor): [bs, query_length, C]
            refer_bbox (torch.Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
                bottom-right (1, 1), including padding area
            value (torch.Tensor): [bs, value_length, C]
            value_shapes (List): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
            value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements

        Returns:
            output (Tensor): [bs, Length_{query}, C]
        """
        bs, len_q = query.shape[:2]
        len_v = value.shape[1]
        assert sum(s[0] * s[1] for s in value_shapes) == len_v

        value = self.value_proj(value)
        if value_mask is not None:
            value = value.masked_fill(value_mask[..., None], float(0))
        value = value.view(bs, len_v, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2)
        attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points)
        attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points)
        # N, Len_q, n_heads, n_levels, n_points, 2
        num_points = refer_bbox.shape[-1]
        if num_points == 2:
            offset_normalizer = torch.as_tensor(value_shapes, dtype=query.dtype, device=query.device).flip(-1)
            add = sampling_offsets / offset_normalizer[None, None, None, :, None, :]
            sampling_locations = refer_bbox[:, :, None, :, None, :] + add
        elif num_points == 4:
            add = sampling_offsets / self.n_points * refer_bbox[:, :, None, :, None, 2:] * 0.5
            sampling_locations = refer_bbox[:, :, None, :, None, :2] + add
        else:
            raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {num_points}.")
        output = multi_scale_deformable_attn_pytorch(value, value_shapes, sampling_locations, attention_weights)
        return self.output_proj(output)
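
A hedged usage sketch (all sizes illustrative): the flattened lengths of the per-level feature maps must sum to value.shape[1], and refer_bbox carries normalized (x, y) reference points per query and level:

```python
import torch

from ultralytics.nn.modules.transformer import MSDeformAttn

attn = MSDeformAttn(d_model=256, n_levels=2, n_heads=8, n_points=4)
shapes = [(32, 32), (16, 16)]  # (H, W) of each feature level
value = torch.rand(2, 32 * 32 + 16 * 16, 256)  # [bs, sum(H*W), d_model]
query = torch.rand(2, 300, 256)  # [bs, num_queries, d_model]
refer_bbox = torch.rand(2, 300, 2, 2)  # [bs, num_queries, n_levels, 2], values in [0, 1]
out = attn(query, refer_bbox, value, shapes)
print(out.shape)  # torch.Size([2, 300, 256])
```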

__init__(d_model=256, n_levels=4, n_heads=8, n_points=4)

Initialize MSDeformAttn with the given parameters.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
    """Initialize MSDeformAttn with the given parameters."""
    super().__init__()
    if d_model % n_heads != 0:
        raise ValueError(f"d_model must be divisible by n_heads, but got {d_model} and {n_heads}")
    _d_per_head = d_model // n_heads
    # Better to set _d_per_head to a power of 2 which is more efficient in a CUDA implementation
    assert _d_per_head * n_heads == d_model, "`d_model` must be divisible by `n_heads`"

    self.im2col_step = 64

    self.d_model = d_model
    self.n_levels = n_levels
    self.n_heads = n_heads
    self.n_points = n_points

    self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
    self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
    self.value_proj = nn.Linear(d_model, d_model)
    self.output_proj = nn.Linear(d_model, d_model)

    self._reset_parameters()

forward(query, refer_bbox, value, value_shapes, value_mask=None)

Perform forward pass for multiscale deformable attention.

https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| query | Tensor | [bs, query_length, C] | required |
| refer_bbox | Tensor | [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area | required |
| value | Tensor | [bs, value_length, C] | required |
| value_shapes | List | [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})] | required |
| value_mask | Tensor | [bs, value_length], True for non-padding elements, False for padding elements | None |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| output | Tensor | [bs, Length_{query}, C] |

Source code in ultralytics/nn/modules/transformer.py
def forward(self, query, refer_bbox, value, value_shapes, value_mask=None):
    """
    Perform forward pass for multiscale deformable attention.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py

    Args:
        query (torch.Tensor): [bs, query_length, C]
        refer_bbox (torch.Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
            bottom-right (1, 1), including padding area
        value (torch.Tensor): [bs, value_length, C]
        value_shapes (List): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
        value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements

    Returns:
        output (Tensor): [bs, Length_{query}, C]
    """
    bs, len_q = query.shape[:2]
    len_v = value.shape[1]
    assert sum(s[0] * s[1] for s in value_shapes) == len_v

    value = self.value_proj(value)
    if value_mask is not None:
        value = value.masked_fill(value_mask[..., None], float(0))
    value = value.view(bs, len_v, self.n_heads, self.d_model // self.n_heads)
    sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2)
    attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points)
    attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points)
    # N, Len_q, n_heads, n_levels, n_points, 2
    num_points = refer_bbox.shape[-1]
    if num_points == 2:
        offset_normalizer = torch.as_tensor(value_shapes, dtype=query.dtype, device=query.device).flip(-1)
        add = sampling_offsets / offset_normalizer[None, None, None, :, None, :]
        sampling_locations = refer_bbox[:, :, None, :, None, :] + add
    elif num_points == 4:
        add = sampling_offsets / self.n_points * refer_bbox[:, :, None, :, None, 2:] * 0.5
        sampling_locations = refer_bbox[:, :, None, :, None, :2] + add
    else:
        raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {num_points}.")
    output = multi_scale_deformable_attn_pytorch(value, value_shapes, sampling_locations, attention_weights)
    return self.output_proj(output)



ultralytics.nn.modules.transformer.DeformableTransformerDecoderLayer

Bases: Module

Deformable Transformer Decoder Layer inspired by PaddleDetection and Deformable-DETR implementations.

https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py

Source code in ultralytics/nn/modules/transformer.py
class DeformableTransformerDecoderLayer(nn.Module):
    """
    Deformable Transformer Decoder Layer inspired by PaddleDetection and Deformable-DETR implementations.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
    https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py
    """

    def __init__(self, d_model=256, n_heads=8, d_ffn=1024, dropout=0.0, act=nn.ReLU(), n_levels=4, n_points=4):
        """Initialize the DeformableTransformerDecoderLayer with the given parameters."""
        super().__init__()

        # Self attention
        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)

        # Cross attention
        self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout2 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)

        # FFN
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.act = act
        self.dropout3 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout4 = nn.Dropout(dropout)
        self.norm3 = nn.LayerNorm(d_model)

    @staticmethod
    def with_pos_embed(tensor, pos):
        """Add positional embeddings to the input tensor, if provided."""
        return tensor if pos is None else tensor + pos

    def forward_ffn(self, tgt):
        """Perform forward pass through the Feed-Forward Network part of the layer."""
        tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))
        tgt = tgt + self.dropout4(tgt2)
        return self.norm3(tgt)

    def forward(self, embed, refer_bbox, feats, shapes, padding_mask=None, attn_mask=None, query_pos=None):
        """Perform the forward pass through the entire decoder layer."""

        # Self attention
        q = k = self.with_pos_embed(embed, query_pos)
        tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1), attn_mask=attn_mask)[
            0
        ].transpose(0, 1)
        embed = embed + self.dropout1(tgt)
        embed = self.norm1(embed)

        # Cross attention
        tgt = self.cross_attn(
            self.with_pos_embed(embed, query_pos), refer_bbox.unsqueeze(2), feats, shapes, padding_mask
        )
        embed = embed + self.dropout2(tgt)
        embed = self.norm2(embed)

        # FFN
        return self.forward_ffn(embed)
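
A hedged sketch (sizes illustrative): embed holds the decoder queries, feats the flattened multiscale features, and refer_bbox normalized (cx, cy, w, h) anchors; the layer unsqueezes refer_bbox to the per-level shape MSDeformAttn expects:

```python
import torch

from ultralytics.nn.modules.transformer import DeformableTransformerDecoderLayer

layer = DeformableTransformerDecoderLayer(d_model=256, n_heads=8, n_levels=2)
shapes = [(32, 32), (16, 16)]
feats = torch.rand(2, 32 * 32 + 16 * 16, 256)  # [bs, sum(H*W), d_model]
embed = torch.rand(2, 300, 256)  # [bs, num_queries, d_model]
refer_bbox = torch.rand(2, 300, 4)  # [bs, num_queries, 4], values in [0, 1]
out = layer(embed, refer_bbox, feats, shapes)
print(out.shape)  # torch.Size([2, 300, 256])
```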

__init__(d_model=256, n_heads=8, d_ffn=1024, dropout=0.0, act=nn.ReLU(), n_levels=4, n_points=4)

Initialize the DeformableTransformerDecoderLayer with the given parameters.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, d_model=256, n_heads=8, d_ffn=1024, dropout=0.0, act=nn.ReLU(), n_levels=4, n_points=4):
    """Initialize the DeformableTransformerDecoderLayer with the given parameters."""
    super().__init__()

    # Self attention
    self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
    self.dropout1 = nn.Dropout(dropout)
    self.norm1 = nn.LayerNorm(d_model)

    # Cross attention
    self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
    self.dropout2 = nn.Dropout(dropout)
    self.norm2 = nn.LayerNorm(d_model)

    # FFN
    self.linear1 = nn.Linear(d_model, d_ffn)
    self.act = act
    self.dropout3 = nn.Dropout(dropout)
    self.linear2 = nn.Linear(d_ffn, d_model)
    self.dropout4 = nn.Dropout(dropout)
    self.norm3 = nn.LayerNorm(d_model)

forward(embed, refer_bbox, feats, shapes, padding_mask=None, attn_mask=None, query_pos=None)

Perform the forward pass through the entire decoder layer.

Source code in ultralytics/nn/modules/transformer.py
def forward(self, embed, refer_bbox, feats, shapes, padding_mask=None, attn_mask=None, query_pos=None):
    """Perform the forward pass through the entire decoder layer."""

    # Self attention
    q = k = self.with_pos_embed(embed, query_pos)
    tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1), attn_mask=attn_mask)[
        0
    ].transpose(0, 1)
    embed = embed + self.dropout1(tgt)
    embed = self.norm1(embed)

    # Cross attention
    tgt = self.cross_attn(
        self.with_pos_embed(embed, query_pos), refer_bbox.unsqueeze(2), feats, shapes, padding_mask
    )
    embed = embed + self.dropout2(tgt)
    embed = self.norm2(embed)

    # FFN
    return self.forward_ffn(embed)

forward_ffn(tgt)

Perform forward pass through the Feed-Forward Network part of the layer.

Source code in ultralytics/nn/modules/transformer.py
def forward_ffn(self, tgt):
    """Perform forward pass through the Feed-Forward Network part of the layer."""
    tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))
    tgt = tgt + self.dropout4(tgt2)
    return self.norm3(tgt)

with_pos_embed(tensor, pos) staticmethod

Add positional embeddings to the input tensor, if provided.

Source code in ultralytics/nn/modules/transformer.py
@staticmethod
def with_pos_embed(tensor, pos):
    """Add positional embeddings to the input tensor, if provided."""
    return tensor if pos is None else tensor + pos



ultralytics.nn.modules.transformer.DeformableTransformerDecoder

Bases: Module

Implementation of the Deformable Transformer Decoder based on PaddleDetection.

https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py

Source code in ultralytics/nn/modules/transformer.py
class DeformableTransformerDecoder(nn.Module):
    """
    Implementation of Deformable Transformer Decoder based on PaddleDetection.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
    """

    def __init__(self, hidden_dim, decoder_layer, num_layers, eval_idx=-1):
        """Initialize the DeformableTransformerDecoder with the given parameters."""
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.eval_idx = eval_idx if eval_idx >= 0 else num_layers + eval_idx

    def forward(
        self,
        embed,  # decoder embeddings
        refer_bbox,  # anchor
        feats,  # image features
        shapes,  # feature shapes
        bbox_head,
        score_head,
        pos_mlp,
        attn_mask=None,
        padding_mask=None,
    ):
        """Perform the forward pass through the entire decoder."""
        output = embed
        dec_bboxes = []
        dec_cls = []
        last_refined_bbox = None
        refer_bbox = refer_bbox.sigmoid()
        for i, layer in enumerate(self.layers):
            output = layer(output, refer_bbox, feats, shapes, padding_mask, attn_mask, pos_mlp(refer_bbox))

            bbox = bbox_head[i](output)
            refined_bbox = torch.sigmoid(bbox + inverse_sigmoid(refer_bbox))

            if self.training:
                dec_cls.append(score_head[i](output))
                if i == 0:
                    dec_bboxes.append(refined_bbox)
                else:
                    dec_bboxes.append(torch.sigmoid(bbox + inverse_sigmoid(last_refined_bbox)))
            elif i == self.eval_idx:
                dec_cls.append(score_head[i](output))
                dec_bboxes.append(refined_bbox)
                break

            last_refined_bbox = refined_bbox
            refer_bbox = refined_bbox.detach() if self.training else refined_bbox

        return torch.stack(dec_bboxes), torch.stack(dec_cls)
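
A hedged end-to-end sketch (sizes illustrative; the wiring is modeled on how RT-DETR-style detectors assemble this decoder): each layer gets its own bbox head and score head, and pos_mlp turns the 4-dim anchors into query position embeddings. In training mode the stacked outputs cover every layer; in eval mode only the layer at eval_idx:

```python
import torch
import torch.nn as nn

from ultralytics.nn.modules.transformer import (
    MLP,
    DeformableTransformerDecoder,
    DeformableTransformerDecoderLayer,
)

num_layers, hd, nq, nc = 2, 256, 300, 80
decoder = DeformableTransformerDecoder(
    hd, DeformableTransformerDecoderLayer(d_model=hd, n_heads=8, n_levels=2), num_layers
)

shapes = [(32, 32), (16, 16)]
feats = torch.rand(2, 32 * 32 + 16 * 16, hd)
embed = torch.rand(2, nq, hd)
refer_bbox = torch.rand(2, nq, 4)  # anchor logits; sigmoid is applied inside
bbox_head = nn.ModuleList(MLP(hd, hd, 4, 3) for _ in range(num_layers))
score_head = nn.ModuleList(nn.Linear(hd, nc) for _ in range(num_layers))
pos_mlp = MLP(4, 2 * hd, hd, 2)

dec_bboxes, dec_cls = decoder(embed, refer_bbox, feats, shapes, bbox_head, score_head, pos_mlp)
# In training mode: [num_layers, bs, nq, 4] and [num_layers, bs, nq, nc]
print(dec_bboxes.shape, dec_cls.shape)
```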

__init__(hidden_dim, decoder_layer, num_layers, eval_idx=-1)

Initialize the DeformableTransformerDecoder with the given parameters.

Source code in ultralytics/nn/modules/transformer.py
def __init__(self, hidden_dim, decoder_layer, num_layers, eval_idx=-1):
    """Initialize the DeformableTransformerDecoder with the given parameters."""
    super().__init__()
    self.layers = _get_clones(decoder_layer, num_layers)
    self.num_layers = num_layers
    self.hidden_dim = hidden_dim
    self.eval_idx = eval_idx if eval_idx >= 0 else num_layers + eval_idx

forward(embed, refer_bbox, feats, shapes, bbox_head, score_head, pos_mlp, attn_mask=None, padding_mask=None)

Perform the forward pass through the entire decoder.

Source code in ultralytics/nn/modules/transformer.py
def forward(
    self,
    embed,  # decoder embeddings
    refer_bbox,  # anchor
    feats,  # image features
    shapes,  # feature shapes
    bbox_head,
    score_head,
    pos_mlp,
    attn_mask=None,
    padding_mask=None,
):
    """Perform the forward pass through the entire decoder."""
    output = embed
    dec_bboxes = []
    dec_cls = []
    last_refined_bbox = None
    refer_bbox = refer_bbox.sigmoid()
    for i, layer in enumerate(self.layers):
        output = layer(output, refer_bbox, feats, shapes, padding_mask, attn_mask, pos_mlp(refer_bbox))

        bbox = bbox_head[i](output)
        refined_bbox = torch.sigmoid(bbox + inverse_sigmoid(refer_bbox))

        if self.training:
            dec_cls.append(score_head[i](output))
            if i == 0:
                dec_bboxes.append(refined_bbox)
            else:
                dec_bboxes.append(torch.sigmoid(bbox + inverse_sigmoid(last_refined_bbox)))
        elif i == self.eval_idx:
            dec_cls.append(score_head[i](output))
            dec_bboxes.append(refined_bbox)
            break

        last_refined_bbox = refined_bbox
        refer_bbox = refined_bbox.detach() if self.training else refined_bbox

    return torch.stack(dec_bboxes), torch.stack(dec_cls)





Created 2023-11-12, Updated 2023-11-25
Authors: glenn-jocher (3)