Add Conv2() module (#2820)
ultralytics/nn/modules/__init__.py
@@ -1,15 +1,28 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""
Ultralytics modules. Visualize with:

from ultralytics.nn.modules import *
import torch
import os

x = torch.ones(1, 128, 40, 40)
m = Conv(128, 128)
f = f'{m._get_name()}.onnx'
torch.onnx.export(m, x, f)
os.system(f'onnxsim {f} {f} && open {f}')
"""

from .block import (C1, C2, C3, C3TR, DFL, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, GhostBottleneck,
                    HGBlock, HGStem, Proto, RepC3)
-from .conv import (CBAM, ChannelAttention, Concat, Conv, ConvTranspose, DWConv, DWConvTranspose2d, Focus, GhostConv,
-                   LightConv, RepConv, SpatialAttention)
+from .conv import (CBAM, ChannelAttention, Concat, Conv, Conv2, ConvTranspose, DWConv, DWConvTranspose2d, Focus,
+                   GhostConv, LightConv, RepConv, SpatialAttention)
from .head import Classify, Detect, Pose, RTDETRDecoder, Segment
from .transformer import (AIFI, MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer, LayerNorm2d,
                          MLPBlock, MSDeformAttn, TransformerBlock, TransformerEncoderLayer, TransformerLayer)

__all__ = [
-    'Conv', 'LightConv', 'RepConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv',
+    'Conv', 'Conv2', 'LightConv', 'RepConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv',
    'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'TransformerLayer', 'TransformerBlock', 'MLPBlock',
    'LayerNorm2d', 'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', 'C2f', 'C3x', 'C3TR', 'C3Ghost',
    'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'Detect', 'Segment', 'Pose', 'Classify',
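The docstring above doubles as a quick recipe for inspecting any module's exported graph. A minimal standalone sketch, adapted here to exercise the newly exported Conv2 (it assumes onnxsim is installed and, like the original snippet, relies on the macOS open command to launch a local .onnx viewer):

import os

import torch

from ultralytics.nn.modules import Conv2

x = torch.ones(1, 128, 40, 40)             # dummy NCHW input
m = Conv2(128, 128)                        # module added by this commit
f = f'{m._get_name()}.onnx'                # -> 'Conv2.onnx'
torch.onnx.export(m, x, f)                 # export the two-branch forward graph
os.system(f'onnxsim {f} {f} && open {f}')  # simplify in place, then open in the default viewer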
ultralytics/nn/modules/conv.py
@@ -43,6 +43,27 @@ class Conv(nn.Module):
        return self.act(self.conv(x))


+class Conv2(Conv):
+    """Simplified RepConv module with Conv fusing."""
+
+    def __init__(self, c1, c2, k=3, s=1, p=None, g=1, d=1, act=True):
+        """Initialize Conv layer with given arguments including activation."""
+        super().__init__(c1, c2, k, s, p, g=g, d=d, act=act)
+        self.cv2 = nn.Conv2d(c1, c2, 1, s, autopad(1, p, d), groups=g, dilation=d, bias=False)  # add 1x1 conv
+
+    def forward(self, x):
+        """Apply convolution, batch normalization and activation to input tensor."""
+        return self.act(self.bn(self.conv(x) + self.cv2(x)))
+
+    def fuse_convs(self):
+        """Fuse parallel convolutions."""
+        w = torch.zeros_like(self.conv.weight.data)
+        i = [x // 2 for x in w.shape[2:]]
+        w[:, :, i[0]:i[0] + 1, i[1]:i[1] + 1] = self.cv2.weight.data.clone()  # place 1x1 kernel at the centre
+        self.conv.weight.data += w
+        self.__delattr__('cv2')


class LightConv(nn.Module):
    """Light convolution with args(ch_in, ch_out, kernel).
    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
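For context, fuse_convs() is valid because a 1x1 kernel written into the centre of an otherwise zero k x k kernel, with the same stride, groups and 'same' padding, computes the same output map as the standalone 1x1 branch, so the two weight tensors can simply be added. A quick equivalence check, a sketch rather than part of the commit, comparing the pre-BN branch sum against the fused convolution:

import copy

import torch

from ultralytics.nn.modules import Conv2

m = Conv2(16, 32, k=3)
x = torch.randn(2, 16, 40, 40)
with torch.no_grad():
    y_branches = m.conv(x) + m.cv2(x)  # training-time parallel 3x3 + 1x1 branches
    fused = copy.deepcopy(m)
    fused.fuse_convs()                 # fold cv2 into conv.weight, then delete cv2
    y_fused = fused.conv(x)            # single 3x3 convolution after fusion
    assert torch.allclose(y_branches, y_fused, atol=1e-5)

Batch-norm folding and the switch to a single-branch forward are not handled here; those belong to the model-level fuse step in the library, which is why the check compares raw convolution outputs.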