ultralytics 8.0.111 refactored model.loss() method (#2911)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Snyk bot <snyk-bot@snyk.io>
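The recurring change in the hunks below converts each module's __all__ from a list to a tuple. Both forms behave identically for star imports; the tuple simply makes the export sequence immutable. A minimal sketch of the pattern, using a hypothetical module (not from this diff):

    # exports.py -- hypothetical module illustrating the list -> tuple change
    def detect():
        return 'detect'

    def segment():
        return 'segment'

    # Before: a mutable list
    #   __all__ = ['detect', 'segment']
    # After: an immutable tuple; `from exports import *` imports the same names
    __all__ = ('detect', 'segment')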
@@ -21,10 +21,9 @@ from .head import Classify, Detect, Pose, RTDETRDecoder, Segment
 from .transformer import (AIFI, MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer, LayerNorm2d,
                           MLPBlock, MSDeformAttn, TransformerBlock, TransformerEncoderLayer, TransformerLayer)

-__all__ = [
-    'Conv', 'Conv2', 'LightConv', 'RepConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv',
-    'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'TransformerLayer', 'TransformerBlock', 'MLPBlock',
-    'LayerNorm2d', 'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', 'C2f', 'C3x', 'C3TR', 'C3Ghost',
-    'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'Detect', 'Segment', 'Pose', 'Classify',
-    'TransformerEncoderLayer', 'RepC3', 'RTDETRDecoder', 'AIFI', 'DeformableTransformerDecoder',
-    'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP']
+__all__ = ('Conv', 'Conv2', 'LightConv', 'RepConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus',
+           'GhostConv', 'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'TransformerLayer',
+           'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3',
+           'C2f', 'C3x', 'C3TR', 'C3Ghost', 'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'Detect',
+           'Segment', 'Pose', 'Classify', 'TransformerEncoderLayer', 'RepC3', 'RTDETRDecoder', 'AIFI',
+           'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP')
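The hunk above appears to be the package __init__, whose __all__ re-exports every public symbol. A hedged sanity check, assuming these modules ship as ultralytics.nn.modules and that ultralytics is installed, confirms each exported name actually resolves:

    # Sketch: verify every name in __all__ is importable from the package.
    import ultralytics.nn.modules as m

    missing = [name for name in m.__all__ if not hasattr(m, name)]
    assert not missing, f'unresolved exports: {missing}'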
@@ -10,9 +10,8 @@ import torch.nn.functional as F
 from .conv import Conv, DWConv, GhostConv, LightConv, RepConv
 from .transformer import TransformerBlock

-__all__ = [
-    'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', 'C2f', 'C3x', 'C3TR', 'C3Ghost', 'GhostBottleneck',
-    'Bottleneck', 'BottleneckCSP', 'Proto', 'RepC3']
+__all__ = ('DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', 'C2f', 'C3x', 'C3TR', 'C3Ghost',
+           'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'RepC3')


 class DFL(nn.Module):
@@ -9,9 +9,8 @@ import numpy as np
 import torch
 import torch.nn as nn

-__all__ = [
-    'Conv', 'LightConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv', 'ChannelAttention',
-    'SpatialAttention', 'CBAM', 'Concat', 'RepConv']
+__all__ = ('Conv', 'LightConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv',
+           'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'RepConv')


 def autopad(k, p=None, d=1):  # kernel, padding, dilation
@@ -16,7 +16,7 @@ from .conv import Conv
 from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
 from .utils import bias_init_with_prob, linear_init_

-__all__ = ['Detect', 'Segment', 'Pose', 'Classify', 'RTDETRDecoder']
+__all__ = 'Detect', 'Segment', 'Pose', 'Classify', 'RTDETRDecoder'


 class Detect(nn.Module):
@@ -13,9 +13,8 @@ from torch.nn.init import constant_, xavier_uniform_
 from .conv import Conv
 from .utils import _get_clones, inverse_sigmoid, multi_scale_deformable_attn_pytorch

-__all__ = [
-    'TransformerEncoderLayer', 'TransformerLayer', 'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'AIFI',
-    'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP']
+__all__ = ('TransformerEncoderLayer', 'TransformerLayer', 'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'AIFI',
+           'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP')


 class TransformerEncoderLayer(nn.Module):
@@ -12,7 +12,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 from torch.nn.init import uniform_

-__all__ = ['multi_scale_deformable_attn_pytorch', 'inverse_sigmoid']
+__all__ = 'multi_scale_deformable_attn_pytorch', 'inverse_sigmoid'


 def _get_clones(module, n):
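Note that two of the hunks above assign __all__ without parentheses. In Python the comma, not the parentheses, builds the tuple, so the bare form is equivalent:

    # Sketch: parentheses are optional around tuple literals.
    a = 'Detect', 'Segment', 'Pose'
    b = ('Detect', 'Segment', 'Pose')
    assert a == b and isinstance(a, tuple)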