Shorten module paths with new 'nn' dir (#96)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Glenn Jocher authored 2 years ago, committed by GitHub
parent 4fb04be20b
commit 48cffa176e

@@ -1 +1,5 @@
 __version__ = "8.0.0.dev0"
+
+from ultralytics.yolo.engine.model import YOLO
+
+__all__ = ["__version__", "YOLO"]  # allow simpler import

@@ -13,6 +13,7 @@ from PIL import Image
 from ultralytics.yolo.utils import LOGGER, ROOT
 from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_version
 from ultralytics.yolo.utils.downloads import attempt_download, is_url
+from ultralytics.yolo.utils.files import yaml_load
 from ultralytics.yolo.utils.ops import xywh2xyxy
@@ -32,8 +33,6 @@ class AutoBackend(nn.Module):
         #   TensorFlow Lite:            *.tflite
         #   TensorFlow Edge TPU:        *_edgetpu.tflite
         #   PaddlePaddle:               *_paddle_model
-        from ultralytics.yolo.utils.files import yaml_load
-        from ultralytics.yolo.utils.modeling import attempt_load_weights
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
@@ -54,6 +53,7 @@ class AutoBackend(nn.Module):
             model.half() if fp16 else model.float()
             self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
         elif pt:  # PyTorch
+            from ultralytics.nn.tasks import attempt_load_weights
             model = attempt_load_weights(weights if isinstance(weights, list) else w,
                                          device=device,
                                          inplace=True,
@@ -89,7 +89,7 @@ class AutoBackend(nn.Module):
         elif xml:  # OpenVINO
             LOGGER.info(f'Loading {w} for OpenVINO inference...')
             check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-            from openvino.runtime import Core, Layout, get_batch
+            from openvino.runtime import Core, Layout, get_batch  # noqa
             ie = Core()
             if not Path(w).is_file():  # if not *.xml
                 w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir

@@ -17,6 +17,7 @@ import torch.nn as nn
 from PIL import Image, ImageOps
 from torch.cuda import amp
 
+from ultralytics.nn.autobackend import AutoBackend
 from ultralytics.yolo.data.augment import LetterBox
 from ultralytics.yolo.utils import LOGGER, colorstr
 from ultralytics.yolo.utils.files import increment_path
@@ -25,8 +26,6 @@ from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
 from ultralytics.yolo.utils.tal import dist2bbox, make_anchors
 from ultralytics.yolo.utils.torch_utils import copy_attr, smart_inference_mode
 
-from .autobackend import AutoBackend
-
 # from utils.plots import feature_visualization TODO

@@ -1,11 +1,19 @@
+import contextlib
 from copy import deepcopy
+from pathlib import Path
 
 import thop
+import torch
+import torch.nn as nn
+import torchvision
+import yaml
 
-from ultralytics.yolo.utils.modeling import parse_model
-from ultralytics.yolo.utils.modeling.modules import *
-from ultralytics.yolo.utils.torch_utils import (fuse_conv_and_bn, initialize_weights, intersect_state_dicts, model_info,
-                                                scale_img, time_sync)
+from ultralytics.nn.modules import (C1, C2, C3, C3TR, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, Classify,
+                                    Concat, Conv, ConvTranspose, Detect, DWConv, DWConvTranspose2d, Ensemble, Focus,
+                                    GhostBottleneck, GhostConv, Segment)
+from ultralytics.yolo.utils import LOGGER, colorstr
+from ultralytics.yolo.utils.torch_utils import (fuse_conv_and_bn, initialize_weights, intersect_state_dicts,
+                                                make_divisible, model_info, scale_img, time_sync)
 
 
 class BaseModel(nn.Module):
@@ -75,7 +83,6 @@ class DetectionModel(BaseModel):
         if isinstance(cfg, dict):
             self.yaml = cfg  # model dict
         else:  # is *.yaml
-            import yaml  # for torch hub
             self.yaml_file = Path(cfg).name
             with open(cfg, encoding='ascii', errors='ignore') as f:
                 self.yaml = yaml.safe_load(f)  # model dict
@@ -166,6 +173,7 @@ class ClassificationModel(BaseModel):
 
     def _from_detection_model(self, model, nc=1000, cutoff=10):
         # Create a YOLOv5 classification model from a YOLOv5 detection model
+        from ultralytics.nn.autobackend import AutoBackend
         if isinstance(model, AutoBackend):
             model = model.model  # unwrap DetectMultiBackend
         model.model = model.model[:cutoff]  # backbone
@@ -192,7 +200,6 @@ class ClassificationModel(BaseModel):
     @staticmethod
     def reshape_outputs(model, nc):
         # Update a TorchVision classification model to class count 'n' if required
-        from ultralytics.yolo.utils.modeling.modules import Classify
         name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1]  # last module
         if isinstance(m, Classify):  # YOLO Classify() head
             if m.linear.out_features != nc:
@@ -210,3 +217,110 @@ class ClassificationModel(BaseModel):
             i = types.index(nn.Conv2d)  # nn.Conv2d index
             if m[i].out_channels != nc:
                 m[i] = nn.Conv2d(m[i].in_channels, nc, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None)
+
+
+# Functions ------------------------------------------------------------------------------------------------------------
+def attempt_load_weights(weights, device=None, inplace=True, fuse=True):
+    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+    from ultralytics.yolo.utils.downloads import attempt_download
+
+    model = Ensemble()
+    for w in weights if isinstance(weights, list) else [weights]:
+        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
+        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
+
+        # Model compatibility updates
+        if not hasattr(ckpt, 'stride'):
+            ckpt.stride = torch.tensor([32.])
+        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
+            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict
+
+        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode
+
+    # Module compatibility updates
+    for m in model.modules():
+        t = type(m)
+        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment):
+            m.inplace = inplace  # torch 1.7.0 compatibility
+        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
+            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
+
+    # Return model
+    if len(model) == 1:
+        return model[-1]
+
+    # Return detection ensemble
+    print(f'Ensemble created with {weights}\n')
+    for k in 'names', 'nc', 'yaml':
+        setattr(model, k, getattr(model[0], k))
+    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
+    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
+    return model
+
+
+def parse_model(d, ch):  # model_dict, input_channels(3)
+    # Parse a YOLOv5 model.yaml dictionary
+    LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10}  {'module':<45}{'arguments':<30}")
+    nc, gd, gw, act = d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
+    if act:
+        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
+        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
+    no = nc + 4  # number of outputs = classes + box
+
+    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
+    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+        m = eval(m) if isinstance(m, str) else m  # eval strings
+        for j, a in enumerate(args):
+            with contextlib.suppress(NameError):
+                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+
+        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
+        if m in {
+                Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, Focus, BottleneckCSP,
+                C1, C2, C2f, C3, C3TR, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
+            c1, c2 = ch[f], args[0]
+            if c2 != no:  # if not output
+                c2 = make_divisible(c2 * gw, 8)
+
+            args = [c1, c2, *args[1:]]
+            if m in {BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, C3x}:
+                args.insert(2, n)  # number of repeats
+                n = 1
+        elif m is nn.BatchNorm2d:
+            args = [ch[f]]
+        elif m is Concat:
+            c2 = sum(ch[x] for x in f)
+        # TODO: channel, gw, gd
+        elif m in {Detect, Segment}:
+            args.append([ch[x] for x in f])
+            if m is Segment:
+                args[3] = make_divisible(args[3] * gw, 8)
+        else:
+            c2 = ch[f]
+
+        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
+        t = str(m)[8:-2].replace('__main__.', '')  # module type
+        m.np = sum(x.numel() for x in m_.parameters())  # number params
+        m_.i, m_.f, m_.type = i, f, t  # attach index, 'from' index, type
+        LOGGER.info(f'{i:>3}{str(f):>20}{n_:>3}{m.np:10.0f}  {t:<45}{str(args):<30}')  # print
+        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+        layers.append(m_)
+        if i == 0:
+            ch = []
+        ch.append(c2)
+    return nn.Sequential(*layers), sorted(save)
+
+
+def get_model(model='s.pt', pretrained=True):
+    # Load a YOLO model locally, from torchvision, or from Ultralytics assets
+    if model.endswith(".pt"):
+        model = model.split(".")[0]
+
+    if Path(f"{model}.pt").is_file():  # local file
+        return attempt_load_weights(f"{model}.pt", device='cpu')
+    elif model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0
+        return torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None)
+    else:  # Ultralytics assets
+        return attempt_load_weights(f"{model}.pt", device='cpu')
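These helpers move essentially verbatim (modulo log-column widths) from the deleted ultralytics.yolo.utils.modeling module shown further below into ultralytics.nn.tasks. A brief usage sketch under the new path; the checkpoint and model names are assumed examples, not part of the diff:

# Usage sketch of the relocated helpers (weight/model names are hypothetical).
from ultralytics.nn.tasks import attempt_load_weights, get_model

model = attempt_load_weights('yolov8n.pt', device='cpu', fuse=True)          # single model -> the model itself
ensemble = attempt_load_weights(['yolov8n.pt', 'yolov8s.pt'], device='cpu')  # list -> Ensemble with shared names/nc/yaml
backbone = get_model('resnet50', pretrained=True)  # falls through: local .pt -> torchvision name -> Ultralytics assets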

@@ -1,6 +1,5 @@
 import cv2
 import hydra
-import numpy as np
 
 from ultralytics.yolo.data import build_dataloader
 from ultralytics.yolo.utils import ROOT

@@ -1,5 +1,5 @@
+from ultralytics.nn.tasks import DetectionModel
 from ultralytics.yolo.utils.checks import check_yaml
-from ultralytics.yolo.utils.modeling.tasks import DetectionModel
 
 
 def test_model_parser():

@@ -1,7 +0,0 @@
-from ultralytics.yolo import v8
-
-from .engine.model import YOLO
-from .engine.trainer import BaseTrainer
-from .engine.validator import BaseValidator
-
-__all__ = ["BaseTrainer", "BaseValidator", "YOLO"]  # allow simpler import

@@ -2,14 +2,13 @@ import torch
 import yaml
 
 from ultralytics import yolo  # noqa required for python usage
+from ultralytics.nn.tasks import ClassificationModel, DetectionModel, SegmentationModel, attempt_load_weights
 # from ultralytics.yolo.data.utils import check_dataset, check_dataset_yaml
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.utils import LOGGER
 from ultralytics.yolo.utils.checks import check_yaml
 from ultralytics.yolo.utils.configs import get_config
 from ultralytics.yolo.utils.files import yaml_load
-from ultralytics.yolo.utils.modeling import attempt_load_weights
-from ultralytics.yolo.utils.modeling.tasks import ClassificationModel, DetectionModel, SegmentationModel
 from ultralytics.yolo.utils.torch_utils import smart_inference_mode
 
 # map head: [model, trainer, validator, predictor]

@@ -30,13 +30,13 @@ from pathlib import Path
 import cv2
 
+from ultralytics.nn.autobackend import AutoBackend
 from ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages, LoadScreenshots, LoadStreams
 from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS, check_dataset, check_dataset_yaml
 from ultralytics.yolo.utils import LOGGER, ROOT, colorstr, ops
 from ultralytics.yolo.utils.checks import check_file, check_imshow
 from ultralytics.yolo.utils.configs import get_config
 from ultralytics.yolo.utils.files import increment_path
-from ultralytics.yolo.utils.modeling.autobackend import AutoBackend
 from ultralytics.yolo.utils.torch_utils import check_imgsz, select_device, smart_inference_mode
 
 DEFAULT_CONFIG = ROOT / "yolo/utils/configs/default.yaml"
@@ -95,7 +95,7 @@ class BasePredictor:
         device = select_device(self.args.device)
         model = model or self.args.model
         self.args.half &= device.type != 'cpu'  # half precision only supported on CUDA
-        model = AutoBackend(model, device=device, dnn=self.args.dnn, fp16=self.args.half)  # NOTE: not passing data
+        model = AutoBackend(model, device=device, dnn=self.args.dnn, fp16=self.args.half)
         stride, pt = model.stride, model.pt
         imgsz = check_imgsz(self.args.imgsz, s=stride)  # check image size
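For context, the setup flow in this hunk can be reproduced standalone. A minimal sketch, assuming a hypothetical weight file and default predictor arguments:

# Standalone sketch of BasePredictor's model setup (weight path is hypothetical).
from ultralytics.nn.autobackend import AutoBackend
from ultralytics.yolo.utils.torch_utils import check_imgsz, select_device

device = select_device('')  # '' selects the best available device in this helper
model = AutoBackend('yolov8n.pt', device=device, dnn=False, fp16=False)
stride, pt = model.stride, model.pt  # backend-reported stride and PyTorch flag
imgsz = check_imgsz(640, s=stride)   # round requested size to a stride multiple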

@@ -4,11 +4,11 @@ import torch
 from omegaconf import OmegaConf
 from tqdm import tqdm
 
+from ultralytics.nn.autobackend import AutoBackend
 from ultralytics.yolo.data.utils import check_dataset, check_dataset_yaml
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.utils import LOGGER, TQDM_BAR_FORMAT
 from ultralytics.yolo.utils.files import increment_path
-from ultralytics.yolo.utils.modeling.autobackend import AutoBackend
 from ultralytics.yolo.utils.ops import Profile
 from ultralytics.yolo.utils.torch_utils import check_imgsz, de_parallel, select_device, smart_inference_mode

@@ -1,109 +0,0 @@
-import contextlib
-
-import torchvision
-
-from ultralytics.yolo.utils.downloads import attempt_download
-from ultralytics.yolo.utils.modeling.modules import *
-
-
-def attempt_load_weights(weights, device=None, inplace=True, fuse=True):
-    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
-    model = Ensemble()
-    for w in weights if isinstance(weights, list) else [weights]:
-        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
-        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
-
-        # Model compatibility updates
-        if not hasattr(ckpt, 'stride'):
-            ckpt.stride = torch.tensor([32.])
-        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
-            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict
-
-        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode
-
-    # Module compatibility updates
-    for m in model.modules():
-        t = type(m)
-        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment):
-            m.inplace = inplace  # torch 1.7.0 compatibility
-        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
-            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
-
-    # Return model
-    if len(model) == 1:
-        return model[-1]
-
-    # Return detection ensemble
-    print(f'Ensemble created with {weights}\n')
-    for k in 'names', 'nc', 'yaml':
-        setattr(model, k, getattr(model[0], k))
-    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
-    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
-    return model
-
-
-def parse_model(d, ch):  # model_dict, input_channels(3)
-    # Parse a YOLOv5 model.yaml dictionary
-    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<50}{'arguments':<30}")
-    nc, gd, gw, act = d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
-    if act:
-        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
-        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
-    no = nc + 4  # number of outputs = classes + box
-
-    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
-    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
-        m = eval(m) if isinstance(m, str) else m  # eval strings
-        for j, a in enumerate(args):
-            with contextlib.suppress(NameError):
-                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-
-        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
-        if m in {
-                Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, Focus, BottleneckCSP,
-                C1, C2, C2f, C3, C3TR, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
-            c1, c2 = ch[f], args[0]
-            if c2 != no:  # if not output
-                c2 = make_divisible(c2 * gw, 8)
-
-            args = [c1, c2, *args[1:]]
-            if m in {BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, C3x}:
-                args.insert(2, n)  # number of repeats
-                n = 1
-        elif m is nn.BatchNorm2d:
-            args = [ch[f]]
-        elif m is Concat:
-            c2 = sum(ch[x] for x in f)
-        # TODO: channel, gw, gd
-        elif m in {Detect, Segment}:
-            args.append([ch[x] for x in f])
-            if m is Segment:
-                args[3] = make_divisible(args[3] * gw, 8)
-        else:
-            c2 = ch[f]
-
-        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
-        t = str(m)[8:-2].replace('__main__.', '')  # module type
-        m.np = sum(x.numel() for x in m_.parameters())  # number params
-        m_.i, m_.f, m_.type = i, f, t  # attach index, 'from' index, type
-        LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{m.np:10.0f}  {t:<50}{str(args):<30}')  # print
-        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
-        layers.append(m_)
-        if i == 0:
-            ch = []
-        ch.append(c2)
-    return nn.Sequential(*layers), sorted(save)
-
-
-def get_model(model='s.pt', pretrained=True):
-    # Load a YOLO model locally, from torchvision, or from Ultralytics assets
-    if model.endswith(".pt"):
-        model = model.split(".")[0]
-
-    if Path(f"{model}.pt").is_file():  # local file
-        return attempt_load_weights(f"{model}.pt", device='cpu')
-    elif model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0
-        return torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None)
-    else:  # Ultralytics assets
-        return attempt_load_weights(f"{model}.pt", device='cpu')

@@ -6,5 +6,4 @@ ROOT = Path(__file__).parents[0]  # yolov8 ROOT
 __all__ = ["classify", "segment", "detect"]
 
-# Patch hydra cli
-from ultralytics.yolo.utils.configs import hydra_patch
+from ultralytics.yolo.utils.configs import hydra_patch  # noqa (patch hydra cli)

@@ -3,8 +3,7 @@ import torch
 
 from ultralytics.yolo.engine.predictor import BasePredictor
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
-from ultralytics.yolo.utils import ops
-from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
+from ultralytics.yolo.utils.plotting import Annotator
 
 
 class ClassificationPredictor(BasePredictor):

@@ -1,11 +1,10 @@
 import hydra
 import torch
 
+from ultralytics.nn.tasks import ClassificationModel, get_model
 from ultralytics.yolo import v8
 from ultralytics.yolo.data import build_classification_dataloader
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG, BaseTrainer
-from ultralytics.yolo.utils.modeling import get_model
-from ultralytics.yolo.utils.modeling.tasks import ClassificationModel
 
 
 class ClassificationTrainer(BaseTrainer):

@@ -2,6 +2,7 @@ import hydra
 import torch
 import torch.nn as nn
 
+from ultralytics.nn.tasks import DetectionModel
 from ultralytics.yolo import v8
 from ultralytics.yolo.data import build_dataloader
 from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
@@ -9,7 +10,6 @@ from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG, BaseTrainer
 from ultralytics.yolo.utils import colorstr
 from ultralytics.yolo.utils.loss import BboxLoss
 from ultralytics.yolo.utils.metrics import smooth_BCE
-from ultralytics.yolo.utils.modeling.tasks import DetectionModel
 from ultralytics.yolo.utils.ops import xywh2xyxy
 from ultralytics.yolo.utils.plotting import plot_images, plot_results
 from ultralytics.yolo.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors

@@ -1,11 +1,9 @@
-from pathlib import Path
-
 import hydra
 import torch
 
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
-from ultralytics.yolo.utils import ROOT, ops
-from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
+from ultralytics.yolo.utils import ops
+from ultralytics.yolo.utils.plotting import colors, save_one_box
 
 from ..detect.predict import DetectionPredictor

@@ -3,10 +3,10 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 
+from ultralytics.nn.tasks import SegmentationModel
 from ultralytics.yolo import v8
-from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG, BaseTrainer
+from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.utils.metrics import FocalLoss, bbox_iou, smooth_BCE
-from ultralytics.yolo.utils.modeling.tasks import SegmentationModel
 from ultralytics.yolo.utils.ops import crop_mask, xywh2xyxy
 from ultralytics.yolo.utils.plotting import plot_images, plot_results
 from ultralytics.yolo.utils.torch_utils import de_parallel
