diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index 7eeb1e9..825e90c 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1 +1,5 @@
 __version__ = "8.0.0.dev0"
+
+from ultralytics.yolo.engine.model import YOLO
+
+__all__ = ["__version__", "YOLO"]  # allow simpler import
diff --git a/ultralytics/nn/__init__.py b/ultralytics/nn/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/ultralytics/yolo/utils/modeling/autobackend.py b/ultralytics/nn/autobackend.py
similarity index 99%
rename from ultralytics/yolo/utils/modeling/autobackend.py
rename to ultralytics/nn/autobackend.py
index d5366a5..b635d00 100644
--- a/ultralytics/yolo/utils/modeling/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -13,6 +13,7 @@
 from PIL import Image
 
 from ultralytics.yolo.utils import LOGGER, ROOT
 from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_version
 from ultralytics.yolo.utils.downloads import attempt_download, is_url
+from ultralytics.yolo.utils.files import yaml_load
 from ultralytics.yolo.utils.ops import xywh2xyxy
@@ -32,8 +33,6 @@ class AutoBackend(nn.Module):
         #   TensorFlow Lite:      *.tflite
         #   TensorFlow Edge TPU:  *_edgetpu.tflite
         #   PaddlePaddle:         *_paddle_model
-        from ultralytics.yolo.utils.files import yaml_load
-        from ultralytics.yolo.utils.modeling import attempt_load_weights
 
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
@@ -54,6 +53,7 @@ class AutoBackend(nn.Module):
             model.half() if fp16 else model.float()
             self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
         elif pt:  # PyTorch
+            from ultralytics.nn.tasks import attempt_load_weights
             model = attempt_load_weights(weights if isinstance(weights, list) else w,
                                          device=device,
                                          inplace=True,
@@ -89,7 +89,7 @@ class AutoBackend(nn.Module):
         elif xml:  # OpenVINO
             LOGGER.info(f'Loading {w} for OpenVINO inference...')
             check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-            from openvino.runtime import Core, Layout, get_batch
+            from openvino.runtime import Core, Layout, get_batch  # noqa
             ie = Core()
             if not Path(w).is_file():  # if not *.xml
                 w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
diff --git a/ultralytics/yolo/utils/modeling/modules.py b/ultralytics/nn/modules.py
similarity index 99%
rename from ultralytics/yolo/utils/modeling/modules.py
rename to ultralytics/nn/modules.py
index 06270a6..c72f38b 100644
--- a/ultralytics/yolo/utils/modeling/modules.py
+++ b/ultralytics/nn/modules.py
@@ -17,6 +17,7 @@ import torch.nn as nn
 from PIL import Image, ImageOps
 from torch.cuda import amp
 
+from ultralytics.nn.autobackend import AutoBackend
 from ultralytics.yolo.data.augment import LetterBox
 from ultralytics.yolo.utils import LOGGER, colorstr
 from ultralytics.yolo.utils.files import increment_path
@@ -25,8 +26,6 @@
 from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
 from ultralytics.yolo.utils.tal import dist2bbox, make_anchors
 from ultralytics.yolo.utils.torch_utils import copy_attr, smart_inference_mode
 
-from .autobackend import AutoBackend
-
 # from utils.plots import feature_visualization TODO
 
diff --git a/ultralytics/yolo/utils/modeling/tasks.py b/ultralytics/nn/tasks.py
similarity index 62%
rename from ultralytics/yolo/utils/modeling/tasks.py
rename to ultralytics/nn/tasks.py
index 59e502c..0d259e6 100644
--- a/ultralytics/yolo/utils/modeling/tasks.py
+++ b/ultralytics/nn/tasks.py
@@ -1,11 +1,19 @@
+import contextlib
 from copy import deepcopy
+from pathlib import Path
 
 import thop
+import torch
+import torch.nn as nn
+import torchvision
+import yaml
 
-from ultralytics.yolo.utils.modeling import parse_model
-from ultralytics.yolo.utils.modeling.modules import *
-from ultralytics.yolo.utils.torch_utils import (fuse_conv_and_bn, initialize_weights, intersect_state_dicts, model_info,
-                                                scale_img, time_sync)
+from ultralytics.nn.modules import (C1, C2, C3, C3TR, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, Classify,
+                                    Concat, Conv, ConvTranspose, Detect, DWConv, DWConvTranspose2d, Ensemble, Focus,
+                                    GhostBottleneck, GhostConv, Segment)
+from ultralytics.yolo.utils import LOGGER, colorstr
+from ultralytics.yolo.utils.torch_utils import (fuse_conv_and_bn, initialize_weights, intersect_state_dicts,
+                                                make_divisible, model_info, scale_img, time_sync)
 
 
 class BaseModel(nn.Module):
@@ -75,7 +83,6 @@ class DetectionModel(BaseModel):
         if isinstance(cfg, dict):
             self.yaml = cfg  # model dict
         else:  # is *.yaml
-            import yaml  # for torch hub
             self.yaml_file = Path(cfg).name
             with open(cfg, encoding='ascii', errors='ignore') as f:
                 self.yaml = yaml.safe_load(f)  # model dict
@@ -166,6 +173,7 @@ class ClassificationModel(BaseModel):
 
     def _from_detection_model(self, model, nc=1000, cutoff=10):
         # Create a YOLOv5 classification model from a YOLOv5 detection model
+        from ultralytics.nn.autobackend import AutoBackend
         if isinstance(model, AutoBackend):
             model = model.model  # unwrap AutoBackend
         model.model = model.model[:cutoff]  # backbone
@@ -192,7 +200,6 @@ class ClassificationModel(BaseModel):
     @staticmethod
     def reshape_outputs(model, nc):
         # Update a TorchVision classification model to class count 'nc' if required
-        from ultralytics.yolo.utils.modeling.modules import Classify
         name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1]  # last module
         if isinstance(m, Classify):  # YOLO Classify() head
             if m.linear.out_features != nc:
@@ -210,3 +217,110 @@ class ClassificationModel(BaseModel):
             i = types.index(nn.Conv2d)  # nn.Conv2d index
             if m[i].out_channels != nc:
                 m[i] = nn.Conv2d(m[i].in_channels, nc, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None)
+
+
+# Functions ------------------------------------------------------------------------------------------------------------
+
+
+def attempt_load_weights(weights, device=None, inplace=True, fuse=True):
+    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+    from ultralytics.yolo.utils.downloads import attempt_download
+
+    model = Ensemble()
+    for w in weights if isinstance(weights, list) else [weights]:
+        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
+        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
+
+        # Model compatibility updates
+        if not hasattr(ckpt, 'stride'):
+            ckpt.stride = torch.tensor([32.])
+        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
+            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict
+
+        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode
+
+    # Module compatibility updates
+    for m in model.modules():
+        t = type(m)
+        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment):
+            m.inplace = inplace  # torch 1.7.0 compatibility
+        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
+            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
+
+    # Return model
+    if len(model) == 1:
+        return model[-1]
+
+    # Return detection ensemble
+    print(f'Ensemble created with {weights}\n')
+    for k in 'names', 'nc', 'yaml':
+        setattr(model, k, getattr(model[0], k))
+    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
+    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
+    return model
+
+
+def parse_model(d, ch):  # model_dict, input_channels(3)
+    # Parse a YOLOv5 model.yaml dictionary
+    LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10}  {'module':<45}{'arguments':<30}")
+    nc, gd, gw, act = d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
+    if act:
+        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
+        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
+    no = nc + 4  # number of outputs = classes + box
+
+    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
+    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+        m = eval(m) if isinstance(m, str) else m  # eval strings
+        for j, a in enumerate(args):
+            with contextlib.suppress(NameError):
+                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+
+        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
+        if m in {
+                Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, Focus, BottleneckCSP,
+                C1, C2, C2f, C3, C3TR, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
+            c1, c2 = ch[f], args[0]
+            if c2 != no:  # if not output
+                c2 = make_divisible(c2 * gw, 8)
+
+            args = [c1, c2, *args[1:]]
+            if m in {BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, C3x}:
+                args.insert(2, n)  # number of repeats
+                n = 1
+        elif m is nn.BatchNorm2d:
+            args = [ch[f]]
+        elif m is Concat:
+            c2 = sum(ch[x] for x in f)
+        # TODO: channel, gw, gd
+        elif m in {Detect, Segment}:
+            args.append([ch[x] for x in f])
+            if m is Segment:
+                args[3] = make_divisible(args[3] * gw, 8)
+        else:
+            c2 = ch[f]
+
+        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
+        t = str(m)[8:-2].replace('__main__.', '')  # module type
+        m.np = sum(x.numel() for x in m_.parameters())  # number params
+        m_.i, m_.f, m_.type = i, f, t  # attach index, 'from' index, type
+        LOGGER.info(f'{i:>3}{str(f):>20}{n_:>3}{m.np:10.0f}  {t:<45}{str(args):<30}')  # print
+        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+        layers.append(m_)
+        if i == 0:
+            ch = []
+        ch.append(c2)
+    return nn.Sequential(*layers), sorted(save)
+
+
+def get_model(model='s.pt', pretrained=True):
+    # Load a YOLO model locally, from torchvision, or from Ultralytics assets
+    if model.endswith(".pt"):
+        model = model.split(".")[0]
+
+    if Path(f"{model}.pt").is_file():  # local file
+        return attempt_load_weights(f"{model}.pt", device='cpu')
+    elif model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0
+        return torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None)
+    else:  # Ultralytics assets
+        return attempt_load_weights(f"{model}.pt", device='cpu')
diff --git a/ultralytics/tests/data/dataloader/yolodetection.py b/ultralytics/tests/data/dataloader/yolodetection.py
index 515633f..e30ea61 100644
--- a/ultralytics/tests/data/dataloader/yolodetection.py
+++ b/ultralytics/tests/data/dataloader/yolodetection.py
@@ -1,6 +1,5 @@
 import cv2
 import hydra
-import numpy as np
 
 from ultralytics.yolo.data import build_dataloader
 from ultralytics.yolo.utils import ROOT
diff --git a/ultralytics/tests/functional/test_loaders.py b/ultralytics/tests/functional/test_loaders.py
index 6a80e72..ea481c3 100644
--- a/ultralytics/tests/functional/test_loaders.py
+++ b/ultralytics/tests/functional/test_loaders.py
@@ -1,5 +1,5 @@
+from ultralytics.nn.tasks import DetectionModel
 from ultralytics.yolo.utils.checks import check_yaml
-from ultralytics.yolo.utils.modeling.tasks import DetectionModel
 
 
 def test_model_parser():
diff --git a/ultralytics/yolo/__init__.py b/ultralytics/yolo/__init__.py
index 37f36e0..e69de29 100644
--- a/ultralytics/yolo/__init__.py
+++ b/ultralytics/yolo/__init__.py
@@ -1,7 +0,0 @@
-from ultralytics.yolo import v8
-
-from .engine.model import YOLO
-from .engine.trainer import BaseTrainer
-from .engine.validator import BaseValidator
-
-__all__ = ["BaseTrainer", "BaseValidator", "YOLO"]  # allow simpler import
diff --git a/ultralytics/yolo/engine/model.py b/ultralytics/yolo/engine/model.py
index ba72812..1ecee36 100644
--- a/ultralytics/yolo/engine/model.py
+++ b/ultralytics/yolo/engine/model.py
@@ -2,14 +2,13 @@ import torch
 import yaml
 
 from ultralytics import yolo  # noqa required for python usage
+from ultralytics.nn.tasks import ClassificationModel, DetectionModel, SegmentationModel, attempt_load_weights
 # from ultralytics.yolo.data.utils import check_dataset, check_dataset_yaml
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.utils import LOGGER
 from ultralytics.yolo.utils.checks import check_yaml
 from ultralytics.yolo.utils.configs import get_config
 from ultralytics.yolo.utils.files import yaml_load
-from ultralytics.yolo.utils.modeling import attempt_load_weights
-from ultralytics.yolo.utils.modeling.tasks import ClassificationModel, DetectionModel, SegmentationModel
 from ultralytics.yolo.utils.torch_utils import smart_inference_mode
 
 # map head: [model, trainer, validator, predictor]
diff --git a/ultralytics/yolo/engine/predictor.py b/ultralytics/yolo/engine/predictor.py
index 3e894e6..21c55d6 100644
--- a/ultralytics/yolo/engine/predictor.py
+++ b/ultralytics/yolo/engine/predictor.py
@@ -30,13 +30,13 @@ from pathlib import Path
 
 import cv2
 
+from ultralytics.nn.autobackend import AutoBackend
 from ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages, LoadScreenshots, LoadStreams
 from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS, check_dataset, check_dataset_yaml
 from ultralytics.yolo.utils import LOGGER, ROOT, colorstr, ops
 from ultralytics.yolo.utils.checks import check_file, check_imshow
 from ultralytics.yolo.utils.configs import get_config
 from ultralytics.yolo.utils.files import increment_path
-from ultralytics.yolo.utils.modeling.autobackend import AutoBackend
 from ultralytics.yolo.utils.torch_utils import check_imgsz, select_device, smart_inference_mode
 
 DEFAULT_CONFIG = ROOT / "yolo/utils/configs/default.yaml"
@@ -95,7 +95,7 @@ class BasePredictor:
         device = select_device(self.args.device)
         model = model or self.args.model
         self.args.half &= device.type != 'cpu'  # half precision only supported on CUDA
-        model = AutoBackend(model, device=device, dnn=self.args.dnn, fp16=self.args.half)  # NOTE: not passing data
+        model = AutoBackend(model, device=device, dnn=self.args.dnn, fp16=self.args.half)
         stride, pt = model.stride, model.pt
         imgsz = check_imgsz(self.args.imgsz, s=stride)  # check image size
 
diff --git a/ultralytics/yolo/engine/validator.py b/ultralytics/yolo/engine/validator.py
index 03bf951..4c1c09b 100644
--- a/ultralytics/yolo/engine/validator.py
+++ b/ultralytics/yolo/engine/validator.py
@@ -4,11 +4,11 @@ import torch
 from omegaconf import OmegaConf
 from tqdm import tqdm
 
+from ultralytics.nn.autobackend import AutoBackend
 from ultralytics.yolo.data.utils import check_dataset, check_dataset_yaml
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.utils import LOGGER, TQDM_BAR_FORMAT
 from ultralytics.yolo.utils.files import increment_path
-from ultralytics.yolo.utils.modeling.autobackend import AutoBackend
 from ultralytics.yolo.utils.ops import Profile
 from ultralytics.yolo.utils.torch_utils import check_imgsz, de_parallel, select_device, smart_inference_mode
 
diff --git a/ultralytics/yolo/utils/modeling/__init__.py b/ultralytics/yolo/utils/modeling/__init__.py
deleted file mode 100644
index 9cedfa2..0000000
--- a/ultralytics/yolo/utils/modeling/__init__.py
+++ /dev/null
@@ -1,109 +0,0 @@
-import contextlib
-
-import torchvision
-
-from ultralytics.yolo.utils.downloads import attempt_download
-from ultralytics.yolo.utils.modeling.modules import *
-
-
-def attempt_load_weights(weights, device=None, inplace=True, fuse=True):
-    # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
-
-    model = Ensemble()
-    for w in weights if isinstance(weights, list) else [weights]:
-        ckpt = torch.load(attempt_download(w), map_location='cpu')  # load
-        ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float()  # FP32 model
-
-        # Model compatibility updates
-        if not hasattr(ckpt, 'stride'):
-            ckpt.stride = torch.tensor([32.])
-        if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
-            ckpt.names = dict(enumerate(ckpt.names))  # convert to dict
-
-        model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval())  # model in eval mode
-
-    # Module compatibility updates
-    for m in model.modules():
-        t = type(m)
-        if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment):
-            m.inplace = inplace  # torch 1.7.0 compatibility
-        elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
-            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
-
-    # Return model
-    if len(model) == 1:
-        return model[-1]
-
-    # Return detection ensemble
-    print(f'Ensemble created with {weights}\n')
-    for k in 'names', 'nc', 'yaml':
-        setattr(model, k, getattr(model[0], k))
-    model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride  # max stride
-    assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
-    return model
-
-
-def parse_model(d, ch):  # model_dict, input_channels(3)
-    # Parse a YOLOv5 model.yaml dictionary
-    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<50}{'arguments':<30}")
-    nc, gd, gw, act = d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
-    if act:
-        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
-        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
-    no = nc + 4  # number of outputs = classes + box
-
-    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
-    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
-        m = eval(m) if isinstance(m, str) else m  # eval strings
-        for j, a in enumerate(args):
-            with contextlib.suppress(NameError):
-                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-
-        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
-        if m in {
-                Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, Focus, BottleneckCSP,
-                C1, C2, C2f, C3, C3TR, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
-            c1, c2 = ch[f], args[0]
-            if c2 != no:  # if not output
-                c2 = make_divisible(c2 * gw, 8)
-
-            args = [c1, c2, *args[1:]]
-            if m in {BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, C3x}:
-                args.insert(2, n)  # number of repeats
-                n = 1
-        elif m is nn.BatchNorm2d:
-            args = [ch[f]]
-        elif m is Concat:
-            c2 = sum(ch[x] for x in f)
-        # TODO: channel, gw, gd
-        elif m in {Detect, Segment}:
-            args.append([ch[x] for x in f])
-            if m is Segment:
-                args[3] = make_divisible(args[3] * gw, 8)
-        else:
-            c2 = ch[f]
-
-        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
-        t = str(m)[8:-2].replace('__main__.', '')  # module type
-        m.np = sum(x.numel() for x in m_.parameters())  # number params
-        m_.i, m_.f, m_.type = i, f, t  # attach index, 'from' index, type
-        LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{m.np:10.0f}  {t:<50}{str(args):<30}')  # print
-        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
-        layers.append(m_)
-        if i == 0:
-            ch = []
-        ch.append(c2)
-    return nn.Sequential(*layers), sorted(save)
-
-
-def get_model(model='s.pt', pretrained=True):
-    # Load a YOLO model locally, from torchvision, or from Ultralytics assets
-    if model.endswith(".pt"):
-        model = model.split(".")[0]
-
-    if Path(f"{model}.pt").is_file():  # local file
-        return attempt_load_weights(f"{model}.pt", device='cpu')
-    elif model in torchvision.models.__dict__:  # TorchVision models i.e. resnet50, efficientnet_b0
-        return torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None)
-    else:  # Ultralytics assets
-        return attempt_load_weights(f"{model}.pt", device='cpu')
diff --git a/ultralytics/yolo/v8/__init__.py b/ultralytics/yolo/v8/__init__.py
index 97834a1..8efc544 100644
--- a/ultralytics/yolo/v8/__init__.py
+++ b/ultralytics/yolo/v8/__init__.py
@@ -6,5 +6,4 @@ ROOT = Path(__file__).parents[0]  # yolov8 ROOT
 
 __all__ = ["classify", "segment", "detect"]
 
-# Patch hydra cli
-from ultralytics.yolo.utils.configs import hydra_patch
+from ultralytics.yolo.utils.configs import hydra_patch  # noqa (patch hydra cli)
diff --git a/ultralytics/yolo/v8/classify/predict.py b/ultralytics/yolo/v8/classify/predict.py
index 9f07b5a..9b6e112 100644
--- a/ultralytics/yolo/v8/classify/predict.py
+++ b/ultralytics/yolo/v8/classify/predict.py
@@ -3,8 +3,7 @@ import torch
 
 from ultralytics.yolo.engine.predictor import BasePredictor
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
-from ultralytics.yolo.utils import ops
-from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
+from ultralytics.yolo.utils.plotting import Annotator
 
 
 class ClassificationPredictor(BasePredictor):
diff --git a/ultralytics/yolo/v8/classify/train.py b/ultralytics/yolo/v8/classify/train.py
index 2b98de6..d78007f 100644
--- a/ultralytics/yolo/v8/classify/train.py
+++ b/ultralytics/yolo/v8/classify/train.py
@@ -1,11 +1,10 @@
 import hydra
 import torch
 
+from ultralytics.nn.tasks import ClassificationModel, get_model
 from ultralytics.yolo import v8
 from ultralytics.yolo.data import build_classification_dataloader
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG, BaseTrainer
-from ultralytics.yolo.utils.modeling import get_model
-from ultralytics.yolo.utils.modeling.tasks import ClassificationModel
 
 
 class ClassificationTrainer(BaseTrainer):
diff --git a/ultralytics/yolo/v8/detect/train.py b/ultralytics/yolo/v8/detect/train.py
index d7fd5a7..5a86904 100644
--- a/ultralytics/yolo/v8/detect/train.py
+++ b/ultralytics/yolo/v8/detect/train.py
@@ -2,6 +2,7 @@ import hydra
 import torch
 import torch.nn as nn
 
+from ultralytics.nn.tasks import DetectionModel
 from ultralytics.yolo import v8
 from ultralytics.yolo.data import build_dataloader
 from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
@@ -9,7 +10,6 @@ from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG, BaseTrainer
 from ultralytics.yolo.utils import colorstr
 from ultralytics.yolo.utils.loss import BboxLoss
 from ultralytics.yolo.utils.metrics import smooth_BCE
-from ultralytics.yolo.utils.modeling.tasks import DetectionModel
 from ultralytics.yolo.utils.ops import xywh2xyxy
 from ultralytics.yolo.utils.plotting import plot_images, plot_results
 from ultralytics.yolo.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors
diff --git a/ultralytics/yolo/v8/segment/predict.py b/ultralytics/yolo/v8/segment/predict.py
index 4403635..bd0384f 100644
--- a/ultralytics/yolo/v8/segment/predict.py
+++ b/ultralytics/yolo/v8/segment/predict.py
@@ -1,11 +1,9 @@
-from pathlib import Path
-
 import hydra
 import torch
 
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
-from ultralytics.yolo.utils import ROOT, ops
-from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
+from ultralytics.yolo.utils import ops
+from ultralytics.yolo.utils.plotting import colors, save_one_box
 
 from ..detect.predict import DetectionPredictor
 
diff --git a/ultralytics/yolo/v8/segment/train.py b/ultralytics/yolo/v8/segment/train.py
index 40071d3..49e9f7f 100644
--- a/ultralytics/yolo/v8/segment/train.py
+++ b/ultralytics/yolo/v8/segment/train.py
@@ -3,10 +3,10 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 
+from ultralytics.nn.tasks import SegmentationModel
 from ultralytics.yolo import v8
-from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG, BaseTrainer
+from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.utils.metrics import FocalLoss, bbox_iou, smooth_BCE
-from ultralytics.yolo.utils.modeling.tasks import SegmentationModel
 from ultralytics.yolo.utils.ops import crop_mask, xywh2xyxy
 from ultralytics.yolo.utils.plotting import plot_images, plot_results
 from ultralytics.yolo.utils.torch_utils import de_parallel
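
A minimal usage sketch of the import surface this patch establishes, for quick verification. Assumptions not in the diff: the branch is installed locally, and 'yolov8n.pt' stands in for any local checkpoint file.

# New import paths after the refactor (all introduced by the diff above)
from ultralytics import YOLO  # new top-level export; replaces 'from ultralytics.yolo import YOLO', since yolo/__init__.py is now empty
from ultralytics.nn.autobackend import AutoBackend  # moved from ultralytics.yolo.utils.modeling.autobackend
from ultralytics.nn.tasks import DetectionModel, attempt_load_weights  # moved from ultralytics.yolo.utils.modeling[.tasks]

# attempt_load_weights keeps the signature shown in nn/tasks.py: a single path
# returns one model, a list of paths returns an Ensemble. 'yolov8n.pt' is illustrative.
model = attempt_load_weights('yolov8n.pt', device='cpu', inplace=True, fuse=True)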