ultralytics 8.0.54 TFLite export improvements and fixes (#1447)

Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Glenn Jocher
2023-03-16 15:42:44 +01:00
committed by GitHub
parent 30fc4b537f
commit 701fba4770
30 changed files with 198 additions and 166 deletions

View File

@@ -54,11 +54,10 @@ CFG_FRACTION_KEYS = ('dropout', 'iou', 'lr0', 'lrf', 'momentum', 'weight_decay',
'fliplr', 'mosaic', 'mixup', 'copy_paste', 'conf', 'iou') # fractional floats limited to 0.0 - 1.0
CFG_INT_KEYS = ('epochs', 'patience', 'batch', 'workers', 'seed', 'close_mosaic', 'mask_ratio', 'max_det', 'vid_stride',
'line_thickness', 'workspace', 'nbs', 'save_period')
CFG_BOOL_KEYS = ('save', 'exist_ok', 'pretrained', 'verbose', 'deterministic', 'single_cls', 'image_weights', 'rect',
'cos_lr', 'overlap_mask', 'val', 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show',
'save_txt', 'save_conf', 'save_crop', 'hide_labels', 'hide_conf', 'visualize', 'augment',
'agnostic_nms', 'retina_masks', 'boxes', 'keras', 'optimize', 'int8', 'dynamic', 'simplify', 'nms',
'v5loader')
CFG_BOOL_KEYS = ('save', 'exist_ok', 'verbose', 'deterministic', 'single_cls', 'image_weights', 'rect', 'cos_lr',
'overlap_mask', 'val', 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show', 'save_txt',
'save_conf', 'save_crop', 'hide_labels', 'hide_conf', 'visualize', 'augment', 'agnostic_nms',
'retina_masks', 'boxes', 'keras', 'optimize', 'int8', 'dynamic', 'simplify', 'nms', 'v5loader')
# Define valid tasks and modes
MODES = 'train', 'val', 'predict', 'export', 'track', 'benchmark'
@@ -290,6 +289,8 @@ def entrypoint(debug=''):
from ultralytics.yolo.engine.model import YOLO
overrides['model'] = model
model = YOLO(model, task=task)
if isinstance(overrides.get('pretrained'), str):
model.load(overrides['pretrained'])
# Task Update
if task != model.task:
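
Together, these two hunks drop 'pretrained' from the boolean-only config keys and let the CLI entrypoint accept a checkpoint path for it, loading those weights into a model built from YAML. A minimal sketch of the equivalent Python workflow (model and checkpoint names are illustrative):

from ultralytics import YOLO

# Build the architecture from YAML, then transfer matching pretrained weights,
# mirroring a string-valued `pretrained=...` argument on the CLI.
model = YOLO('yolov8n.yaml')
model.load('yolov8n.pt')  # YOLO.load() is added later in this commit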

View File

@@ -188,7 +188,7 @@ class Exporter:
m.dynamic = self.args.dynamic
m.export = True
m.format = self.args.format
elif isinstance(m, C2f) and not edgetpu:
elif isinstance(m, C2f) and not any((saved_model, pb, tflite, edgetpu, tfjs)):
# EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
m.forward = m.forward_split
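
The widened condition keeps the default chunk-based C2f.forward for every TensorFlow-family export (saved_model, pb, tflite, edgetpu, tfjs), since those runtimes lack FlexSplitV, while ONNX-style exports still benefit from the cleaner forward_split graph. A minimal export sketch (weights name is illustrative):

from ultralytics import YOLO

model = YOLO('yolov8n.pt')
model.export(format='tflite')  # the same code path applies to 'saved_model', 'pb', 'edgetpu' and 'tfjs'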

View File

@@ -8,8 +8,8 @@ from ultralytics.nn.tasks import (ClassificationModel, DetectionModel, Segmentat
guess_model_task, nn)
from ultralytics.yolo.cfg import get_cfg
from ultralytics.yolo.engine.exporter import Exporter
from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, ONLINE, RANK, ROOT,
callbacks, is_git_dir, is_pip_package, yaml_load)
from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, ROOT, callbacks,
is_git_dir, yaml_load)
from ultralytics.yolo.utils.checks import check_file, check_imgsz, check_pip_update_available, check_yaml
from ultralytics.yolo.utils.downloads import GITHUB_ASSET_STEMS
from ultralytics.yolo.utils.torch_utils import smart_inference_mode
@@ -153,16 +153,10 @@ class YOLO:
f"'yolo export model=yolov8n.pt', but exported formats like ONNX, TensorRT etc. only "
f"support 'predict' and 'val' modes, i.e. 'yolo predict model=yolov8n.onnx'.")
def _check_pip_update(self):
@smart_inference_mode()
def reset_weights(self):
"""
Inform user of ultralytics package update availability
"""
if ONLINE and is_pip_package():
check_pip_update_available()
def reset(self):
"""
Resets the model modules.
Resets the model modules parameters to randomly initialized values, losing all training information.
"""
self._check_is_pytorch_model()
for m in self.model.modules():
@@ -170,6 +164,18 @@ class YOLO:
m.reset_parameters()
for p in self.model.parameters():
p.requires_grad = True
return self
@smart_inference_mode()
def load(self, weights='yolov8n.pt'):
"""
Transfers parameters with matching names and shapes from 'weights' to model.
"""
self._check_is_pytorch_model()
if isinstance(weights, (str, Path)):
weights, self.ckpt = attempt_load_one_weight(weights)
self.model.load(weights)
return self
def info(self, verbose=False):
"""
@@ -299,7 +305,7 @@ class YOLO:
**kwargs (Any): Any number of arguments representing the training configuration.
"""
self._check_is_pytorch_model()
self._check_pip_update()
check_pip_update_available()
overrides = self.overrides.copy()
overrides.update(kwargs)
if kwargs.get('cfg'):
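
Net effect for the YOLO class: _check_pip_update() and reset() are replaced by a direct check_pip_update_available() call inside train(), a renamed reset_weights(), and a new load() that transfers parameters with matching names and shapes. A rough usage sketch (checkpoint name is illustrative):

from ultralytics import YOLO

model = YOLO('yolov8n.pt')
model.reset_weights()     # re-initialize all parameters, discarding training state
model.load('yolov8n.pt')  # transfer back parameters whose names and shapes match

Both methods return self, so the calls can be chained.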

View File

@@ -48,7 +48,7 @@ class Results:
self.probs = probs if probs is not None else None
self.names = names
self.path = path
self._keys = [k for k in ('boxes', 'masks', 'probs') if getattr(self, k) is not None]
self._keys = ('boxes', 'masks', 'probs')
def pandas(self):
pass
@@ -56,7 +56,7 @@ class Results:
def __getitem__(self, idx):
r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
for k in self._keys:
for k in self.keys:
setattr(r, k, getattr(self, k)[idx])
return r
@@ -70,30 +70,30 @@ class Results:
def cpu(self):
r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
for k in self._keys:
for k in self.keys:
setattr(r, k, getattr(self, k).cpu())
return r
def numpy(self):
r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
for k in self._keys:
for k in self.keys:
setattr(r, k, getattr(self, k).numpy())
return r
def cuda(self):
r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
for k in self._keys:
for k in self.keys:
setattr(r, k, getattr(self, k).cuda())
return r
def to(self, *args, **kwargs):
r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
for k in self._keys:
for k in self.keys:
setattr(r, k, getattr(self, k).to(*args, **kwargs))
return r
def __len__(self):
for k in self._keys:
for k in self.keys:
return len(getattr(self, k))
def __str__(self):
@@ -107,6 +107,10 @@ class Results:
name = self.__class__.__name__
raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
@property
def keys(self):
return [k for k in self._keys if getattr(self, k) is not None]
def plot(self, show_conf=True, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
"""
Plots the detection results on an input RGB image. Accepts a numpy array (cv2) or a PIL Image.
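
The Results refactor replaces the per-instance _keys filtering with a static _keys tuple plus a keys property that lists only the populated attributes, so indexing, cpu(), numpy(), cuda() and to() all iterate over the same dynamic set. A small sketch (model and image URL follow the repository's own examples):

from ultralytics import YOLO

results = YOLO('yolov8n.pt')('https://ultralytics.com/images/bus.jpg')
r = results[0].cpu().numpy()  # each helper copies only the populated keys
print(r.keys)                 # e.g. ['boxes'] for a plain detection model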

View File

@@ -46,14 +46,14 @@ HELP_MSG = \
from ultralytics import YOLO
# Load a model
model = YOLO("yolov8n.yaml") # build a new model from scratch
model = YOLO('yolov8n.yaml') # build a new model from scratch
model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training)
# Use the model
results = model.train(data="coco128.yaml", epochs=3) # train the model
results = model.val() # evaluate model performance on the validation set
results = model("https://ultralytics.com/images/bus.jpg") # predict on an image
success = model.export(format="onnx") # export the model to ONNX format
results = model('https://ultralytics.com/images/bus.jpg') # predict on an image
success = model.export(format='onnx') # export the model to ONNX format
3. Use the command line interface (CLI):

View File

@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
"""
AutoBatch utils
Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch.
"""
from copy import deepcopy
@@ -13,18 +13,35 @@ from ultralytics.yolo.utils.torch_utils import profile
def check_train_batch_size(model, imgsz=640, amp=True):
# Check YOLOv5 training batch size
"""
Check YOLO training batch size using the autobatch() function.
Args:
model (torch.nn.Module): YOLO model to check batch size for.
imgsz (int): Image size used for training.
amp (bool): If True, use automatic mixed precision (AMP) for training.
Returns:
int: Optimal batch size computed using the autobatch() function.
"""
with torch.cuda.amp.autocast(amp):
return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size
def autobatch(model, imgsz=640, fraction=0.7, batch_size=16):
# Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory
# Usage:
# import torch
# from utils.autobatch import autobatch
# model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
# print(autobatch(model))
def autobatch(model, imgsz=640, fraction=0.67, batch_size=16):
"""
Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory.
Args:
model: YOLO model to compute batch size for.
imgsz (int, optional): The image size used as input for the YOLO model. Defaults to 640.
fraction (float, optional): The fraction of available CUDA memory to use. Defaults to 0.67.
batch_size (int, optional): The default batch size to use if an error is detected. Defaults to 16.
Returns:
int: The optimal batch size.
"""
# Check device
prefix = colorstr('AutoBatch: ')
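
Besides the new docstrings, note that autobatch's default CUDA-memory fraction drops from 0.7 to 0.67. A minimal sketch of the documented entry point (model construction is illustrative; a CUDA device is assumed):

import torch
from ultralytics.nn.tasks import DetectionModel
from ultralytics.yolo.utils.autobatch import check_train_batch_size

model = DetectionModel('yolov8n.yaml', verbose=False)
if torch.cuda.is_available():
    batch = check_train_batch_size(model.cuda(), imgsz=640, amp=True)  # wraps autobatch(), fraction=0.67
    print(f'Optimal batch size: {batch}')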

View File

@@ -1,5 +1,5 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING, colorstr
try:
from torch.utils.tensorboard import SummaryWriter
@@ -18,11 +18,14 @@ def _log_scalars(scalars, step=0):
def on_pretrain_routine_start(trainer):
global writer
try:
writer = SummaryWriter(str(trainer.save_dir))
except Exception as e:
LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}')
if SummaryWriter:
try:
global writer
writer = SummaryWriter(str(trainer.save_dir))
prefix = colorstr('TensorBoard: ')
LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
except Exception as e:
LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}')
def on_batch_end(trainer):
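
The callback now creates the writer only when SummaryWriter imported successfully and logs a colorized hint for launching TensorBoard. The same guarded-initialization pattern in isolation (save_dir is illustrative):

from ultralytics.yolo.utils import LOGGER, colorstr

try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    SummaryWriter = None

save_dir = 'runs/detect/train'
if SummaryWriter:
    try:
        writer = SummaryWriter(save_dir)
        prefix = colorstr('TensorBoard: ')
        LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {save_dir}', view at http://localhost:6006/")
    except Exception as e:
        LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}')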

View File

@@ -20,8 +20,8 @@ import requests
import torch
from matplotlib import font_manager
from ultralytics.yolo.utils import (AUTOINSTALL, LOGGER, ROOT, USER_CONFIG_DIR, TryExcept, colorstr, downloads, emojis,
is_colab, is_docker, is_jupyter, is_online)
from ultralytics.yolo.utils import (AUTOINSTALL, LOGGER, ONLINE, ROOT, USER_CONFIG_DIR, TryExcept, colorstr, downloads,
emojis, is_colab, is_docker, is_jupyter, is_online, is_pip_package)
def is_ascii(s) -> bool:
@@ -141,12 +141,14 @@ def check_pip_update_available():
Returns:
bool: True if an update is available, False otherwise.
"""
from ultralytics import __version__
latest = check_latest_pypi_version()
if pkg.parse_version(__version__) < pkg.parse_version(latest): # update is available
LOGGER.info(f'New https://pypi.org/project/ultralytics/{latest} available 😃 '
f"Update with 'pip install -U ultralytics'")
return True
if ONLINE and is_pip_package():
with contextlib.suppress(ConnectionError):
from ultralytics import __version__
latest = check_latest_pypi_version()
if pkg.parse_version(__version__) < pkg.parse_version(latest): # update is available
LOGGER.info(f'New https://pypi.org/project/ultralytics/{latest} available 😃 '
f"Update with 'pip install -U ultralytics'")
return True
return False
@@ -235,11 +237,11 @@ def check_suffix(file='yolov8n.pt', suffix='.pt', msg=''):
# Check file(s) for acceptable suffix
if file and suffix:
if isinstance(suffix, str):
suffix = [suffix]
suffix = (suffix, )
for f in file if isinstance(file, (list, tuple)) else [file]:
s = Path(f).suffix.lower() # file suffix
if len(s):
assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}'
assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}, not {s}'
def check_yolov5u_filename(file: str, verbose: bool = True):
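
With these changes, check_pip_update_available() returns False quietly when offline or when ultralytics is not installed as a pip package, and any ConnectionError during the PyPI lookup is suppressed; check_suffix() now names the offending suffix in its assertion and accepts a single string or a tuple of suffixes. A brief sketch (file names are illustrative):

from ultralytics.yolo.utils.checks import check_pip_update_available, check_suffix

check_pip_update_available()  # no network access, returns False, when offline or not pip-installed

check_suffix('yolov8n.pt', suffix='.pt')                         # a single suffix as a string
check_suffix(['best.pt', 'best.onnx'], suffix=('.pt', '.onnx'))  # or several files and suffixes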

View File

@@ -76,7 +76,7 @@ class DetectionPredictor(BasePredictor):
if self.args.save_crop:
save_one_box(d.xyxy,
imc,
file=self.save_dir / 'crops' / self.model.model.names[c] / f'{self.data_path.stem}.jpg',
file=self.save_dir / 'crops' / self.model.names[c] / f'{self.data_path.stem}.jpg',
BGR=True)
return log_string
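
The fix reads class names directly from self.model.names instead of self.model.model.names when saving crops, matching the attribute exposed by the inference backend. A sketch of a run that exercises this path (weights and source follow the repository's examples):

from ultralytics import YOLO

# Each detection crop is written under save_dir/crops/<class name>/
model = YOLO('yolov8n.pt')
model.predict('https://ultralytics.com/images/bus.jpg', save_crop=True)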

View File

@@ -58,10 +58,9 @@ class DetectionTrainer(BaseTrainer):
# TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc
def get_model(self, cfg=None, weights=None, verbose=True):
model = DetectionModel(cfg, ch=3, nc=self.data['nc'], verbose=verbose and RANK == -1)
model = DetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1)
if weights:
model.load(weights)
return model
def get_validator(self):
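
get_model() now omits the explicit ch=3 argument and relies on the model's default input-channel count. A sketch of the equivalent direct construction (the class count is illustrative):

from ultralytics.nn.tasks import DetectionModel

weights = None  # or a loaded checkpoint, as passed into get_model()
model = DetectionModel('yolov8n.yaml', nc=80, verbose=False)  # ch left at its default of 3
if weights:
    model.load(weights)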

View File

@@ -90,7 +90,7 @@ class SegmentationPredictor(DetectionPredictor):
if self.args.save_crop:
save_one_box(d.xyxy,
imc,
file=self.save_dir / 'crops' / self.model.model.names[c] / f'{self.data_path.stem}.jpg',
file=self.save_dir / 'crops' / self.model.names[c] / f'{self.data_path.stem}.jpg',
BGR=True)
return log_string