ultralytics 8.0.18
new python callbacks and minor fixes (#580)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Jeroen Rombouts <36196499+jarombouts@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
--- a/ultralytics/yolo/engine/exporter.py
+++ b/ultralytics/yolo/engine/exporter.py
@@ -69,31 +69,25 @@ from ultralytics.nn.modules import Detect, Segment
 from ultralytics.nn.tasks import ClassificationModel, DetectionModel, SegmentationModel
 from ultralytics.yolo.cfg import get_cfg
 from ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages
-from ultralytics.yolo.data.utils import check_dataset
+from ultralytics.yolo.data.utils import check_det_dataset
 from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, callbacks, colorstr, get_default_args, yaml_save
 from ultralytics.yolo.utils.checks import check_imgsz, check_requirements, check_version, check_yaml
 from ultralytics.yolo.utils.files import file_size
 from ultralytics.yolo.utils.ops import Profile
-from ultralytics.yolo.utils.torch_utils import guess_task_from_head, select_device, smart_inference_mode
+from ultralytics.yolo.utils.torch_utils import guess_task_from_model_yaml, select_device, smart_inference_mode

 MACOS = platform.system() == 'Darwin'  # macOS environment


 def export_formats():
     # YOLOv8 export formats
-    x = [
-        ['PyTorch', '-', '.pt', True, True],
-        ['TorchScript', 'torchscript', '.torchscript', True, True],
-        ['ONNX', 'onnx', '.onnx', True, True],
-        ['OpenVINO', 'openvino', '_openvino_model', True, False],
-        ['TensorRT', 'engine', '.engine', False, True],
-        ['CoreML', 'coreml', '.mlmodel', True, False],
-        ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
-        ['TensorFlow GraphDef', 'pb', '.pb', True, True],
-        ['TensorFlow Lite', 'tflite', '.tflite', True, False],
-        ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
-        ['TensorFlow.js', 'tfjs', '_web_model', False, False],
-        ['PaddlePaddle', 'paddle', '_paddle_model', True, True],]
+    x = [['PyTorch', '-', '.pt', True, True], ['TorchScript', 'torchscript', '.torchscript', True, True],
+         ['ONNX', 'onnx', '.onnx', True, True], ['OpenVINO', 'openvino', '_openvino_model', True, False],
+         ['TensorRT', 'engine', '.engine', False, True], ['CoreML', 'coreml', '.mlmodel', True, False],
+         ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
+         ['TensorFlow GraphDef', 'pb', '.pb', True, True], ['TensorFlow Lite', 'tflite', '.tflite', True, False],
+         ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
+         ['TensorFlow.js', 'tfjs', '_web_model', False, False], ['PaddlePaddle', 'paddle', '_paddle_model', True, True]]
     return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
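For orientation, a quick sketch of how this table is typically queried; the two-row DataFrame below is an illustrative stand-in for the full export_formats() output rather than library code:

import pandas as pd

# Illustrative stand-in for export_formats() above, trimmed to two rows
fmts = pd.DataFrame([['ONNX', 'onnx', '.onnx', True, True],
                     ['CoreML', 'coreml', '.mlmodel', True, False]],
                    columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
print(fmts.loc[fmts['Argument'] == 'onnx', 'Suffix'].item())  # '.onnx'
print(fmts[fmts['GPU']]['Format'].tolist())                   # ['ONNX']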
@@ -135,7 +129,7 @@ class Exporter:
             overrides (dict, optional): Configuration overrides. Defaults to None.
         """
         self.args = get_cfg(cfg, overrides)
-        self.callbacks = defaultdict(list, {k: [v] for k, v in callbacks.default_callbacks.items()})  # add callbacks
+        self.callbacks = defaultdict(list, {k: v for k, v in callbacks.default_callbacks.items()})  # add callbacks
         callbacks.add_integration_callbacks(self)

     @smart_inference_mode()
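Dropping the [v] wrapping here (and in BasePredictor, BaseTrainer and BaseValidator below) implies that callbacks.default_callbacks now maps each event name to a list of handlers. A minimal standalone sketch of that shape, using local names rather than the library module:

from collections import defaultdict

default_callbacks = {'on_predict_start': [lambda obj: None]}             # event -> list of handlers

def add_callback(event, func):                                           # mirrors YOLO.add_callback below
    default_callbacks[event].append(func)

add_callback('on_predict_start', lambda obj: print('custom hook'))
cbs = defaultdict(list, {k: v for k, v in default_callbacks.items()})    # copied as-is, no [v] wrapping
for cb in cbs['on_predict_start']:                                       # run_callbacks-style dispatch
    cb(object())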
@@ -241,7 +235,7 @@ class Exporter:
         # Finish
         f = [str(x) for x in f if x]  # filter out '' and None
         if any(f):
-            task = guess_task_from_head(model.yaml["head"][-1][-2])
+            task = guess_task_from_model_yaml(model)
             s = "-WARNING ⚠️ not yet supported for YOLOv8 exported models"
             LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
                         f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
@@ -570,7 +564,7 @@ class Exporter:
                 if n >= n_images:
                     break

-            dataset = LoadImages(check_dataset(check_yaml(data))['train'], imgsz=imgsz, auto=False)
+            dataset = LoadImages(check_det_dataset(check_yaml(data))['train'], imgsz=imgsz, auto=False)
             converter.representative_dataset = lambda: representative_dataset_gen(dataset, n_images=100)
             converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
             converter.target_spec.supported_types = []
--- a/ultralytics/yolo/engine/model.py
+++ b/ultralytics/yolo/engine/model.py
@@ -6,9 +6,9 @@ from ultralytics import yolo  # noqa
 from ultralytics.nn.tasks import ClassificationModel, DetectionModel, SegmentationModel, attempt_load_one_weight
 from ultralytics.yolo.cfg import get_cfg
 from ultralytics.yolo.engine.exporter import Exporter
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, yaml_load
+from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, callbacks, yaml_load
 from ultralytics.yolo.utils.checks import check_yaml
-from ultralytics.yolo.utils.torch_utils import guess_task_from_head, smart_inference_mode
+from ultralytics.yolo.utils.torch_utils import guess_task_from_model_yaml, smart_inference_mode

 # Map head to model, trainer, validator, and predictor classes
 MODEL_MAP = {
@@ -68,7 +68,7 @@ class YOLO:
         """
         cfg = check_yaml(cfg)  # check YAML
         cfg_dict = yaml_load(cfg, append_filename=True)  # model dict
-        self.task = guess_task_from_head(cfg_dict["head"][-1][-2])
+        self.task = guess_task_from_model_yaml(cfg_dict)
         self.ModelClass, self.TrainerClass, self.ValidatorClass, self.PredictorClass = \
             self._guess_ops_from_task(self.task)
         self.model = self.ModelClass(cfg_dict, verbose=verbose)  # initialize
@@ -228,6 +228,12 @@ class YOLO:
         """
         return self.model.names

+    def add_callback(self, event: str, func):
+        """
+        Add callback
+        """
+        callbacks.default_callbacks[event].append(func)
+
     @staticmethod
     def _reset_ckpt_args(args):
         args.pop("project", None)
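A usage sketch of the new add_callback API; the yolov8n.pt weights and bus.jpg image are illustrative assumptions, not part of this diff:

from ultralytics import YOLO

def on_predict_batch_end(predictor):
    # Receives the BasePredictor; results/batch are populated by this PR (see predictor.py below)
    print(f'batch done: {len(predictor.results)} result object(s)')

model = YOLO('yolov8n.pt')                                   # assumed local weights
model.add_callback('on_predict_batch_end', on_predict_batch_end)
model.predict(source='bus.jpg')                              # callback fires during prediction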
--- a/ultralytics/yolo/engine/predictor.py
+++ b/ultralytics/yolo/engine/predictor.py
@@ -88,7 +88,7 @@ class BasePredictor:
         self.vid_path, self.vid_writer = None, None
         self.annotator = None
         self.data_path = None
-        self.callbacks = defaultdict(list, {k: [v] for k, v in callbacks.default_callbacks.items()})  # add callbacks
+        self.callbacks = defaultdict(list, {k: v for k, v in callbacks.default_callbacks.items()})  # add callbacks
         callbacks.add_integration_callbacks(self)

     def preprocess(self, img):
@@ -172,16 +172,17 @@ class BasePredictor:
         # setup source. Run every time predict is called
         self.setup_source(source)
         # check if save_dir/ label file exists
-        if self.args.save:
+        if self.args.save or self.args.save_txt:
             (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
         # warmup model
         if not self.done_warmup:
             self.model.warmup(imgsz=(1 if self.model.pt or self.model.triton else self.bs, 3, *self.imgsz))
             self.done_warmup = True

-        self.seen, self.windows, self.dt = 0, [], (ops.Profile(), ops.Profile(), ops.Profile())
+        self.seen, self.windows, self.dt, self.batch = 0, [], (ops.Profile(), ops.Profile(), ops.Profile()), None
         for batch in self.dataset:
             self.run_callbacks("on_predict_batch_start")
+            self.batch = batch
             path, im, im0s, vid_cap, s = batch
             visualize = increment_path(self.save_dir / Path(path).stem, mkdir=True) if self.args.visualize else False
             with self.dt[0]:
@@ -195,13 +196,13 @@ class BasePredictor:

             # postprocess
             with self.dt[2]:
-                results = self.postprocess(preds, im, im0s, self.classes)
+                self.results = self.postprocess(preds, im, im0s, self.classes)
             for i in range(len(im)):
                 p, im0 = (path[i], im0s[i]) if self.webcam or self.from_img else (path, im0s)
                 p = Path(p)

                 if verbose or self.args.save or self.args.save_txt or self.args.show:
-                    s += self.write_results(i, results, (p, im, im0))
+                    s += self.write_results(i, self.results, (p, im, im0))

                 if self.args.show:
                     self.show(p)
@@ -209,22 +210,21 @@ class BasePredictor:
                 if self.args.save:
                     self.save_preds(vid_cap, i, str(self.save_dir / p.name))

-            yield from results
+            self.run_callbacks("on_predict_batch_end")
+            yield from self.results

             # Print time (inference-only)
             if verbose:
                 LOGGER.info(f"{s}{'' if len(preds) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms")

-            self.run_callbacks("on_predict_batch_end")
-
         # Print results
         if verbose and self.seen:
             t = tuple(x.t / self.seen * 1E3 for x in self.dt)  # speeds per image
             LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms postprocess per image at shape '
                         f'{(1, 3, *self.imgsz)}' % t)
         if self.args.save_txt or self.args.save:
-            s = f"\n{len(list(self.save_dir.glob('labels/*.txt')))} labels saved to {self.save_dir / 'labels'}" \
-                if self.args.save_txt else ''
+            nl = len(list(self.save_dir.glob('labels/*.txt')))  # number of labels
+            s = f"\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}" if self.args.save_txt else ''
             LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}{s}")

         self.run_callbacks("on_predict_end")
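Net effect for streaming use, assuming YOLO.predict accepts a stream flag in this release: on_predict_batch_end now fires before each batch is yielded, so a hook like the one registered in the sketch above can read predictor.results and predictor.batch while the consumer loop is still iterating:

for result in model.predict(source='bus.jpg', stream=True):   # model and callback from the sketch above
    pass  # by the time each result arrives here, on_predict_batch_end has already run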
--- a/ultralytics/yolo/engine/trainer.py
+++ b/ultralytics/yolo/engine/trainer.py
@@ -20,19 +20,18 @@ from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.optim import lr_scheduler
 from tqdm import tqdm

-import ultralytics.yolo.utils as utils
 from ultralytics import __version__
 from ultralytics.nn.tasks import attempt_load_one_weight
 from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.data.utils import check_dataset, check_dataset_yaml
+from ultralytics.yolo.data.utils import check_cls_dataset, check_det_dataset
 from ultralytics.yolo.utils import (DEFAULT_CFG_PATH, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, callbacks, colorstr,
-                                    yaml_save)
+                                    emojis, yaml_save)
 from ultralytics.yolo.utils.autobatch import check_train_batch_size
 from ultralytics.yolo.utils.checks import check_file, check_imgsz, print_args
 from ultralytics.yolo.utils.dist import ddp_cleanup, generate_ddp_command
 from ultralytics.yolo.utils.files import get_latest_run, increment_path
 from ultralytics.yolo.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, init_seeds, one_cycle,
-                                                strip_optimizer)
+                                                select_device, strip_optimizer)


 class BaseTrainer:
@@ -81,7 +80,7 @@ class BaseTrainer:
             overrides (dict, optional): Configuration overrides. Defaults to None.
         """
         self.args = get_cfg(cfg, overrides)
-        self.device = utils.torch_utils.select_device(self.args.device, self.args.batch)
+        self.device = select_device(self.args.device, self.args.batch)
         self.check_resume()
         self.console = LOGGER
         self.validator = None
@@ -120,9 +119,11 @@ class BaseTrainer:
         self.model = self.args.model
         self.data = self.args.data
         if self.data.endswith(".yaml"):
-            self.data = check_dataset_yaml(self.data)
+            self.data = check_det_dataset(self.data)
+        elif self.args.task == 'classify':
+            self.data = check_cls_dataset(self.data)
         else:
-            self.data = check_dataset(self.data)
+            raise FileNotFoundError(emojis(f"Dataset '{self.args.data}' not found ❌"))
         self.trainset, self.testset = self.get_dataset(self.data)
         self.ema = None
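A standalone restatement of the new dataset routing, useful for checking the edge case that non-YAML data for a non-classify task now raises instead of silently falling through; the function name and sample arguments are illustrative only:

def resolve_dataset(data: str, task: str):
    # Mirrors the branch added to BaseTrainer above (and BaseValidator below)
    if data.endswith('.yaml'):
        return 'check_det_dataset', data       # detection/segmentation YAML datasets
    elif task == 'classify':
        return 'check_cls_dataset', data       # classification datasets (e.g. image folders)
    raise FileNotFoundError(f"Dataset '{data}' not found")

print(resolve_dataset('coco128.yaml', 'detect'))   # ('check_det_dataset', 'coco128.yaml')
print(resolve_dataset('mnist160', 'classify'))     # ('check_cls_dataset', 'mnist160')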
@@ -140,7 +141,7 @@ class BaseTrainer:
         self.plot_idx = [0, 1, 2]

         # Callbacks
-        self.callbacks = defaultdict(list, {k: [v] for k, v in callbacks.default_callbacks.items()})  # add callbacks
+        self.callbacks = defaultdict(list, {k: v for k, v in callbacks.default_callbacks.items()})  # add callbacks
         if RANK in {0, -1}:
             callbacks.add_integration_callbacks(self)
--- a/ultralytics/yolo/engine/validator.py
+++ b/ultralytics/yolo/engine/validator.py
@@ -9,8 +9,8 @@ from tqdm import tqdm

 from ultralytics.nn.autobackend import AutoBackend
 from ultralytics.yolo.cfg import get_cfg
-from ultralytics.yolo.data.utils import check_dataset, check_dataset_yaml
-from ultralytics.yolo.utils import DEFAULT_CFG_PATH, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, callbacks
+from ultralytics.yolo.data.utils import check_cls_dataset, check_det_dataset
+from ultralytics.yolo.utils import DEFAULT_CFG_PATH, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, callbacks, emojis
 from ultralytics.yolo.utils.checks import check_imgsz
 from ultralytics.yolo.utils.files import increment_path
 from ultralytics.yolo.utils.ops import Profile
@@ -70,7 +70,7 @@ class BaseValidator:
         if self.args.conf is None:
             self.args.conf = 0.001  # default conf=0.001

-        self.callbacks = defaultdict(list, {k: [v] for k, v in callbacks.default_callbacks.items()})  # add callbacks
+        self.callbacks = defaultdict(list, {k: v for k, v in callbacks.default_callbacks.items()})  # add callbacks

     @smart_inference_mode()
     def __call__(self, trainer=None, model=None):
@@ -109,9 +109,11 @@ class BaseValidator:
                         f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

             if isinstance(self.args.data, str) and self.args.data.endswith(".yaml"):
-                self.data = check_dataset_yaml(self.args.data)
+                self.data = check_det_dataset(self.args.data)
+            elif self.args.task == 'classify':
+                self.data = check_cls_dataset(self.args.data)
             else:
-                self.data = check_dataset(self.args.data)
+                raise FileNotFoundError(emojis(f"Dataset '{self.args.data}' not found ❌"))

             if self.device.type == 'cpu':
                 self.args.workers = 0  # faster CPU val as time dominated by inference, not dataloading