Check PyTorch model status for all YOLO methods (#945)

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Ayush Chaurasia <ayush.chaurarsia@gmail.com>
This commit is contained in:
Glenn Jocher
2023-02-13 15:08:08 +04:00
committed by GitHub
parent fd5be10c66
commit 20fe708f31
21 changed files with 180 additions and 106 deletions

View File

@ -48,7 +48,6 @@ TensorFlow.js:
$ ln -s ../../yolov5/yolov8n_web_model public/yolov8n_web_model
$ npm start
"""
import contextlib
import json
import os
import platform
@ -74,7 +73,7 @@ from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, __version__, callbacks,
from ultralytics.yolo.utils.checks import check_imgsz, check_requirements, check_version, check_yaml
from ultralytics.yolo.utils.files import file_size
from ultralytics.yolo.utils.ops import Profile
from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode
from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode, get_latest_opset
MACOS = platform.system() == 'Darwin' # macOS environment
@ -97,6 +96,10 @@ def export_formats():
return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
EXPORT_FORMATS_LIST = list(export_formats()['Argument'][1:])
EXPORT_FORMATS_TABLE = str(export_formats())
def try_export(inner_func):
# YOLOv8 export decorator, i.e. @try_export
inner_args = get_default_args(inner_func)
@ -244,7 +247,7 @@ class Exporter:
agnostic_nms=self.args.agnostic_nms)
if edgetpu:
f[8], _ = self._export_edgetpu()
self._add_tflite_metadata(f[8] or f[7], num_outputs=len(self.output_shape))
self._add_tflite_metadata(f[8] or f[7])
if tfjs:
f[9], _ = self._export_tfjs()
if paddle: # PaddlePaddle
@ -253,11 +256,11 @@ class Exporter:
# Finish
f = [str(x) for x in f if x] # filter out '' and None
if any(f):
s = "-WARNING ⚠️ not yet supported for YOLOv8 exported models"
f = str(Path(f[-1]))
LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
f"\nPredict: yolo task={model.task} mode=predict model={f[-1]} {s}"
f"\nValidate: yolo task={model.task} mode=val model={f[-1]} {s}"
f"\nPredict: yolo task={model.task} mode=predict model={f}"
f"\nValidate: yolo task={model.task} mode=val model={f}"
f"\nVisualize: https://netron.app")
self.run_callbacks("on_export_end")
@ -304,7 +307,7 @@ class Exporter:
self.im.cpu() if dynamic else self.im,
f,
verbose=False,
opset_version=self.args.opset,
opset_version=self.args.opset or get_latest_opset(),
do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
input_names=['images'],
output_names=output_names,
@ -507,6 +510,10 @@ class Exporter:
# Export to TF SavedModel
subprocess.run(f'onnx2tf -i {onnx} --output_signaturedefs -o {f}', shell=True)
# Add TFLite metadata
for tflite_file in Path(f).rglob('*.tflite'):
self._add_tflite_metadata(tflite_file)
# Load saved_model
keras_model = tf.saved_model.load(f, tags=None, options=None)
@ -661,44 +668,47 @@ class Exporter:
r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}}}',
r'{"outputs": {"Identity": {"name": "Identity"}, '
r'"Identity_1": {"name": "Identity_1"}, '
r'"Identity_2": {"name": "Identity_2"}, '
r'"Identity_3": {"name": "Identity_3"}}}', f_json.read_text())
r'"Identity_3": {"name": "Identity_3"}}}',
f_json.read_text(),
)
j.write(subst)
return f, None
def _add_tflite_metadata(self, file, num_outputs):
def _add_tflite_metadata(self, file):
# Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata
with contextlib.suppress(ImportError):
# check_requirements('tflite_support')
from tflite_support import flatbuffers # noqa
from tflite_support import metadata as _metadata # noqa
from tflite_support import metadata_schema_py_generated as _metadata_fb # noqa
check_requirements('tflite_support')
tmp_file = Path('/tmp/meta.txt')
with open(tmp_file, 'w') as meta_f:
meta_f.write(str(self.metadata))
from tflite_support import flatbuffers # noqa
from tflite_support import metadata as _metadata # noqa
from tflite_support import metadata_schema_py_generated as _metadata_fb # noqa
model_meta = _metadata_fb.ModelMetadataT()
label_file = _metadata_fb.AssociatedFileT()
label_file.name = tmp_file.name
model_meta.associatedFiles = [label_file]
tmp_file = Path('/tmp/meta.txt')
with open(tmp_file, 'w') as meta_f:
meta_f.write(str(self.metadata))
subgraph = _metadata_fb.SubGraphMetadataT()
subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs
model_meta.subgraphMetadata = [subgraph]
model_meta = _metadata_fb.ModelMetadataT()
label_file = _metadata_fb.AssociatedFileT()
label_file.name = tmp_file.name
model_meta.associatedFiles = [label_file]
b = flatbuffers.Builder(0)
b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
metadata_buf = b.Output()
subgraph = _metadata_fb.SubGraphMetadataT()
subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * len(self.output_shape)
model_meta.subgraphMetadata = [subgraph]
populator = _metadata.MetadataPopulator.with_model_file(file)
populator.load_metadata_buffer(metadata_buf)
populator.load_associated_files([str(tmp_file)])
populator.populate()
tmp_file.unlink()
b = flatbuffers.Builder(0)
b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
metadata_buf = b.Output()
populator = _metadata.MetadataPopulator.with_model_file(file)
populator.load_metadata_buffer(metadata_buf)
populator.load_associated_files([str(tmp_file)])
populator.populate()
tmp_file.unlink()
def _pipeline_coreml(self, model, prefix=colorstr('CoreML Pipeline:')):
# YOLOv8 CoreML pipeline

View File

@ -6,11 +6,11 @@ from typing import List
from ultralytics import yolo # noqa
from ultralytics.nn.tasks import (ClassificationModel, DetectionModel, SegmentationModel, attempt_load_one_weight,
guess_model_task)
guess_model_task, nn)
from ultralytics.yolo.cfg import get_cfg
from ultralytics.yolo.engine.exporter import Exporter
from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, callbacks, yaml_load
from ultralytics.yolo.utils.checks import check_imgsz, check_yaml
from ultralytics.yolo.utils.checks import check_file, check_imgsz, check_yaml
from ultralytics.yolo.utils.downloads import GITHUB_ASSET_STEMS
from ultralytics.yolo.utils.torch_utils import smart_inference_mode
@ -55,19 +55,16 @@ class YOLO:
self.cfg = None # if loaded from *.yaml
self.ckpt_path = None
self.overrides = {} # overrides for trainer object
self.metrics_data = None
# Load or create new YOLO model
suffix = Path(model).suffix
if not suffix and Path(model).stem in GITHUB_ASSET_STEMS:
model, suffix = Path(model).with_suffix('.pt'), '.pt' # add suffix, i.e. yolov8n -> yolov8n.pt
try:
if suffix == '.yaml':
self._new(model)
else:
self._load(model)
except Exception as e:
raise NotImplementedError(f"Unable to load model='{model}'. "
f"As an example try model='yolov8n.pt' or model='yolov8n.yaml'") from e
if suffix == '.yaml':
self._new(model)
else:
self._load(model)
def __call__(self, source=None, stream=False, **kwargs):
    # Calling a YOLO instance directly is shorthand for predict():
    # model(source) == model.predict(source). All kwargs are forwarded.
    return self.predict(source, stream, **kwargs)
@ -100,15 +97,27 @@ class YOLO:
self.overrides = self.model.args
self._reset_ckpt_args(self.overrides)
else:
check_file(weights)
self.model, self.ckpt = weights, None
self.task = guess_model_task(weights)
self.ckpt_path = weights
self.ModelClass, self.TrainerClass, self.ValidatorClass, self.PredictorClass = self._assign_ops_from_task()
def _check_is_pytorch_model(self):
    """
    Raise TypeError if the loaded model is not a PyTorch (nn.Module) model.

    Exported formats (ONNX, TensorRT, ...) are loaded as non-PyTorch backends,
    so methods that need the torch graph (train, fuse, reset, export) call
    this guard first.
    """
    if isinstance(self.model, nn.Module):
        return
    raise TypeError(f"model='{self.model}' must be a PyTorch model, but is a different type. PyTorch models "
                    f"can be used to train, val, predict and export, i.e. "
                    f"'yolo export model=yolov8n.pt', but exported formats like ONNX, TensorRT etc. only "
                    f"support 'predict' and 'val' modes, i.e. 'yolo predict model=yolov8n.onnx'.")
def reset(self):
    """
    Re-initialize the model's learnable parameters.

    Walks every submodule and invokes reset_parameters() on those that
    define it. Only valid for PyTorch models (raises TypeError otherwise).
    """
    self._check_is_pytorch_model()
    for module in self.model.modules():
        if hasattr(module, 'reset_parameters'):
            module.reset_parameters()
@ -122,9 +131,11 @@ class YOLO:
Args:
verbose (bool): Controls verbosity.
"""
self._check_is_pytorch_model()
self.model.info(verbose=verbose)
def fuse(self):
    """Fuse the underlying model's layers in place via model.fuse() (PyTorch models only)."""
    self._check_is_pytorch_model()
    self.model.fuse()
def predict(self, source=None, stream=False, **kwargs):
@ -176,6 +187,8 @@ class YOLO:
validator = self.ValidatorClass(args=args)
validator(model=self.model)
self.metrics_data = validator.metrics
return validator.metrics
@smart_inference_mode()
@ -186,7 +199,7 @@ class YOLO:
Args:
**kwargs : Any other args accepted by the predictors. To see all args check 'configuration' section in docs
"""
self._check_is_pytorch_model()
overrides = self.overrides.copy()
overrides.update(kwargs)
args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides)
@ -196,7 +209,7 @@ class YOLO:
if args.batch == DEFAULT_CFG.batch:
args.batch = 1 # default to 1 if not modified
exporter = Exporter(overrides=args)
exporter(model=self.model)
return exporter(model=self.model)
def train(self, **kwargs):
"""
@ -205,6 +218,7 @@ class YOLO:
Args:
**kwargs (Any): Any number of arguments representing the training configuration.
"""
self._check_is_pytorch_model()
overrides = self.overrides.copy()
overrides.update(kwargs)
if kwargs.get("cfg"):
@ -226,6 +240,7 @@ class YOLO:
if RANK in {0, -1}:
self.model, _ = attempt_load_one_weight(str(self.trainer.best))
self.overrides = self.model.args
self.metrics_data = self.trainer.validator.metrics
def to(self, device):
"""
@ -234,15 +249,14 @@ class YOLO:
Args:
device (str): device
"""
self._check_is_pytorch_model()
self.model.to(device)
def _assign_ops_from_task(self):
    """
    Resolve the (model, trainer, validator, predictor) classes for self.task.

    Looks up string templates in MODEL_MAP keyed by self.task, substitutes
    the literal "TYPE" placeholder with self.type, then eval()s each template
    into the concrete class object.

    NOTE(review): eval on these templates is only safe while MODEL_MAP is a
    trusted in-package constant — it must never be populated from external
    input.
    """
    model_class, train_lit, val_lit, pred_lit = MODEL_MAP[self.task]
    # warning: eval is unsafe. Use with caution
    trainer_class = eval(train_lit.replace("TYPE", f"{self.type}"))
    validator_class = eval(val_lit.replace("TYPE", f"{self.type}"))
    predictor_class = eval(pred_lit.replace("TYPE", f"{self.type}"))
    return model_class, trainer_class, validator_class, predictor_class
@property
@ -250,7 +264,7 @@ class YOLO:
"""
Returns class names of the loaded model.
"""
return self.model.names
return self.model.names if hasattr(self.model, 'names') else None
@property
def transforms(self):
@ -259,6 +273,16 @@ class YOLO:
"""
return self.model.transforms if hasattr(self.model, 'transforms') else None
@property
def metrics(self):
    """
    Metrics from the most recent val() or train() run.

    Returns self.metrics_data, which is None until a validation or training
    operation populates it; logs an informational hint when empty.
    """
    data = self.metrics_data
    if not data:
        LOGGER.info("No metrics data found! Run training or validation operation first.")
    return data
@staticmethod
def add_callback(event: str, func):
"""
@ -269,5 +293,5 @@ class YOLO:
@staticmethod
def _reset_ckpt_args(args):
for arg in 'augment', 'verbose', 'project', 'name', 'exist_ok', 'resume', 'batch', 'epochs', 'cache', \
'save_json', 'half', 'v5loader', 'device', 'cfg', 'save', 'rect', 'plots':
'save_json', 'half', 'v5loader', 'device', 'cfg', 'save', 'rect', 'plots', 'opset':
args.pop(arg, None)

View File

@ -35,6 +35,7 @@ import torch
from ultralytics.nn.autobackend import AutoBackend
from ultralytics.yolo.cfg import get_cfg
from ultralytics.yolo.data import load_inference_source
from ultralytics.yolo.data.augment import classify_transforms
from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, SETTINGS, callbacks, colorstr, ops
from ultralytics.yolo.utils.checks import check_imgsz, check_imshow
from ultralytics.yolo.utils.files import increment_path
@ -121,8 +122,12 @@ class BasePredictor:
def setup_source(self, source):
self.imgsz = check_imgsz(self.args.imgsz, stride=self.model.stride, min_dim=2) # check image size
if self.args.task == 'classify':
transforms = getattr(self.model.model, 'transforms', classify_transforms(self.imgsz[0]))
else: # predict, segment
transforms = None
self.dataset = load_inference_source(source=source,
transforms=getattr(self.model.model, 'transforms', None),
transforms=transforms,
imgsz=self.imgsz,
vid_stride=self.args.vid_stride,
stride=self.model.stride,

View File

@ -217,19 +217,18 @@ class BaseTrainer:
# Optimizer
self.accumulate = max(round(self.args.nbs / self.batch_size), 1) # accumulate loss before optimizing
self.args.weight_decay *= self.batch_size * self.accumulate / self.args.nbs # scale weight_decay
weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs # scale weight_decay
self.optimizer = self.build_optimizer(model=self.model,
name=self.args.optimizer,
lr=self.args.lr0,
momentum=self.args.momentum,
decay=self.args.weight_decay)
decay=weight_decay)
# Scheduler
if self.args.cos_lr:
self.lf = one_cycle(1, self.args.lrf, self.epochs) # cosine 1->hyp['lrf']
else:
self.lf = lambda x: (1 - x / self.epochs) * (1.0 - self.args.lrf) + self.args.lrf # linear
self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf)
self.scheduler.last_epoch = self.start_epoch - 1 # do not move
self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False
# dataloaders
@ -242,6 +241,7 @@ class BaseTrainer:
self.metrics = dict(zip(metric_keys, [0] * len(metric_keys))) # TODO: init metrics for plot_results()?
self.ema = ModelEMA(self.model)
self.resume_training(ckpt)
self.scheduler.last_epoch = self.start_epoch - 1 # do not move
self.run_callbacks("on_pretrain_routine_end")
def _do_train(self, rank=-1, world_size=1):
@ -555,6 +555,12 @@ class BaseTrainer:
self.epochs += ckpt['epoch'] # finetune additional epochs
self.best_fitness = best_fitness
self.start_epoch = start_epoch
if start_epoch > (self.epochs - self.args.close_mosaic):
self.console.info("Closing dataloader mosaic")
if hasattr(self.train_loader.dataset, 'mosaic'):
self.train_loader.dataset.mosaic = False
if hasattr(self.train_loader.dataset, 'close_mosaic'):
self.train_loader.dataset.close_mosaic(hyp=self.args)
@staticmethod
def build_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5):