Fix yolo checks as a package bug in Colab (#972)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Sergio Sanchez <sergio.ssm.97@gmail.com>
Author: Glenn Jocher
Date: 2023-02-14 19:53:51 +04:00
Committed by: GitHub
Parent: bdc6cd4d8b
Commit: 1ad7e79033
12 changed files with 72 additions and 30 deletions

View File

@@ -216,6 +216,9 @@ def entrypoint(debug=''):
overrides = {} # basic overrides, i.e. imgsz=320
for a in merge_equals_args(args): # merge spaces around '=' sign
if a.startswith('--'):
LOGGER.warning(f"WARNING ⚠️ '{a}' does not require leading dashes '--', updating to '{a[2:]}'.")
a = a[2:]
if '=' in a:
try:
re.sub(r' *= *', '=', a) # remove spaces around equals sign
@@ -263,7 +266,7 @@ def entrypoint(debug=''):
mode = DEFAULT_CFG.mode or 'predict'
LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {modes}. Using default 'mode={mode}'.")
elif mode not in modes:
if mode != 'checks':
if mode not in ('checks', checks):
raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {modes}.\n{CLI_HELP_MSG}")
LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.")
checks.check_yolo()
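
Note: a self-contained sketch (not part of the diff) of the updated mode guard above. The change lets the guard accept the imported `checks` module object in addition to the string 'checks' before warning that 'yolo mode=checks' is deprecated and running `checks.check_yolo()`. The real `modes` tuple and `checks` module come from the surrounding file; stand-ins are used here.

import types

# Stand-in for the ultralytics.yolo.utils.checks module used by the real entrypoint
checks = types.SimpleNamespace(check_yolo=lambda: print('checks OK'))
modes = ('train', 'val', 'predict', 'export')  # assumed set of valid modes

def handle_mode(mode):
    if mode not in modes:
        if mode not in ('checks', checks):
            raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {modes}.")
        # deprecated 'yolo mode=checks' path: still runs the checks
        checks.check_yolo()

handle_mode('checks')  # prints 'checks OK'
handle_mode(checks)    # the module object is also accepted after this change
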

View File

@@ -206,7 +206,7 @@ class Exporter:
self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else tuple(tuple(x.shape) for x in y)
self.pretty_name = self.file.stem.replace('yolo', 'YOLO')
self.metadata = {
'description': f"Ultralytics {self.pretty_name} model trained on {self.model.args['data']}",
'description': f"Ultralytics {self.pretty_name} model trained on {self.args.data}",
'author': 'Ultralytics',
'license': 'GPL-3.0 https://ultralytics.com/license',
'version': __version__,
@@ -257,11 +257,16 @@ class Exporter:
f = [str(x) for x in f if x] # filter out '' and None
if any(f):
f = str(Path(f[-1]))
LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
f"\nPredict: yolo task={model.task} mode=predict model={f}"
f"\nValidate: yolo task={model.task} mode=val model={f}"
f"\nVisualize: https://netron.app")
square = self.imgsz[0] == self.imgsz[1]
s = f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not work. Use " \
f"export 'imgsz={max(self.imgsz)}' if val is required." if not square else ''
imgsz = self.imgsz[0] if square else str(self.imgsz)[1:-1].replace(' ', '')
LOGGER.info(
f'\nExport complete ({time.time() - t:.1f}s)'
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
f"\nPredict: yolo task={model.task} mode=predict model={f} imgsz={imgsz}"
f"\nValidate: yolo task={model.task} mode=val model={f} imgsz={imgsz} data={self.args.data} {s}"
f"\nVisualize: https://netron.app")
self.run_callbacks("on_export_end")
return f # return list of exported files/dirs
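
For reference, a standalone sketch (not part of the diff) of how the new completion message derives its imgsz value and the square-image warning from self.imgsz; the sample values below are illustrative.

def format_imgsz(imgsz):
    # Mirrors the logic above: square exports keep a single integer,
    # rectangular exports become a comma-separated string and trigger the val warning.
    square = imgsz[0] == imgsz[1]
    s = '' if square else (f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={imgsz}' "
                           f"will not work. Use export 'imgsz={max(imgsz)}' if val is required.")
    value = imgsz[0] if square else str(imgsz)[1:-1].replace(' ', '')
    return value, s

print(format_imgsz([640, 640]))  # (640, '')
print(format_imgsz([640, 480]))  # ('640,480', "WARNING ⚠️ non-PyTorch val requires ...")
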
@@ -497,7 +502,7 @@ class Exporter:
except ImportError:
check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
import tensorflow as tf # noqa
check_requirements(("onnx", "onnx2tf", "sng4onnx", "onnxsim", "onnx_graphsurgeon"),
check_requirements(("onnx", "onnx2tf", "sng4onnx", "onnxsim", "onnx_graphsurgeon", "tflite_support"),
cmds="--extra-index-url https://pypi.ngc.nvidia.com ")
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
@@ -680,24 +685,45 @@ class Exporter:
def _add_tflite_metadata(self, file):
# Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata
check_requirements('tflite_support')
from tflite_support import flatbuffers # noqa
from tflite_support import metadata as _metadata # noqa
from tflite_support import metadata_schema_py_generated as _metadata_fb # noqa
# Creates model info.
model_meta = _metadata_fb.ModelMetadataT()
model_meta.name = self.metadata['description']
model_meta.version = self.metadata['version']
model_meta.author = self.metadata['author']
model_meta.license = self.metadata['license']
# Creates input info.
input_meta = _metadata_fb.TensorMetadataT()
input_meta.name = "image"
input_meta.description = "Input image to be detected."
input_meta.content = _metadata_fb.ContentT()
input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()
input_meta.content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB
input_meta.content.contentPropertiesType = _metadata_fb.ContentProperties.ImageProperties
# Creates output info.
output_meta = _metadata_fb.TensorMetadataT()
output_meta.name = "output"
output_meta.description = "Coordinates of detected objects, class labels, and confidence score."
# Label file
tmp_file = Path('/tmp/meta.txt')
with open(tmp_file, 'w') as meta_f:
meta_f.write(str(self.metadata))
model_meta = _metadata_fb.ModelMetadataT()
label_file = _metadata_fb.AssociatedFileT()
label_file.name = tmp_file.name
model_meta.associatedFiles = [label_file]
label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS
output_meta.associatedFiles = [label_file]
# Creates subgraph info.
subgraph = _metadata_fb.SubGraphMetadataT()
subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * len(self.output_shape)
subgraph.inputTensorMetadata = [input_meta]
subgraph.outputTensorMetadata = [output_meta]
model_meta.subgraphMetadata = [subgraph]
b = flatbuffers.Builder(0)
@@ -710,6 +736,14 @@ class Exporter:
populator.populate()
tmp_file.unlink()
# TODO Rename this here and in `_add_tflite_metadata`
def _extracted_from__add_tflite_metadata_15(self, _metadata_fb, arg1, arg2):
# Creates input info.
result = _metadata_fb.TensorMetadataT()
result.name = arg1
result.description = arg2
return result
def _pipeline_coreml(self, model, prefix=colorstr('CoreML Pipeline:')):
# YOLOv8 CoreML pipeline
import coremltools as ct # noqa
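
As a side note, the metadata embedded by _add_tflite_metadata can be inspected afterwards with tflite_support's MetadataDisplayer. A minimal sketch, assuming tflite_support is installed and 'yolov8n.tflite' is an illustrative path to a model exported by the code above:

from tflite_support import metadata as _metadata

# Inspect the metadata packed by _add_tflite_metadata (model path is illustrative)
displayer = _metadata.MetadataDisplayer.with_model_file('yolov8n.tflite')
print(displayer.get_metadata_json())                # description, author, license, tensor info
print(displayer.get_packed_associated_file_list())  # associated files, e.g. ['meta.txt']
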

View File

@@ -81,7 +81,7 @@ class YOLO:
cfg_dict = yaml_load(self.cfg, append_filename=True) # model dict
self.task = guess_model_task(cfg_dict)
self.ModelClass, self.TrainerClass, self.ValidatorClass, self.PredictorClass = self._assign_ops_from_task()
self.model = self.ModelClass(cfg_dict, verbose=verbose) # initialize
self.model = self.ModelClass(cfg_dict, verbose=verbose and RANK == -1) # initialize
def _load(self, weights: str):
"""
@@ -240,7 +240,7 @@ class YOLO:
if RANK in {0, -1}:
self.model, _ = attempt_load_one_weight(str(self.trainer.best))
self.overrides = self.model.args
self.metrics_data = self.trainer.validator.metrics
self.metrics_data = getattr(self.trainer.validator, 'metrics', None) # TODO: no metrics returned by DDP
def to(self, device):
"""

View File

@@ -85,6 +85,7 @@ class BaseTrainer:
self.console = LOGGER
self.validator = None
self.model = None
self.metrics = None
init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic)
# Dirs
@@ -417,7 +418,7 @@ class BaseTrainer:
cfg = ckpt["model"].yaml
else:
cfg = model
self.model = self.get_model(cfg=cfg, weights=weights) # calls Model(cfg, weights)
self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1) # calls Model(cfg, weights)
return ckpt
def optimizer_step(self):

View File

@@ -7,7 +7,7 @@ from ultralytics.nn.tasks import ClassificationModel, attempt_load_one_weight
from ultralytics.yolo import v8
from ultralytics.yolo.data import build_classification_dataloader
from ultralytics.yolo.engine.trainer import BaseTrainer
from ultralytics.yolo.utils import DEFAULT_CFG
from ultralytics.yolo.utils import DEFAULT_CFG, RANK
from ultralytics.yolo.utils.torch_utils import is_parallel, strip_optimizer
@@ -23,7 +23,7 @@ class ClassificationTrainer(BaseTrainer):
self.model.names = self.data["names"]
def get_model(self, cfg=None, weights=None, verbose=True):
model = ClassificationModel(cfg, nc=self.data["nc"])
model = ClassificationModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)

View File

@@ -9,7 +9,7 @@ from ultralytics.yolo import v8
from ultralytics.yolo.data import build_dataloader
from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
from ultralytics.yolo.engine.trainer import BaseTrainer
from ultralytics.yolo.utils import DEFAULT_CFG, colorstr
from ultralytics.yolo.utils import DEFAULT_CFG, RANK, colorstr
from ultralytics.yolo.utils.loss import BboxLoss
from ultralytics.yolo.utils.ops import xywh2xyxy
from ultralytics.yolo.utils.plotting import plot_images, plot_results
@@ -57,7 +57,7 @@ class DetectionTrainer(BaseTrainer):
# TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc
def get_model(self, cfg=None, weights=None, verbose=True):
model = DetectionModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose)
model = DetectionModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)

View File

@@ -6,7 +6,7 @@ import torch.nn.functional as F
from ultralytics.nn.tasks import SegmentationModel
from ultralytics.yolo import v8
from ultralytics.yolo.utils import DEFAULT_CFG
from ultralytics.yolo.utils import DEFAULT_CFG, RANK
from ultralytics.yolo.utils.ops import crop_mask, xyxy2xywh
from ultralytics.yolo.utils.plotting import plot_images, plot_results
from ultralytics.yolo.utils.tal import make_anchors
@@ -24,7 +24,7 @@ class SegmentationTrainer(v8.detect.DetectionTrainer):
super().__init__(cfg, overrides)
def get_model(self, cfg=None, weights=None, verbose=True):
model = SegmentationModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose)
model = SegmentationModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose and RANK == -1)
if weights:
model.load(weights)