ultralytics 8.0.55
unified YOLOv8 model YAMLs (#1475)
@@ -17,7 +17,7 @@ from .utils import HELP_URL, LOCAL_RANK, LOGGER, get_hash, img2label_paths, veri
 class YOLODataset(BaseDataset):
-    cache_version = '1.0.1'  # dataset labels *.cache version, >= 1.0.0 for YOLOv8
+    cache_version = '1.0.2'  # dataset labels *.cache version, >= 1.0.0 for YOLOv8
     rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
     """
     Dataset class for loading images object detection and/or segmentation labels in YOLO format.
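Note: `rand_interp_methods` is a pool of OpenCV interpolation flags that resize/augmentation code can sample from. A minimal sketch of that pattern (the `random_resize` helper below is illustrative, not part of the library):

```python
import random

import cv2
import numpy as np

RAND_INTERP_METHODS = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]


def random_resize(im: np.ndarray, size: int = 640) -> np.ndarray:
    """Resize with a randomly sampled interpolation method (illustrative helper)."""
    interp = random.choice(RAND_INTERP_METHODS)  # pick one of the pooled cv2 flags
    return cv2.resize(im, (size, size), interpolation=interp)


# Example: random_resize(np.zeros((480, 640, 3), dtype=np.uint8)).shape -> (640, 640, 3)
```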
@@ -245,8 +245,7 @@ class Exporter:
             if tflite:
                 f[7], _ = self._export_tflite(s_model, nms=False, agnostic_nms=self.args.agnostic_nms)
             if edgetpu:
-                f[8], _ = self._export_edgetpu(tflite_model=str(
-                    Path(f[5]) / (self.file.stem + '_full_integer_quant.tflite')))  # int8 in/out
+                f[8], _ = self._export_edgetpu(tflite_model=Path(f[5]) / f'{self.file.stem}_full_integer_quant.tflite')
             if tfjs:
                 f[9], _ = self._export_tfjs()
         if paddle:  # PaddlePaddle
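Note: the Edge TPU call now builds the quantized-TFLite path with pathlib and an f-string rather than `str()` plus string concatenation. A quick sketch of the equivalence (directory and stem values are made up):

```python
from pathlib import Path

saved_model_dir = 'yolov8n_saved_model'  # stand-in for f[5]
stem = 'yolov8n'                         # stand-in for self.file.stem

old_style = str(Path(saved_model_dir) / (stem + '_full_integer_quant.tflite'))
new_style = Path(saved_model_dir) / f'{stem}_full_integer_quant.tflite'

assert Path(old_style) == new_style  # same file is targeted, now passed around as a Path object
```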
@@ -532,9 +531,16 @@ class Exporter:
         subprocess.run(cmd, shell=True)
         yaml_save(f / 'metadata.yaml', self.metadata)  # add metadata.yaml
 
+        # Remove/rename TFLite models
+        if self.args.int8:
+            for file in f.rglob('*_dynamic_range_quant.tflite'):
+                file.rename(file.with_stem(file.stem.replace('_dynamic_range_quant', '_int8')))
+            for file in f.rglob('*_integer_quant_with_int16_act.tflite'):
+                file.unlink()  # delete extra fp16 activation TFLite files
+
         # Add TFLite metadata
         for file in f.rglob('*.tflite'):
-            self._add_tflite_metadata(file)
+            f.unlink() if 'quant_with_int16_act.tflite' in str(f) else self._add_tflite_metadata(file)
 
         # Load saved_model
         keras_model = tf.saved_model.load(f, tags=None, options=None)
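Note: the new cleanup pass relies on `Path.with_stem`, which requires Python 3.9+; on 3.8 the equivalent is `file.with_name(new_stem + file.suffix)`. A self-contained sketch of the rename/delete step under that assumption (the directory name is illustrative):

```python
from pathlib import Path

export_dir = Path('yolov8n_saved_model')  # illustrative export directory

# Rename the dynamic-range-quantized TFLite outputs to the shorter *_int8.tflite convention
for file in export_dir.rglob('*_dynamic_range_quant.tflite'):
    file.rename(file.with_stem(file.stem.replace('_dynamic_range_quant', '_int8')))  # Path.with_stem: Python 3.9+

# Remove the int16-activation variants, which are not used downstream
for file in export_dir.rglob('*_integer_quant_with_int16_act.tflite'):
    file.unlink()
```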
@@ -565,9 +571,9 @@ class Exporter:
         LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
         saved_model = Path(str(self.file).replace(self.file.suffix, '_saved_model'))
         if self.args.int8:
-            f = saved_model / f'{self.file.stem}_integer_quant.tflite'  # fp32 in/out
+            f = saved_model / f'{self.file.stem}_int8.tflite'  # fp32 in/out
         elif self.args.half:
-            f = saved_model / f'{self.file.stem}_float16.tflite'
+            f = saved_model / f'{self.file.stem}_float16.tflite'  # fp32 in/out
         else:
             f = saved_model / f'{self.file.stem}_float32.tflite'
         return str(f), None
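Note: the returned TFLite filename now encodes the requested precision (`_int8`, `_float16`, `_float32`), matching the rename step above. A compact sketch of the selection logic (the helper is illustrative):

```python
from pathlib import Path


def tflite_filename(saved_model: Path, stem: str, int8: bool = False, half: bool = False) -> Path:
    """Return the TFLite file that matches the requested precision (illustrative helper)."""
    if int8:
        return saved_model / f'{stem}_int8.tflite'     # int8 weights, fp32 input/output
    if half:
        return saved_model / f'{stem}_float16.tflite'  # fp16 weights, fp32 input/output
    return saved_model / f'{stem}_float32.tflite'


print(tflite_filename(Path('yolov8n_saved_model'), 'yolov8n', int8=True))
# -> yolov8n_saved_model/yolov8n_int8.tflite
```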
@@ -5,7 +5,7 @@ from pathlib import Path
 
 from ultralytics import yolo  # noqa
 from ultralytics.nn.tasks import (ClassificationModel, DetectionModel, SegmentationModel, attempt_load_one_weight,
-                                  guess_model_task, nn)
+                                  guess_model_task, nn, yaml_model_load)
 from ultralytics.yolo.cfg import get_cfg
 from ultralytics.yolo.engine.exporter import Exporter
 from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, ROOT, callbacks,
@@ -111,8 +111,8 @@ class YOLO:
             task (str) or (None): model task
             verbose (bool): display model info on load
         """
-        self.cfg = check_yaml(cfg)  # check YAML
-        cfg_dict = yaml_load(self.cfg, append_filename=True)  # model dict
+        cfg_dict = yaml_model_load(cfg)
+        self.cfg = cfg
         self.task = task or guess_model_task(cfg_dict)
         self.model = TASK_MAP[self.task][0](cfg_dict, verbose=verbose and RANK == -1)  # build model
         self.overrides['model'] = self.cfg
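Note: this is the headline change of the PR — `YOLO._new()` now goes through `yaml_model_load`, so a scale-specific name like `yolov8n.yaml` can resolve against a single unified `yolov8.yaml`. A hedged sketch of that name-unification idea (not the library's exact implementation):

```python
import re
from pathlib import Path


def split_scale(path: str):
    """Map a scale-specific model YAML name to (unified name, scale letter) — illustrative only."""
    p = Path(path)
    unified_stem = re.sub(r'[nslmx]$', '', p.stem)  # 'yolov8n' -> 'yolov8'; 'yolov8' stays 'yolov8'
    scale = p.stem[len(unified_stem):] or None      # trailing scale letter, if any
    return f'{unified_stem}{p.suffix}', scale


print(split_scale('yolov8n.yaml'))  # ('yolov8.yaml', 'n')
print(split_scale('yolov8.yaml'))   # ('yolov8.yaml', None)
```

The scale letter then selects the per-scale depth/width multipliers from the `scales` section of the unified YAML when the model is built.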
@@ -248,9 +248,9 @@ def check_yolov5u_filename(file: str, verbose: bool = True):
     # Replace legacy YOLOv5 filenames with updated YOLOv5u filenames
     if ('yolov3' in file or 'yolov5' in file) and 'u' not in file:
         original_file = file
-        file = re.sub(r'(.*yolov5([nsmlx]))\.', '\\1u.', file)  # i.e. yolov5n.pt -> yolov5nu.pt
-        file = re.sub(r'(.*yolov5([nsmlx])6)\.', '\\1u.', file)  # i.e. yolov5n6.pt -> yolov5n6u.pt
-        file = re.sub(r'(.*yolov3(|-tiny|-spp))\.', '\\1u.', file)  # i.e. yolov3-spp.pt -> yolov3-sppu.pt
+        file = re.sub(r'(.*yolov5([nsmlx]))\.pt', '\\1u.pt', file)  # i.e. yolov5n.pt -> yolov5nu.pt
+        file = re.sub(r'(.*yolov5([nsmlx])6)\.pt', '\\1u.pt', file)  # i.e. yolov5n6.pt -> yolov5n6u.pt
+        file = re.sub(r'(.*yolov3(|-tiny|-spp))\.pt', '\\1u.pt', file)  # i.e. yolov3-spp.pt -> yolov3-sppu.pt
         if file != original_file and verbose:
             LOGGER.info(f"PRO TIP 💡 Replace 'model={original_file}' with new 'model={file}'.\nYOLOv5 'u' models are "
                         f'trained with https://github.com/ultralytics/ultralytics and feature improved performance vs '
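Note: anchoring the substitutions on `\.pt` means only weight filenames are rewritten to their 'u' counterparts; the old `\.`-anchored patterns would also have renamed non-weight names such as `yolov5s.yaml`. A quick demo of the new behaviour:

```python
import re


def to_u_name(file: str) -> str:
    """Apply the updated substitutions (mirrors the new patterns, for illustration)."""
    file = re.sub(r'(.*yolov5([nsmlx]))\.pt', '\\1u.pt', file)      # yolov5n.pt    -> yolov5nu.pt
    file = re.sub(r'(.*yolov5([nsmlx])6)\.pt', '\\1u.pt', file)     # yolov5n6.pt   -> yolov5n6u.pt
    file = re.sub(r'(.*yolov3(|-tiny|-spp))\.pt', '\\1u.pt', file)  # yolov3-spp.pt -> yolov3-sppu.pt
    return file


print(to_u_name('yolov5s.pt'))    # yolov5su.pt
print(to_u_name('yolov5s.yaml'))  # yolov5s.yaml (unchanged; the old pattern would have renamed it)
```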
@@ -258,7 +258,7 @@ def check_yolov5u_filename(file: str, verbose: bool = True):
     return file
 
 
-def check_file(file, suffix='', download=True):
+def check_file(file, suffix='', download=True, hard=True):
     # Search/download file (if necessary) and return path
     check_suffix(file, suffix)  # optional
     file = str(file)  # convert to string
@@ -277,16 +277,16 @@ def check_file(file, suffix='', download=True):
         files = []
         for d in 'models', 'datasets', 'tracker/cfg', 'yolo/cfg':  # search directories
             files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))  # find file
-        if not files:
+        if not files and hard:
             raise FileNotFoundError(f"'{file}' does not exist")
-        elif len(files) > 1:
+        elif len(files) > 1 and hard:
             raise FileNotFoundError(f"Multiple files match '{file}', specify exact path: {files}")
-        return files[0]  # return file
+        return files[0] if len(files) else []  # return file
 
 
-def check_yaml(file, suffix=('.yaml', '.yml')):
+def check_yaml(file, suffix=('.yaml', '.yml'), hard=True):
     # Search/download YAML file (if necessary) and return path, checking suffix
-    return check_file(file, suffix)
+    return check_file(file, suffix, hard=hard)
 
 
 def check_imshow(warn=False):
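Note: with `hard=False`, `check_file`/`check_yaml` become soft probes — a missing file returns `[]` instead of raising, which lets callers try one candidate name and fall back to another. A simplified stand-in showing the contract (not the library function itself):

```python
import glob
from pathlib import Path

ROOT = Path('.')  # illustrative search root


def find_file(name: str, hard: bool = True):
    """Search for a file below ROOT; raise only when hard=True (simplified stand-in)."""
    files = glob.glob(str(ROOT / '**' / name), recursive=True)
    if not files and hard:
        raise FileNotFoundError(f"'{name}' does not exist")
    elif len(files) > 1 and hard:
        raise FileNotFoundError(f"Multiple files match '{name}', specify exact path: {files}")
    return files[0] if len(files) else []  # soft mode: empty list signals 'not found'


# Typical use: probe softly first, then do a hard lookup that raises if the fallback is also missing:
#   cfg = find_file('yolov8.yaml', hard=False) or find_file('yolov8n.yaml')
```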
@@ -63,13 +63,12 @@ class DetectionPredictor(BasePredictor):
 
         # write
         for d in reversed(det):
-            cls, conf, id = d.cls.squeeze(), d.conf.squeeze(), None if d.id is None else int(d.id.item())
+            c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item())
             if self.args.save_txt:  # Write to file
-                line = (cls, *d.xywhn.view(-1)) + (conf, ) * self.args.save_conf + (() if id is None else (id, ))
+                line = (c, *d.xywhn.view(-1)) + (conf, ) * self.args.save_conf + (() if id is None else (id, ))
                 with open(f'{self.txt_path}.txt', 'a') as f:
                     f.write(('%g ' * len(line)).rstrip() % line + '\n')
-            if self.args.save or self.args.save_crop or self.args.show:  # Add bbox to image
-                c = int(cls)  # integer class
+            if self.args.save or self.args.show:  # Add bbox to image
                 name = ('' if id is None else f'id:{id} ') + self.model.names[c]
                 label = None if self.args.hide_labels else (name if self.args.hide_conf else f'{name} {conf:.2f}')
                 self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
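Note: the txt row is still serialized with the `'%g ' * len(line)` pattern; a small worked example of how one detection becomes a label line (all values made up):

```python
# Worked example of the save_txt formatting above (values are illustrative)
c, conf, track_id = 0, 0.87, 3           # int class, float confidence, optional tracker id
xywhn = (0.512, 0.430, 0.210, 0.330)     # normalized box centre x/y and width/height
save_conf = True

line = (c, *xywhn) + (conf, ) * save_conf + (() if track_id is None else (track_id, ))
row = ('%g ' * len(line)).rstrip() % line  # '%g' drops trailing zeros, one token per value
print(row)  # 0 0.512 0.43 0.21 0.33 0.87 3
```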
@@ -76,17 +76,17 @@ class SegmentationPredictor(DetectionPredictor):
 
         # Write results
         for j, d in enumerate(reversed(det)):
-            cls, conf, id = d.cls.squeeze(), d.conf.squeeze(), None if d.id is None else int(d.id.item())
+            c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item())
             if self.args.save_txt:  # Write to file
                 seg = mask.segments[len(det) - j - 1].copy().reshape(-1)  # reversed mask.segments, (n,2) to (n*2)
-                line = (cls, *seg) + (conf, ) * self.args.save_conf + (() if id is None else (id, ))
+                line = (c, *seg) + (conf, ) * self.args.save_conf + (() if id is None else (id, ))
                 with open(f'{self.txt_path}.txt', 'a') as f:
                     f.write(('%g ' * len(line)).rstrip() % line + '\n')
-            if self.args.save or self.args.save_crop or self.args.show:  # Add bbox to image
-                c = int(cls)  # integer class
+            if self.args.save or self.args.show:  # Add bbox to image
                 name = ('' if id is None else f'id:{id} ') + self.model.names[c]
                 label = None if self.args.hide_labels else (name if self.args.hide_conf else f'{name} {conf:.2f}')
-                self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True)) if self.args.boxes else None
+                if self.args.boxes:
+                    self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
             if self.args.save_crop:
                 save_one_box(d.xyxy,
                              imc,
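Note: the `mask.segments[len(det) - j - 1]` index compensates for iterating `reversed(det)`, so each box stays paired with its own polygon. A tiny check of that alignment:

```python
# The reversed-enumeration indexing keeps detections and segments paired
dets = ['det0', 'det1', 'det2']
segs = ['seg0', 'seg1', 'seg2']
for j, d in enumerate(reversed(dets)):  # j=0 -> det2, j=1 -> det1, j=2 -> det0
    s = segs[len(dets) - j - 1]         # j=0 -> seg2, j=1 -> seg1, j=2 -> seg0
    assert d[-1] == s[-1]               # same trailing index: each box gets its own segment
```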
@@ -122,13 +122,15 @@ class SegLoss(Loss):
                     xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]]
                     marea = xyxy2xywh(xyxyn)[:, 2:].prod(1)
                     mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)
-                    loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy,
-                                                     marea)  # seg loss
-                # WARNING: Uncomment lines below in case of Multi-GPU DDP unused gradient errors
-                # else:
-                #     loss[1] += proto.sum() * 0 + pred_masks.sum() * 0
-        # else:
-        #     loss[1] += proto.sum() * 0 + pred_masks.sum() * 0
+                    loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, marea)  # seg
+
+                # WARNING: lines below prevents Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
+                else:
+                    loss[1] += proto.sum() * 0 + pred_masks.sum() * 0
+
+        # WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
+        else:
+            loss[1] += proto.sum() * 0 + pred_masks.sum() * 0
 
         loss[0] *= self.hyp.box  # box gain
         loss[1] *= self.hyp.box / batch_size  # seg gain
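Note on the now-mandatory `else` branches: under Multi-GPU DDP every parameter that produced an output must receive a gradient, otherwise PyTorch raises an unused-parameter error. Adding `tensor.sum() * 0` contributes nothing to the loss value but keeps `proto` and `pred_masks` in the autograd graph. A minimal standalone illustration (shapes are made up):

```python
import torch

proto = torch.randn(8, 32, 40, 40, requires_grad=True)    # stand-in for the mask prototypes
pred_masks = torch.randn(8, 100, 32, requires_grad=True)  # stand-in for predicted mask coefficients

loss = torch.zeros(1)
has_foreground = False  # imagine a batch with no instance masks at all
if has_foreground:
    loss = loss + pred_masks.mean()  # placeholder for the real mask-loss path
else:
    loss = loss + proto.sum() * 0 + pred_masks.sum() * 0  # zero-valued, but keeps both tensors in the graph

loss.sum().backward()
print(proto.grad is not None, pred_masks.grad is not None)  # True True -> DDP sees every parameter used
```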