ultralytics 8.0.44 export and task fixes (#1088)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Mehran Ghandehari <mehran.maps@gmail.com>
Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>
Glenn Jocher
2023-02-24 03:11:25 +01:00
committed by GitHub
parent fe61018975
commit 3ea659411b
32 changed files with 439 additions and 480 deletions

ultralytics/yolo/v8/classify/train.py

@@ -7,7 +7,7 @@ from ultralytics.nn.tasks import ClassificationModel, attempt_load_one_weight
 from ultralytics.yolo import v8
 from ultralytics.yolo.data import build_classification_dataloader
 from ultralytics.yolo.engine.trainer import BaseTrainer
-from ultralytics.yolo.utils import DEFAULT_CFG, RANK
+from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
 from ultralytics.yolo.utils.torch_utils import is_parallel, strip_optimizer
@@ -64,6 +64,7 @@ class ClassificationTrainer(BaseTrainer):
             self.model = torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None)
         else:
             FileNotFoundError(f'ERROR: model={model} not found locally or online. Please check model name.')
+        ClassificationModel.reshape_outputs(self.model, self.data['nc'])

         return  # dont return ckpt. Classification doesn't support resume
@@ -93,7 +94,7 @@ class ClassificationTrainer(BaseTrainer):

     def get_validator(self):
         self.loss_names = ['loss']
-        return v8.classify.ClassificationValidator(self.test_loader, self.save_dir, logger=self.console)
+        return v8.classify.ClassificationValidator(self.test_loader, self.save_dir)

     def criterion(self, preds, batch):
         loss = torch.nn.functional.cross_entropy(preds, batch['cls'], reduction='sum') / self.args.nbs
@@ -132,11 +133,12 @@ class ClassificationTrainer(BaseTrainer):
                 strip_optimizer(f)  # strip optimizers
                 # TODO: validate best.pt after training completes
                 # if f is self.best:
-                #     self.console.info(f'\nValidating {f}...')
+                #     LOGGER.info(f'\nValidating {f}...')
                 #     self.validator.args.save_json = True
                 #     self.metrics = self.validator(model=f)
                 #     self.metrics.pop('fitness', None)
                 #     self.run_callbacks('on_fit_epoch_end')
+        LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")


 def train(cfg=DEFAULT_CFG, use_python=False):
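Note: the classify-trainer hunks above are part of a repo-wide move from per-instance loggers (self.console, self.logger) to the single module-level LOGGER, plus a final "Results saved" banner. A minimal sketch of the pattern with stand-ins for ultralytics.yolo.utils.LOGGER and colorstr (the real implementations live in that module):

```python
import logging

# Stand-ins for ultralytics.yolo.utils.LOGGER and colorstr, illustration only
LOGGER = logging.getLogger('yolo')  # one shared, module-level logger

def colorstr(*args):
    # simplified colorstr: wrap the last argument in ANSI bold escape codes
    *prefix, string = args
    return f'\033[1m{string}\033[0m' if 'bold' in prefix else str(string)

save_dir = 'runs/classify/train'  # hypothetical path
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
```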

ultralytics/yolo/v8/classify/val.py

@@ -2,14 +2,14 @@

 from ultralytics.yolo.data import build_classification_dataloader
 from ultralytics.yolo.engine.validator import BaseValidator
-from ultralytics.yolo.utils import DEFAULT_CFG
+from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER
 from ultralytics.yolo.utils.metrics import ClassifyMetrics


 class ClassificationValidator(BaseValidator):

-    def __init__(self, dataloader=None, save_dir=None, pbar=None, logger=None, args=None):
-        super().__init__(dataloader, save_dir, pbar, logger, args)
+    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None):
+        super().__init__(dataloader, save_dir, pbar, args)
         self.args.task = 'classify'
         self.metrics = ClassifyMetrics()
@@ -31,7 +31,7 @@ class ClassificationValidator(BaseValidator):
         self.targets.append(batch['cls'])

     def finalize_metrics(self, *args, **kwargs):
-        self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed))
+        self.metrics.speed = self.speed

     def get_stats(self):
         self.metrics.process(self.targets, self.pred)
@@ -45,7 +45,7 @@ class ClassificationValidator(BaseValidator):

     def print_results(self):
         pf = '%22s' + '%11.3g' * len(self.metrics.keys)  # print format
-        self.logger.info(pf % ('all', self.metrics.top1, self.metrics.top5))
+        LOGGER.info(pf % ('all', self.metrics.top1, self.metrics.top5))


 def val(cfg=DEFAULT_CFG, use_python=False):
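The logger keyword is dropped from every validator constructor in this commit, so downstream code that passed logger=... now raises a TypeError. A hedged before/after sketch (the None dataloader and the save_dir value are placeholders):

```python
from pathlib import Path

from ultralytics.yolo.v8.classify import ClassificationValidator

# before 8.0.44 (now fails: TypeError: unexpected keyword argument 'logger'):
# validator = ClassificationValidator(dataloader, save_dir, logger=my_logger)

# from 8.0.44 on, output goes through the module-level LOGGER instead:
validator = ClassificationValidator(dataloader=None, save_dir=Path('runs/classify/val'))
```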

ultralytics/yolo/v8/detect/train.py

@@ -66,10 +66,7 @@ class DetectionTrainer(BaseTrainer):

     def get_validator(self):
         self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss'
-        return v8.detect.DetectionValidator(self.test_loader,
-                                            save_dir=self.save_dir,
-                                            logger=self.console,
-                                            args=copy(self.args))
+        return v8.detect.DetectionValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))

     def criterion(self, preds, batch):
         if not hasattr(self, 'compute_loss'):

ultralytics/yolo/v8/detect/val.py

@@ -9,7 +9,7 @@ import torch
 from ultralytics.yolo.data import build_dataloader
 from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
 from ultralytics.yolo.engine.validator import BaseValidator
-from ultralytics.yolo.utils import DEFAULT_CFG, colorstr, ops
+from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, colorstr, ops
 from ultralytics.yolo.utils.checks import check_requirements
 from ultralytics.yolo.utils.metrics import ConfusionMatrix, DetMetrics, box_iou
 from ultralytics.yolo.utils.plotting import output_to_target, plot_images
@@ -18,8 +18,8 @@ from ultralytics.yolo.utils.torch_utils import de_parallel

 class DetectionValidator(BaseValidator):

-    def __init__(self, dataloader=None, save_dir=None, pbar=None, logger=None, args=None):
-        super().__init__(dataloader, save_dir, pbar, logger, args)
+    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None):
+        super().__init__(dataloader, save_dir, pbar, args)
         self.args.task = 'detect'
         self.is_coco = False
         self.class_map = None
@@ -112,7 +112,7 @@ class DetectionValidator(BaseValidator):
             #     save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')

     def finalize_metrics(self, *args, **kwargs):
-        self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed))
+        self.metrics.speed = self.speed

     def get_stats(self):
         stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*self.stats)]  # to numpy
@@ -123,15 +123,15 @@ class DetectionValidator(BaseValidator):

     def print_results(self):
         pf = '%22s' + '%11i' * 2 + '%11.3g' * len(self.metrics.keys)  # print format
-        self.logger.info(pf % ('all', self.seen, self.nt_per_class.sum(), *self.metrics.mean_results()))
+        LOGGER.info(pf % ('all', self.seen, self.nt_per_class.sum(), *self.metrics.mean_results()))
         if self.nt_per_class.sum() == 0:
-            self.logger.warning(
+            LOGGER.warning(
                 f'WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels')

         # Print results per class
         if self.args.verbose and not self.training and self.nc > 1 and len(self.stats):
             for i, c in enumerate(self.metrics.ap_class_index):
-                self.logger.info(pf % (self.names[c], self.seen, self.nt_per_class[c], *self.metrics.class_result(i)))
+                LOGGER.info(pf % (self.names[c], self.seen, self.nt_per_class[c], *self.metrics.class_result(i)))

         if self.args.plots:
             self.confusion_matrix.plot(save_dir=self.save_dir, names=list(self.names.values()))
@@ -212,7 +212,7 @@ class DetectionValidator(BaseValidator):
         if self.args.save_json and self.is_coco and len(self.jdict):
             anno_json = self.data['path'] / 'annotations/instances_val2017.json'  # annotations
             pred_json = self.save_dir / 'predictions.json'  # predictions
-            self.logger.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
+            LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
             try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                 check_requirements('pycocotools>=2.0.6')
                 from pycocotools.coco import COCO  # noqa
@@ -230,7 +230,7 @@ class DetectionValidator(BaseValidator):
                 eval.summarize()
                 stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = eval.stats[:2]  # update mAP50-95 and mAP50
             except Exception as e:
-                self.logger.warning(f'pycocotools unable to run: {e}')
+                LOGGER.warning(f'pycocotools unable to run: {e}')
         return stats
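The finalize_metrics change in this file (and its twins in the classify and segment validators) works because the validator's speed attribute is now itself a dict keyed by pipeline stage, so the old dict(zip(...)) rebuild became redundant. A sketch of the two shapes; the stage names are the ones the old zip pulled from metrics.speed.keys() and should be treated as illustrative:

```python
# old shape: the validator held a bare tuple, the metrics object held the keys
keys = ('preprocess', 'inference', 'loss', 'postprocess')
speed_tuple = (0.4, 4.1, 0.0, 1.2)  # ms per image for each stage
metrics_speed = dict(zip(keys, speed_tuple))

# new shape: the validator already stores the dict, so plain assignment suffices
speed_dict = dict(zip(keys, speed_tuple))
metrics_speed = speed_dict  # equivalent of self.metrics.speed = self.speed
```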

ultralytics/yolo/v8/segment/predict.py

@@ -68,11 +68,10 @@ class SegmentationPredictor(DetectionPredictor):
             log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "

         # Mask plotting
-        self.annotator.masks(
-            mask.masks,
-            colors=[colors(x, True) for x in det.cls],
-            im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(self.device).permute(2, 0, 1).flip(0).contiguous() /
-            255 if self.args.retina_masks else im[idx])
+        if self.args.save or self.args.show:
+            im_gpu = torch.as_tensor(im0, dtype=torch.float16, device=mask.masks.device).permute(
+                2, 0, 1).flip(0).contiguous() / 255 if self.args.retina_masks else im[idx]
+            self.annotator.masks(masks=mask.masks, colors=[colors(x, True) for x in det.cls], im_gpu=im_gpu)

         # Write results
         for j, d in enumerate(reversed(det)):
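The rewritten block draws masks only when the frame is actually saved or shown, and allocates im_gpu on the masks' own device rather than self.device. A standalone sketch of the tensor preparation, assuming im0 is an HxWx3 BGR uint8 frame as OpenCV provides:

```python
import numpy as np
import torch

im0 = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder BGR frame
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

im_gpu = (torch.as_tensor(im0, dtype=torch.float16, device=device)
          .permute(2, 0, 1)     # HWC -> CHW
          .flip(0)              # reverse channel dim: BGR -> RGB
          .contiguous() / 255)  # scale uint8 range to [0, 1] for plotting
print(im_gpu.shape)  # torch.Size([3, 480, 640])
```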

ultralytics/yolo/v8/segment/train.py

@@ -32,10 +32,7 @@ class SegmentationTrainer(v8.detect.DetectionTrainer):

     def get_validator(self):
         self.loss_names = 'box_loss', 'seg_loss', 'cls_loss', 'dfl_loss'
-        return v8.segment.SegmentationValidator(self.test_loader,
-                                                save_dir=self.save_dir,
-                                                logger=self.console,
-                                                args=copy(self.args))
+        return v8.segment.SegmentationValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))

     def criterion(self, preds, batch):
         if not hasattr(self, 'compute_loss'):
@@ -86,10 +83,6 @@ class SegLoss(Loss):
         gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
         mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)

-        masks = batch['masks'].to(self.device).float()
-        if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample
-            masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0]
-
         # pboxes
         pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)
@@ -103,10 +96,15 @@ class SegLoss(Loss):
         # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
         loss[2] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE

-        # bbox loss
         if fg_mask.sum():
+            # bbox loss
             loss[0], loss[3] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes / stride_tensor,
                                               target_scores, target_scores_sum, fg_mask)
+            # masks loss
+            masks = batch['masks'].to(self.device).float()
+            if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample
+                masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0]
+
             for i in range(batch_size):
                 if fg_mask[i].sum():
                     mask_idx = target_gt_idx[i][fg_mask[i]]
@@ -121,9 +119,9 @@ class SegLoss(Loss):
                                                      marea)  # seg loss
                 # WARNING: Uncomment lines below in case of Multi-GPU DDP unused gradient errors
                 # else:
-                #     loss[1] += proto.sum() * 0
+                #     loss[1] += proto.sum() * 0 + pred_masks.sum() * 0
         # else:
-        #     loss[1] += proto.sum() * 0
+        #     loss[1] += proto.sum() * 0 + pred_masks.sum() * 0

         loss[0] *= self.hyp.box  # box gain
         loss[1] *= self.hyp.box / batch_size  # seg gain
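The comment edits extend the DDP workaround from proto alone to proto plus pred_masks: under DistributedDataParallel, any module output that never reaches the loss raises an unused-gradient error, and a zero-valued term keeps those tensors wired into the autograd graph without changing the loss. A minimal sketch of the trick (tensor names illustrative):

```python
import torch

proto = torch.randn(4, 32, requires_grad=True)
pred_masks = torch.randn(4, 32, requires_grad=True)
fg_mask = torch.zeros(4, dtype=torch.bool)  # batch with no foreground boxes

if fg_mask.sum():
    loss = (proto * pred_masks).mean()  # stand-in for the real seg loss
else:
    # contributes exactly 0, but proto and pred_masks stay in the graph,
    # so DDP still sees gradients for the layers that produced them
    loss = proto.sum() * 0 + pred_masks.sum() * 0
loss.backward()
print(proto.grad.abs().sum())  # tensor(0.) -- gradients exist, all zero
```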

ultralytics/yolo/v8/segment/val.py

@@ -7,7 +7,7 @@ import numpy as np
 import torch
 import torch.nn.functional as F

-from ultralytics.yolo.utils import DEFAULT_CFG, NUM_THREADS, ops
+from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, NUM_THREADS, ops
 from ultralytics.yolo.utils.checks import check_requirements
 from ultralytics.yolo.utils.metrics import SegmentMetrics, box_iou, mask_iou
 from ultralytics.yolo.utils.plotting import output_to_target, plot_images
@@ -16,8 +16,8 @@ from ultralytics.yolo.v8.detect import DetectionValidator

 class SegmentationValidator(DetectionValidator):

-    def __init__(self, dataloader=None, save_dir=None, pbar=None, logger=None, args=None):
-        super().__init__(dataloader, save_dir, pbar, logger, args)
+    def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None):
+        super().__init__(dataloader, save_dir, pbar, args)
         self.args.task = 'segment'
         self.metrics = SegmentMetrics(save_dir=self.save_dir)
@@ -120,7 +120,7 @@ class SegmentationValidator(DetectionValidator):
             #     save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')

     def finalize_metrics(self, *args, **kwargs):
-        self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed))
+        self.metrics.speed = self.speed

     def _process_batch(self, detections, labels, pred_masks=None, gt_masks=None, overlap=False, masks=False):
         """
@@ -207,7 +207,7 @@ class SegmentationValidator(DetectionValidator):
         if self.args.save_json and self.is_coco and len(self.jdict):
             anno_json = self.data['path'] / 'annotations/instances_val2017.json'  # annotations
             pred_json = self.save_dir / 'predictions.json'  # predictions
-            self.logger.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
+            LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
             try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                 check_requirements('pycocotools>=2.0.6')
                 from pycocotools.coco import COCO  # noqa
@@ -228,7 +228,7 @@ class SegmentationValidator(DetectionValidator):
                     stats[self.metrics.keys[idx + 1]], stats[
                         self.metrics.keys[idx]] = eval.stats[:2]  # update mAP50-95 and mAP50
             except Exception as e:
-                self.logger.warning(f'pycocotools unable to run: {e}')
+                LOGGER.warning(f'pycocotools unable to run: {e}')
         return stats
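For reference, the pycocotools flow that these eval_json hunks now log through LOGGER is the standard COCO API; nothing here is ultralytics-specific, and 'segm' replaces 'bbox' for the mask branch:

```python
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

anno = COCO('annotations/instances_val2017.json')  # ground-truth annotations
pred = anno.loadRes('predictions.json')            # the validator's JSON output
coco_eval = COCOeval(anno, pred, 'bbox')           # use 'segm' for mask mAP
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()                              # prints the familiar AP table
map50_95, map50 = coco_eval.stats[:2]              # mAP50-95 and mAP50
```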