From 6f0ba814274f3a3faaf45977927fcc960c022fe0 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 27 Dec 2022 04:56:24 +0100
Subject: [PATCH] Add best.pt val and COCO pycocotools val (#98)

Co-authored-by: ayush chaurasia
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .gitignore                            |  2 +-
 ultralytics/nn/tasks.py               | 25 +++++---
 ultralytics/yolo/data/utils.py        | 15 ++---
 ultralytics/yolo/engine/trainer.py    | 20 +++---
 ultralytics/yolo/engine/validator.py  | 41 +++++++++++-----
 ultralytics/yolo/utils/metrics.py     | 13 ++--
 ultralytics/yolo/utils/torch_utils.py |  3 +-
 ultralytics/yolo/v8/classify/train.py |  7 ++-
 ultralytics/yolo/v8/detect/train.py   |  8 +--
 ultralytics/yolo/v8/detect/val.py     | 91 +++++++++++++++++++--------
 ultralytics/yolo/v8/segment/train.py  |  6 +-
 ultralytics/yolo/v8/segment/val.py    | 43 +++++--------
 12 files changed, 159 insertions(+), 115 deletions(-)

diff --git a/.gitignore b/.gitignore
index 2898fbb..63f4cce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -130,7 +130,7 @@ dmypy.json
 
 # datasets and projects
 datasets/
-ultralytics-yolo/
 runs/
+wandb/
 
 .DS_Store
\ No newline at end of file
diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py
index 0d259e6..8749c28 100644
--- a/ultralytics/nn/tasks.py
+++ b/ultralytics/nn/tasks.py
@@ -78,7 +78,7 @@ class BaseModel(nn.Module):
 
 class DetectionModel(BaseModel):
     # YOLOv5 detection model
-    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None):  # model, input channels, number of classes
+    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, verbose=True):  # model, input channels, number of classes
         super().__init__()
         if isinstance(cfg, dict):
             self.yaml = cfg  # model dict
@@ -92,7 +92,7 @@ class DetectionModel(BaseModel):
         if nc and nc != self.yaml['nc']:
             LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
             self.yaml['nc'] = nc  # override yaml value
-        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
+        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch], verbose=verbose)  # model, savelist
         self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
         self.inplace = self.yaml.get('inplace', True)
 
@@ -108,8 +108,9 @@ class DetectionModel(BaseModel):
 
         # Init weights, biases
         initialize_weights(self)
-        self.info()
-        LOGGER.info('')
+        if verbose:
+            self.info()
+            LOGGER.info('')
 
     def forward(self, x, augment=False, profile=False, visualize=False):
         if augment:
@@ -152,11 +153,12 @@ class DetectionModel(BaseModel):
             y[-1] = y[-1][..., i:]  # small
         return y
 
-    def load(self, weights):
+    def load(self, weights, verbose=True):
         csd = weights['model'].float().state_dict()  # checkpoint state_dict as FP32
         csd = intersect_state_dicts(csd, self.state_dict())  # intersect
         self.load_state_dict(csd, strict=False)  # load
-        LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights')
+        if verbose:
+            LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights')
 
 
 class SegmentationModel(DetectionModel):
@@ -260,13 +262,15 @@ def attempt_load_weights(weights, device=None, inplace=True, fuse=True):
     return model
 
 
-def parse_model(d, ch):  # model_dict, input_channels(3)
+def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
     # Parse a YOLOv5 model.yaml dictionary
-    LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10}  {'module':<45}{'arguments':<30}")
+    if verbose:
+        LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10}  {'module':<45}{'arguments':<30}")
     nc, gd, gw, act = d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
     if act:
         Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
-        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
+        if verbose:
+            LOGGER.info(f"{colorstr('activation:')} {act}")  # print
     no = nc + 4  # number of outputs = classes + box
 
     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
@@ -304,7 +308,8 @@ def parse_model(d, ch):  # model_dict, input_channels(3)
         t = str(m)[8:-2].replace('__main__.', '')  # module type
         m.np = sum(x.numel() for x in m_.parameters())  # number params
         m_.i, m_.f, m_.type = i, f, t  # attach index, 'from' index, type
-        LOGGER.info(f'{i:>3}{str(f):>20}{n_:>3}{m.np:10.0f}  {t:<45}{str(args):<30}')  # print
+        if verbose:
+            LOGGER.info(f'{i:>3}{str(f):>20}{n_:>3}{m.np:10.0f}  {t:<45}{str(args):<30}')  # print
         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
         layers.append(m_)
         if i == 0:
diff --git a/ultralytics/yolo/data/utils.py b/ultralytics/yolo/data/utils.py
index 2d5c387..7cbb059 100644
--- a/ultralytics/yolo/data/utils.py
+++ b/ultralytics/yolo/data/utils.py
@@ -132,8 +132,9 @@ def polygon2mask(imgsz, polygons, color=1, downsample_ratio=1):
     """
     Args:
         imgsz (tuple): The image size.
-        polygons (np.ndarray): [N, M], N is the number of polygons,
-            M is the number of points(Be divided by 2).
+        polygons (np.ndarray): [N, M], N is the number of polygons, M is the number of points (must be divisible by 2).
+        color (int): color
+        downsample_ratio (int): downsample ratio
     """
     mask = np.zeros(imgsz, dtype=np.uint8)
     polygons = np.asarray(polygons)
@@ -152,9 +153,9 @@ def polygons2masks(imgsz, polygons, color, downsample_ratio=1):
     """
     Args:
         imgsz (tuple): The image size.
-        polygons (list[np.ndarray]): each polygon is [N, M],
-            N is the number of polygons,
-            M is the number of points(Be divided by 2).
+        polygons (list[np.ndarray]): each polygon is [N, M], N is number of polygons, M is number of points (M % 2 = 0)
+        color (int): color
+        downsample_ratio (int): downsample ratio
     """
     masks = []
     for si in range(len(polygons)):
@@ -191,7 +192,7 @@ def polygons2masks_overlap(imgsz, segments, downsample_ratio=1):
 
 def check_dataset_yaml(data, autodownload=True):
     # Download, check and/or unzip dataset if not found locally
     data = check_file(data)
-    DATASETS_DIR = Path.cwd() / "../datasets"
+    DATASETS_DIR = Path.cwd() / "../datasets"  # TODO: handle global dataset dir
 
     # Download (optional)
     extract_dir = ''
     if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):
@@ -231,7 +232,7 @@ def check_dataset_yaml(data, autodownload=True):
         if not all(x.exists() for x in val):
             LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])
             if not s or not autodownload:
-                raise Exception('Dataset not found ❌')
+                raise FileNotFoundError('Dataset not found ❌')
             t = time.time()
             if s.startswith('http') and s.endswith('.zip'):  # URL
                 f = Path(s).name  # filename
diff --git a/ultralytics/yolo/engine/trainer.py b/ultralytics/yolo/engine/trainer.py
index 5d5a01a..f3932d3 100644
--- a/ultralytics/yolo/engine/trainer.py
+++ b/ultralytics/yolo/engine/trainer.py
@@ -22,6 +22,7 @@ from tqdm import tqdm
 
 import ultralytics.yolo.utils as utils
 import ultralytics.yolo.utils.callbacks as callbacks
+from ultralytics import __version__
 from ultralytics.yolo.data.utils import check_dataset, check_dataset_yaml
 from ultralytics.yolo.utils import LOGGER, ROOT, TQDM_BAR_FORMAT, colorstr
 from ultralytics.yolo.utils.checks import check_file, print_args
@@ -52,7 +53,8 @@ class BaseTrainer:
         self.batch_size = self.args.batch_size
         self.epochs = self.args.epochs
         self.start_epoch = 0
-        print_args(dict(self.args))
+        if RANK == -1:
+            print_args(dict(self.args))
 
         # Save run settings
         save_yaml(self.save_dir / 'args.yaml', OmegaConf.to_container(self.args, resolve=True))
@@ -109,7 +111,6 @@ class BaseTrainer:
         world_size = torch.cuda.device_count()
         if world_size > 1 and "LOCAL_RANK" not in os.environ:
             command = generate_ddp_command(world_size, self)
-            print('DDP command: ', command)
             try:
                 subprocess.run(command)
             except Exception as e:
@@ -124,7 +125,7 @@ class BaseTrainer:
         # os.environ['MASTER_PORT'] = '9020'
         torch.cuda.set_device(rank)
         self.device = torch.device('cuda', rank)
-        self.console.info(f"RANK - WORLD_SIZE - DEVICE: {rank} - {world_size} - {self.device} ")
+        self.console.info(f"DDP settings: RANK {rank}, WORLD_SIZE {world_size}, DEVICE {self.device}")
         dist.init_process_group("nccl" if dist.is_nccl_available() else "gloo", rank=rank, world_size=world_size)
 
     def _setup_train(self, rank, world_size):
@@ -259,8 +260,7 @@ class BaseTrainer:
             if not self.args.noval or final_epoch:
                 self.metrics, self.fitness = self.validate()
             self.trigger_callbacks('on_val_end')
-            log_vals = {**self.label_loss_items(self.tloss), **self.metrics, **lr}
-            self.save_metrics(metrics=log_vals)
+            self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **lr})
 
             # save model
             if (not self.args.nosave) or (epoch + 1 == self.epochs):
@@ -282,7 +282,6 @@ class BaseTrainer:
             self.plot_metrics()
             self.log(f"Results saved to {colorstr('bold', self.save_dir)}")
             self.trigger_callbacks('on_train_end')
-        dist.destroy_process_group() if world_size > 1 else None
         torch.cuda.empty_cache()
         self.trigger_callbacks('teardown')
 
@@ -295,7 +294,8 @@ class BaseTrainer:
             'updates': self.ema.updates,
             'optimizer': self.optimizer.state_dict(),
             'train_args': self.args,
-            'date': datetime.now().isoformat()}
+            'date': datetime.now().isoformat(),
+            'version': __version__}
 
         # Save last, best and delete
         torch.save(ckpt, self.last)
@@ -365,7 +365,7 @@ class BaseTrainer:
         if rank in {-1, 0}:
             self.console.info(text)
 
-    def load_model(self, model_cfg, weights):
+    def load_model(self, model_cfg=None, weights=None, verbose=True):
         raise NotImplementedError("This task trainer doesn't support loading cfg files")
 
     def get_validator(self):
@@ -417,12 +417,14 @@ class BaseTrainer:
         pass
 
     def final_eval(self):
-        # TODO: need standalone evaluator to do this
        for f in self.last, self.best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
                if f is self.best:
                    self.console.info(f'\nValidating {f}...')
+                   self.metrics = self.validator(model=f)
+                   self.metrics.pop('fitness', None)
+                   self.trigger_callbacks('on_val_end')
 
     def check_resume(self):
         resume = self.args.resume
diff --git a/ultralytics/yolo/engine/validator.py b/ultralytics/yolo/engine/validator.py
index 4c1c09b..18b6ec6 100644
--- a/ultralytics/yolo/engine/validator.py
+++ b/ultralytics/yolo/engine/validator.py
@@ -1,3 +1,4 @@
+import json
 from pathlib import Path
 
 import torch
@@ -29,6 +30,7 @@ class BaseValidator:
         self.batch_i = None
         self.training = True
         self.speed = None
+        self.jdict = None
         self.save_dir = save_dir if save_dir is not None else \
             increment_path(Path(self.args.project) / self.args.name, exist_ok=self.args.exist_ok)
 
@@ -65,11 +67,12 @@ class BaseValidator:
                 self.logger.info(
                     f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
 
-            if self.args.data.endswith(".yaml"):
+            if isinstance(self.args.data, str) and self.args.data.endswith(".yaml"):
                 data = check_dataset_yaml(self.args.data)
             else:
                 data = check_dataset(self.args.data)
-            self.dataloader = self.get_dataloader(data.get("val") or data.set("test"), self.args.batch_size)
+            self.dataloader = self.get_dataloader(data.get("val") or data.get("test"),
+                                                  self.args.batch_size) if not self.dataloader else self.dataloader
 
         model.eval()
 
@@ -81,6 +84,7 @@ class BaseValidator:
         # bar = tqdm(self.dataloader, desc, n_batches, not self.training, bar_format=TQDM_BAR_FORMAT)
         bar = tqdm(self.dataloader, desc, n_batches, bar_format=TQDM_BAR_FORMAT)
         self.init_metrics(de_parallel(model))
+        self.jdict = []  # empty before each val
         for batch_i, batch in enumerate(bar):
             self.batch_i = batch_i
             # pre-process
@@ -105,25 +109,26 @@ class BaseValidator:
                 self.plot_val_samples(batch, batch_i)
                 self.plot_predictions(batch, preds, batch_i)
 
+            if self.args.save_json:
+                self.pred_to_json(preds, batch)
+
         stats = self.get_stats()
         self.check_stats(stats)
-        self.print_results()
-
-        # calculate speed only once when training
-        if not self.training or trainer.epoch == 0:
-            self.speed = tuple(x.t / len(self.dataloader.dataset) * 1E3 for x in dt)  # speeds per image
-
-        if not self.training:  # print only at inference
-            self.logger.info('Speed: %.1fms pre-process, %.1fms inference, %.1fms loss, %.1fms post-process per image' %
-                             self.speed)
-
+        self.print_results()
+        self.speed = tuple(x.t / len(self.dataloader.dataset) * 1E3 for x in dt)  # speeds per image
         if self.training:
             model.float()
-            # TODO: implement save json
+            return {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix="val")}
+        else:
+            self.logger.info('Speed: %.1fms pre-process, %.1fms inference, %.1fms loss, %.1fms post-process per image' %
+                             self.speed)
+            if self.args.save_json and self.jdict:
+                with open(str(self.save_dir / "predictions.json"), 'w') as f:
+                    self.logger.info(f"Saving {f.name}...")
+                    json.dump(self.jdict, f)  # flatten and save
 
-        return {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix="val")} \
-            if self.training else stats
+            self.eval_json()
+        return stats
 
     def get_dataloader(self, dataset_path, batch_size):
         raise NotImplementedError("get_dataloader function not implemented for this validator")
@@ -162,3 +167,9 @@ class BaseValidator:
 
     def plot_predictions(self, batch, preds, ni):
         pass
+
+    def pred_to_json(self, preds, batch):
+        pass
+
+    def eval_json(self):
+        pass
diff --git a/ultralytics/yolo/utils/metrics.py b/ultralytics/yolo/utils/metrics.py
index 92cedb3..0d450e1 100644
--- a/ultralytics/yolo/utils/metrics.py
+++ b/ultralytics/yolo/utils/metrics.py
@@ -337,7 +337,7 @@ def compute_ap(recall, precision):
         x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
         ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
     else:  # 'continuous'
-        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
+        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x-axis (recall) changes
         ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
 
     return ap, mpre, mrec
@@ -469,11 +469,11 @@ class Metric:
 
     def mean_results(self):
         """Mean of results, return mp, mr, map50, map"""
-        return (self.mp, self.mr, self.map50, self.map)
+        return self.mp, self.mr, self.map50, self.map
 
     def class_result(self, i):
         """class-aware result, return p[i], r[i], ap50[i], ap[i]"""
-        return (self.p[i], self.r[i], self.ap50[i], self.ap[i])
+        return self.p[i], self.r[i], self.ap50[i], self.ap[i]
 
     def get_maps(self, nc):
         maps = np.zeros(nc) + self.map
@@ -491,12 +491,7 @@ class Metric:
         Args:
             results: tuple(p, r, ap, f1, ap_class)
         """
-        p, r, f1, all_ap, ap_class_index = results
-        self.p = p
-        self.r = r
-        self.all_ap = all_ap
-        self.f1 = f1
-        self.ap_class_index = ap_class_index
+        self.p, self.r, self.f1, self.all_ap, self.ap_class_index = results
 
 
 class DetMetrics:
diff --git a/ultralytics/yolo/utils/torch_utils.py b/ultralytics/yolo/utils/torch_utils.py
index 573a6e9..c53cdb7 100644
--- a/ultralytics/yolo/utils/torch_utils.py
+++ b/ultralytics/yolo/utils/torch_utils.py
@@ -86,7 +86,8 @@ def select_device(device='', batch_size=0, newline=False):
         s += 'CPU\n'
         arg = 'cpu'
 
-    LOGGER.info(s if newline else s.rstrip())
+    if RANK == -1:
+        LOGGER.info(s if newline else s.rstrip())
     return torch.device(arg)
 
 
diff --git a/ultralytics/yolo/v8/classify/train.py b/ultralytics/yolo/v8/classify/train.py
index d78007f..95666c3 100644
--- a/ultralytics/yolo/v8/classify/train.py
+++ b/ultralytics/yolo/v8/classify/train.py
@@ -12,8 +12,8 @@ class ClassificationTrainer(BaseTrainer):
     def set_model_attributes(self):
         self.model.names = self.data["names"]
 
-    def load_model(self, model_cfg=None, weights=None):
-        # TODO: why treat clf models as unique. We should have clf yamls?
+    def load_model(self, model_cfg=None, weights=None, verbose=True):
+        # TODO: why treat clf models as unique. We should have clf yamls? YES WE SHOULD!
         if isinstance(weights, dict):  # yolo ckpt
             weights = weights["model"]
         if weights and not weights.__class__.__name__.startswith("yolo"):  # torchvision
@@ -57,6 +57,9 @@ class ClassificationTrainer(BaseTrainer):
     def resume_training(self, ckpt):
         pass
 
+    def final_eval(self):
+        pass
+
 
 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
 def train(cfg):
diff --git a/ultralytics/yolo/v8/detect/train.py b/ultralytics/yolo/v8/detect/train.py
index 5a86904..40d7b3f 100644
--- a/ultralytics/yolo/v8/detect/train.py
+++ b/ultralytics/yolo/v8/detect/train.py
@@ -13,7 +13,7 @@ from ultralytics.yolo.utils.metrics import smooth_BCE
 from ultralytics.yolo.utils.ops import xywh2xyxy
 from ultralytics.yolo.utils.plotting import plot_images, plot_results
 from ultralytics.yolo.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors
-from ultralytics.yolo.utils.torch_utils import de_parallel
+from ultralytics.yolo.utils.torch_utils import de_parallel, strip_optimizer
 
 
 # BaseTrainer python usage
@@ -54,10 +54,10 @@ class DetectionTrainer(BaseTrainer):
         # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc
         self.model.names = self.data["names"]
 
-    def load_model(self, model_cfg=None, weights=None):
-        model = DetectionModel(model_cfg or weights["model"].yaml, ch=3, nc=self.data["nc"])
+    def load_model(self, model_cfg=None, weights=None, verbose=True):
+        model = DetectionModel(model_cfg or weights["model"].yaml, ch=3, nc=self.data["nc"], verbose=verbose)
         if weights:
-            model.load(weights)
+            model.load(weights, verbose)
         return model
 
     def get_validator(self):
diff --git a/ultralytics/yolo/v8/detect/val.py b/ultralytics/yolo/v8/detect/val.py
index d2a32b4..9253979 100644
--- a/ultralytics/yolo/v8/detect/val.py
+++ b/ultralytics/yolo/v8/detect/val.py
@@ -1,14 +1,16 @@
 import os
+from pathlib import Path
 
 import hydra
 import numpy as np
 import torch
 
 from ultralytics.yolo.data import build_dataloader
+from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.engine.validator import BaseValidator
-from ultralytics.yolo.utils import ops
-from ultralytics.yolo.utils.checks import check_file
+from ultralytics.yolo.utils import colorstr, ops
+from ultralytics.yolo.utils.checks import check_file, check_requirements
 from ultralytics.yolo.utils.files import yaml_load
 from ultralytics.yolo.utils.metrics import ConfusionMatrix, DetMetrics, box_iou
 from ultralytics.yolo.utils.plotting import output_to_target, plot_images
@@ -43,13 +45,11 @@ class DetectionValidator(BaseValidator):
     def init_metrics(self, model):
         head = model.model[-1] if self.training else model.model.model[-1]
         if self.data:
-            self.is_coco = isinstance(self.data.get('val'),
-                                      str) and self.data['val'].endswith(f'coco{os.sep}val2017.txt')
+            self.is_coco = self.data.get('val', '').endswith(f'coco{os.sep}val2017.txt')  # is COCO dataset
             self.class_map = ops.coco80_to_coco91_class() if self.is_coco else list(range(1000))
+            self.args.save_json |= self.is_coco and not self.training  # run on final val if training COCO
         self.nc = head.nc
         self.names = model.names
-        if isinstance(self.names, (list, tuple)):  # old format
-            self.names = dict(enumerate(self.names))
         self.metrics.names = self.names
         self.confusion_matrix = ConfusionMatrix(nc=self.nc)
         self.seen = 0
@@ -107,11 +107,6 @@ class DetectionValidator(BaseValidator):
         '''
         if self.args.save_txt:
             save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
-        if self.args.save_json:
-            pred_masks = scale_image(im[si].shape[1:],
-                                     pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
-            save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary
-        # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
         '''
 
     def get_stats(self):
@@ -131,7 +126,7 @@ class DetectionValidator(BaseValidator):
                 f'WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels')
 
         # Print results per class
-        if (self.args.verbose or (self.nc < 50 and not self.training)) and self.nc > 1 and len(self.stats):
+        if (self.args.verbose or not self.training) and self.nc > 1 and len(self.stats):
             for i, c in enumerate(self.metrics.ap_class_index):
                 self.logger.info(pf % (self.names[c], self.seen, self.nt_per_class[c], *self.metrics.class_result(i)))
 
@@ -167,7 +162,19 @@ class DetectionValidator(BaseValidator):
         # TODO: manage splits differently
         # calculate stride - check if model is initialized
         gs = max(int(de_parallel(self.model).stride if self.model else 0), 32)
-        return build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, mode="val")[0]
+        return create_dataloader(path=dataset_path,
+                                 imgsz=self.args.imgsz,
+                                 batch_size=batch_size,
+                                 stride=gs,
+                                 hyp=dict(self.args),
+                                 cache=False,
+                                 pad=0.5,
+                                 rect=self.args.rect,
+                                 workers=self.args.workers,
+                                 prefix=colorstr(f'{val}: '),
+                                 shuffle=False,
+                                 seed=self.args.seed)[0] if self.args.v5loader else \
+            build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, mode="val")[0]
 
     # TODO: align with train loss metrics
     @property
@@ -175,28 +182,58 @@ class DetectionValidator(BaseValidator):
         return ["metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)"]
 
     def plot_val_samples(self, batch, ni):
-        images = batch["img"]
-        cls = batch["cls"].squeeze(-1)
-        bboxes = batch["bboxes"]
-        paths = batch["im_file"]
-        batch_idx = batch["batch_idx"]
-        plot_images(images,
-                    batch_idx,
-                    cls,
-                    bboxes,
-                    paths=paths,
+        plot_images(batch["img"],
+                    batch["batch_idx"],
+                    batch["cls"].squeeze(-1),
+                    batch["bboxes"],
+                    paths=batch["im_file"],
                     fname=self.save_dir / f"val_batch{ni}_labels.jpg",
                     names=self.names)
 
     def plot_predictions(self, batch, preds, ni):
-        images = batch["img"]
-        paths = batch["im_file"]
-        plot_images(images,
+        plot_images(batch["img"],
                     *output_to_target(preds, max_det=15),
-                    paths=paths,
+                    paths=batch["im_file"],
                     fname=self.save_dir / f'val_batch{ni}_pred.jpg',
                     names=self.names)  # pred
 
+    def pred_to_json(self, preds, batch):
+        for i, f in enumerate(batch["im_file"]):
+            stem = Path(f).stem
+            image_id = int(stem) if stem.isnumeric() else stem
+            box = ops.xyxy2xywh(preds[i][:, :4])  # xywh
+            box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
+            for p, b in zip(preds[i].tolist(), box.tolist()):
+                self.jdict.append({
+                    'image_id': image_id,
+                    'category_id': self.class_map[int(p[5])],
+                    'bbox': [round(x, 3) for x in b],
+                    'score': round(p[4], 5)})
+
+    def eval_json(self):
+        if self.args.save_json and self.is_coco and len(self.jdict):
+            anno_json = self.data['path'] / "annotations/instances_val2017.json"  # annotations
+            pred_json = self.save_dir / "predictions.json"  # predictions
+            self.logger.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
+            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+                check_requirements('pycocotools')
+                from pycocotools.coco import COCO  # noqa
+                from pycocotools.cocoeval import COCOeval  # noqa
+
+                for x in anno_json, pred_json:
+                    assert x.is_file(), f"{x} file not found"
+                anno = COCO(str(anno_json))  # init annotations api
+                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
+                eval = COCOeval(anno, pred, 'bbox')
+                if self.is_coco:
+                    eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
+                eval.evaluate()
+                eval.accumulate()
+                eval.summarize()
+                self.metrics.metric.map, self.metrics.metric.map50 = eval.stats[:2]  # update mAP50-95 and mAP50
+            except Exception as e:
+                self.logger.warning(f'pycocotools unable to run: {e}')
+
 
 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
 def val(cfg):
diff --git a/ultralytics/yolo/v8/segment/train.py b/ultralytics/yolo/v8/segment/train.py
index 49e9f7f..b98548c 100644
--- a/ultralytics/yolo/v8/segment/train.py
+++ b/ultralytics/yolo/v8/segment/train.py
@@ -17,10 +17,10 @@ from ..detect import DetectionTrainer
 
 # BaseTrainer python usage
 class SegmentationTrainer(DetectionTrainer):
-    def load_model(self, model_cfg=None, weights=None):
-        model = SegmentationModel(model_cfg or weights["model"].yaml, ch=3, nc=self.data["nc"])
+    def load_model(self, model_cfg=None, weights=None, verbose=True):
+        model = SegmentationModel(model_cfg or weights["model"].yaml, ch=3, nc=self.data["nc"], verbose=verbose)
         if weights:
-            model.load(weights)
+            model.load(weights, verbose)
         return model
 
     def get_validator(self):
diff --git a/ultralytics/yolo/v8/segment/val.py b/ultralytics/yolo/v8/segment/val.py
index df36a34..f91c67b 100644
--- a/ultralytics/yolo/v8/segment/val.py
+++ b/ultralytics/yolo/v8/segment/val.py
@@ -7,7 +7,6 @@ import torch.nn.functional as F
 
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.utils import ops
-from ultralytics.yolo.utils.checks import check_requirements
 from ultralytics.yolo.utils.metrics import ConfusionMatrix, SegmentMetrics, box_iou, mask_iou
 from ultralytics.yolo.utils.plotting import output_to_target, plot_images
 
@@ -19,7 +18,6 @@ class SegmentationValidator(DetectionValidator):
     def __init__(self, dataloader=None, save_dir=None, pbar=None, logger=None, args=None):
         super().__init__(dataloader, save_dir, pbar, logger, args)
         if self.args.save_json:
-            check_requirements(['pycocotools'])
             self.process = ops.process_mask_upsample  # more accurate
         else:
             self.process = ops.process_mask  # faster
@@ -42,14 +40,12 @@ class SegmentationValidator(DetectionValidator):
     def init_metrics(self, model):
         head = model.model[-1] if self.training else model.model.model[-1]
         if self.data:
-            self.is_coco = isinstance(self.data.get('val'),
-                                      str) and self.data['val'].endswith(f'coco{os.sep}val2017.txt')
+            self.is_coco = self.data.get('val', '').endswith(f'coco{os.sep}val2017.txt')  # is COCO dataset
             self.class_map = ops.coco80_to_coco91_class() if self.is_coco else list(range(1000))
+            self.args.save_json |= self.is_coco and not self.training  # run on final val if training COCO
         self.nc = head.nc
         self.nm = head.nm if hasattr(head, "nm") else 32
         self.names = model.names
-        if isinstance(self.names, (list, tuple)):  # old format
-            self.names = dict(enumerate(self.names))
         self.metrics.names = self.names
         self.confusion_matrix = ConfusionMatrix(nc=self.nc)
         self.plot_masks = []
@@ -70,7 +66,7 @@ class SegmentationValidator(DetectionValidator):
                                         agnostic=self.args.single_cls,
                                         max_det=self.args.max_det,
                                         nm=self.nm)
-        return (p, preds[1], preds[2])
+        return p, preds[1], preds[2]
 
     def update_metrics(self, preds, batch):
         # Metrics
@@ -117,8 +113,7 @@ class SegmentationValidator(DetectionValidator):
                                                  masks=True)
             if self.args.plots:
                 self.confusion_matrix.process_batch(predn, labelsn)
-            self.stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:,
-                                                                                             0]))  # (conf, pcls, tcls)
+            self.stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # conf, pcls, tcls
 
             pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
             if self.args.plots and self.batch_i < 3:
@@ -186,28 +181,22 @@ class SegmentationValidator(DetectionValidator):
                 "metrics/mAP50-95(M)",]
 
     def plot_val_samples(self, batch, ni):
-        images = batch["img"]
-        masks = batch["masks"]
-        cls = batch["cls"].squeeze(-1)
-        bboxes = batch["bboxes"]
-        paths = batch["im_file"]
-        batch_idx = batch["batch_idx"]
-        plot_images(images,
-                    batch_idx,
-                    cls,
-                    bboxes,
-                    masks,
-                    paths=paths,
+        plot_images(batch["img"],
+                    batch["batch_idx"],
+                    batch["cls"].squeeze(-1),
+                    batch["bboxes"],
+                    batch["masks"],
+                    paths=batch["im_file"],
                     fname=self.save_dir / f"val_batch{ni}_labels.jpg",
                     names=self.names)
 
     def plot_predictions(self, batch, preds, ni):
-        images = batch["img"]
-        paths = batch["im_file"]
-        if len(self.plot_masks):
-            plot_masks = torch.cat(self.plot_masks, dim=0)
-        plot_images(images, *output_to_target(preds[0], max_det=15), plot_masks, paths,
-                    self.save_dir / f'val_batch{ni}_pred.jpg', self.names)  # pred
+        plot_images(batch["img"],
+                    *output_to_target(preds[0], max_det=15),
+                    torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks,
+                    paths=batch["im_file"],
+                    fname=self.save_dir / f'val_batch{ni}_pred.jpg',
+                    names=self.names)  # pred
         self.plot_masks.clear()
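
Usage note (supplementary; the sketch below is not part of the patch): with these changes, a val run with save_json=True makes DetectionValidator.pred_to_json() collect one record per detection — image_id, category_id (mapped from the contiguous 0-79 class indices to COCO's 91-category ids via coco80_to_coco91_class()), a top-left-corner xywh box (the box[:, :2] -= box[:, 2:] / 2 step converts from the center-based xywh produced by xyxy2xywh), and score — BaseValidator dumps them to predictions.json, and eval_json() scores that file with pycocotools. A minimal standalone sketch of the same scoring step, with illustrative paths:

    # Score a predictions.json the way eval_json() does.
    # Assumes pycocotools is installed; both paths below are illustrative.
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    anno_json = '../datasets/coco/annotations/instances_val2017.json'  # COCO ground-truth annotations
    pred_json = 'runs/val/exp/predictions.json'  # written by BaseValidator when save_json=True

    anno = COCO(anno_json)  # init annotations api
    pred = anno.loadRes(pred_json)  # init predictions api (expects a str path, not a Path)
    coco_eval = COCOeval(anno, pred, 'bbox')  # 'bbox' scores boxes; 'segm' would score masks
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()  # prints the standard 12-metric COCO table
    map50_95, map50 = coco_eval.stats[:2]  # the two values the patch copies back into self.metrics

In the patch, eval.stats[:2] is assigned to self.metrics.metric.map and .map50, so the reported results reflect the pycocotools values rather than the internal mAP computation.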