Add best.pt val and COCO pycocotools val (#98)

Co-authored-by: ayush chaurasia <ayush.chaurarsia@gmail.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Branch: single_channel
Glenn Jocher, 2 years ago, committed by GitHub
parent a1808eeda4
commit 6f0ba81427

.gitignore (vendored)

@@ -130,7 +130,7 @@ dmypy.json

 # datasets and projects
 datasets/
-ultralytics-yolo/
 runs/
+wandb/
 .DS_Store

@@ -78,7 +78,7 @@ class BaseModel(nn.Module):

 class DetectionModel(BaseModel):
     # YOLOv5 detection model
-    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None):  # model, input channels, number of classes
+    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, verbose=True):  # model, input channels, number of classes
         super().__init__()
         if isinstance(cfg, dict):
             self.yaml = cfg  # model dict
@@ -92,7 +92,7 @@ class DetectionModel(BaseModel):
         if nc and nc != self.yaml['nc']:
             LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
             self.yaml['nc'] = nc  # override yaml value
-        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
+        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch], verbose=verbose)  # model, savelist
         self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
         self.inplace = self.yaml.get('inplace', True)
@@ -108,8 +108,9 @@ class DetectionModel(BaseModel):

         # Init weights, biases
         initialize_weights(self)
-        self.info()
-        LOGGER.info('')
+        if verbose:
+            self.info()
+            LOGGER.info('')

     def forward(self, x, augment=False, profile=False, visualize=False):
         if augment:
@@ -152,11 +153,12 @@ class DetectionModel(BaseModel):
             y[-1] = y[-1][..., i:]  # small
         return y

-    def load(self, weights):
+    def load(self, weights, verbose=True):
         csd = weights['model'].float().state_dict()  # checkpoint state_dict as FP32
         csd = intersect_state_dicts(csd, self.state_dict())  # intersect
         self.load_state_dict(csd, strict=False)  # load
-        LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights')
+        if verbose:
+            LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights')


 class SegmentationModel(DetectionModel):
@@ -260,13 +262,15 @@ def attempt_load_weights(weights, device=None, inplace=True, fuse=True):
     return model


-def parse_model(d, ch):  # model_dict, input_channels(3)
+def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
     # Parse a YOLOv5 model.yaml dictionary
-    LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10} {'module':<45}{'arguments':<30}")
+    if verbose:
+        LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10} {'module':<45}{'arguments':<30}")
     nc, gd, gw, act = d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
     if act:
         Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
-        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
+        if verbose:
+            LOGGER.info(f"{colorstr('activation:')} {act}")  # print
     no = nc + 4  # number of outputs = classes + box
     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
@@ -304,7 +308,8 @@ def parse_model(d, ch):  # model_dict, input_channels(3)
         t = str(m)[8:-2].replace('__main__.', '')  # module type
         m.np = sum(x.numel() for x in m_.parameters())  # number params
         m_.i, m_.f, m_.type = i, f, t  # attach index, 'from' index, type
-        LOGGER.info(f'{i:>3}{str(f):>20}{n_:>3}{m.np:10.0f} {t:<45}{str(args):<30}')  # print
+        if verbose:
+            LOGGER.info(f'{i:>3}{str(f):>20}{n_:>3}{m.np:10.0f} {t:<45}{str(args):<30}')  # print
         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
         layers.append(m_)
         if i == 0:
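
A minimal usage sketch of what the new verbose flag controls at construction time (illustrative, not part of this commit; the import path is an assumption):

# Hypothetical usage; import path assumed, not shown in this diff.
from ultralytics.nn.tasks import DetectionModel

model = DetectionModel(cfg='yolov5s.yaml', ch=3, nc=80, verbose=False)  # builds silently, no layer table
model = DetectionModel(cfg='yolov5s.yaml', ch=3, nc=80)  # verbose=True by default, logs the parse_model table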

@@ -132,8 +132,9 @@ def polygon2mask(imgsz, polygons, color=1, downsample_ratio=1):
     """
     Args:
         imgsz (tuple): The image size.
-        polygons (np.ndarray): [N, M], N is the number of polygons,
-            M is the number of points (must be divisible by 2).
+        polygons (np.ndarray): [N, M], N is the number of polygons, M is the number of points (must be divisible by 2).
+        color (int): color
+        downsample_ratio (int): downsample ratio
     """
     mask = np.zeros(imgsz, dtype=np.uint8)
     polygons = np.asarray(polygons)
@@ -152,9 +153,9 @@ def polygons2masks(imgsz, polygons, color, downsample_ratio=1):
     """
    Args:
         imgsz (tuple): The image size.
-        polygons (list[np.ndarray]): each polygon is [N, M],
-            N is the number of polygons,
-            M is the number of points (must be divisible by 2).
+        polygons (list[np.ndarray]): each polygon is [N, M], N is the number of polygons, M is the number of points (M % 2 == 0).
+        color (int): color
+        downsample_ratio (int): downsample ratio
     """
     masks = []
     for si in range(len(polygons)):
@@ -191,7 +192,7 @@ def polygons2masks_overlap(imgsz, segments, downsample_ratio=1):

 def check_dataset_yaml(data, autodownload=True):
     # Download, check and/or unzip dataset if not found locally
     data = check_file(data)
-    DATASETS_DIR = Path.cwd() / "../datasets"
+    DATASETS_DIR = Path.cwd() / "../datasets"  # TODO: handle global dataset dir

     # Download (optional)
     extract_dir = ''
     if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):
@@ -231,7 +232,7 @@ def check_dataset_yaml(data, autodownload=True):
         if not all(x.exists() for x in val):
             LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])
             if not s or not autodownload:
-                raise Exception('Dataset not found ❌')
+                raise FileNotFoundError('Dataset not found ❌')
             t = time.time()
             if s.startswith('http') and s.endswith('.zip'):  # URL
                 f = Path(s).name  # filename
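
A short sketch of what the narrower exception type enables for callers (illustrative; the import path is an assumption):

# Hypothetical caller; catches the specific failure instead of a bare Exception.
from ultralytics.yolo.data.utils import check_dataset_yaml  # assumed module path

try:
    data = check_dataset_yaml('coco128.yaml', autodownload=False)
except FileNotFoundError as e:
    print(f'Dataset missing and autodownload disabled: {e}')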

@@ -22,6 +22,7 @@ from tqdm import tqdm

 import ultralytics.yolo.utils as utils
 import ultralytics.yolo.utils.callbacks as callbacks
+from ultralytics import __version__
 from ultralytics.yolo.data.utils import check_dataset, check_dataset_yaml
 from ultralytics.yolo.utils import LOGGER, ROOT, TQDM_BAR_FORMAT, colorstr
 from ultralytics.yolo.utils.checks import check_file, print_args
@@ -52,7 +53,8 @@ class BaseTrainer:
         self.batch_size = self.args.batch_size
         self.epochs = self.args.epochs
         self.start_epoch = 0
-        print_args(dict(self.args))
+        if RANK == -1:
+            print_args(dict(self.args))

         # Save run settings
         save_yaml(self.save_dir / 'args.yaml', OmegaConf.to_container(self.args, resolve=True))
@@ -109,7 +111,6 @@ class BaseTrainer:
         world_size = torch.cuda.device_count()
         if world_size > 1 and "LOCAL_RANK" not in os.environ:
             command = generate_ddp_command(world_size, self)
-            print('DDP command: ', command)
             try:
                 subprocess.run(command)
             except Exception as e:
@@ -124,7 +125,7 @@ class BaseTrainer:
         # os.environ['MASTER_PORT'] = '9020'
         torch.cuda.set_device(rank)
         self.device = torch.device('cuda', rank)
-        self.console.info(f"RANK - WORLD_SIZE - DEVICE: {rank} - {world_size} - {self.device} ")
+        self.console.info(f"DDP settings: RANK {rank}, WORLD_SIZE {world_size}, DEVICE {self.device}")
         dist.init_process_group("nccl" if dist.is_nccl_available() else "gloo", rank=rank, world_size=world_size)

     def _setup_train(self, rank, world_size):
def _setup_train(self, rank, world_size): def _setup_train(self, rank, world_size):
@@ -259,8 +260,7 @@ class BaseTrainer:
                 if not self.args.noval or final_epoch:
                     self.metrics, self.fitness = self.validate()
                 self.trigger_callbacks('on_val_end')
-                log_vals = {**self.label_loss_items(self.tloss), **self.metrics, **lr}
-                self.save_metrics(metrics=log_vals)
+                self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **lr})

                 # save model
                 if (not self.args.nosave) or (epoch + 1 == self.epochs):
@@ -282,7 +282,6 @@ class BaseTrainer:
             self.plot_metrics()
             self.log(f"Results saved to {colorstr('bold', self.save_dir)}")
             self.trigger_callbacks('on_train_end')
-        dist.destroy_process_group() if world_size > 1 else None
         torch.cuda.empty_cache()
         self.trigger_callbacks('teardown')
@@ -295,7 +294,8 @@ class BaseTrainer:
             'updates': self.ema.updates,
             'optimizer': self.optimizer.state_dict(),
             'train_args': self.args,
-            'date': datetime.now().isoformat()}
+            'date': datetime.now().isoformat(),
+            'version': __version__}

         # Save last, best and delete
         torch.save(ckpt, self.last)
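
A sketch of reading the new metadata back from a saved checkpoint (illustrative; the path is an assumption):

# Checkpoints now carry the package version alongside the save date.
import torch

ckpt = torch.load('runs/train/exp/weights/last.pt', map_location='cpu')  # assumed path
print(ckpt.get('version', 'unknown'))  # ultralytics __version__ at save time
print(ckpt.get('date'))  # ISO-8601 timestamp from datetime.now().isoformat()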
@@ -365,7 +365,7 @@ class BaseTrainer:
         if rank in {-1, 0}:
             self.console.info(text)

-    def load_model(self, model_cfg, weights):
+    def load_model(self, model_cfg=None, weights=None, verbose=True):
         raise NotImplementedError("This task trainer doesn't support loading cfg files")

     def get_validator(self):
@@ -417,12 +417,14 @@ class BaseTrainer:
         pass

     def final_eval(self):
-        # TODO: need standalone evaluator to do this
         for f in self.last, self.best:
             if f.exists():
                 strip_optimizer(f)  # strip optimizers
                 if f is self.best:
                     self.console.info(f'\nValidating {f}...')
+                    self.metrics = self.validator(model=f)
+                    self.metrics.pop('fitness', None)
+                    self.trigger_callbacks('on_val_end')

     def check_resume(self):
         resume = self.args.resume

@@ -1,3 +1,4 @@
+import json
 from pathlib import Path

 import torch
@@ -29,6 +30,7 @@ class BaseValidator:
         self.batch_i = None
         self.training = True
         self.speed = None
+        self.jdict = None
         self.save_dir = save_dir if save_dir is not None else \
             increment_path(Path(self.args.project) / self.args.name, exist_ok=self.args.exist_ok)
@@ -65,11 +67,12 @@ class BaseValidator:
             self.logger.info(
                 f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

-            if self.args.data.endswith(".yaml"):
+            if isinstance(self.args.data, str) and self.args.data.endswith(".yaml"):
                 data = check_dataset_yaml(self.args.data)
             else:
                 data = check_dataset(self.args.data)
-            self.dataloader = self.get_dataloader(data.get("val") or data.set("test"), self.args.batch_size)
+            self.dataloader = self.get_dataloader(data.get("val") or data.set("test"),
+                                                  self.args.batch_size) if not self.dataloader else self.dataloader

         model.eval()
@@ -81,6 +84,7 @@ class BaseValidator:
         # bar = tqdm(self.dataloader, desc, n_batches, not self.training, bar_format=TQDM_BAR_FORMAT)
         bar = tqdm(self.dataloader, desc, n_batches, bar_format=TQDM_BAR_FORMAT)
         self.init_metrics(de_parallel(model))
+        self.jdict = []  # empty before each val
         for batch_i, batch in enumerate(bar):
             self.batch_i = batch_i
             # pre-process
@@ -105,25 +109,26 @@ class BaseValidator:
                 self.plot_val_samples(batch, batch_i)
                 self.plot_predictions(batch, preds, batch_i)

+            if self.args.save_json:
+                self.pred_to_json(preds, batch)
+
         stats = self.get_stats()
         self.check_stats(stats)
         self.print_results()
-        self.speed = tuple(x.t / len(self.dataloader.dataset) * 1E3 for x in dt)  # speeds per image
-        if not self.training:  # print only at inference
-            self.logger.info('Speed: %.1fms pre-process, %.1fms inference, %.1fms loss, %.1fms post-process per image' %
-                             self.speed)
+        # calculate speed only once when training
+        if not self.training or trainer.epoch == 0:
+            self.speed = tuple(x.t / len(self.dataloader.dataset) * 1E3 for x in dt)  # speeds per image
         if self.training:
             model.float()
-            # TODO: implement save json
-        return {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix="val")} \
-            if self.training else stats
+            return {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix="val")}
+        else:
+            self.logger.info('Speed: %.1fms pre-process, %.1fms inference, %.1fms loss, %.1fms post-process per image' %
+                             self.speed)
+            if self.args.save_json and self.jdict:
+                with open(str(self.save_dir / "predictions.json"), 'w') as f:
+                    self.logger.info(f"Saving {f.name}...")
+                    json.dump(self.jdict, f)  # flatten and save
+                self.eval_json()
+            return stats
     def get_dataloader(self, dataset_path, batch_size):
         raise NotImplementedError("get_dataloader function not implemented for this validator")
@@ -162,3 +167,9 @@ class BaseValidator:
     def plot_predictions(self, batch, preds, ni):
         pass
+
+    def pred_to_json(self, preds, batch):
+        pass
+
+    def eval_json(self):
+        pass
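
For reference, a sketch of one record that the detection validator's pred_to_json() (below) appends to self.jdict; the values here are illustrative:

# One COCO-style detection record; keys match pred_to_json(), values invented.
record = {
    'image_id': 139,  # int(Path(im_file).stem) when the stem is numeric
    'category_id': 1,  # COCO 91-class id via coco80_to_coco91_class()
    'bbox': [412.8, 157.61, 53.29, 138.81],  # [x_min, y_min, width, height] in pixels
    'score': 0.97215}  # detection confidence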

@@ -337,7 +337,7 @@ def compute_ap(recall, precision):
         x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
         ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
     else:  # 'continuous'
-        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
+        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x-axis (recall) changes
         ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

     return ap, mpre, mrec
@@ -469,11 +469,11 @@ class Metric:
     def mean_results(self):
         """Mean of results, return mp, mr, map50, map"""
-        return (self.mp, self.mr, self.map50, self.map)
+        return self.mp, self.mr, self.map50, self.map

     def class_result(self, i):
         """class-aware result, return p[i], r[i], ap50[i], ap[i]"""
-        return (self.p[i], self.r[i], self.ap50[i], self.ap[i])
+        return self.p[i], self.r[i], self.ap50[i], self.ap[i]

     def get_maps(self, nc):
         maps = np.zeros(nc) + self.map
@@ -491,12 +491,7 @@ class Metric:
         Args:
             results: tuple(p, r, ap, f1, ap_class)
         """
-        p, r, f1, all_ap, ap_class_index = results
-        self.p = p
-        self.r = r
-        self.all_ap = all_ap
-        self.f1 = f1
-        self.ap_class_index = ap_class_index
+        self.p, self.r, self.f1, self.all_ap, self.ap_class_index = results


 class DetMetrics:

@@ -86,7 +86,8 @@ def select_device(device='', batch_size=0, newline=False):
         s += 'CPU\n'
         arg = 'cpu'

-    LOGGER.info(s if newline else s.rstrip())
+    if RANK == -1:
+        LOGGER.info(s if newline else s.rstrip())
     return torch.device(arg)

@@ -12,8 +12,8 @@ class ClassificationTrainer(BaseTrainer):
     def set_model_attributes(self):
         self.model.names = self.data["names"]

-    def load_model(self, model_cfg=None, weights=None):
-        # TODO: why treat clf models as unique. We should have clf yamls?
+    def load_model(self, model_cfg=None, weights=None, verbose=True):
+        # TODO: why treat clf models as unique. We should have clf yamls? YES WE SHOULD!
         if isinstance(weights, dict):  # yolo ckpt
             weights = weights["model"]
         if weights and not weights.__class__.__name__.startswith("yolo"):  # torchvision
@@ -57,6 +57,9 @@ class ClassificationTrainer(BaseTrainer):
     def resume_training(self, ckpt):
         pass

+    def final_eval(self):
+        pass
+

 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
 def train(cfg):

@@ -13,7 +13,7 @@ from ultralytics.yolo.utils.metrics import smooth_BCE
 from ultralytics.yolo.utils.ops import xywh2xyxy
 from ultralytics.yolo.utils.plotting import plot_images, plot_results
 from ultralytics.yolo.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors
-from ultralytics.yolo.utils.torch_utils import de_parallel
+from ultralytics.yolo.utils.torch_utils import de_parallel, strip_optimizer

 # BaseTrainer python usage
@@ -54,10 +54,10 @@ class DetectionTrainer(BaseTrainer):
         # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc
         self.model.names = self.data["names"]

-    def load_model(self, model_cfg=None, weights=None):
-        model = DetectionModel(model_cfg or weights["model"].yaml, ch=3, nc=self.data["nc"])
+    def load_model(self, model_cfg=None, weights=None, verbose=True):
+        model = DetectionModel(model_cfg or weights["model"].yaml, ch=3, nc=self.data["nc"], verbose=verbose)
         if weights:
-            model.load(weights)
+            model.load(weights, verbose)
         return model

     def get_validator(self):

@@ -1,14 +1,16 @@
 import os
+from pathlib import Path

 import hydra
 import numpy as np
 import torch

 from ultralytics.yolo.data import build_dataloader
+from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader
 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.engine.validator import BaseValidator
-from ultralytics.yolo.utils import ops
-from ultralytics.yolo.utils.checks import check_file
+from ultralytics.yolo.utils import colorstr, ops
+from ultralytics.yolo.utils.checks import check_file, check_requirements
 from ultralytics.yolo.utils.files import yaml_load
 from ultralytics.yolo.utils.metrics import ConfusionMatrix, DetMetrics, box_iou
 from ultralytics.yolo.utils.plotting import output_to_target, plot_images
@@ -43,13 +45,11 @@ class DetectionValidator(BaseValidator):
     def init_metrics(self, model):
         head = model.model[-1] if self.training else model.model.model[-1]
         if self.data:
-            self.is_coco = isinstance(self.data.get('val'),
-                                      str) and self.data['val'].endswith(f'coco{os.sep}val2017.txt')
+            self.is_coco = self.data.get('val', '').endswith(f'coco{os.sep}val2017.txt')  # is COCO dataset
             self.class_map = ops.coco80_to_coco91_class() if self.is_coco else list(range(1000))
+            self.args.save_json |= self.is_coco and not self.training  # run on final val if training COCO
         self.nc = head.nc
         self.names = model.names
-        if isinstance(self.names, (list, tuple)):  # old format
-            self.names = dict(enumerate(self.names))
         self.metrics.names = self.names
         self.confusion_matrix = ConfusionMatrix(nc=self.nc)
         self.seen = 0
@@ -107,11 +107,6 @@ class DetectionValidator(BaseValidator):
         '''
         if self.args.save_txt:
             save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
-        if self.args.save_json:
-            pred_masks = scale_image(im[si].shape[1:],
-                                     pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
-            save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary
-        # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
         '''
     def get_stats(self):
@@ -131,7 +126,7 @@ class DetectionValidator(BaseValidator):
                 f'WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels')

         # Print results per class
-        if (self.args.verbose or (self.nc < 50 and not self.training)) and self.nc > 1 and len(self.stats):
+        if (self.args.verbose or not self.training) and self.nc > 1 and len(self.stats):
             for i, c in enumerate(self.metrics.ap_class_index):
                 self.logger.info(pf % (self.names[c], self.seen, self.nt_per_class[c], *self.metrics.class_result(i)))
@@ -167,7 +162,19 @@ class DetectionValidator(BaseValidator):
         # TODO: manage splits differently
         # calculate stride - check if model is initialized
         gs = max(int(de_parallel(self.model).stride if self.model else 0), 32)
-        return build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, mode="val")[0]
+        return create_dataloader(path=dataset_path,
+                                 imgsz=self.args.imgsz,
+                                 batch_size=batch_size,
+                                 stride=gs,
+                                 hyp=dict(self.args),
+                                 cache=False,
+                                 pad=0.5,
+                                 rect=self.args.rect,
+                                 workers=self.args.workers,
+                                 prefix=colorstr(f'{val}: '),
+                                 shuffle=False,
+                                 seed=self.args.seed)[0] if self.args.v5loader else \
+            build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, mode="val")[0]
     # TODO: align with train loss metrics
     @property
@@ -175,28 +182,58 @@ class DetectionValidator(BaseValidator):
         return ["metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)"]

     def plot_val_samples(self, batch, ni):
-        images = batch["img"]
-        cls = batch["cls"].squeeze(-1)
-        bboxes = batch["bboxes"]
-        paths = batch["im_file"]
-        batch_idx = batch["batch_idx"]
-        plot_images(images,
-                    batch_idx,
-                    cls,
-                    bboxes,
-                    paths=paths,
+        plot_images(batch["img"],
+                    batch["batch_idx"],
+                    batch["cls"].squeeze(-1),
+                    batch["bboxes"],
+                    paths=batch["im_file"],
                     fname=self.save_dir / f"val_batch{ni}_labels.jpg",
                     names=self.names)

     def plot_predictions(self, batch, preds, ni):
-        images = batch["img"]
-        paths = batch["im_file"]
-        plot_images(images,
+        plot_images(batch["img"],
                     *output_to_target(preds, max_det=15),
-                    paths=paths,
+                    paths=batch["im_file"],
                     fname=self.save_dir / f'val_batch{ni}_pred.jpg',
                     names=self.names)  # pred
+    def pred_to_json(self, preds, batch):
+        for i, f in enumerate(batch["im_file"]):
+            stem = Path(f).stem
+            image_id = int(stem) if stem.isnumeric() else stem
+            box = ops.xyxy2xywh(preds[i][:, :4])  # xywh
+            box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
+            for p, b in zip(preds[i].tolist(), box.tolist()):
+                self.jdict.append({
+                    'image_id': image_id,
+                    'category_id': self.class_map[int(p[5])],
+                    'bbox': [round(x, 3) for x in b],
+                    'score': round(p[4], 5)})
+
+    def eval_json(self):
+        if self.args.save_json and self.is_coco and len(self.jdict):
+            anno_json = self.data['path'] / "annotations/instances_val2017.json"  # annotations
+            pred_json = self.save_dir / "predictions.json"  # predictions
+            self.logger.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
+            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+                check_requirements('pycocotools')
+                from pycocotools.coco import COCO  # noqa
+                from pycocotools.cocoeval import COCOeval  # noqa
+                for x in anno_json, pred_json:
+                    assert x.is_file(), f"{x} file not found"
+                anno = COCO(str(anno_json))  # init annotations api
+                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
+                eval = COCOeval(anno, pred, 'bbox')
+                if self.is_coco:
+                    eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
+                eval.evaluate()
+                eval.accumulate()
+                eval.summarize()
+                self.metrics.metric.map, self.metrics.metric.map50 = eval.stats[:2]  # update mAP50-95 and mAP50
+            except Exception as e:
+                self.logger.warning(f'pycocotools unable to run: {e}')
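
A minimal standalone sketch of the pycocotools flow that eval_json() wraps, assuming instances_val2017.json and a predictions.json written by the validator exist locally:

# Sketch only; file paths are assumptions.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

anno = COCO('annotations/instances_val2017.json')  # load ground-truth annotations
pred = anno.loadRes('predictions.json')  # load detections in COCO results format
coco_eval = COCOeval(anno, pred, 'bbox')  # bbox evaluation
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints the standard 12-metric AP/AR table
map50_95, map50 = coco_eval.stats[:2]  # mAP50-95 and mAP50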
@hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name) @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
def val(cfg): def val(cfg):

@@ -17,10 +17,10 @@ from ..detect import DetectionTrainer

 # BaseTrainer python usage
 class SegmentationTrainer(DetectionTrainer):

-    def load_model(self, model_cfg=None, weights=None):
-        model = SegmentationModel(model_cfg or weights["model"].yaml, ch=3, nc=self.data["nc"])
+    def load_model(self, model_cfg=None, weights=None, verbose=True):
+        model = SegmentationModel(model_cfg or weights["model"].yaml, ch=3, nc=self.data["nc"], verbose=verbose)
         if weights:
-            model.load(weights)
+            model.load(weights, verbose)
         return model

     def get_validator(self):

@@ -7,7 +7,6 @@ import torch.nn.functional as F

 from ultralytics.yolo.engine.trainer import DEFAULT_CONFIG
 from ultralytics.yolo.utils import ops
-from ultralytics.yolo.utils.checks import check_requirements
 from ultralytics.yolo.utils.metrics import ConfusionMatrix, SegmentMetrics, box_iou, mask_iou
 from ultralytics.yolo.utils.plotting import output_to_target, plot_images
@@ -19,7 +18,6 @@ class SegmentationValidator(DetectionValidator):
     def __init__(self, dataloader=None, save_dir=None, pbar=None, logger=None, args=None):
         super().__init__(dataloader, save_dir, pbar, logger, args)
         if self.args.save_json:
-            check_requirements(['pycocotools'])
             self.process = ops.process_mask_upsample  # more accurate
         else:
             self.process = ops.process_mask  # faster
@@ -42,14 +40,12 @@ class SegmentationValidator(DetectionValidator):
     def init_metrics(self, model):
         head = model.model[-1] if self.training else model.model.model[-1]
         if self.data:
-            self.is_coco = isinstance(self.data.get('val'),
-                                      str) and self.data['val'].endswith(f'coco{os.sep}val2017.txt')
+            self.is_coco = self.data.get('val', '').endswith(f'coco{os.sep}val2017.txt')  # is COCO dataset
             self.class_map = ops.coco80_to_coco91_class() if self.is_coco else list(range(1000))
+            self.args.save_json |= self.is_coco and not self.training  # run on final val if training COCO
         self.nc = head.nc
         self.nm = head.nm if hasattr(head, "nm") else 32
         self.names = model.names
-        if isinstance(self.names, (list, tuple)):  # old format
-            self.names = dict(enumerate(self.names))
         self.metrics.names = self.names
         self.confusion_matrix = ConfusionMatrix(nc=self.nc)
         self.plot_masks = []
@@ -70,7 +66,7 @@ class SegmentationValidator(DetectionValidator):
                                         agnostic=self.args.single_cls,
                                         max_det=self.args.max_det,
                                         nm=self.nm)
-        return (p, preds[1], preds[2])
+        return p, preds[1], preds[2]

     def update_metrics(self, preds, batch):
         # Metrics
@@ -117,8 +113,7 @@ class SegmentationValidator(DetectionValidator):
                                                 masks=True)
                 if self.args.plots:
                     self.confusion_matrix.process_batch(predn, labelsn)
-                self.stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:,
-                                                                                                 0]))  # (conf, pcls, tcls)
+                self.stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # conf, pcls, tcls

             pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
             if self.args.plots and self.batch_i < 3:
@@ -186,28 +181,22 @@ class SegmentationValidator(DetectionValidator):
                   "metrics/mAP50-95(M)",]

     def plot_val_samples(self, batch, ni):
-        images = batch["img"]
-        masks = batch["masks"]
-        cls = batch["cls"].squeeze(-1)
-        bboxes = batch["bboxes"]
-        paths = batch["im_file"]
-        batch_idx = batch["batch_idx"]
-        plot_images(images,
-                    batch_idx,
-                    cls,
-                    bboxes,
-                    masks,
-                    paths=paths,
+        plot_images(batch["img"],
+                    batch["batch_idx"],
+                    batch["cls"].squeeze(-1),
+                    batch["bboxes"],
+                    batch["masks"],
+                    paths=batch["im_file"],
                     fname=self.save_dir / f"val_batch{ni}_labels.jpg",
                     names=self.names)

     def plot_predictions(self, batch, preds, ni):
-        images = batch["img"]
-        paths = batch["im_file"]
-        if len(self.plot_masks):
-            plot_masks = torch.cat(self.plot_masks, dim=0)
-        plot_images(images, *output_to_target(preds[0], max_det=15), plot_masks, paths,
-                    self.save_dir / f'val_batch{ni}_pred.jpg', self.names)  # pred
+        plot_images(batch["img"],
+                    *output_to_target(preds[0], max_det=15),
+                    torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks,
+                    paths=batch["im_file"],
+                    fname=self.save_dir / f'val_batch{ni}_pred.jpg',
+                    names=self.names)  # pred
         self.plot_masks.clear()
