Fix Classification train logging (#157)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Ayush Chaurasia <ayush.chaurarsia@gmail.com>
Branch: single_channel
Author: Glenn Jocher, committed via GitHub (2 years ago)
parent d387359f74
commit e79ea1666c

@@ -50,24 +50,24 @@ success = model.export(format="onnx")
 | ------------------------------------------------------------------------------------------------ | --------------------- | -------------------- | ------------------------- | ---------------------------- | ------------------ | ----------------- |
 | [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | - | - | **1.9** | **4.5** |
 | [YOLOv6n](url) | 640 | 35.9 | - | - | 4.3 | 11.1 |
-| **[YOLOv8n](url)** | 640 | **37.5** | - | - | 3.2 | 8.9 |
+| **[YOLOv8n](url)** | 640 | **37.3** | - | - | 3.2 | 8.9 |
 | | | | | | | |
 | [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | - | - | 7.2 | 16.5 |
 | [YOLOv6s](url) | 640 | 43.5 | - | - | 17.2 | 44.2 |
-| **[YOLOv8s](url)** | 640 | **44.7** | - | - | 11.2 | 28.8 |
+| **[YOLOv8s](url)** | 640 | **44.9** | - | - | 11.2 | 28.8 |
 | | | | | | | |
 | [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | - | - | 21.2 | 49.0 |
 | [YOLOv6m](url) | 640 | 49.5 | - | - | 34.3 | 82.2 |
-| **[YOLOv8m](url)** | 640 | **50.3** | - | - | 25.9 | 79.3 |
+| **[YOLOv8m](url)** | 640 | **50.2** | - | - | 25.9 | 79.3 |
 | | | | | | | |
 | [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | - | - | 46.5 | 109.1 |
 | [YOLOv6l](url) | 640 | 52.5 | - | - | 58.5 | 144.0 |
 | [YOLOv7](url) | 640 | 51.2 | - | - | 36.9 | 104.7 |
-| **[YOLOv8l](url)** | 640 | **52.8** | - | - | 43.7 | 165.7 |
+| **[YOLOv8l](url)** | 640 | **52.9** | - | - | 43.7 | 165.7 |
 | | | | | | | |
 | [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | - | - | 86.7 | 205.7 |
 | [YOLOv7-X](url) | 640 | 52.9 | - | - | 71.3 | 189.9 |
-| **[YOLOv8x](url)** | 640 | **53.7** | - | - | 68.2 | 258.5 |
+| **[YOLOv8x](url)** | 640 | **53.9** | - | - | 68.2 | 258.5 |
 | | | | | | | |
 | [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt) | 1280 | 55.0 | - | - | 140.7 | 839.2 |
 | [YOLOv7-E6E](url) | 1280 | 56.8 | - | - | 151.7 | 843.2 |

@@ -259,6 +259,7 @@ class ClassificationModel(BaseModel):
         self.yaml['nc'] = nc  # override yaml value
         self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch], verbose=verbose)  # model, savelist
         self.names = {i: f'{i}' for i in range(self.yaml['nc'])}  # default names dict
+        self.info()

     def load(self, weights):
         model = weights["model"] if isinstance(weights, dict) else weights  # torchvision models are not dicts
@@ -292,7 +293,6 @@ class ClassificationModel(BaseModel):


 def attempt_load_weights(weights, device=None, inplace=True, fuse=False):
-    LOGGER.info("WARNING: Deprecated in favor of attempt_load_one_weight()")
     # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
     from ultralytics.yolo.utils.downloads import attempt_download

@@ -33,7 +33,7 @@ resume: False  # resume training from last checkpoint
 overlap_mask: True  # masks should overlap during training
 mask_ratio: 4  # mask downsample ratio
 # Classification
-dropout: False  # use dropout regularization
+dropout: 0.0  # use dropout regularization

 # Val/Test settings ----------------------------------------------------------------------------------------------------
 val: True  # validate/test during training

@@ -11,7 +11,9 @@ import uuid
 from pathlib import Path

 import cv2
+import numpy as np
 import pandas as pd
+import torch
 import yaml

 # Constants
@@ -57,8 +59,8 @@ HELP_MSG = \
     """

 # Settings
-# torch.set_printoptions(linewidth=320, precision=5, profile='long')
-# np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
+torch.set_printoptions(linewidth=320, precision=5, profile='long')
+np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
 pd.options.display.max_columns = 10
 cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
 os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS)  # NumExpr max threads
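These two print settings were previously commented out; with this hunk torch and numpy values are displayed with a wider line and fixed precision package-wide. A quick illustrative snippet (standalone, not part of the diff) showing the effect:

```python
import numpy as np
import torch

# Same display settings the diff enables in ultralytics/yolo/utils/__init__.py
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # short g, width 11

print(torch.rand(3))      # values rendered with 5 decimal places on one 320-char line
print(np.random.rand(3))  # values rendered with the '{:11.5g}' formatter
```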

@@ -565,14 +565,8 @@ class SegmentMetrics:
     @property
     def keys(self):
         return [
-            "metrics/precision(B)",
-            "metrics/recall(B)",
-            "metrics/mAP50(B)",
-            "metrics/mAP50-95(B)",  # metrics
-            "metrics/precision(M)",
-            "metrics/recall(M)",
-            "metrics/mAP50(M)",
-            "metrics/mAP50-95(M)"]
+            "metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)",
+            "metrics/precision(M)", "metrics/recall(M)", "metrics/mAP50(M)", "metrics/mAP50-95(M)"]

     def mean_results(self):
         return self.metric_box.mean_results() + self.metric_mask.mean_results()
@@ -603,7 +597,10 @@ class ClassifyMetrics:
         self.top1 = 0
         self.top5 = 0

-    def process(self, correct):
+    def process(self, targets, pred):
+        # target classes and predicted classes
+        pred, targets = torch.cat(pred), torch.cat(targets)
+        correct = (targets[:, None] == pred).float()
         acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) accuracy
         self.top1, self.top5 = acc.mean(0).tolist()
@@ -617,4 +614,4 @@ class ClassifyMetrics:

     @property
     def keys(self):
-        return ["top1", "top5"]
+        return ["metrics/accuracy_top1", "metrics/accuracy_top5"]
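To make the new flow concrete: `ClassifyMetrics.process()` now receives the raw per-batch targets and top-5 predictions (instead of a precomputed `correct` tensor) and derives top-1/top-5 accuracy itself. A minimal standalone sketch of that computation with made-up tensors (not the Ultralytics class, just the same math):

```python
import torch

# Hypothetical accumulated batches: 4 images, 10 classes.
# Each row of pred holds the top-5 class indices, best first,
# i.e. what the validator collects via preds.argsort(1, descending=True)[:, :5].
pred = [torch.tensor([[3, 1, 2, 0, 4],
                      [7, 3, 9, 1, 0],
                      [5, 6, 1, 4, 9],
                      [2, 8, 6, 4, 1]])]
targets = [torch.tensor([3, 9, 0, 8])]

pred_all, targets_all = torch.cat(pred), torch.cat(targets)
correct = (targets_all[:, None] == pred_all).float()              # (N, 5) hit matrix
acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # columns: top-1 hit, top-5 hit
top1, top5 = acc.mean(0).tolist()
print(top1, top5)  # 0.25 0.75 for the tensors above
```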

@@ -2,7 +2,7 @@ import hydra
 import torch
 import torchvision

-from ultralytics.nn.tasks import ClassificationModel, attempt_load_weights
+from ultralytics.nn.tasks import ClassificationModel, attempt_load_one_weight
 from ultralytics.yolo import v8
 from ultralytics.yolo.data import build_classification_dataloader
 from ultralytics.yolo.engine.trainer import BaseTrainer
@@ -20,8 +20,18 @@ class ClassificationTrainer(BaseTrainer):
     def set_model_attributes(self):
         self.model.names = self.data["names"]

-    def get_model(self, cfg=None, weights=None):
+    def get_model(self, cfg=None, weights=None, verbose=True):
         model = ClassificationModel(cfg, nc=self.data["nc"])
+        pretrained = False
+        for m in model.modules():
+            if not pretrained and hasattr(m, 'reset_parameters'):
+                m.reset_parameters()
+            if isinstance(m, torch.nn.Dropout) and self.args.dropout:
+                m.p = self.args.dropout  # set dropout
+        for p in model.parameters():
+            p.requires_grad = True  # for training
+
         if weights:
             model.load(weights)
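The block added above re-initializes every submodule that exposes `reset_parameters()` (so a model built from YAML starts from random weights when no checkpoint is supplied) and overrides the probability of each `nn.Dropout` with the now float-valued `dropout` hyperparameter. A minimal sketch of the same idea on a toy module; the helper name and toy network are illustrative only, not the Ultralytics API:

```python
import torch.nn as nn


def prepare_for_training(model: nn.Module, dropout: float = 0.0, pretrained: bool = False) -> nn.Module:
    """Re-initialize layers (unless pretrained) and apply a float dropout probability."""
    for m in model.modules():
        if not pretrained and hasattr(m, 'reset_parameters'):
            m.reset_parameters()      # fresh random init for Linear/Conv/BatchNorm layers
        if isinstance(m, nn.Dropout) and dropout:
            m.p = dropout             # e.g. dropout=0.2 from the config
    for p in model.parameters():
        p.requires_grad = True        # ensure all weights are trainable
    return model


toy = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Dropout(0.5), nn.Linear(16, 10))
toy = prepare_for_training(toy, dropout=0.2)
print(toy[2].p)  # 0.2
```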
@@ -43,7 +53,7 @@ class ClassificationTrainer(BaseTrainer):
         model = str(self.model)
         # Load a YOLO model locally, from torchvision, or from Ultralytics assets
         if model.endswith(".pt"):
-            self.model = attempt_load_weights(model, device='cpu')
+            self.model, _ = attempt_load_one_weight(model, device='cpu')
         elif model.endswith(".yaml"):
             self.model = self.get_model(cfg=model)
         elif model in torchvision.models.__dict__:
@@ -54,10 +64,11 @@ class ClassificationTrainer(BaseTrainer):
         return  # dont return ckpt. Classification doesn't support resume

-    def get_dataloader(self, dataset_path, batch_size, rank=0, mode="train"):
+    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode="train"):
         return build_classification_dataloader(path=dataset_path,
                                                imgsz=self.args.imgsz,
-                                               batch_size=batch_size,
+                                               batch_size=batch_size if mode == "train" else (batch_size * 2),
+                                               augment=mode == "train",
                                                rank=rank)

     def preprocess_batch(self, batch):
@@ -66,15 +77,41 @@ class ClassificationTrainer(BaseTrainer):
         return batch

     def progress_string(self):
-        return ('\n' + '%11s' *
-                (4 + len(self.loss_names))) % ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size')
+        return ('\n' + '%11s' * (4 + len(self.loss_names))) % \
+            ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size')

     def get_validator(self):
+        self.loss_names = ['loss']
         return v8.classify.ClassificationValidator(self.test_loader, self.save_dir, logger=self.console)

     def criterion(self, preds, batch):
-        loss = torch.nn.functional.cross_entropy(preds, batch["cls"])
-        return loss, loss
+        loss = torch.nn.functional.cross_entropy(preds, batch["cls"], reduction='sum') / self.args.nbs
+        loss_items = loss.detach()
+        return loss, loss_items
+
+    # def label_loss_items(self, loss_items=None, prefix="train"):
+    #     """
+    #     Returns a loss dict with labelled training loss items tensor
+    #     """
+    #     # Not needed for classification but necessary for segmentation & detection
+    #     keys = [f"{prefix}/{x}" for x in self.loss_names]
+    #     if loss_items is not None:
+    #         loss_items = [round(float(x), 5) for x in loss_items]  # convert tensors to 5 decimal place floats
+    #         return dict(zip(keys, loss_items))
+    #     else:
+    #         return keys
+
+    def label_loss_items(self, loss_items=None, prefix="train"):
+        """
+        Returns a loss dict with labelled training loss items tensor
+        """
+        # Not needed for classification but necessary for segmentation & detection
+        keys = [f"{prefix}/{x}" for x in self.loss_names]
+        if loss_items is not None:
+            loss_items = [round(float(loss_items), 5)]
+            return dict(zip(keys, loss_items))
+        else:
+            return keys

     def resume_training(self, ckpt):
         pass
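Two logging-related pieces come together in this hunk: `criterion` now returns a detached scalar (summed cross-entropy normalized by the nominal batch size `nbs`) alongside the loss, and the new `label_loss_items` wraps that scalar into a `{'train/loss': ...}` dict the base trainer can log. A rough standalone sketch of both, assuming `nbs=64` and the single `'loss'` name set in `get_validator()`:

```python
import torch
import torch.nn.functional as F

nbs = 64                 # nominal batch size assumed for normalization
loss_names = ['loss']    # mirrors self.loss_names in the diff


def criterion(preds, cls):
    # Sum over the batch, then divide by the nominal batch size instead of the actual one
    loss = F.cross_entropy(preds, cls, reduction='sum') / nbs
    return loss, loss.detach()


def label_loss_items(loss_items=None, prefix="train"):
    keys = [f"{prefix}/{x}" for x in loss_names]
    if loss_items is None:
        return keys                                      # header names for the logger
    return dict(zip(keys, [round(float(loss_items), 5)]))


preds = torch.randn(16, 10)          # dummy logits: 16 images, 10 classes
cls = torch.randint(0, 10, (16,))    # dummy labels
loss, loss_items = criterion(preds, cls)
print(label_loss_items(loss_items))  # e.g. {'train/loss': 0.62431}
```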
@@ -86,12 +123,16 @@ class ClassificationTrainer(BaseTrainer):
 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
 def train(cfg):
     cfg.model = cfg.model or "yolov8n-cls.yaml"  # or "resnet18"
-    cfg.data = cfg.data or "imagenette160"  # or yolo.ClassificationDataset("mnist")
-    # trainer = ClassificationTrainer(cfg)
-    # trainer.train()
-    from ultralytics import YOLO
-    model = YOLO(cfg.model)
-    model.train(**cfg)
+    cfg.data = cfg.data or "mnist160"  # or yolo.ClassificationDataset("mnist")
+    cfg.lr0 = 0.1
+    cfg.weight_decay = 5e-5
+    cfg.label_smoothing = 0.1
+    cfg.warmup_epochs = 0.0
+    trainer = ClassificationTrainer(cfg)
+    trainer.train()
+    # from ultralytics import YOLO
+    # model = YOLO(cfg.model)
+    # model.train(**cfg)


 if __name__ == "__main__":

@@ -1,5 +1,4 @@
 import hydra
-import torch

 from ultralytics.yolo.data import build_classification_dataloader
 from ultralytics.yolo.engine.validator import BaseValidator
@@ -13,8 +12,12 @@ class ClassificationValidator(BaseValidator):
         super().__init__(dataloader, save_dir, pbar, logger, args)
         self.metrics = ClassifyMetrics()

+    def get_desc(self):
+        return ('%22s' + '%11s' * 2) % ('classes', 'top1_acc', 'top5_acc')
+
     def init_metrics(self, model):
-        self.correct = torch.tensor([], device=next(model.parameters()).device)
+        self.pred = []
+        self.targets = []

     def preprocess(self, batch):
         batch["img"] = batch["img"].to(self.device, non_blocking=True)
@@ -23,17 +26,20 @@ class ClassificationValidator(BaseValidator):
         return batch

     def update_metrics(self, preds, batch):
-        targets = batch["cls"]
-        correct_in_batch = (targets[:, None] == preds).float()
-        self.correct = torch.cat((self.correct, correct_in_batch))
+        self.pred.append(preds.argsort(1, descending=True)[:, :5])
+        self.targets.append(batch["cls"])

     def get_stats(self):
-        self.metrics.process(self.correct)
+        self.metrics.process(self.targets, self.pred)
         return self.metrics.results_dict

     def get_dataloader(self, dataset_path, batch_size):
         return build_classification_dataloader(path=dataset_path, imgsz=self.args.imgsz, batch_size=batch_size)

+    def print_results(self):
+        pf = '%22s' + '%11.3g' * len(self.metrics.keys)  # print format
+        self.logger.info(pf % ("all", self.metrics.top1, self.metrics.top5))
+

 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
 def val(cfg):
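For completeness, a tiny standalone snippet (dummy values, not the validator itself) showing the two details introduced above: top-5 indices extracted with `argsort` in `update_metrics`, and the `print_results`-style row that lines up with the `get_desc` header:

```python
import torch

scores = torch.randn(3, 10)                        # dummy logits: 3 images, 10 classes
top5 = scores.argsort(1, descending=True)[:, :5]   # top-5 class indices, best first
print(top5.shape)                                  # torch.Size([3, 5])

keys = ["metrics/accuracy_top1", "metrics/accuracy_top5"]
header = ('%22s' + '%11s' * 2) % ('classes', 'top1_acc', 'top5_acc')
row = ('%22s' + '%11.3g' * len(keys)) % ("all", 0.912, 0.994)  # hypothetical accuracies
print(header)
print(row)
```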
