Add flops, num_params, inference speed logging and best.pt logging (#84)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
single_channel
Ayush Chaurasia 2 years ago committed by GitHub
parent f0fff8c13e
commit ae2443c210
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -30,6 +30,7 @@ class BaseValidator:
self.device = None
self.batch_i = None
self.training = True
self.speed = None
self.save_dir = save_dir if save_dir is not None else \
increment_path(Path(self.args.project) / self.args.name, exist_ok=self.args.exist_ok)
@ -110,12 +111,14 @@ class BaseValidator:
self.print_results()
# print speeds
if not self.training:
# calculate speed only once when training
if not self.training or trainer.epoch == 0:
t = tuple(x.t / len(self.dataloader.dataset) * 1E3 for x in dt) # speeds per image
# shape = (self.dataloader.batch_size, 3, imgsz, imgsz)
self.logger.info(
'Speed: %.1fms pre-process, %.1fms inference, %.1fms loss, %.1fms post-process per image at shape ' % t)
self.speed = t
if not self.training: # print only at inference
self.logger.info(
'Speed: %.1fms pre-process, %.1fms inference, %.1fms loss, %.1fms post-process per image' % t)
if self.training:
model.float()

@ -1,3 +1,5 @@
from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params
try:
import clearml
from clearml import Task
@ -38,8 +40,23 @@ def on_val_end(trainer):
_log_scalers(val_loss_dict, "val", trainer.epoch)
_log_scalers(metrics, "metrics", trainer.epoch)
if trainer.epoch == 0:
infer_speed = trainer.validator.speed[1]
model_info = {
"inference_speed": infer_speed,
"flops@640": get_flops(trainer.model),
"params": get_num_params(trainer.model)}
_log_scalers(model_info, "model")
def on_train_end(trainer):
    """Register the best checkpoint as the ClearML task's output model at the end of training."""
    task = Task.current_task()
    if not task:
        return  # no active ClearML task — nothing to upload to
    task.update_output_model(model_path=str(trainer.best),
                             model_name='Best Model',
                             auto_delete_file=False)
# Mapping of trainer event names to ClearML callback handlers.
# NOTE: the diff residue had duplicated the old closing line
# ('"on_batch_end": on_batch_end,}') alongside the new entries, which made
# the dict literal syntactically invalid; this is the reconstructed new version.
callbacks = {
    "before_train": before_train,
    "on_val_end": on_val_end,
    "on_batch_end": on_batch_end,
    "on_train_end": on_train_end}

@ -125,8 +125,8 @@ def fuse_conv_and_bn(conv, bn):
def model_info(model, verbose=False, imgsz=640):
    """Log a one-line summary of `model`: layers, parameters, gradients and GFLOPs.

    Args:
        model: torch.nn.Module to summarize.
        verbose: if True, also print a per-parameter table (index, name,
            requires_grad, numel, shape, mean, std).
        imgsz: image size used for the FLOPs estimate; int or list,
            i.e. imgsz=640 or imgsz=[640, 320].
    """
    # Diff residue had left both the old inline sums and the new helper calls;
    # keep only the new helper-based computation.
    n_p = get_num_params(model)  # number of parameters
    n_g = get_num_gradients(model)  # number of parameters requiring gradients
    if verbose:
        print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}")
        for i, (name, p) in enumerate(model.named_parameters()):
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    flops = get_flops(model, imgsz)  # 0 when profiling is unavailable/fails
    fs = f', {flops:.1f} GFLOPs' if flops else ''
    name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'
    LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def get_num_params(model):
    """Return the total number of parameters in `model`."""
    total = 0
    for param in model.parameters():
        total += param.numel()
    return total
def get_num_gradients(model):
    """Return the number of parameters in `model` that require gradients."""
    trainable = (param.numel() for param in model.parameters() if param.requires_grad)
    return sum(trainable)
def get_flops(model, imgsz=640):
    """Return the model's estimated GFLOPs at image size `imgsz`.

    Profiles one forward pass at the model's stride size with `thop` and
    scales the result up to `imgsz` (int or [h, w] list). Returns 0 when
    profiling fails for any reason (e.g. `thop` not installed).
    """
    # Diff residue had interleaved leftover lines from the old model_info()
    # (duplicate `fs = ...` string and LOGGER calls); this is the clean new version.
    try:
        p = next(model.parameters())
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32  # max stride
        im = torch.empty((1, p.shape[1], stride, stride), device=p.device)  # input image in BCHW format
        flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2  # stride GFLOPs
        imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz]  # expand if int/float
        return flops * imgsz[0] / stride * imgsz[1] / stride  # imgsz GFLOPs
    except Exception:
        # Best-effort helper: callers treat 0 as "FLOPs unknown".
        return 0
def initialize_weights(model):

Loading…
Cancel
Save