Add TensorBoard support (#87)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Author: Glenn Jocher
Date: 2022-12-24 14:37:46 +01:00
Committed by: GitHub
Parent: 248d54ca03
Commit: cb4f20f3cf

6 changed files with 133 additions and 51 deletions

File: callbacks base module

@@ -1,13 +1,36 @@
-def before_train(trainer):
-    # Initialize tensorboard logger
+def on_pretrain_routine_start(trainer):
     pass
 
 
-def on_epoch_start(trainer):
+def on_pretrain_routine_end(trainer):
     pass
 
 
-def on_batch_start(trainer):
+def on_train_start(trainer):
     pass
 
 
+def on_train_epoch_start(trainer):
+    pass
+
+
+def on_train_batch_start(trainer):
+    pass
+
+
+def optimizer_step(trainer):
+    pass
+
+
+def on_before_zero_grad(trainer):
+    pass
+
+
+def on_train_batch_end(trainer):
+    pass
+
+
+def on_train_epoch_end(trainer):
+    pass
@@ -15,27 +38,68 @@ def on_val_start(trainer):
     pass
 
 
+def on_val_batch_start(trainer):
+    pass
+
+
+def on_val_image_end(trainer):
+    pass
+
+
+def on_val_batch_end(trainer):
+    pass
+
+
 def on_val_end(trainer):
     pass
 
 
+def on_fit_epoch_end(trainer):
+    pass
+
+
 def on_model_save(trainer):
     pass
 
 
+def on_train_end(trainer):
+    pass
+
+
+def on_params_update(trainer):
+    pass
+
+
+def teardown(trainer):
+    pass
+
+
 default_callbacks = {
-    "before_train": before_train,
-    "on_epoch_start": on_epoch_start,
-    "on_batch_start": on_batch_start,
-    "on_val_start": on_val_start,
-    "on_val_end": on_val_end,
-    "on_model_save": on_model_save}
+    'on_pretrain_routine_start': on_pretrain_routine_start,
+    'on_pretrain_routine_end': on_pretrain_routine_end,
+    'on_train_start': on_train_start,
+    'on_train_epoch_start': on_train_epoch_start,
+    'on_train_batch_start': on_train_batch_start,
+    'optimizer_step': optimizer_step,
+    'on_before_zero_grad': on_before_zero_grad,
+    'on_train_batch_end': on_train_batch_end,
+    'on_train_epoch_end': on_train_epoch_end,
+    'on_val_start': on_val_start,
+    'on_val_batch_start': on_val_batch_start,
+    'on_val_image_end': on_val_image_end,
+    'on_val_batch_end': on_val_batch_end,
+    'on_val_end': on_val_end,
+    'on_fit_epoch_end': on_fit_epoch_end,  # fit = train + val
+    'on_model_save': on_model_save,
+    'on_train_end': on_train_end,
+    'on_params_update': on_params_update,
+    'teardown': teardown}
 
 
 def add_integration_callbacks(trainer):
-    callbacks = {}
+    from .clearml import callbacks as clearml_callbacks
+    from .tb import callbacks as tb_callbacks
 
-    from .clearml import callbacks, clearml
-    if clearml:
-        for callback, func in callbacks.items():
-            trainer.add_callback(callback, func)
+    for x in tb_callbacks, clearml_callbacks:
+        for k, v in x.items():
+            trainer.add_callback(k, v)  # add_callback(name, func)
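For readers skimming the diff: default_callbacks is a registry of no-op hooks keyed by event name, and add_integration_callbacks layers the TensorBoard and ClearML hooks on top of them. The trainer side of this contract is not part of the commit, so the sketch below is an assumption about how the registry is consumed; MiniTrainer and run_callbacks are illustration-only names.

# Hypothetical consumer of the callback registry (not from this commit).
from collections import defaultdict

def on_train_start(trainer):  # stand-in for one of the no-op defaults above
    pass

default_callbacks = {'on_train_start': on_train_start}  # trimmed registry

class MiniTrainer:
    def __init__(self):
        self.callbacks = defaultdict(list)
        for name, func in default_callbacks.items():
            self.add_callback(name, func)  # seed with the no-op defaults

    def add_callback(self, name, func):
        self.callbacks[name].append(func)  # integrations append, never replace

    def run_callbacks(self, name):
        for func in self.callbacks[name]:
            func(self)  # every hook receives the trainer instance

trainer = MiniTrainer()
trainer.add_callback('on_train_start', lambda t: print('integration hook'))
trainer.run_callbacks('on_train_start')  # prints: integration hook

Appending rather than replacing is what lets tb.py and clearml.py both subscribe to the same events, as add_integration_callbacks does above.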

File: callbacks/clearml.py

@@ -9,47 +9,33 @@ except (ImportError, AssertionError):
     clearml = None
 
 
 def _log_scalers(metric_dict, group="", step=0):
     task = Task.current_task()
     if task:
         for k, v in metric_dict.items():
             task.get_logger().report_scalar(group, k, v, step)
 
 
-def before_train(trainer):
+def on_train_start(trainer):
     # TODO: reuse existing task
-    task = Task.init(project_name=trainer.args.project if trainer.args.project != 'runs/train' else 'YOLOv5',
-                     task_name=trainer.args.name if trainer.args.name != 'exp' else 'Training',
-                     tags=['YOLOv5'],
+    task = Task.init(project_name=trainer.args.project if trainer.args.project != 'runs/train' else 'YOLOv8',
+                     task_name=trainer.args.name,
+                     tags=['YOLOv8'],
                      output_uri=True,
                      reuse_last_task_id=False,
                      auto_connect_frameworks={'pytorch': False})
     task.connect(dict(trainer.args), name='General')
 
 
 def on_batch_end(trainer):
     _log_scalers(trainer.label_loss_items(trainer.tloss, prefix="train"), "train", trainer.epoch)
 
 
 def on_val_end(trainer):
     _log_scalers(trainer.label_loss_items(trainer.validator.loss, prefix="val"), "val", trainer.epoch)
     _log_scalers({k: v for k, v in trainer.metrics.items() if k.startswith("metrics")}, "metrics", trainer.epoch)
     if trainer.epoch == 0:
         model_info = {
-            "inference_speed": trainer.validator.speed[1],
-            "flops@640": get_flops(trainer.model),
-            "params": get_num_params(trainer.model)}
-        Task.current_task().connect(model_info, 'Model')
+            "Inference speed (ms/img)": round(trainer.validator.speed[1], 1),
+            "GFLOPs": round(get_flops(trainer.model), 1),
+            "Parameters": get_num_params(trainer.model)}
+        Task.current_task().connect(model_info, name='Model')
 
 
 def on_train_end(trainer):
-    task = Task.current_task()
-    if task:
-        task.update_output_model(model_path=str(trainer.best), model_name='Best Model', auto_delete_file=False)
+    Task.current_task().update_output_model(model_path=str(trainer.best),
+                                            model_name=trainer.args.name,
+                                            auto_delete_file=False)
 
 
 callbacks = {
-    "before_train": before_train,
+    "on_train_start": on_train_start,
     "on_val_end": on_val_end,
     "on_batch_end": on_batch_end,
-    "on_train_end": on_train_end}
+    "on_train_end": on_train_end} if clearml else {}

File: callbacks/tb.py (new)

@@ -0,0 +1,26 @@
+from torch.utils.tensorboard import SummaryWriter
+
+writer = None  # TensorBoard SummaryWriter instance
+
+
+def _log_scalars(scalars, step=0):
+    for k, v in scalars.items():
+        writer.add_scalar(k, v, step)
+
+
+def on_train_start(trainer):
+    global writer
+    writer = SummaryWriter(str(trainer.save_dir))
+    trainer.console.info(f"Logging results to {trainer.save_dir}\n"
+                         f"Starting training for {trainer.args.epochs} epochs...")
+
+
+def on_batch_end(trainer):
+    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), trainer.epoch)
+
+
+def on_val_end(trainer):
+    _log_scalars(trainer.metrics, trainer.epoch)
+
+
+callbacks = {"on_train_start": on_train_start, "on_val_end": on_val_end, "on_batch_end": on_batch_end}

File: default trainer config (YAML)

@@ -15,7 +15,7 @@ nosave: False
 cache: False  # True/ram, disk or False
 device: ''  # cuda device, i.e. 0 or 0,1,2,3 or cpu
 workers: 8
-project: 'runs'
+project: 'runs/train'
 name: 'exp'
 exist_ok: False
 pretrained: False
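This one-line config change matters because both integrations key off the project path: tb.py writes its event files under trainer.save_dir, and clearml.py substitutes the 'YOLOv8' project name whenever project is left at its new 'runs/train' default. How save_dir is derived from these keys is not shown in this commit, so the composition below is an assumption for illustration only:

# Assumed relationship between the config keys and the logging destination.
from pathlib import Path

project, name = 'runs/train', 'exp'  # values from this YAML
save_dir = Path(project) / name      # assumed: -> runs/train/exp
# tb.py then logs there via SummaryWriter(str(save_dir)), and clearml.py
# maps the default project 'runs/train' to the 'YOLOv8' ClearML project.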