ultralytics 8.0.69 HUB CI and ClearML fixes (#1888)

Co-authored-by: Victor Sonck <victor.sonck@gmail.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Glenn Jocher
2023-04-07 15:16:20 +02:00
committed by GitHub
parent d3f097314f
commit c2cd3fd20e
6 changed files with 67 additions and 88 deletions

@@ -27,14 +27,16 @@ def _log_debug_samples(files, title='Debug Samples'):
         files (List(PosixPath)) a list of file paths in PosixPath format
         title (str) A title that groups together images with the same values
     """
-    for f in files:
-        if f.exists():
-            it = re.search(r'_batch(\d+)', f.name)
-            iteration = int(it.groups()[0]) if it else 0
-            Task.current_task().get_logger().report_image(title=title,
-                                                          series=f.name.replace(it.group(), ''),
-                                                          local_path=str(f),
-                                                          iteration=iteration)
+    task = Task.current_task()
+    if task:
+        for f in files:
+            if f.exists():
+                it = re.search(r'_batch(\d+)', f.name)
+                iteration = int(it.groups()[0]) if it else 0
+                task.get_logger().report_image(title=title,
+                                               series=f.name.replace(it.group(), ''),
+                                               local_path=str(f),
+                                               iteration=iteration)


 def _log_plot(title, plot_path):
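This hunk is the core of the fix, and the same pattern is repeated for every callback in the file: call Task.current_task() once, keep the result in a local variable, and skip logging entirely when it is None, i.e. when YOLOv8 trains without an active ClearML task. A minimal sketch of the before/after behaviour, assuming a standard clearml install:

    from clearml import Task

    task = Task.current_task()  # returns None when no ClearML task is running
    # old pattern: Task.current_task().get_logger() raises AttributeError
    # on None whenever ClearML has not been initialized
    # new pattern: fetch once, guard, then reuse the local reference
    if task:
        task.get_logger().report_image(title='Debug Samples', series='batch0',
                                       local_path='train_batch0.jpg', iteration=0)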
@@ -54,11 +56,9 @@ def _log_plot(title, plot_path):
 def on_pretrain_routine_start(trainer):
     # TODO: reuse existing task
     try:
-        if Task.current_task():
-            task = Task.current_task()
+        task = Task.current_task()
+        if task:
             # Make sure the automatic pytorch and matplotlib bindings are disabled!
             # We are logging these plots and model files manually in the integration
             PatchPyTorchModelIO.update_current_task(None)
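Here the same guard decides between reusing a live task and creating a new one: when a task already exists (for example when the run was launched by a ClearML agent or pipeline), the automatic PyTorch and Matplotlib bindings are switched off because the integration reports plots and weights manually. A sketch of the surrounding control flow; the else branch is not part of this hunk, and the Task.init arguments are illustrative assumptions, not taken from the diff:

    from clearml import Task
    from clearml.binding.frameworks.pytorch_bind import PatchPyTorchModelIO
    from clearml.binding.matplotlib_bind import PatchedMatplotlib

    task = Task.current_task()
    if task:
        # reuse the existing task; disable auto-binding so nothing is logged twice
        PatchPyTorchModelIO.update_current_task(None)
        PatchedMatplotlib.update_current_task(None)
    else:
        # placeholder project/task names
        task = Task.init(project_name='YOLOv8', task_name='train',
                         auto_connect_frameworks={'pytorch': False, 'matplotlib': False})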
@@ -80,43 +80,46 @@ def on_pretrain_routine_start(trainer):
 def on_train_epoch_end(trainer):
-    if trainer.epoch == 1:
+    if trainer.epoch == 1 and Task.current_task():
         _log_debug_samples(sorted(trainer.save_dir.glob('train_batch*.jpg')), 'Mosaic')


 def on_fit_epoch_end(trainer):
-    # You should have access to the validation bboxes under jdict
-    Task.current_task().get_logger().report_scalar(title='Epoch Time',
-                                                   series='Epoch Time',
-                                                   value=trainer.epoch_time,
-                                                   iteration=trainer.epoch)
-    if trainer.epoch == 0:
-        model_info = {
-            'model/parameters': get_num_params(trainer.model),
-            'model/GFLOPs': round(get_flops(trainer.model), 3),
-            'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}
-        for k, v in model_info.items():
-            Task.current_task().get_logger().report_single_value(k, v)
+    task = Task.current_task()
+    if task:
+        # You should have access to the validation bboxes under jdict
+        task.get_logger().report_scalar(title='Epoch Time',
+                                        series='Epoch Time',
+                                        value=trainer.epoch_time,
+                                        iteration=trainer.epoch)
+        if trainer.epoch == 0:
+            model_info = {
+                'model/parameters': get_num_params(trainer.model),
+                'model/GFLOPs': round(get_flops(trainer.model), 3),
+                'model/speed(ms)': round(trainer.validator.speed['inference'], 3)}
+            for k, v in model_info.items():
+                task.get_logger().report_single_value(k, v)


 def on_val_end(validator):
-    # Log val_labels and val_pred
-    _log_debug_samples(sorted(validator.save_dir.glob('val*.jpg')), 'Validation')
+    if Task.current_task():
+        # Log val_labels and val_pred
+        _log_debug_samples(sorted(validator.save_dir.glob('val*.jpg')), 'Validation')


 def on_train_end(trainer):
-    # Log final results, CM matrix + PR plots
-    files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
-    files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter
-    for f in files:
-        _log_plot(title=f.stem, plot_path=f)
-    # Report final metrics
-    for k, v in trainer.validator.metrics.results_dict.items():
-        Task.current_task().get_logger().report_single_value(k, v)
-    # Log the final model
-    Task.current_task().update_output_model(model_path=str(trainer.best),
-                                            model_name=trainer.args.name,
-                                            auto_delete_file=False)
+    task = Task.current_task()
+    if task:
+        # Log final results, CM matrix + PR plots
+        files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
+        files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter
+        for f in files:
+            _log_plot(title=f.stem, plot_path=f)
+        # Report final metrics
+        for k, v in trainer.validator.metrics.results_dict.items():
+            task.get_logger().report_single_value(k, v)
+        # Log the final model
+        task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False)


 callbacks = {
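The callbacks dict, truncated at the end of this view, maps Ultralytics trainer hook names to the functions above. A sketch of how such a table is typically completed, assuming registration is skipped when the clearml import failed; the exact body is cut off here:

    callbacks = {
        'on_pretrain_routine_start': on_pretrain_routine_start,
        'on_train_epoch_end': on_train_epoch_end,
        'on_fit_epoch_end': on_fit_epoch_end,
        'on_val_end': on_val_end,
        'on_train_end': on_train_end} if clearml else {}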