Update .pre-commit-config.yaml (#1026)

Author: Glenn Jocher
Date: 2023-02-17 22:26:40 +01:00
Committed by: GitHub
Parent: 9047d737f4
Commit: edd3ff1669
76 changed files with 928 additions and 935 deletions

View File

@@ -11,7 +11,7 @@ except (ImportError, AssertionError):
     clearml = None


-def _log_images(imgs_dict, group="", step=0):
+def _log_images(imgs_dict, group='', step=0):
     task = Task.current_task()
     if task:
         for k, v in imgs_dict.items():
@@ -20,7 +20,7 @@ def _log_images(imgs_dict, group="", step=0):
 def on_pretrain_routine_start(trainer):
     # TODO: reuse existing task
-    task = Task.init(project_name=trainer.args.project or "YOLOv8",
+    task = Task.init(project_name=trainer.args.project or 'YOLOv8',
                      task_name=trainer.args.name,
                      tags=['YOLOv8'],
                      output_uri=True,
@@ -31,15 +31,15 @@ def on_pretrain_routine_start(trainer):
 def on_train_epoch_end(trainer):
     if trainer.epoch == 1:
-        _log_images({f.stem: str(f) for f in trainer.save_dir.glob('train_batch*.jpg')}, "Mosaic", trainer.epoch)
+        _log_images({f.stem: str(f) for f in trainer.save_dir.glob('train_batch*.jpg')}, 'Mosaic', trainer.epoch)


 def on_fit_epoch_end(trainer):
     if trainer.epoch == 0:
         model_info = {
-            "Parameters": get_num_params(trainer.model),
-            "GFLOPs": round(get_flops(trainer.model), 3),
-            "Inference speed (ms/img)": round(trainer.validator.speed[1], 3)}
+            'Parameters': get_num_params(trainer.model),
+            'GFLOPs': round(get_flops(trainer.model), 3),
+            'Inference speed (ms/img)': round(trainer.validator.speed[1], 3)}
         Task.current_task().connect(model_info, name='Model')
@@ -50,7 +50,7 @@ def on_train_end(trainer):
 callbacks = {
-    "on_pretrain_routine_start": on_pretrain_routine_start,
-    "on_train_epoch_end": on_train_epoch_end,
-    "on_fit_epoch_end": on_fit_epoch_end,
-    "on_train_end": on_train_end} if clearml else {}
+    'on_pretrain_routine_start': on_pretrain_routine_start,
+    'on_train_epoch_end': on_train_epoch_end,
+    'on_fit_epoch_end': on_fit_epoch_end,
+    'on_train_end': on_train_end} if clearml else {}
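
The hunks above are purely cosmetic (double quotes swapped for single quotes); the shape of the integration is unchanged: the clearml import is guarded, the name falls back to None when the package is missing, and the module-level callbacks dict is empty whenever ClearML is unavailable, so the trainer can dispatch into it unconditionally. A minimal sketch of that guard-and-dispatch pattern follows; run_callbacks and the toy handler are illustrative stand-ins written for this note, not the Ultralytics dispatcher itself.

try:
    import clearml  # optional dependency; absence simply disables the integration
except ImportError:
    clearml = None


def on_pretrain_routine_start(trainer):
    print('a ClearML Task would be created here')


# Empty dict when the integration is unavailable, so dispatch becomes a no-op.
callbacks = {'on_pretrain_routine_start': on_pretrain_routine_start} if clearml else {}


def run_callbacks(event, trainer=None):
    # Hypothetical dispatcher: invoke the handler registered for this event, if any.
    handler = callbacks.get(event)
    if handler:
        handler(trainer)


run_callbacks('on_pretrain_routine_start')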

View File

@@ -10,13 +10,13 @@ except ImportError:
 def on_pretrain_routine_start(trainer):
-    experiment = comet_ml.Experiment(project_name=trainer.args.project or "YOLOv8")
+    experiment = comet_ml.Experiment(project_name=trainer.args.project or 'YOLOv8')
     experiment.log_parameters(vars(trainer.args))


 def on_train_epoch_end(trainer):
     experiment = comet_ml.get_global_experiment()
-    experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix="train"), step=trainer.epoch + 1)
+    experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1)
     if trainer.epoch == 1:
         for f in trainer.save_dir.glob('train_batch*.jpg'):
             experiment.log_image(f, name=f.stem, step=trainer.epoch + 1)
@@ -27,19 +27,19 @@ def on_fit_epoch_end(trainer):
     experiment.log_metrics(trainer.metrics, step=trainer.epoch + 1)
     if trainer.epoch == 0:
         model_info = {
-            "model/parameters": get_num_params(trainer.model),
-            "model/GFLOPs": round(get_flops(trainer.model), 3),
-            "model/speed(ms)": round(trainer.validator.speed[1], 3)}
+            'model/parameters': get_num_params(trainer.model),
+            'model/GFLOPs': round(get_flops(trainer.model), 3),
+            'model/speed(ms)': round(trainer.validator.speed[1], 3)}
         experiment.log_metrics(model_info, step=trainer.epoch + 1)


 def on_train_end(trainer):
     experiment = comet_ml.get_global_experiment()
-    experiment.log_model("YOLOv8", file_or_folder=str(trainer.best), file_name="best.pt", overwrite=True)
+    experiment.log_model('YOLOv8', file_or_folder=str(trainer.best), file_name='best.pt', overwrite=True)


 callbacks = {
-    "on_pretrain_routine_start": on_pretrain_routine_start,
-    "on_train_epoch_end": on_train_epoch_end,
-    "on_fit_epoch_end": on_fit_epoch_end,
-    "on_train_end": on_train_end} if comet_ml else {}
+    'on_pretrain_routine_start': on_pretrain_routine_start,
+    'on_train_epoch_end': on_train_epoch_end,
+    'on_fit_epoch_end': on_fit_epoch_end,
+    'on_train_end': on_train_end} if comet_ml else {}
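
Only the quoting changes in this file as well. The on_fit_epoch_end hunk also shows what gets reported once at epoch 0: a model_info dict with a parameter count, GFLOPs, and the validator's inference speed. As a rough illustration of where the first of those numbers comes from, the sketch below counts parameters for a stand-in torch module; count_params is a hypothetical helper written for this example, the speed entry is a placeholder, and none of this is the get_num_params/get_flops code imported by the real module.

import torch.nn as nn


def count_params(model: nn.Module) -> int:
    # Total number of parameters, analogous to the 'model/parameters' entry above.
    return sum(p.numel() for p in model.parameters())


toy_model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3))
model_info = {
    'model/parameters': count_params(toy_model),
    'model/speed(ms)': 1.234}  # placeholder; the real value is round(trainer.validator.speed[1], 3)
print(model_info)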

View File

@@ -11,7 +11,7 @@ def on_pretrain_routine_end(trainer):
     session = getattr(trainer, 'hub_session', None)
     if session:
         # Start timer for upload rate limit
-        LOGGER.info(f"{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀")
+        LOGGER.info(f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀')
         session.t = {'metrics': time(), 'ckpt': time()}  # start timer on self.rate_limit
@@ -31,7 +31,7 @@ def on_model_save(trainer):
         # Upload checkpoints with rate limiting
         is_best = trainer.best_fitness == trainer.fitness
         if time() - session.t['ckpt'] > session.rate_limits['ckpt']:
-            LOGGER.info(f"{PREFIX}Uploading checkpoint {session.model_id}")
+            LOGGER.info(f'{PREFIX}Uploading checkpoint {session.model_id}')
             session.upload_model(trainer.epoch, trainer.last, is_best)
             session.t['ckpt'] = time()  # reset timer
@@ -40,11 +40,11 @@ def on_train_end(trainer):
     session = getattr(trainer, 'hub_session', None)
     if session:
         # Upload final model and metrics with exponential standoff
-        LOGGER.info(f"{PREFIX}Training completed successfully ✅\n"
-                    f"{PREFIX}Uploading final {session.model_id}")
+        LOGGER.info(f'{PREFIX}Training completed successfully ✅\n'
+                    f'{PREFIX}Uploading final {session.model_id}')
         session.upload_model(trainer.epoch, trainer.best, map=trainer.metrics['metrics/mAP50-95(B)'], final=True)
         session.shutdown()  # stop heartbeats
-        LOGGER.info(f"{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀")
+        LOGGER.info(f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀')


 def on_train_start(trainer):
@@ -64,11 +64,11 @@ def on_export_start(exporter):
 callbacks = {
-    "on_pretrain_routine_end": on_pretrain_routine_end,
-    "on_fit_epoch_end": on_fit_epoch_end,
-    "on_model_save": on_model_save,
-    "on_train_end": on_train_end,
-    "on_train_start": on_train_start,
-    "on_val_start": on_val_start,
-    "on_predict_start": on_predict_start,
-    "on_export_start": on_export_start}
+    'on_pretrain_routine_end': on_pretrain_routine_end,
+    'on_fit_epoch_end': on_fit_epoch_end,
+    'on_model_save': on_model_save,
+    'on_train_end': on_train_end,
+    'on_train_start': on_train_start,
+    'on_val_start': on_val_start,
+    'on_predict_start': on_predict_start,
+    'on_export_start': on_export_start}
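
Beyond the quote swap, the on_model_save hunk is a compact example of timer-based rate limiting: a checkpoint is uploaded only when more than rate_limits['ckpt'] seconds have passed since the last upload, after which the timer resets. A self-contained sketch of that logic follows; the Session class and its limits are stand-ins for illustration, not the real Ultralytics HUB session object.

from time import time


class Session:
    """Stand-in for a HUB session: holds per-category rate limits and upload timers."""

    def __init__(self):
        self.rate_limits = {'metrics': 60.0, 'ckpt': 300.0}  # illustrative limits, in seconds
        self.t = {'metrics': time(), 'ckpt': time()}  # timers, started up front as in on_pretrain_routine_end

    def upload_model(self, epoch, weights, is_best):
        print(f'uploading checkpoint from epoch {epoch} (best={is_best})')


def on_model_save_sketch(session, epoch, last, is_best):
    # Upload checkpoints with rate limiting, mirroring the hunk above.
    if time() - session.t['ckpt'] > session.rate_limits['ckpt']:
        session.upload_model(epoch, last, is_best)
        session.t['ckpt'] = time()  # reset timer

Because the timers are started when the session is created, the first checkpoint upload in this sketch only happens once the full interval has elapsed; frequent saves in between are silently skipped.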

View File

@@ -16,7 +16,7 @@ def on_pretrain_routine_start(trainer):
 def on_batch_end(trainer):
-    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix="train"), trainer.epoch + 1)
+    _log_scalars(trainer.label_loss_items(trainer.tloss, prefix='train'), trainer.epoch + 1)


 def on_fit_epoch_end(trainer):
@@ -24,6 +24,6 @@ def on_fit_epoch_end(trainer):
 callbacks = {
-    "on_pretrain_routine_start": on_pretrain_routine_start,
-    "on_fit_epoch_end": on_fit_epoch_end,
-    "on_batch_end": on_batch_end}
+    'on_pretrain_routine_start': on_pretrain_routine_start,
+    'on_fit_epoch_end': on_fit_epoch_end,
+    'on_batch_end': on_batch_end}
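
The _log_scalars helper itself sits outside these hunks, so only its call sites are visible. A plausible reading (an assumption, not the actual Ultralytics implementation) is that it forwards each named scalar to a torch.utils.tensorboard SummaryWriter, which is what the sketch below does with a hypothetical log directory.

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('runs/sketch')  # hypothetical log directory for this example


def _log_scalars(scalars, step=0):
    # Write each named scalar to TensorBoard at the given global step.
    for k, v in scalars.items():
        writer.add_scalar(k, v, step)


_log_scalars({'train/box_loss': 0.05, 'train/cls_loss': 0.02}, step=1)
writer.close()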