From 5c6d11bdb2bc98ae16253d554f5036d9ebdc2445 Mon Sep 17 00:00:00 2001
From: Laughing <61612323+Laughing-q@users.noreply.github.com>
Date: Tue, 27 Dec 2022 22:49:13 +0800
Subject: [PATCH] Allocated updated pycocotools metrics fix (#101)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 requirements.txt                     | 2 +-
 ultralytics/yolo/engine/validator.py | 4 ++--
 ultralytics/yolo/v8/detect/val.py    | 7 ++++---
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index b8a0555..0db74c6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -40,7 +40,7 @@ ipython  # interactive notebook
 psutil  # system utilization
 thop>=0.1.1  # FLOPs computation
 # albumentations>=1.0.3
-# pycocotools>=2.0  # COCO mAP
+# pycocotools>=2.0.6  # COCO mAP
 # roboflow
 
 # HUB -----------------------------------------
diff --git a/ultralytics/yolo/engine/validator.py b/ultralytics/yolo/engine/validator.py
index 18b6ec6..4c756f0 100644
--- a/ultralytics/yolo/engine/validator.py
+++ b/ultralytics/yolo/engine/validator.py
@@ -127,7 +127,7 @@ class BaseValidator:
                 self.logger.info(f"Saving {f.name}...")
                 json.dump(self.jdict, f)  # flatten and save
 
-            self.eval_json()
+            stats = self.eval_json(stats)
         return stats
 
     def get_dataloader(self, dataset_path, batch_size):
@@ -171,5 +171,5 @@ class BaseValidator:
     def pred_to_json(self, preds, batch):
         pass
 
-    def eval_json(self):
+    def eval_json(self, stats):
         pass
diff --git a/ultralytics/yolo/v8/detect/val.py b/ultralytics/yolo/v8/detect/val.py
index 9253979..763d86d 100644
--- a/ultralytics/yolo/v8/detect/val.py
+++ b/ultralytics/yolo/v8/detect/val.py
@@ -210,13 +210,13 @@ class DetectionValidator(BaseValidator):
                                'bbox': [round(x, 3) for x in b],
                                'score': round(p[4], 5)})
 
-    def eval_json(self):
+    def eval_json(self, stats):
         if self.args.save_json and self.is_coco and len(self.jdict):
             anno_json = self.data['path'] / "annotations/instances_val2017.json"  # annotations
             pred_json = self.save_dir / "predictions.json"  # predictions
             self.logger.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
             try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-                check_requirements('pycocotools')
+                check_requirements('pycocotools>=2.0.6')
                 from pycocotools.coco import COCO  # noqa
                 from pycocotools.cocoeval import COCOeval  # noqa
 
@@ -230,9 +230,10 @@ class DetectionValidator(BaseValidator):
                 eval.evaluate()
                 eval.accumulate()
                 eval.summarize()
-                self.metrics.metric.map, self.metrics.metric.map50 = eval.stats[:2]  # update mAP50-95 and mAP50
+                stats[self.metric_keys[-1]], stats[self.metric_keys[-2]] = eval.stats[:2]  # update mAP50-95 and mAP50
             except Exception as e:
                 self.logger.warning(f'pycocotools unable to run: {e}')
+        return stats
 
 
 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
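
For context only, a minimal hedged sketch (not the repository's exact code) of the flow this patch introduces: eval_json() now receives the stats dict, overwrites its mAP50-95 and mAP50 entries with pycocotools COCOeval results, and returns it so the base validator can do stats = self.eval_json(stats). The standalone function name and the literal metric keys below are illustrative assumptions standing in for self.metric_keys[-1] and self.metric_keys[-2].

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval


def eval_coco_json(anno_json, pred_json, stats):
    """Sketch: run COCO bbox evaluation and copy mAP50-95 and mAP50 back into stats."""
    anno = COCO(str(anno_json))               # ground-truth annotations (e.g. instances_val2017.json)
    pred = anno.loadRes(str(pred_json))       # predictions.json written by the validator
    coco_eval = COCOeval(anno, pred, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    # COCOeval.stats[0] is AP@[.50:.95] (mAP50-95), COCOeval.stats[1] is AP@.50 (mAP50)
    stats['metrics/mAP50-95(B)'], stats['metrics/mAP50(B)'] = coco_eval.stats[:2]  # assumed key names
    return stats

Passing stats through and returning it, rather than writing into self.metrics in place, lets the base class pick up the pycocotools numbers without knowing which metrics a task-specific validator updates.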