From aa1cab74f8ee4e830d3dfa39ab0bec568334d045 Mon Sep 17 00:00:00 2001
From: Maia Numerosky <17316848+maianumerosky@users.noreply.github.com>
Date: Thu, 20 Jul 2023 20:44:46 -0300
Subject: [PATCH] Benchmark with custom `data.yaml` (#3858)

Co-authored-by: Glenn Jocher
---
 docs/modes/benchmark.md         | 25 +++++++++++++------------
 docs/usage/python.md            |  4 ++--
 ultralytics/utils/benchmarks.py |  6 ++++--
 3 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/docs/modes/benchmark.md b/docs/modes/benchmark.md
index d90cd47..b4fc6b0 100644
--- a/docs/modes/benchmark.md
+++ b/docs/modes/benchmark.md
@@ -30,27 +30,28 @@ full list of export arguments.
         from ultralytics.utils.benchmarks import benchmark
 
         # Benchmark on GPU
-        benchmark(model='yolov8n.pt', imgsz=640, half=False, device=0)
+        benchmark(model='yolov8n.pt', data='coco8.yaml', imgsz=640, half=False, device=0)
         ```
     === "CLI"
 
         ```bash
-        yolo benchmark model=yolov8n.pt imgsz=640 half=False device=0
+        yolo benchmark model=yolov8n.pt data='coco8.yaml' imgsz=640 half=False device=0
         ```
 
 ## Arguments
 
-Arguments such as `model`, `imgsz`, `half`, `device`, and `hard_fail` provide users with the flexibility to fine-tune
+Arguments such as `model`, `data`, `imgsz`, `half`, `device`, and `hard_fail` provide users with the flexibility to fine-tune
 the benchmarks to their specific needs and compare the performance of different export formats with ease.
 
-| Key | Value | Description |
-|-------------|---------|----------------------------------------------------------------------|
-| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml |
-| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) |
-| `half` | `False` | FP16 quantization |
-| `int8` | `False` | INT8 quantization |
-| `device` | `None` | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu |
-| `hard_fail` | `False` | do not continue on error (bool), or val floor threshold (float) |
+| Key | Value | Description |
+|-------------|---------|----------------------------------------------------------------------------|
+| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml |
+| `data` | `None` | path to yaml referencing the benchmarking dataset (under `val` label) |
+| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) |
+| `half` | `False` | FP16 quantization |
+| `int8` | `False` | INT8 quantization |
+| `device` | `None` | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu |
+| `hard_fail` | `False` | do not continue on error (bool), or val floor threshold (float) |
 
 ## Export Formats
 
@@ -72,4 +73,4 @@ Benchmarks will attempt to run automatically on all possible export formats belo
 | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ |
 | [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n_ncnn_model/` | ✅ |
 
-See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
\ No newline at end of file
+See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page.
diff --git a/docs/usage/python.md b/docs/usage/python.md
index 1e9dffe..b2dfc53 100644
--- a/docs/usage/python.md
+++ b/docs/usage/python.md
@@ -243,7 +243,7 @@ their specific use case based on their requirements for speed and accuracy.
         from ultralytics.utils.benchmarks import benchmark
 
         # Benchmark
-        benchmark(model='yolov8n.pt', imgsz=640, half=False, device=0)
+        benchmark(model='yolov8n.pt', data='coco8.yaml', imgsz=640, half=False, device=0)
         ```
 
 [Benchmark Examples](../modes/benchmark.md){ .md-button .md-button--primary}
@@ -280,4 +280,4 @@ You can easily customize Trainers to support custom tasks or explore R&D ideas.
 
 Learn more about Customizing `Trainers`, `Validators` and `Predictors` to suit your project needs in the Customization Section.
 
-[Customization tutorials](engine.md){ .md-button .md-button--primary}
\ No newline at end of file
+[Customization tutorials](engine.md){ .md-button .md-button--primary}
diff --git a/ultralytics/utils/benchmarks.py b/ultralytics/utils/benchmarks.py
index c35fde4..69efd61 100644
--- a/ultralytics/utils/benchmarks.py
+++ b/ultralytics/utils/benchmarks.py
@@ -5,7 +5,7 @@ Benchmark a YOLO model formats for speed and accuracy
 Usage:
     from ultralytics.utils.benchmarks import ProfileModels, benchmark
     ProfileModels(['yolov8n.yaml', 'yolov8s.yaml']).profile()
-    run_benchmarks(model='yolov8n.pt', imgsz=160)
+    benchmark(model='yolov8n.pt', imgsz=160)
 
 Format | `format=argument` | Model
 --- | --- | ---
@@ -44,6 +44,7 @@ from ultralytics.utils.torch_utils import select_device
 
 
 def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
+              data=None,
               imgsz=160,
               half=False,
               int8=False,
@@ -55,6 +56,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
     Args:
         model (str | Path | optional): Path to the model file or directory. Default is
             Path(SETTINGS['weights_dir']) / 'yolov8n.pt'.
+        data (str, optional): Dataset to evaluate on, inherited from TASK2DATA if not passed. Default is None.
         imgsz (int, optional): Image size for the benchmark. Default is 160.
         half (bool, optional): Use half-precision for the model if True. Default is False.
         int8 (bool, optional): Use int8-precision for the model if True. Default is False.
@@ -106,7 +108,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
             export.predict(ROOT / 'assets/bus.jpg', imgsz=imgsz, device=device, half=half)
 
             # Validate
-            data = TASK2DATA[model.task]  # task to dataset, i.e. coco8.yaml for task=detect
+            data = data or TASK2DATA[model.task]  # task to dataset, i.e. coco8.yaml for task=detect
             key = TASK2METRIC[model.task]  # task to metric, i.e. metrics/mAP50-95(B) for task=detect
             results = export.val(data=data, batch=1,
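As a usage sketch of the behavior this patch enables: once `benchmark()` accepts `data`, a custom dataset YAML can be passed directly instead of the task default resolved from `TASK2DATA`. The `path/to/my_dataset.yaml` below is a hypothetical placeholder; per the new docs table, it only needs to reference the benchmarking images under its `val` key.

```python
from ultralytics.utils.benchmarks import benchmark

# Benchmark all export formats of YOLOv8n against a custom dataset rather than
# the task default (TASK2DATA, e.g. coco8.yaml for detection).
# 'path/to/my_dataset.yaml' is a hypothetical file: any Ultralytics-style
# dataset YAML whose 'val' key points at the images to benchmark on.
benchmark(model='yolov8n.pt', data='path/to/my_dataset.yaml', imgsz=640, half=False, device=0)
```

The CLI form should mirror the docs example above, e.g. `yolo benchmark model=yolov8n.pt data=path/to/my_dataset.yaml imgsz=640`.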