Add benchmarks to Docker publish workflow (#3931)

single_channel
Glenn Jocher 1 year ago committed by GitHub
parent 2ee147838a
commit 9f5ab67ba2

@@ -109,25 +109,17 @@ jobs:
           pip --version
           pip list
       - name: Benchmark DetectionModel
-        shell: python
-        run: |
-          from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='path with spaces/${{ matrix.model }}.pt', imgsz=160, half=False, hard_fail=0.26)
+        shell: bash
+        run: yolo benchmark model='path with spaces/${{ matrix.model }}.pt' imgsz=160 verbose=0.26
       - name: Benchmark SegmentationModel
-        shell: python
-        run: |
-          from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='path with spaces/${{ matrix.model }}-seg.pt', imgsz=160, half=False, hard_fail=0.30)
+        shell: bash
+        run: yolo benchmark model='path with spaces/${{ matrix.model }}-seg.pt' imgsz=160 verbose=0.30
       - name: Benchmark ClassificationModel
-        shell: python
-        run: |
-          from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='path with spaces/${{ matrix.model }}-cls.pt', imgsz=160, half=False, hard_fail=0.36)
+        shell: bash
+        run: yolo benchmark model='path with spaces/${{ matrix.model }}-cls.pt' imgsz=160 verbose=0.36
       - name: Benchmark PoseModel
-        shell: python
-        run: |
-          from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='path with spaces/${{ matrix.model }}-pose.pt', imgsz=160, half=False, hard_fail=0.17)
+        shell: bash
+        run: yolo benchmark model='path with spaces/${{ matrix.model }}-pose.pt' imgsz=160 verbose=0.17
       - name: Benchmark Summary
         run: |
           cat benchmarks.log
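Note: the new bash one-liners replace the old inline Python steps. For reference, the equivalent call through the Python API (same arguments as the removed code, with hard_fail renamed to verbose) would be a sketch like:

    from ultralytics.utils.benchmarks import benchmark

    # Equivalent of the new CLI step: benchmark at 160 px and fail
    # if any export format scores below the 0.26 metric floor.
    benchmark(model='yolov8n.pt', imgsz=160, half=False, verbose=0.26)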

@@ -76,7 +76,7 @@ jobs:
       - name: Run Benchmarks
         if: matrix.platforms == 'linux/amd64'  # arm64 images not supported on GitHub CI runners
         run: |
-          docker run ultralytics/ultralytics:${{ matrix.tags }} yolo benchmark model=yolov8n.pt imgsz=160
+          docker run ultralytics/ultralytics:${{ matrix.tags }} yolo benchmark model=yolov8n.pt imgsz=160 verbose=0.26
       - name: Push Image
         if: github.event_name == 'push' || github.event.inputs.push == true

@@ -30,7 +30,15 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt /u
 # Install pip packages
 RUN python3 -m pip install --upgrade pip wheel
-RUN pip install --no-cache -e . albumentations comet thop pycocotools onnx onnx-simplifier onnxruntime-gpu
+RUN pip install --no-cache -e '.[export]' thop albumentations comet pycocotools
+
+# Run exports to AutoInstall packages
+RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32
+RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
+# Requires <= Python 3.10, bug with paddlepaddle==2.5.0
+RUN pip install --no-cache paddlepaddle==2.4.2 x2paddle
+# Remove exported models
+RUN rm -rf tmp

 # Set environment variables
 ENV OMP_NUM_THREADS=1
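Note: the new export RUN lines act as a build-time warm-up. Per the "AutoInstall" comment, ultralytics installs any missing exporter dependencies on first use, so running tiny 32-pixel exports during the build bakes those packages into the image, and `rm -rf tmp` discards the throwaway artifacts. A minimal sketch of the same warm-up via the Python API (the tmp/ model path mirrors the Dockerfile; any local weights path works):

    from ultralytics import YOLO

    # First export of each format triggers AutoInstall of its dependencies
    # (e.g. TensorFlow tooling for Edge TPU, ncnn converters for NCNN).
    model = YOLO('tmp/yolov8n.pt')
    model.export(format='edgetpu', imgsz=32)  # tiny imgsz keeps the warm-up cheap
    model.export(format='ncnn', imgsz=32)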

@@ -28,8 +28,15 @@ RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED
 # Install pip packages
 RUN python3 -m pip install --upgrade pip wheel
-RUN pip install --no-cache -e . thop --extra-index-url https://download.pytorch.org/whl/cpu
+RUN pip install --no-cache -e '.[export]' thop --extra-index-url https://download.pytorch.org/whl/cpu
+
+# Run exports to AutoInstall packages
+RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32
+RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
+# Requires <= Python 3.10, bug with paddlepaddle==2.5.0
+# RUN pip install --no-cache paddlepaddle==2.4.2 x2paddle
+# Remove exported models
+RUN rm -rf tmp

 # Usage Examples -------------------------------------------------------------------------------------------------------

@@ -31,15 +31,12 @@ RUN python3 -m pip install --upgrade pip wheel
 RUN pip install --no-cache -e '.[export]' thop --extra-index-url https://download.pytorch.org/whl/cpu

 # Run exports to AutoInstall packages
-WORKDIR /tmp_exports
-RUN yolo export format=edgetpu imgsz=32
-RUN yolo export format=ncnn imgsz=32
+RUN yolo export model=tmp/yolov8n.pt format=edgetpu imgsz=32
+RUN yolo export model=tmp/yolov8n.pt format=ncnn imgsz=32
 # Requires <= Python 3.10, bug with paddlepaddle==2.5.0
 RUN pip install --no-cache paddlepaddle==2.4.2 x2paddle
-
-# Reset workdir
-WORKDIR /usr/src/ultralytics
-RUN rm -rf /tmp_exports
+# Remove exported models
+RUN rm -rf tmp

 # Usage Examples -------------------------------------------------------------------------------------------------------

@@ -40,18 +40,18 @@ full list of export arguments.

 ## Arguments

-Arguments such as `model`, `data`, `imgsz`, `half`, `device`, and `hard_fail` provide users with the flexibility to fine-tune
+Arguments such as `model`, `data`, `imgsz`, `half`, `device`, and `verbose` provide users with the flexibility to fine-tune
 the benchmarks to their specific needs and compare the performance of different export formats with ease.

-| Key         | Value   | Description                                                            |
-|-------------|---------|------------------------------------------------------------------------|
-| `model`     | `None`  | path to model file, i.e. yolov8n.pt, yolov8n.yaml                      |
-| `data`      | `None`  | path to yaml referencing the benchmarking dataset (under `val` label)  |
-| `imgsz`     | `640`   | image size as scalar or (h, w) list, i.e. (640, 480)                   |
-| `half`      | `False` | FP16 quantization                                                      |
-| `int8`      | `False` | INT8 quantization                                                      |
-| `device`    | `None`  | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu   |
-| `hard_fail` | `False` | do not continue on error (bool), or val floor threshold (float)        |
+| Key       | Value   | Description                                                            |
+|-----------|---------|------------------------------------------------------------------------|
+| `model`   | `None`  | path to model file, i.e. yolov8n.pt, yolov8n.yaml                      |
+| `data`    | `None`  | path to yaml referencing the benchmarking dataset (under `val` label)  |
+| `imgsz`   | `640`   | image size as scalar or (h, w) list, i.e. (640, 480)                   |
+| `half`    | `False` | FP16 quantization                                                      |
+| `int8`    | `False` | INT8 quantization                                                      |
+| `device`  | `None`  | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu   |
+| `verbose` | `False` | do not continue on error (bool), or val floor threshold (float)        |

 ## Export Formats
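Note: to make the documented keys concrete, a hedged sketch of a call that exercises them (values illustrative only):

    from ultralytics.utils.benchmarks import benchmark

    # verbose doubles as a pass/fail control: a bool stops on error,
    # a float asserts every format's val metric stays above that floor.
    benchmark(model='yolov8n.pt', data='coco8.yaml', imgsz=640,
              half=False, int8=False, device='cpu', verbose=0.26)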

@@ -319,7 +319,11 @@ class YOLO:
         overrides.update(kwargs)
         overrides['mode'] = 'benchmark'
         overrides = {**DEFAULT_CFG_DICT, **overrides}  # fill in missing overrides keys with defaults
-        return benchmark(model=self, imgsz=overrides['imgsz'], half=overrides['half'], device=overrides['device'])
+        return benchmark(model=self,
+                         imgsz=overrides['imgsz'],
+                         half=overrides['half'],
+                         device=overrides['device'],
+                         verbose=overrides['verbose'])

     def export(self, **kwargs):
         """

@@ -26,6 +26,7 @@ ncnn | `ncnn` | yolov8n_ncnn_model/

 import glob
 import platform
+import sys
 import time
 from pathlib import Path
@@ -49,7 +50,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
               half=False,
               int8=False,
               device='cpu',
-              hard_fail=False):
+              verbose=False):
     """
     Benchmark a YOLO model across different formats for speed and accuracy.
@@ -61,7 +62,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
         half (bool, optional): Use half-precision for the model if True. Default is False.
         int8 (bool, optional): Use int8-precision for the model if True. Default is False.
         device (str, optional): Device to run the benchmark on, either 'cpu' or 'cuda'. Default is 'cpu'.
-        hard_fail (bool | float | optional): If True or a float, assert benchmarks pass with given metric.
+        verbose (bool | float | optional): If True or a float, assert benchmarks pass with given metric.
             Default is False.

     Returns:
@@ -84,6 +85,8 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
             assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
             if i == 10:
                 assert MACOS or LINUX, 'TF.js export only supported on macOS and Linux'
+            elif i == 11:
+                assert sys.version_info < (3, 11), 'PaddlePaddle export only supported on Python<=3.10'
             if 'cpu' in device.type:
                 assert cpu, 'inference not supported on CPU'
             if 'cuda' in device.type:
@@ -121,7 +124,7 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
             metric, speed = results.results_dict[key], results.speed['inference']
             y.append([name, '', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
         except Exception as e:
-            if hard_fail:
+            if verbose:
                 assert type(e) is AssertionError, f'Benchmark failure for {name}: {e}'
             LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
             y.append([name, emoji, round(file_size(filename), 1), None, None])  # mAP, t_inference
@@ -136,9 +139,9 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
     with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
         f.write(s)

-    if hard_fail and isinstance(hard_fail, float):
+    if verbose and isinstance(verbose, float):
         metrics = df[key].array  # values to compare to floor
-        floor = hard_fail  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
+        floor = verbose  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
         assert all(x > floor for x in metrics if pd.notna(x)), f'Benchmark failure: metric(s) < floor {floor}'

     return df
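Note: the float branch above reduces to a simple floor assertion over the results DataFrame column. A self-contained sketch of that check (toy values):

    import pandas as pd

    # One metric per export format; NaN marks a format that failed to run.
    metrics = pd.Series([0.31, 0.29, float('nan'), 0.33])
    floor = 0.26  # minimum acceptable metric, as passed via verbose=0.26
    assert all(x > floor for x in metrics if pd.notna(x)), f'Benchmark failure: metric(s) < floor {floor}'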

@@ -28,7 +28,7 @@ TORCHVISION_0_10 = check_version(torchvision.__version__, '0.10.0')
 TORCH_1_9 = check_version(torch.__version__, '1.9.0')
 TORCH_1_11 = check_version(torch.__version__, '1.11.0')
 TORCH_1_12 = check_version(torch.__version__, '1.12.0')
-TORCH_2_0 = check_version(torch.__version__, minimum='2.0')
+TORCH_2_0 = check_version(torch.__version__, '2.0.0')

 @contextmanager
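Note: the change passes the minimum positionally, matching the sibling constants, and pins it to a full three-part version. A simplified sketch of what such a helper does, assuming plain >= semantics (the real ultralytics helper supports more comparison options):

    from packaging.version import parse

    def check_version(current: str, minimum: str) -> bool:
        # True when the installed version meets the minimum requirement
        return parse(current) >= parse(minimum)

    TORCH_2_0 = check_version('2.0.1', '2.0.0')  # -> True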
