diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 02e4fce..0af9318 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -12,6 +12,64 @@ on:
- cron: '0 0 * * *' # runs at 00:00 UTC every day
jobs:
+ Benchmarks:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest]
+ python-version: ['3.10']
+ model: [yolov8n]
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ #- name: Cache pip
+ # uses: actions/cache@v3
+ # with:
+ # path: ~/.cache/pip
+ # key: ${{ runner.os }}-Benchmarks-${{ hashFiles('requirements.txt') }}
+ # restore-keys: ${{ runner.os }}-Benchmarks-
+ - name: Install requirements
+ run: |
+ python -m pip install --upgrade pip wheel
+ pip install -e '.[export]' --extra-index-url https://download.pytorch.org/whl/cpu
+ - name: Check environment
+ run: |
+ echo "RUNNER_OS is ${{ runner.os }}"
+ echo "GITHUB_EVENT_NAME is ${{ github.event_name }}"
+ echo "GITHUB_WORKFLOW is ${{ github.workflow }}"
+ echo "GITHUB_ACTOR is ${{ github.actor }}"
+ echo "GITHUB_REPOSITORY is ${{ github.repository }}"
+ echo "GITHUB_REPOSITORY_OWNER is ${{ github.repository_owner }}"
+ python --version
+ pip --version
+ pip list
+ - name: TF Lite export
+ run: |
+ yolo export model=${{ matrix.model }}.pt format=tflite
+ yolo task=detect mode=predict model=${{ matrix.model }}_saved_model/${{ matrix.model }}_float16.tflite imgsz=640
+ - name: TF *.pb export
+ run: |
+ yolo export model=${{ matrix.model }}.pt format=pb
+ yolo task=detect mode=predict model=${{ matrix.model }}.pb imgsz=640
+ - name: TF Lite Edge TPU export
+ run: |
+ yolo export model=${{ matrix.model }}.pt format=edgetpu
+ - name: TF.js export
+ run: |
+ yolo export model=${{ matrix.model }}.pt format=tfjs
+ - name: Benchmark DetectionModel
+ run: |
+ # yolo benchmark model=${{ matrix.model }}.pt imgsz=320 min_metric=0.29
+ - name: Benchmark SegmentationModel
+ run: |
+ # yolo benchmark model=${{ matrix.model }}-seg.pt imgsz=320 min_metric=0.29
+ - name: Benchmark ClassificationModel
+ run: |
+ # yolo benchmark model=${{ matrix.model }}-cls.pt imgsz=224 min_metric=0.29
+
Tests:
timeout-minutes: 60
runs-on: ${{ matrix.os }}
@@ -49,15 +107,13 @@ jobs:
run: |
python -m pip install --upgrade pip wheel
if [ "${{ matrix.torch }}" == "1.8.0" ]; then
- pip install -e . torch==1.8.0 torchvision==0.9.0 onnx openvino-dev>=2022.3 pytest --extra-index-url https://download.pytorch.org/whl/cpu
+ pip install -e '.[export]' torch==1.8.0 torchvision==0.9.0 pytest --extra-index-url https://download.pytorch.org/whl/cpu
else
- pip install -e . onnx openvino-dev>=2022.3 pytest --extra-index-url https://download.pytorch.org/whl/cpu
+ pip install -e '.[export]' pytest --extra-index-url https://download.pytorch.org/whl/cpu
fi
- # pip install ultralytics (production)
shell: bash # for Windows compatibility
- name: Check environment
run: |
- # python -c "import utils; utils.notebook_init()"
echo "RUNNER_OS is ${{ runner.os }}"
echo "GITHUB_EVENT_NAME is ${{ github.event_name }}"
echo "GITHUB_WORKFLOW is ${{ github.workflow }}"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 01f13c4..2ab431d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -31,11 +31,11 @@ repos:
name: Upgrade code
args: [--py37-plus]
- # - repo: https://github.com/PyCQA/isort
- # rev: 5.11.4
- # hooks:
- # - id: isort
- # name: Sort imports
+ - repo: https://github.com/PyCQA/isort
+ rev: 5.12.0
+ hooks:
+ - id: isort
+ name: Sort imports
- repo: https://github.com/google/yapf
rev: v0.32.0
diff --git a/README.md b/README.md
index 23a4d4e..3cddc03 100644
--- a/README.md
+++ b/README.md
@@ -108,6 +108,12 @@ success = model.export(format="onnx") # export the model to ONNX format
Ultralytics [release](https://github.com/ultralytics/assets/releases). See
YOLOv8 [Python Docs](https://docs.ultralytics.com/python) for more examples.
+#### Model Architectures
+
+⭐ **NEW** YOLOv5u anchor-free models are now available.
+
+All supported model architectures can be found in the [Models](./ultralytics/models/) section.
+
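+As a minimal sketch, the new YOLOv5u checkpoints load through the same `YOLO` API shown above (`yolov5nu.pt` is one of the checkpoints listed in the Models section):
+
+```python
+from ultralytics import YOLO
+
+model = YOLO("yolov5nu.pt")  # load an anchor-free YOLOv5u model
+results = model("https://ultralytics.com/images/bus.jpg")  # run inference
+```
+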
#### Known Issues / TODOs
We are still working on several parts of YOLOv8! We aim to have these completed soon to bring the YOLOv8 feature set up
@@ -152,13 +158,13 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detection/) for usage ex
See [Segmentation Docs](https://docs.ultralytics.com/tasks/segmentation/) for usage examples with these models.
-| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| ---------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 |
-| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 |
-| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 |
-| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
-| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
+| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
+| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 |
+| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 |
+| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 |
+| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
+| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `yolo val segment data=coco.yaml device=0`
@@ -172,13 +178,13 @@ See [Segmentation Docs](https://docs.ultralytics.com/tasks/segmentation/) for us
See [Classification Docs](https://docs.ultralytics.com/tasks/classification/) for usage examples with these models.
-| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
-| ---------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
-| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 |
-| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 |
-| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 |
-| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
-| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |
+| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
+| -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
+| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 |
+| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 |
+| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 |
+| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
+| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |
- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set.
Reproduce by `yolo val classify data=path/to/ImageNet device=0`
diff --git a/README.zh-CN.md b/README.zh-CN.md
index 805fb39..e9ec585 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -132,13 +132,13 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式
实例分割
-| 模型 | 尺寸<br><sup>(像素) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | 推理速度<br><sup>CPU ONNX<br>(ms) | 推理速度<br><sup>A100 TensorRT<br>(ms) | 参数量<br><sup>(M) | FLOPs<br><sup>(B) |
-| ---------------------------------------------------------------------------------------- | --------------- | -------------------- | --------------------- | ----------------------------- | ---------------------------------- | --------------- | ----------------- |
-| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 |
-| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 |
-| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 |
-| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
-| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
+| 模型 | 尺寸<br><sup>(像素) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | 推理速度<br><sup>CPU ONNX<br>(ms) | 推理速度<br><sup>A100 TensorRT<br>(ms) | 参数量<br><sup>(M) | FLOPs<br><sup>(B) |
+| -------------------------------------------------------------------------------------------- | --------------- | -------------------- | --------------------- | ----------------------------- | ---------------------------------- | --------------- | ----------------- |
+| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 |
+| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 |
+| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 |
+| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
+| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
- **mAPval** 结果都在 [COCO val2017](http://cocodataset.org) 数据集上,使用单模型单尺度测试得到。
复现命令 `yolo val segment data=coco.yaml device=0`
@@ -149,13 +149,13 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式
分类
-| 模型 | 尺寸<br><sup>(像素) | acc<br><sup>top1 | acc<br><sup>top5 | 推理速度<br><sup>CPU ONNX<br>(ms) | 推理速度<br><sup>A100 TensorRT<br>(ms) | 参数量<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
-| ---------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ----------------------------- | ---------------------------------- | --------------- | ------------------------ |
-| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 |
-| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 |
-| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 |
-| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
-| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |
+| 模型 | 尺寸<br><sup>(像素) | acc<br><sup>top1 | acc<br><sup>top5 | 推理速度<br><sup>CPU ONNX<br>(ms) | 推理速度<br><sup>A100 TensorRT<br>(ms) | 参数量<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
+| -------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ----------------------------- | ---------------------------------- | --------------- | ------------------------ |
+| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 |
+| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 |
+| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 |
+| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
+| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |
- **acc** 都在 [ImageNet](https://www.image-net.org/) 数据集上,使用单模型单尺度测试得到。
复现命令 `yolo val classify data=path/to/ImageNet device=0`
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 61f43ab..fceb9c7 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -31,8 +31,7 @@ RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics
# Install pip packages
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel
-RUN pip install --no-cache ultralytics albumentations comet gsutil notebook \
- coremltools onnx onnx-simplifier onnxruntime openvino-dev>=2022.3
+RUN pip install --no-cache ultralytics[export] albumentations comet gsutil notebook
# tensorflow tensorflowjs \
# Set environment variables
diff --git a/docker/Dockerfile-arm64 b/docker/Dockerfile-arm64
index 3108c5f..ce33da1 100644
--- a/docker/Dockerfile-arm64
+++ b/docker/Dockerfile-arm64
@@ -26,8 +26,8 @@ RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics
# Install pip packages
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel
-RUN pip install --no-cache ultralytics albumentations gsutil notebook \
- coremltools onnx onnxruntime
+RUN pip install --no-cache ultralytics albumentations gsutil notebook
+ # coremltools onnx onnxruntime \
# tensorflow-aarch64 tensorflowjs \
# Cleanup
diff --git a/docker/Dockerfile-cpu b/docker/Dockerfile-cpu
index bf515e5..90e5007 100644
--- a/docker/Dockerfile-cpu
+++ b/docker/Dockerfile-cpu
@@ -26,8 +26,7 @@ RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics
# Install pip packages
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel
-RUN pip install --no-cache ultralytics albumentations gsutil notebook \
- coremltools onnx onnx-simplifier onnxruntime openvino-dev>=2022.3 \
+RUN pip install --no-cache ultralytics[export] albumentations gsutil notebook \
# tensorflow-cpu tensorflowjs \
--extra-index-url https://download.pytorch.org/whl/cpu
diff --git a/docs/callbacks.md b/docs/callbacks.md
new file mode 100644
index 0000000..5dce9b0
--- /dev/null
+++ b/docs/callbacks.md
@@ -0,0 +1,75 @@
+## Callbacks
+The Ultralytics framework supports callbacks as entry points at strategic stages of the train, val, export, and predict modes. Each callback accepts a `Trainer`, `Validator`, or `Predictor` object, depending on the operation type. All properties of these objects can be found in the Reference section of the docs.
+
+## Examples
+
+### Returning additional information with Prediction
+In this example, we want to return the original frame with each result object. Here's how we can do that:
+```python
+from ultralytics import YOLO
+
+def on_predict_batch_end(predictor):
+    # results -> List[batch_size]
+    _, _, im0s, _, _ = predictor.batch
+    im0s = im0s if isinstance(im0s, list) else [im0s]
+    predictor.results = zip(predictor.results, im0s)
+
+model = YOLO("yolov8n.pt")
+model.add_callback("on_predict_batch_end", on_predict_batch_end)
+for result, frame in model.predict():  # or model.track()
+    pass
+```
+
+## All callbacks
+Here are all supported callbacks.
+### Trainer
+`on_pretrain_routine_start`
+
+`on_pretrain_routine_end`
+
+`on_train_start`
+
+`on_train_epoch_start`
+
+`on_train_batch_start`
+
+`optimizer_step`
+
+`on_before_zero_grad`
+
+`on_train_batch_end`
+
+`on_train_epoch_end`
+
+`on_fit_epoch_end`
+
+`on_model_save`
+
+`on_train_end`
+
+`on_params_update`
+
+`teardown`
+
+### Validator
+`on_val_start`
+
+`on_val_batch_start`
+
+`on_val_batch_end`
+
+`on_val_end`
+
+### Predictor
+`on_predict_start`
+
+`on_predict_batch_start`
+
+`on_predict_postprocess_end`
+
+`on_predict_batch_end`
+
+`on_predict_end`
+
+### Exporter
+`on_export_start`
+
+`on_export_end`
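+
+To attach any of these, register a function under the callback's name with `add_callback`, as in the prediction example above. A minimal sketch for a trainer callback (the printed message is illustrative):
+
+```python
+from ultralytics import YOLO
+
+def on_train_epoch_end(trainer):
+    # called at the end of every training epoch
+    print(f"Finished epoch {trainer.epoch}")
+
+model = YOLO("yolov8n.pt")
+model.add_callback("on_train_epoch_end", on_train_epoch_end)
+model.train(data="coco8.yaml", epochs=1)
+```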
diff --git a/docs/predict.md b/docs/predict.md
index 67606d7..57c41f0 100644
--- a/docs/predict.md
+++ b/docs/predict.md
@@ -34,7 +34,8 @@ Results object consists of these component objects:
- `Results.boxes` : `Boxes` object with properties and methods for manipulating bboxes
- `Results.masks` : `Masks` object used to index masks or to get segment coordinates.
-- `Results.prob` : `torch.Tensor` containing the class probabilities/logits.
+- `Results.probs` : `torch.Tensor` containing the class probabilities/logits.
+- `Results.orig_shape` : `tuple` containing the original image size as (height, width).
Each result is composed of torch.Tensor by default, in which you can easily use following functionality:
@@ -92,3 +93,19 @@ results[0].probs # cls prob, (num_class, )
```
Class reference documentation for `Results` module and its components can be found [here](reference/results.md)
+
+## Visualizing results
+
+You can use the `visualize()` function of the `Results` object to get a visualization. It plots all components (boxes, masks, classification logits, etc.) found in the results object.
+```python
+res = model(img)
+res_plotted = res[0].visualize()
+cv2.imshow("result", res_plotted)
+```
+!!! example "`visualize()` arguments"
+
+    `show_conf (bool)`: Show confidence scores
+
+    `line_width (float)`: The line width of the boxes. Automatically scaled to the image size if not provided.
+
+    `font_size (float)`: The font size of the text. Automatically scaled to the image size if not provided.
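+
+For example, a quick sketch combining the arguments above (values are illustrative):
+
+```python
+res_plotted = res[0].visualize(show_conf=False, line_width=2)
+cv2.imshow("result", res_plotted)
+```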
diff --git a/docs/tasks/classification.md b/docs/tasks/classification.md
index 0f1ac3d..6b60df1 100644
--- a/docs/tasks/classification.md
+++ b/docs/tasks/classification.md
@@ -90,6 +90,7 @@ Use a trained YOLOv8n-cls model to run predictions on images.
yolo classify predict model=yolov8n-cls.pt source="https://ultralytics.com/images/bus.jpg" # predict with official model
yolo classify predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg" # predict with custom model
```
+
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/predict/) page.
## Export
@@ -117,20 +118,20 @@ Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc.
yolo export model=path/to/best.pt format=onnx # export custom trained model
```
- Available YOLOv8-cls export formats include:
-
- | Format | `format=` | Model |
- |----------------------------------------------------------------------------|---------------|-------------------------------|
- | [PyTorch](https://pytorch.org/) | - | `yolov8n-cls.pt` |
- | [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-cls.torchscript` |
- | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-cls.onnx` |
- | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-cls_openvino_model/` |
- | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-cls.engine` |
- | [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-cls.mlmodel` |
- | [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-cls_saved_model/` |
- | [TensorFlow GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-cls.pb` |
- | [TensorFlow Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-cls.tflite` |
- | [TensorFlow Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-cls_edgetpu.tflite` |
- | [TensorFlow.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-cls_web_model/` |
- | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-cls_paddle_model/` |
-
+Available YOLOv8-cls export formats include:
+
+| Format | `format=` | Model | Metadata |
+|--------------------------------------------------------------------|---------------|-------------------------------|----------|
+| [PyTorch](https://pytorch.org/) | - | `yolov8n-cls.pt` | ✅ |
+| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-cls.torchscript` | ✅ |
+| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-cls.onnx` | ✅ |
+| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-cls_openvino_model/` | ✅ |
+| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-cls.engine` | ✅ |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-cls.mlmodel` | ✅ |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-cls_saved_model/` | ✅ |
+| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-cls.pb` | ❌ |
+| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-cls.tflite` | ✅ |
+| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-cls_edgetpu.tflite` | ✅ |
+| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-cls_web_model/` | ✅ |
+| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-cls_paddle_model/` | ✅ |
+
diff --git a/docs/tasks/detection.md b/docs/tasks/detection.md
index 4374de2..d2f7c4f 100644
--- a/docs/tasks/detection.md
+++ b/docs/tasks/detection.md
@@ -92,6 +92,7 @@ Use a trained YOLOv8n model to run predictions on images.
yolo detect predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg" # predict with official model
yolo detect predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg" # predict with custom model
```
+
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/predict/) page.
## Export
@@ -119,19 +120,19 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc.
yolo export model=path/to/best.pt format=onnx # export custom trained model
```
- Available YOLOv8 export formats include:
-
- | Format | `format=` | Model |
- |----------------------------------------------------------------------------|--------------------|---------------------------|
- | [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` |
- | [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` |
- | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` |
- | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` |
- | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` |
- | [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` |
- | [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` |
- | [TensorFlow GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` |
- | [TensorFlow Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` |
- | [TensorFlow Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` |
- | [TensorFlow.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` |
- | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` |
+Available YOLOv8 export formats include:
+
+| Format | `format=` | Model | Metadata |
+|--------------------------------------------------------------------|---------------|---------------------------|----------|
+| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ |
+| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ |
+| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ |
+| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ |
+| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ |
+| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ |
+| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ |
+| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ |
+| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ |
+| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ |
diff --git a/docs/tasks/segmentation.md b/docs/tasks/segmentation.md
index 0dcdc54..5155115 100644
--- a/docs/tasks/segmentation.md
+++ b/docs/tasks/segmentation.md
@@ -96,6 +96,7 @@ Use a trained YOLOv8n-seg model to run predictions on images.
yolo segment predict model=yolov8n-seg.pt source="https://ultralytics.com/images/bus.jpg" # predict with official model
yolo segment predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg" # predict with custom model
```
+
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/predict/) page.
## Export
@@ -123,22 +124,21 @@ Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc.
yolo export model=path/to/best.pt format=onnx # export custom trained model
```
- Available YOLOv8-seg export formats include:
-
- | Format | `format=` | Model |
- |----------------------------------------------------------------------------|---------------|-------------------------------|
- | [PyTorch](https://pytorch.org/) | - | `yolov8n-seg.pt` |
- | [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-seg.torchscript` |
- | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-seg.onnx` |
- | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-seg_openvino_model/` |
- | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-seg.engine` |
- | [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-seg.mlmodel` |
- | [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-seg_saved_model/` |
- | [TensorFlow GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-seg.pb` |
- | [TensorFlow Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-seg.tflite` |
- | [TensorFlow Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-seg_edgetpu.tflite` |
- | [TensorFlow.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-seg_web_model/` |
- | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-seg_paddle_model/` |
-
+Available YOLOv8-seg export formats include:
+
+| Format | `format=` | Model | Metadata |
+|--------------------------------------------------------------------|---------------|-------------------------------|----------|
+| [PyTorch](https://pytorch.org/) | - | `yolov8n-seg.pt` | ✅ |
+| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-seg.torchscript` | ✅ |
+| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-seg.onnx` | ✅ |
+| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-seg_openvino_model/` | ✅ |
+| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-seg.engine` | ✅ |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-seg.mlmodel` | ✅ |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-seg_saved_model/` | ✅ |
+| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-seg.pb` | ❌ |
+| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-seg.tflite` | ✅ |
+| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-seg_edgetpu.tflite` | ✅ |
+| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-seg_web_model/` | ✅ |
+| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-seg_paddle_model/` | ✅ |
diff --git a/mkdocs.yml b/mkdocs.yml
index 95957f2..eef6271 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -1,3 +1,5 @@
+# Ultralytics YOLO 🚀, GPL-3.0 license
+
site_name: YOLOv8 Docs
repo_url: https://github.com/ultralytics/ultralytics
edit_uri: https://github.com/ultralytics/ultralytics/tree/main/docs
@@ -109,7 +111,8 @@ nav:
- Python: python.md
- Predict: predict.md
- Configuration: cfg.md
- - Customization Guide: engine.md
+ - Customization using callbacks: callbacks.md
+ - Advanced customization: engine.md
- Ultralytics HUB: hub.md
- iOS and Android App: app.md
- Reference:
diff --git a/requirements.txt b/requirements.txt
index 3e869ba..8fdd8bd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -25,7 +25,7 @@ seaborn>=0.11.0
# Export --------------------------------------
# coremltools>=6.0 # CoreML export
# onnx>=1.12.0 # ONNX export
-# onnx-simplifier>=0.4.1 # ONNX simplifier
+# onnxsim>=0.4.1 # ONNX simplifier
# nvidia-pyindex # TensorRT export
# nvidia-tensorrt # TensorRT export
# scikit-learn==0.19.2 # CoreML quantization
diff --git a/setup.py b/setup.py
index 35d85d9..dde8f54 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ from setuptools import find_packages, setup
# Settings
FILE = Path(__file__).resolve()
PARENT = FILE.parent # root directory
-README = (PARENT / "README.md").read_text(encoding="utf-8")
+README = (PARENT / 'README.md').read_text(encoding='utf-8')
REQUIREMENTS = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements((PARENT / 'requirements.txt').read_text())]
PKG_REQUIREMENTS = ['sentry_sdk'] # pip-only requirements
@@ -20,45 +20,46 @@ def get_version():
setup(
- name="ultralytics", # name of pypi package
+ name='ultralytics', # name of pypi package
version=get_version(), # version of pypi package
- python_requires=">=3.7",
+ python_requires='>=3.7',
license='GPL-3.0',
description='Ultralytics YOLOv8',
long_description=README,
- long_description_content_type="text/markdown",
- url="https://github.com/ultralytics/ultralytics",
+ long_description_content_type='text/markdown',
+ url='https://github.com/ultralytics/ultralytics',
project_urls={
'Bug Reports': 'https://github.com/ultralytics/ultralytics/issues',
'Funding': 'https://ultralytics.com',
'Source': 'https://github.com/ultralytics/ultralytics'},
- author="Ultralytics",
+ author='Ultralytics',
author_email='hello@ultralytics.com',
packages=find_packages(), # required
include_package_data=True,
install_requires=REQUIREMENTS + PKG_REQUIREMENTS,
extras_require={
- 'dev':
- ['check-manifest', 'pytest', 'pytest-cov', 'coverage', 'mkdocs', 'mkdocstrings[python]', 'mkdocs-material']},
+ 'dev': ['check-manifest', 'pytest', 'pytest-cov', 'coverage', 'mkdocs-material', 'mkdocstrings[python]'],
+ 'export': ['coremltools>=6.0', 'onnx', 'onnxsim', 'onnxruntime', 'openvino-dev>=2022.3'],
+ 'tf': ['onnx2tf', 'sng4onnx', 'tflite_support', 'tensorflow']},
classifiers=[
- "Development Status :: 4 - Beta",
- "Intended Audience :: Developers",
- "Intended Audience :: Education",
- "Intended Audience :: Science/Research",
- "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- "Programming Language :: Python :: 3.9",
- "Programming Language :: Python :: 3.10",
- "Programming Language :: Python :: 3.11",
- "Topic :: Software Development",
- "Topic :: Scientific/Engineering",
- "Topic :: Scientific/Engineering :: Artificial Intelligence",
- "Topic :: Scientific/Engineering :: Image Recognition",
- "Operating System :: POSIX :: Linux",
- "Operating System :: MacOS",
- "Operating System :: Microsoft :: Windows",],
- keywords="machine-learning, deep-learning, vision, ML, DL, AI, YOLO, YOLOv3, YOLOv5, YOLOv8, HUB, Ultralytics",
+ 'Development Status :: 4 - Beta',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Education',
+ 'Intended Audience :: Science/Research',
+ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
+ 'Programming Language :: Python :: 3.10',
+ 'Programming Language :: Python :: 3.11',
+ 'Topic :: Software Development',
+ 'Topic :: Scientific/Engineering',
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
+ 'Topic :: Scientific/Engineering :: Image Recognition',
+ 'Operating System :: POSIX :: Linux',
+ 'Operating System :: MacOS',
+ 'Operating System :: Microsoft :: Windows',],
+ keywords='machine-learning, deep-learning, vision, ML, DL, AI, YOLO, YOLOv3, YOLOv5, YOLOv8, HUB, Ultralytics',
entry_points={
'console_scripts': ['yolo = ultralytics.yolo.cfg:entrypoint', 'ultralytics = ultralytics.yolo.cfg:entrypoint']})
diff --git a/tests/test_cli.py b/tests/test_cli.py
index f594181..21d57e8 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -3,7 +3,7 @@
import subprocess
from pathlib import Path
-from ultralytics.yolo.utils import ROOT, SETTINGS
+from ultralytics.yolo.utils import LINUX, ROOT, SETTINGS
MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n'
CFG = 'yolov8n'
@@ -73,3 +73,8 @@ def test_export_segment_torchscript():
def test_export_classify_torchscript():
run(f'yolo export model={MODEL}-cls.pt format=torchscript')
+
+
+def test_export_detect_edgetpu(enabled=False):
+ if enabled and LINUX:
+ run(f'yolo export model={MODEL}.pt format=edgetpu')
diff --git a/tests/test_python.py b/tests/test_python.py
index 351ea1a..0219b8c 100644
--- a/tests/test_python.py
+++ b/tests/test_python.py
@@ -1,6 +1,5 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
-import platform
from pathlib import Path
import cv2
@@ -10,12 +9,11 @@ from PIL import Image
from ultralytics import YOLO
from ultralytics.yolo.data.build import load_inference_source
-from ultralytics.yolo.utils import ROOT, SETTINGS
+from ultralytics.yolo.utils import LINUX, ROOT, SETTINGS
MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n.pt'
CFG = 'yolov8n.yaml'
SOURCE = ROOT / 'assets/bus.jpg'
-MACOS = platform.system() == 'Darwin' # macOS environment
def test_model_forward():
@@ -87,24 +85,6 @@ def test_train_pretrained():
def test_export_torchscript():
- """
- Format Argument Suffix CPU GPU
- 0 PyTorch - .pt True True
- 1 TorchScript torchscript .torchscript True True
- 2 ONNX onnx .onnx True True
- 3 OpenVINO openvino _openvino_model True False
- 4 TensorRT engine .engine False True
- 5 CoreML coreml .mlmodel True False
- 6 TensorFlow SavedModel saved_model _saved_model True True
- 7 TensorFlow GraphDef pb .pb True True
- 8 TensorFlow Lite tflite .tflite True False
- 9 TensorFlow Edge TPU edgetpu _edgetpu.tflite False False
- 10 TensorFlow.js tfjs _web_model False False
- 11 PaddlePaddle paddle _paddle_model True True
- """
- from ultralytics.yolo.engine.exporter import export_formats
- print(export_formats())
-
model = YOLO(MODEL)
f = model.export(format='torchscript')
YOLO(f)(SOURCE) # exported model inference
@@ -124,9 +104,25 @@ def test_export_openvino():
def test_export_coreml(): # sourcery skip: move-assign
model = YOLO(MODEL)
- f = model.export(format='coreml')
- if MACOS:
- YOLO(f)(SOURCE) # model prediction only supported on macOS
+ model.export(format='coreml')
+ # if MACOS:
+ # YOLO(f)(SOURCE) # model prediction only supported on macOS
+
+
+def test_export_tflite(enabled=False):
+ # TF suffers from install conflicts on Windows and macOS
+ if enabled and LINUX:
+ model = YOLO(MODEL)
+ f = model.export(format='tflite')
+ YOLO(f)(SOURCE)
+
+
+def test_export_pb(enabled=False):
+ # TF suffers from install conflicts on Windows and macOS
+ if enabled and LINUX:
+ model = YOLO(MODEL)
+ f = model.export(format='pb')
+ YOLO(f)(SOURCE)
def test_export_paddle(enabled=False):
@@ -145,9 +141,8 @@ def test_workflow():
model = YOLO(MODEL)
model.train(data="coco8.yaml", epochs=1, imgsz=32)
model.val()
- print(model.metrics)
model.predict(SOURCE)
- model.export(format="onnx", opset=12) # export a model to ONNX format
+ model.export(format="onnx") # export a model to ONNX format
def test_predict_callback_and_setup():
@@ -170,3 +165,13 @@ def test_predict_callback_and_setup():
print('test_callback', bs)
boxes = result.boxes # Boxes object for bbox outputs
print(boxes)
+
+
+def test_result():
+ model = YOLO("yolov8n-seg.pt")
+ img = str(ROOT / "assets/bus.jpg")
+ res = model([img, img])
+ res[0].numpy()
+ res[0].cpu().numpy()
+ resimg = res[0].visualize(show_conf=False)
+ print(resimg)
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index ab8c107..50ac7f5 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
-__version__ = "8.0.39"
+__version__ = "8.0.40"
from ultralytics.yolo.engine.model import YOLO
from ultralytics.yolo.utils.checks import check_yolo as checks
diff --git a/ultralytics/models/README.md b/ultralytics/models/README.md
index e56b6e7..074c418 100644
--- a/ultralytics/models/README.md
+++ b/ultralytics/models/README.md
@@ -29,8 +29,81 @@ They may also be used directly in a Python environment, and accepts the same
```python
from ultralytics import YOLO
-model = YOLO("yolov8n.yaml") # build a YOLOv8n model from scratch
-
+model = YOLO("model.yaml") # build a YOLOv8n model from scratch
+# YOLO("model.pt") use pre-trained model if available
model.info() # display model information
model.train(data="coco128.yaml", epochs=100) # train the model
```
+
+## Pre-trained Model Architectures
+
+Ultralytics supports many model architectures. Visit the [models](#) page to view detailed information and usage.
+Any of these models can be used by loading their configs or pretrained checkpoints if available.
+
+Want to add your model architecture? [Here's](#) how you can contribute.
+
+### 1. YOLOv8
+
+**About** - Cutting-edge detection, segmentation and classification models developed by Ultralytics.
+**Citation** -
+Available Models:
+
+- Detection - `yolov8n`, `yolov8s`, `yolov8m`, `yolov8l`, `yolov8x`
+- Instance Segmentation - `yolov8n-seg`, `yolov8s-seg`, `yolov8m-seg`, `yolov8l-seg`, `yolov8x-seg`
+- Classification - `yolov8n-cls`, `yolov8s-cls`, `yolov8m-cls`, `yolov8l-cls`, `yolov8x-cls`
+
+Performance
+
+### Detection
+
+| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| ------------------------------------------------------------------------------------ | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
+| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 |
+| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 |
+| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 |
+| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
+| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
+
+### Segmentation
+
+| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
+| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 |
+| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 |
+| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 |
+| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
+| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
+
+### Classification
+
+| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 640 |
+| -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
+| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 |
+| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 |
+| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 |
+| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
+| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |
+
+
+
+### 2. YOLOv5u
+
+**About** - Anchor-free YOLOv5 models with a new detection head and a better speed-accuracy tradeoff.
+**Citation** -
+Available Models:
+
+- Detection - `yolov5nu`, `yolov5su`, `yolov5mu`, `yolov5lu`, `yolov5xu`
+
+Performance
+
+### Detection
+
+| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+| -------------------------------------------------------------------------------------- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
+| [YOLOv5nu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5nu.pt) | 640 | 34.3 | 73.6 | 1.06 | 2.6 | 7.7 |
+| [YOLOv5su](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5su.pt) | 640 | 43.0 | 120.7 | 1.27 | 9.1 | 24.0 |
+| [YOLOv5mu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5mu.pt) | 640 | 49.0 | 233.9 | 1.86 | 25.1 | 64.2 |
+| [YOLOv5lu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5lu.pt) | 640 | 52.2 | 408.4 | 2.50 | 53.2 | 135.0 |
+| [YOLOv5xu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5xu.pt) | 640 | 53.2 | 763.2 | 3.81 | 97.2 | 246.4 |
+
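+Usage is identical to the YOLOv8 models; a minimal sketch:
+
+```python
+from ultralytics import YOLO
+
+model = YOLO("yolov5nu.pt")  # load a pretrained anchor-free YOLOv5u model
+model.info()  # display model information
+```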
+
diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py
index 9878600..1b93a7b 100644
--- a/ultralytics/nn/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -24,9 +24,12 @@ def check_class_names(names):
# Check class names. Map imagenet class codes to human-readable names if required. Convert lists to dicts.
if isinstance(names, list): # names is a list
names = dict(enumerate(names)) # convert to dict
- if isinstance(names[0], str) and names[0].startswith('n0'): # imagenet class codes, i.e. 'n01440764'
- map = yaml_load(ROOT / 'yolo/data/datasets/ImageNet.yaml')['map'] # human-readable names
- names = {k: map[v] for k, v in names.items()}
+ if isinstance(names, dict):
+ if not all(isinstance(k, int) for k in names.keys()): # convert string keys to int, i.e. '0' to 0
+ names = {int(k): v for k, v in names.items()}
+ if isinstance(names[0], str) and names[0].startswith('n0'): # imagenet class codes, i.e. 'n01440764'
+ map = yaml_load(ROOT / 'yolo/data/datasets/ImageNet.yaml')['map'] # human-readable names
+ names = {k: map[v] for k, v in names.items()}
return names
@@ -129,7 +132,6 @@ class AutoBackend(nn.Module):
if batch_dim.is_static:
batch_size = batch_dim.get_length()
executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2
- stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata
elif engine: # TensorRT
LOGGER.info(f'Loading {w} for TensorRT inference...')
import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
@@ -138,7 +140,14 @@ class AutoBackend(nn.Module):
device = torch.device('cuda:0')
Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
logger = trt.Logger(trt.Logger.INFO)
+ # Read file
with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
+ # Read metadata length
+ meta_len = int.from_bytes(f.read(4), byteorder='little')
+ # Read metadata
+ meta = json.loads(f.read(meta_len).decode('utf-8'))
+ stride, names = int(meta['stride']), meta['names']
+ # Read engine
model = runtime.deserialize_cuda_engine(f.read())
context = model.create_execution_context()
bindings = OrderedDict()
@@ -216,7 +225,7 @@ class AutoBackend(nn.Module):
meta = ast.literal_eval(model.read(meta_file).decode("utf-8"))
stride, names = int(meta['stride']), meta['names']
elif tfjs: # TF.js
- raise NotImplementedError('ERROR: YOLOv8 TF.js inference is not supported')
+ raise NotImplementedError('YOLOv8 TF.js inference is not supported')
elif paddle: # PaddlePaddle
LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
@@ -245,7 +254,16 @@ class AutoBackend(nn.Module):
"See https://docs.ultralytics.com/tasks/detection/#export for help."
f"\n\n{EXPORT_FORMATS_TABLE}")
- # class names
+ # Load external metadata YAML
+ if xml or saved_model or paddle:
+ metadata = Path(w).parent / 'metadata.yaml'
+ if metadata.exists():
+ metadata = yaml_load(metadata)
+ stride, names = int(metadata['stride']), metadata['names'] # load metadata
+ else:
+ LOGGER.warning(f"WARNING ⚠️ Metadata not found at '{metadata}'")
+
+ # Check names
if 'names' not in locals(): # names missing
names = yaml_load(check_yaml(data))['names'] if data else {i: f'class{i}' for i in range(999)} # assign
names = check_class_names(names)
@@ -340,7 +358,7 @@ class AutoBackend(nn.Module):
if len(self.output_details) == 2: # segment
y = [y[1], np.transpose(y[0], (0, 3, 1, 2))]
y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
- y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels
+ # y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels
if isinstance(y, (list, tuple)):
return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
@@ -394,18 +412,3 @@ class AutoBackend(nn.Module):
types[8] &= not types[9] # tflite &= not edgetpu
triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc])
return types + [triton]
-
- @staticmethod
- def _load_metadata(f=Path('path/to/meta.yaml')):
- """
- Loads the metadata from a yaml file
-
- Args:
- f: The path to the metadata file.
- """
-
- # Load metadata from meta.yaml if it exists
- if f.exists():
- d = yaml_load(f)
- return d['stride'], d['names'] # assign stride, names
- return None, None
diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py
index f44c17d..1529126 100644
--- a/ultralytics/nn/tasks.py
+++ b/ultralytics/nn/tasks.py
@@ -248,6 +248,9 @@ class SegmentationModel(DetectionModel):
def __init__(self, cfg='yolov8n-seg.yaml', ch=3, nc=None, verbose=True):
super().__init__(cfg, ch, nc, verbose)
+ def _forward_augment(self, x):
+ raise NotImplementedError("WARNING ⚠️ SegmentationModel has not supported augment inference yet!")
+
class ClassificationModel(BaseModel):
# YOLOv8 classification model
diff --git a/ultralytics/tracker/__init__.py b/ultralytics/tracker/__init__.py
index 9a1ac3d..2eb9f41 100644
--- a/ultralytics/tracker/__init__.py
+++ b/ultralytics/tracker/__init__.py
@@ -1 +1 @@
-from .trackers import BYTETracker, BOTSORT
+from .trackers import BOTSORT, BYTETracker
diff --git a/ultralytics/tracker/track.py b/ultralytics/tracker/track.py
index 843b116..0da0d6f 100644
--- a/ultralytics/tracker/track.py
+++ b/ultralytics/tracker/track.py
@@ -1,8 +1,9 @@
-from ultralytics.tracker import BYTETracker, BOTSORT
-from ultralytics.yolo.utils.checks import check_requirements, check_yaml
-from ultralytics.yolo.utils import IterableSimpleNamespace, yaml_load
import torch
+from ultralytics.tracker import BOTSORT, BYTETracker
+from ultralytics.yolo.utils import IterableSimpleNamespace, yaml_load
+from ultralytics.yolo.utils.checks import check_requirements, check_yaml
+
TRACKER_MAP = {"bytetrack": BYTETracker, "botsort": BOTSORT}
check_requirements('lap') # for linear_assignment
diff --git a/ultralytics/tracker/trackers/__init__.py b/ultralytics/tracker/trackers/__init__.py
index b519a0a..225217c 100644
--- a/ultralytics/tracker/trackers/__init__.py
+++ b/ultralytics/tracker/trackers/__init__.py
@@ -1,2 +1,2 @@
-from .byte_tracker import BYTETracker
from .bot_sort import BOTSORT
+from .byte_tracker import BYTETracker
diff --git a/ultralytics/tracker/trackers/basetrack.py b/ultralytics/tracker/trackers/basetrack.py
index db61567..c19464a 100644
--- a/ultralytics/tracker/trackers/basetrack.py
+++ b/ultralytics/tracker/trackers/basetrack.py
@@ -1,6 +1,7 @@
-import numpy as np
from collections import OrderedDict
+import numpy as np
+
class TrackState:
New = 0
diff --git a/ultralytics/tracker/trackers/bot_sort.py b/ultralytics/tracker/trackers/bot_sort.py
index c9f3371..fab20a6 100644
--- a/ultralytics/tracker/trackers/bot_sort.py
+++ b/ultralytics/tracker/trackers/bot_sort.py
@@ -1,10 +1,12 @@
from collections import deque
+
import numpy as np
+
from ..utils import matching
from ..utils.gmc import GMC
from ..utils.kalman_filter import KalmanFilterXYWH
-from .byte_tracker import STrack, BYTETracker
from .basetrack import TrackState
+from .byte_tracker import BYTETracker, STrack
class BOTrack(STrack):
diff --git a/ultralytics/tracker/trackers/byte_tracker.py b/ultralytics/tracker/trackers/byte_tracker.py
index 65c6768..5da2d29 100644
--- a/ultralytics/tracker/trackers/byte_tracker.py
+++ b/ultralytics/tracker/trackers/byte_tracker.py
@@ -1,8 +1,8 @@
import numpy as np
-from .basetrack import BaseTrack, TrackState
from ..utils import matching
from ..utils.kalman_filter import KalmanFilterXYAH
+from .basetrack import BaseTrack, TrackState
class STrack(BaseTrack):
diff --git a/ultralytics/yolo/cfg/default.yaml b/ultralytics/yolo/cfg/default.yaml
index 85a9439..fd7ad9b 100644
--- a/ultralytics/yolo/cfg/default.yaml
+++ b/ultralytics/yolo/cfg/default.yaml
@@ -112,5 +112,4 @@ cfg: # for overriding defaults.yaml
v5loader: False # use legacy YOLOv5 dataloader
# Tracker settings ------------------------------------------------------------------------------------------------------
-tracker: botsort # tracker type, ['botsort', 'bytetrack']
-tracker_cfg: null # path to tracker config file
+tracker: botsort.yaml # tracker type, ['botsort.yaml', 'bytetrack.yaml']
diff --git a/ultralytics/yolo/data/augment.py b/ultralytics/yolo/data/augment.py
index 3c42e61..1809bb0 100644
--- a/ultralytics/yolo/data/augment.py
+++ b/ultralytics/yolo/data/augment.py
@@ -585,6 +585,7 @@ class Albumentations:
new = self.transform(image=im, bboxes=bboxes, class_labels=cls) # transformed
labels["img"] = new["image"]
labels["cls"] = np.array(new["class_labels"])
+ bboxes = np.array(new["bboxes"])
labels["instances"].update(bboxes=bboxes)
return labels
diff --git a/ultralytics/yolo/engine/exporter.py b/ultralytics/yolo/engine/exporter.py
index 1692be8..237b241 100644
--- a/ultralytics/yolo/engine/exporter.py
+++ b/ultralytics/yolo/engine/exporter.py
@@ -18,8 +18,8 @@ TensorFlow.js | `tfjs` | yolov8n_web_model/
PaddlePaddle | `paddle` | yolov8n_paddle_model/
Requirements:
- $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
- $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
+ $ pip install -r requirements.txt coremltools onnx onnxsim onnxruntime openvino-dev tensorflow-cpu # CPU
+ $ pip install -r requirements.txt coremltools onnx onnxsim onnxruntime-gpu openvino-dev tensorflow # GPU
Python:
from ultralytics import YOLO
@@ -69,13 +69,14 @@ from ultralytics.nn.tasks import DetectionModel, SegmentationModel
from ultralytics.yolo.cfg import get_cfg
from ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages
from ultralytics.yolo.data.utils import IMAGENET_MEAN, IMAGENET_STD, check_det_dataset
-from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, __version__, callbacks, colorstr, get_default_args, yaml_save
+from ultralytics.yolo.utils import (DEFAULT_CFG, LINUX, LOGGER, MACOS, WINDOWS, __version__, callbacks, colorstr,
+ get_default_args, yaml_save)
from ultralytics.yolo.utils.checks import check_imgsz, check_requirements, check_version, check_yaml
from ultralytics.yolo.utils.files import file_size
from ultralytics.yolo.utils.ops import Profile
from ultralytics.yolo.utils.torch_utils import get_latest_opset, select_device, smart_inference_mode
-MACOS = platform.system() == 'Darwin' # macOS environment
+CUDA = torch.cuda.is_available()
def export_formats():
@@ -229,27 +230,24 @@ class Exporter:
if coreml: # CoreML
f[4], _ = self._export_coreml()
if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats
- LOGGER.warning('WARNING ⚠️ YOLOv8 TensorFlow export support is still under development. '
+ LOGGER.warning('WARNING ⚠️ YOLOv8 TensorFlow export is still under development. '
'Please consider contributing to the effort if you have TF expertise. Thank you!')
nms = False
f[5], s_model = self._export_saved_model(nms=nms or self.args.agnostic_nms or tfjs,
agnostic_nms=self.args.agnostic_nms or tfjs)
-
- debug = False
- if debug:
- if pb or tfjs: # pb prerequisite to tfjs
- f[6], _ = self._export_pb(s_model)
- if tflite or edgetpu:
- f[7], _ = self._export_tflite(s_model,
- int8=self.args.int8 or edgetpu,
- data=self.args.data,
- nms=nms,
- agnostic_nms=self.args.agnostic_nms)
- if edgetpu:
- f[8], _ = self._export_edgetpu()
- self._add_tflite_metadata(f[8] or f[7])
- if tfjs:
- f[9], _ = self._export_tfjs()
+ if pb or tfjs: # pb prerequisite to tfjs
+ f[6], _ = self._export_pb(s_model)
+ if tflite or edgetpu:
+ f[7] = str(Path(f[5]) / (self.file.stem + '_float16.tflite'))
+ # f[7], _ = self._export_tflite(s_model,
+ # int8=self.args.int8 or edgetpu,
+ # data=self.args.data,
+ # nms=nms,
+ # agnostic_nms=self.args.agnostic_nms)
+ if edgetpu:
+ f[8], _ = self._export_edgetpu(tflite_model=f[7])
+ if tfjs:
+ f[9], _ = self._export_tfjs()
if paddle: # PaddlePaddle
f[10], _ = self._export_paddle()
@@ -258,13 +256,14 @@ class Exporter:
if any(f):
f = str(Path(f[-1]))
square = self.imgsz[0] == self.imgsz[1]
- s = f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not work. Use " \
- f"export 'imgsz={max(self.imgsz)}' if val is required." if not square else ''
+ s = '' if square else f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not " \
+ f"work. Use export 'imgsz={max(self.imgsz)}' if val is required."
imgsz = self.imgsz[0] if square else str(self.imgsz)[1:-1].replace(' ', '')
+ data = f"data={self.args.data}" if model.task == 'segment' and format == 'pb' else ''
LOGGER.info(
f'\nExport complete ({time.time() - t:.1f}s)'
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
- f"\nPredict: yolo task={model.task} mode=predict model={f} imgsz={imgsz}"
+ f"\nPredict: yolo task={model.task} mode=predict model={f} imgsz={imgsz} {data}"
f"\nValidate: yolo task={model.task} mode=val model={f} imgsz={imgsz} data={self.args.data} {s}"
f"\nVisualize: https://netron.app")
@@ -335,7 +334,7 @@ class Exporter:
check_requirements('onnxsim')
import onnxsim
- LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
+ LOGGER.info(f'{prefix} simplifying with onnxsim {onnxsim.__version__}...')
subprocess.run(f'onnxsim {f} {f}', shell=True)
except Exception as e:
LOGGER.info(f'{prefix} simplifier failure: {e}')
@@ -358,7 +357,7 @@ class Exporter:
framework="onnx",
compress_to_fp16=self.args.half) # export
ov.serialize(ov_model, f_ov) # save
- yaml_save(Path(f) / self.file.with_suffix('.yaml').name, self.metadata) # add metadata.yaml
+ yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml
return f, None
@try_export
@@ -372,7 +371,7 @@ class Exporter:
f = str(self.file).replace(self.file.suffix, f'_paddle_model{os.sep}')
pytorch2paddle(module=self.model, save_dir=f, jit_type='trace', input_examples=[self.im]) # export
- yaml_save(Path(f) / self.file.with_suffix('.yaml').name, self.metadata) # add metadata.yaml
+ yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml
return f, None
@try_export
@@ -436,7 +435,7 @@ class Exporter:
try:
import tensorrt as trt # noqa
except ImportError:
- if platform.system() == 'Linux':
+ if LINUX:
check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
import tensorrt as trt # noqa
@@ -482,8 +481,16 @@ class Exporter:
f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and self.args.half else 32} engine as {f}')
if builder.platform_has_fast_fp16 and self.args.half:
config.set_flag(trt.BuilderFlag.FP16)
+
+ # Write file
with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
+ # Metadata
+ meta = json.dumps(self.metadata)
+ t.write(len(meta).to_bytes(4, byteorder='little', signed=True))
+ t.write(meta.encode())
+ # Model
t.write(engine.serialize())
+
return f, None
@try_export
@@ -500,10 +507,10 @@ class Exporter:
try:
import tensorflow as tf # noqa
except ImportError:
- check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
+ check_requirements(f"tensorflow{'' if CUDA else '-macos' if MACOS else '-cpu' if LINUX else ''}")
import tensorflow as tf # noqa
check_requirements(("onnx", "onnx2tf", "sng4onnx", "onnxsim", "onnx_graphsurgeon", "tflite_support"),
- cmds="--extra-index-url https://pypi.ngc.nvidia.com ")
+ cmds="--extra-index-url https://pypi.ngc.nvidia.com")
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
f = str(self.file).replace(self.file.suffix, '_saved_model')
@@ -514,10 +521,11 @@ class Exporter:
# Export to TF SavedModel
subprocess.run(f'onnx2tf -i {onnx} -o {f} --non_verbose', shell=True)
+ yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml
# Add TFLite metadata
- for tflite_file in Path(f).rglob('*.tflite'):
- self._add_tflite_metadata(tflite_file)
+ for file in Path(f).rglob('*.tflite'):
+ self._add_tflite_metadata(file)
# Load saved_model
keras_model = tf.saved_model.load(f, tags=None, options=None)
@@ -537,7 +545,7 @@ class Exporter:
try:
import tensorflow as tf # noqa
except ImportError:
- check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
+ check_requirements(f"tensorflow{'' if CUDA else '-macos' if MACOS else '-cpu' if LINUX else ''}")
import tensorflow as tf # noqa
# from models.tf import TFModel
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 # noqa
@@ -628,11 +636,11 @@ class Exporter:
return f, None
@try_export
- def _export_edgetpu(self, prefix=colorstr('Edge TPU:')):
+ def _export_edgetpu(self, tflite_model='', prefix=colorstr('Edge TPU:')):
# YOLOv8 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
cmd = 'edgetpu_compiler --version'
help_url = 'https://coral.ai/docs/edgetpu/compiler/'
- assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
+ assert LINUX, f'export only supported on Linux. See {help_url}'
if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
@@ -646,11 +654,11 @@ class Exporter:
ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
- f = str(self.file).replace(self.file.suffix, '-int8_edgetpu.tflite') # Edge TPU model
- f_tfl = str(self.file).replace(self.file.suffix, '-int8.tflite') # TFLite model
+ f = str(tflite_model).replace('.tflite', '_edgetpu.tflite') # Edge TPU model
- cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {self.file.parent} {f_tfl}"
+ cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {self.file.parent} {tflite_model}"
subprocess.run(cmd.split(), check=True)
+ self._add_tflite_metadata(f)
return f, None
@try_export
@@ -681,6 +689,7 @@ class Exporter:
f_json.read_text(),
)
j.write(subst)
+ yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml
return f, None
def _add_tflite_metadata(self, file):
@@ -736,14 +745,6 @@ class Exporter:
populator.populate()
tmp_file.unlink()
- # TODO Rename this here and in `_add_tflite_metadata`
- def _extracted_from__add_tflite_metadata_15(self, _metadata_fb, arg1, arg2):
- # Creates input info.
- result = _metadata_fb.TensorMetadataT()
- result.name = arg1
- result.description = arg2
- return result
-
def _pipeline_coreml(self, model, prefix=colorstr('CoreML Pipeline:')):
# YOLOv8 CoreML pipeline
import coremltools as ct # noqa
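Note on the TensorRT hunk above: the serialized engine file now begins with length-prefixed JSON metadata rather than raw engine bytes, so readers must skip the header before deserializing. A minimal read-side sketch under that layout (the read_engine_metadata helper is hypothetical, not part of this diff):

import json

def read_engine_metadata(path):
    # Layout written by the exporter: 4-byte little-endian signed length,
    # UTF-8 JSON metadata, then the serialized TensorRT engine.
    with open(path, 'rb') as f:
        n = int.from_bytes(f.read(4), byteorder='little', signed=True)
        meta = json.loads(f.read(n).decode())
        engine = f.read()  # the remaining bytes are the engine itself
    return meta, engine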
diff --git a/ultralytics/yolo/engine/model.py b/ultralytics/yolo/engine/model.py
index 72c32a6..a7fc7b0 100644
--- a/ultralytics/yolo/engine/model.py
+++ b/ultralytics/yolo/engine/model.py
@@ -42,6 +42,7 @@ class YOLO:
model (str, Path): model to load or create
type (str): Type/version of models to use. Defaults to "v8".
"""
+ self._reset_callbacks()
self.type = type
self.ModelClass = None # model class
self.TrainerClass = None # trainer class
@@ -307,3 +308,8 @@ class YOLO:
for arg in 'augment', 'verbose', 'project', 'name', 'exist_ok', 'resume', 'batch', 'epochs', 'cache', \
'save_json', 'half', 'v5loader', 'device', 'cfg', 'save', 'rect', 'plots', 'opset':
args.pop(arg, None)
+
+ @staticmethod
+ def _reset_callbacks():
+ for event in callbacks.default_callbacks.keys():
+ callbacks.default_callbacks[event] = [callbacks.default_callbacks[event][0]]
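The new _reset_callbacks above trims every event's callback list back to its first (default) entry, so constructing several YOLO objects in one process does not accumulate previously registered hooks. A toy illustration of the effect (a plain dict standing in for callbacks.default_callbacks):

# Toy stand-in: event name -> list of callback functions
default_callbacks = {'on_train_start': [print, len, sum]}
for event in default_callbacks.keys():
    default_callbacks[event] = [default_callbacks[event][0]]
assert default_callbacks == {'on_train_start': [print]}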
diff --git a/ultralytics/yolo/engine/predictor.py b/ultralytics/yolo/engine/predictor.py
index 4678238..d7d8b10 100644
--- a/ultralytics/yolo/engine/predictor.py
+++ b/ultralytics/yolo/engine/predictor.py
@@ -85,7 +85,6 @@ class BasePredictor:
self.data = self.args.data # data_dict
self.imgsz = None
self.device = None
- self.classes = self.args.classes
self.dataset = None
self.vid_path, self.vid_writer = None, None
self.annotator = None
@@ -103,7 +102,7 @@ class BasePredictor:
def write_results(self, results, batch, print_string):
raise NotImplementedError("print_results function needs to be implemented")
- def postprocess(self, preds, img, orig_img, classes=None):
+ def postprocess(self, preds, img, orig_img):
return preds
@smart_inference_mode()
@@ -170,13 +169,13 @@ class BasePredictor:
# postprocess
with self.dt[2]:
- self.results = self.postprocess(preds, im, im0s, self.classes)
+ self.results = self.postprocess(preds, im, im0s)
self.run_callbacks("on_predict_postprocess_end")
# visualize, save, write results
for i in range(len(im)):
- p, im0 = (path[i], im0s[i].copy()) if self.source_type.webcam or self.source_type.from_img else (path,
- im0s)
+ p, im0 = (path[i], im0s[i].copy()) if self.source_type.webcam or self.source_type.from_img \
+ else (path, im0s.copy())
p = Path(p)
if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:
diff --git a/ultralytics/yolo/engine/results.py b/ultralytics/yolo/engine/results.py
index 9b67656..404e6fd 100644
--- a/ultralytics/yolo/engine/results.py
+++ b/ultralytics/yolo/engine/results.py
@@ -1,9 +1,13 @@
+from copy import deepcopy
from functools import lru_cache
import numpy as np
import torch
+import torchvision.transforms.functional as F
+from PIL import Image
from ultralytics.yolo.utils import LOGGER, ops
+from ultralytics.yolo.utils.plotting import Annotator, colors
class Results:
@@ -14,22 +18,24 @@ class Results:
boxes (Boxes, optional): A Boxes object containing the detection bounding boxes.
masks (Masks, optional): A Masks object containing the detection masks.
probs (torch.Tensor, optional): A tensor containing the detection class probabilities.
- orig_shape (tuple, optional): Original image size.
+ orig_img (numpy.ndarray): The original image as a numpy array.
+ names (dict, optional): A dictionary mapping class indices to class names.
Attributes:
boxes (Boxes, optional): A Boxes object containing the detection bounding boxes.
masks (Masks, optional): A Masks object containing the detection masks.
probs (torch.Tensor, optional): A tensor containing the detection class probabilities.
- orig_shape (tuple, optional): Original image size.
+ orig_img (numpy.ndarray): The original image as a numpy array.
+ names (dict, optional): A dictionary mapping class indices to class names.
data (torch.Tensor): The raw masks tensor
"""
- def __init__(self, boxes=None, masks=None, probs=None, orig_shape=None) -> None:
- self.boxes = Boxes(boxes, orig_shape) if boxes is not None else None # native size boxes
- self.masks = Masks(masks, orig_shape) if masks is not None else None # native size or imgsz masks
+ def __init__(self, boxes=None, masks=None, probs=None, orig_img=None, names=None) -> None:
+ self.orig_img = orig_img
+ self.orig_shape = orig_img.shape[:2]
+ self.boxes = Boxes(boxes, self.orig_shape) if boxes is not None else None # native size boxes
+ self.masks = Masks(masks, self.orig_shape) if masks is not None else None # native size or imgsz masks
self.probs = probs if probs is not None else None
- self.orig_shape = orig_shape
+ self.names = names
self.comp = ["boxes", "masks", "probs"]
def pandas(self):
@@ -37,7 +43,7 @@ class Results:
# TODO masks.pandas + boxes.pandas + cls.pandas
def __getitem__(self, idx):
- r = Results(orig_shape=self.orig_shape)
+ r = Results(orig_img=self.orig_img)
for item in self.comp:
if getattr(self, item) is None:
continue
@@ -53,7 +59,7 @@ class Results:
self.probs = probs
def cpu(self):
- r = Results(orig_shape=self.orig_shape)
+ r = Results(orig_img=self.orig_img)
for item in self.comp:
if getattr(self, item) is None:
continue
@@ -61,7 +67,7 @@ class Results:
return r
def numpy(self):
- r = Results(orig_shape=self.orig_shape)
+ r = Results(orig_img=self.orig_img)
for item in self.comp:
if getattr(self, item) is None:
continue
@@ -69,7 +75,7 @@ class Results:
return r
def cuda(self):
- r = Results(orig_shape=self.orig_shape)
+ r = Results(orig_img=self.orig_img)
for item in self.comp:
if getattr(self, item) is None:
continue
@@ -77,7 +83,7 @@ class Results:
return r
def to(self, *args, **kwargs):
- r = Results(orig_shape=self.orig_shape)
+ r = Results(orig_img=self.orig_img)
for item in self.comp:
if getattr(self, item) is None:
continue
@@ -118,6 +124,40 @@ class Results:
orig_shape (tuple, optional): Original image size.
""")
+ def visualize(self, show_conf=True, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
+ """
+ Plots the given result on an input RGB image. Accepts cv2(numpy) or PIL Image
+
+ Args:
+ show_conf (bool): Show confidence
+ line_width (Float): The line width of boxes. Automatically scaled to img size if not provided
+ font_size (Float): The font size of . Automatically scaled to img size if not provided
+ """
+ img = deepcopy(self.orig_img)
+ annotator = Annotator(img, line_width, font_size, font, pil, example)
+ boxes = self.boxes
+ masks = self.masks # keep the Masks object; self.masks.data would raise AttributeError when masks is None
+ probs = self.probs
+ names = self.names
+ if boxes is not None:
+ for d in reversed(boxes):
+ cls, conf = d.cls.squeeze(), d.conf.squeeze()
+ c = int(cls)
+ label = (f'{names[c]}' if names else f'{c}') + (f' {conf:.2f}' if show_conf else '')
+ annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
+
+ if masks is not None:
+ im_gpu = torch.as_tensor(img, dtype=torch.float16).permute(2, 0, 1).flip(0).contiguous()
+ im_gpu = F.resize(im_gpu, masks.data.shape[1:]) / 255
+ annotator.masks(masks.data, colors=[colors(x, True) for x in boxes.cls], im_gpu=im_gpu)
+
+ if probs is not None:
+ top5i = probs.argsort(0, descending=True)[:5].tolist() # top 5 indices
+ text = ', '.join(f'{names[j] if names else j} {probs[j]:.2f}' for j in top5i)
+ annotator.text((32, 32), text, txt_color=(255, 255, 255)) # TODO: allow setting colors
+
+ return img
+
class Boxes:
"""
diff --git a/ultralytics/yolo/utils/__init__.py b/ultralytics/yolo/utils/__init__.py
index bbc6379..c67d28a 100644
--- a/ultralytics/yolo/utils/__init__.py
+++ b/ultralytics/yolo/utils/__init__.py
@@ -34,6 +34,7 @@ AUTOINSTALL = str(os.getenv('YOLO_AUTOINSTALL', True)).lower() == 'true' # glob
VERBOSE = str(os.getenv('YOLO_VERBOSE', True)).lower() == 'true' # global verbose mode
TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format
LOGGING_NAME = 'ultralytics'
+MACOS, LINUX, WINDOWS = (platform.system() == x for x in ['Darwin', 'Linux', 'Windows']) # environment booleans
HELP_MSG = \
"""
Usage examples for running YOLOv8:
@@ -393,18 +394,15 @@ def get_user_config_dir(sub_dir='Ultralytics'):
Returns:
Path: The path to the user config directory.
"""
- # Get the operating system name
- os_name = platform.system()
-
# Return the appropriate config directory for each operating system
- if os_name == 'Windows':
+ if WINDOWS:
path = Path.home() / 'AppData' / 'Roaming' / sub_dir
- elif os_name == 'Darwin': # macOS
+ elif MACOS: # macOS
path = Path.home() / 'Library' / 'Application Support' / sub_dir
- elif os_name == 'Linux':
+ elif LINUX:
path = Path.home() / '.config' / sub_dir
else:
- raise ValueError(f'Unsupported operating system: {os_name}')
+ raise ValueError(f'Unsupported operating system: {platform.system()}')
# GCP and AWS lambda fix, only /tmp is writeable
if not is_dir_writeable(str(path.parent)):
@@ -421,7 +419,7 @@ USER_CONFIG_DIR = get_user_config_dir() # Ultralytics settings dir
def emojis(string=''):
# Return platform-dependent emoji-safe version of string
- return string.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else string
+ return string.encode().decode('ascii', 'ignore') if WINDOWS else string
def colorstr(*input):
@@ -617,7 +615,7 @@ def set_settings(kwargs, file=USER_CONFIG_DIR / 'settings.yaml'):
# Set logger
set_logging(LOGGING_NAME) # run before defining LOGGER
LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)
-if platform.system() == 'Windows':
+if WINDOWS:
for fn in LOGGER.info, LOGGER.warning:
setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging
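The MACOS, LINUX, WINDOWS one-liner above replaces the repeated platform.system() comparisons below it. For reference, the 3-way unpacking consumes the generator eagerly, evaluating one comparison per name:

import platform

MACOS, LINUX, WINDOWS = (platform.system() == x for x in ['Darwin', 'Linux', 'Windows'])
print(MACOS, LINUX, WINDOWS)  # at most one is True; all three are False on other systems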
diff --git a/ultralytics/yolo/utils/ops.py b/ultralytics/yolo/utils/ops.py
index 5c95684..2004e3e 100644
--- a/ultralytics/yolo/utils/ops.py
+++ b/ultralytics/yolo/utils/ops.py
@@ -139,6 +139,9 @@ def non_max_suppression(
labels=(),
max_det=300,
nc=0, # number of classes (optional)
+ max_time_img=0.05,
+ max_nms=30000,
+ max_wh=7680,
):
"""
Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box.
@@ -160,6 +163,9 @@ def non_max_suppression(
output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2).
max_det (int): The maximum number of boxes to keep after NMS.
nc (int): (optional) The number of classes output by the model. Any indices after this will be considered masks.
+ max_time_img (float): The maximum time (seconds) for processing one image.
+ max_nms (int): The maximum number of boxes into torchvision.ops.nms().
+ max_wh (int): The maximum box width and height in pixels.
Returns:
(List[torch.Tensor]): A list of length batch_size, where each element is a tensor of
@@ -185,9 +191,7 @@ def non_max_suppression(
# Settings
# min_wh = 2 # (pixels) minimum box width and height
- max_wh = 7680 # (pixels) maximum box width and height
- max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
- time_limit = 0.5 + 0.05 * bs # seconds to quit after
+ time_limit = 0.5 + max_time_img * bs # seconds to quit after
redundant = True # require redundant detections
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
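With max_time_img exposed as a parameter, the NMS time budget becomes time_limit = 0.5 + max_time_img * bs rather than the hard-coded 0.5 + 0.05 * bs, so callers can trade latency guarantees against thoroughness. The default keeps the old numbers:

max_time_img = 0.05  # default, matches the previous hard-coded value
for bs in (1, 16, 64):
    print(f'bs={bs}: time_limit={0.5 + max_time_img * bs:.2f}s')  # 0.55s, 1.30s, 3.70s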
diff --git a/ultralytics/yolo/utils/plotting.py b/ultralytics/yolo/utils/plotting.py
index 43d547e..4e3af60 100644
--- a/ultralytics/yolo/utils/plotting.py
+++ b/ultralytics/yolo/utils/plotting.py
@@ -136,7 +136,11 @@ class Annotator:
if anchor == 'bottom': # start y from font bottom
w, h = self.font.getsize(text) # text width, height
xy[1] += 1 - h
- self.draw.text(xy, text, fill=txt_color, font=self.font)
+ if self.pil:
+ self.draw.text(xy, text, fill=txt_color, font=self.font)
+ else:
+ tf = max(self.lw - 1, 1) # font thickness
+ cv2.putText(self.im, text, xy, 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA)
def fromarray(self, im):
# Update self.im from a numpy array
diff --git a/ultralytics/yolo/v8/classify/predict.py b/ultralytics/yolo/v8/classify/predict.py
index efd311c..f80c834 100644
--- a/ultralytics/yolo/v8/classify/predict.py
+++ b/ultralytics/yolo/v8/classify/predict.py
@@ -18,11 +18,12 @@ class ClassificationPredictor(BasePredictor):
img = img.half() if self.model.fp16 else img.float() # uint8 to fp16/32
return img
- def postprocess(self, preds, img, orig_img, classes=None):
+ def postprocess(self, preds, img, orig_img):
results = []
for i, pred in enumerate(preds):
- shape = orig_img[i].shape if isinstance(orig_img, list) else orig_img.shape
- results.append(Results(probs=pred, orig_shape=shape[:2]))
+ im0 = orig_img[i] if isinstance(orig_img, list) else orig_img # do not rebind the loop input
+ results.append(Results(probs=pred.softmax(0), orig_img=im0, names=self.model.names))
+
return results
def write_results(self, idx, results, batch):
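Since postprocess now applies softmax(0) before building Results, Results.probs holds normalized class probabilities rather than raw logits. A quick standalone check of that behavior:

import torch

logits = torch.tensor([2.0, 0.5, -1.0])  # raw classifier output for one image
probs = logits.softmax(0)                # what postprocess now stores in Results.probs
top1 = int(probs.argmax())
print(top1, float(probs[top1]))          # 0, ~0.79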
diff --git a/ultralytics/yolo/v8/detect/predict.py b/ultralytics/yolo/v8/detect/predict.py
index 69ae86f..cdc0251 100644
--- a/ultralytics/yolo/v8/detect/predict.py
+++ b/ultralytics/yolo/v8/detect/predict.py
@@ -19,7 +19,7 @@ class DetectionPredictor(BasePredictor):
img /= 255 # 0 - 255 to 0.0 - 1.0
return img
- def postprocess(self, preds, img, orig_img, classes=None):
+ def postprocess(self, preds, img, orig_img):
preds = ops.non_max_suppression(preds,
self.args.conf,
self.args.iou,
@@ -29,9 +29,10 @@ class DetectionPredictor(BasePredictor):
results = []
for i, pred in enumerate(preds):
- shape = orig_img[i].shape if isinstance(orig_img, list) else orig_img.shape
+ im0 = orig_img[i] if isinstance(orig_img, list) else orig_img # do not rebind the loop input
+ shape = im0.shape
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
- results.append(Results(boxes=pred, orig_shape=shape[:2]))
+ results.append(Results(boxes=pred, orig_img=im0, names=self.model.names))
return results
def write_results(self, idx, results, batch):
diff --git a/ultralytics/yolo/v8/segment/predict.py b/ultralytics/yolo/v8/segment/predict.py
index 9606b6e..6942a4b 100644
--- a/ultralytics/yolo/v8/segment/predict.py
+++ b/ultralytics/yolo/v8/segment/predict.py
@@ -10,7 +10,7 @@ from ultralytics.yolo.v8.detect.predict import DetectionPredictor
class SegmentationPredictor(DetectionPredictor):
- def postprocess(self, preds, img, orig_img, classes=None):
+ def postprocess(self, preds, img, orig_img):
# TODO: filter by classes
p = ops.non_max_suppression(preds[0],
self.args.conf,
@@ -22,9 +22,11 @@ class SegmentationPredictor(DetectionPredictor):
results = []
proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported
for i, pred in enumerate(p):
- shape = orig_img[i].shape if isinstance(orig_img, list) else orig_img.shape
+ im0 = orig_img[i] if isinstance(orig_img, list) else orig_img # do not rebind the loop input
+ shape = im0.shape
if not len(pred):
- results.append(Results(boxes=pred[:, :6], orig_shape=shape[:2])) # save empty boxes
+ results.append(Results(boxes=pred[:, :6], orig_img=im0,
+ names=self.model.names)) # save empty boxes
continue
if self.args.retina_masks:
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
@@ -32,7 +34,7 @@ class SegmentationPredictor(DetectionPredictor):
else:
masks = ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True) # HWC
pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
- results.append(Results(boxes=pred[:, :6], masks=masks, orig_shape=shape[:2]))
+ results.append(Results(boxes=pred[:, :6], masks=masks, orig_img=im0, names=self.model.names))
return results
def write_results(self, idx, results, batch):
diff --git a/ultralytics/yolo/v8/segment/val.py b/ultralytics/yolo/v8/segment/val.py
index 556ac1f..40bc687 100644
--- a/ultralytics/yolo/v8/segment/val.py
+++ b/ultralytics/yolo/v8/segment/val.py
@@ -28,19 +28,8 @@ class SegmentationValidator(DetectionValidator):
return batch
def init_metrics(self, model):
- val = self.data.get(self.args.split, '') # validation path
- self.is_coco = isinstance(val, str) and val.endswith(f'coco{os.sep}val2017.txt') # is COCO dataset
- self.class_map = ops.coco80_to_coco91_class() if self.is_coco else list(range(1000))
- self.args.save_json |= self.is_coco and not self.training # run on final val if training COCO
- self.names = model.names
- self.nc = len(model.names)
- self.metrics.names = self.names
- self.metrics.plot = self.args.plots
- self.confusion_matrix = ConfusionMatrix(nc=self.nc)
+ super().init_metrics(model)
self.plot_masks = []
- self.seen = 0
- self.jdict = []
- self.stats = []
if self.args.save_json:
check_requirements('pycocotools>=2.0.6')
self.process = ops.process_mask_upsample # more accurate