From 95f96dc5bc1ad6fc22fa9ede1c9fe36d4c6abb56 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 Apr 2023 00:21:03 +0200 Subject: [PATCH] `ultralytics 8.0.72` faster Windows trainings and corrupt cache fix (#1912) Co-authored-by: andreaswimmer <53872150+andreaswimmer@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 2 +- README.md | 16 +- README.zh-CN.md | 362 +++++++++++------------------ docs/modes/train.md | 2 +- docs/tasks/pose.md | 16 +- docs/usage/cfg.md | 2 +- docs/yolov5/train_custom_data.md | 2 +- examples/tutorial.ipynb | 262 ++++++++++----------- tests/test_python.py | 2 - ultralytics/__init__.py | 2 +- ultralytics/models/README.md | 16 +- ultralytics/tracker/utils/gmc.py | 38 +-- ultralytics/yolo/cfg/default.yaml | 2 +- ultralytics/yolo/data/build.py | 17 +- ultralytics/yolo/data/dataset.py | 1 - ultralytics/yolo/engine/model.py | 4 +- ultralytics/yolo/engine/results.py | 4 +- ultralytics/yolo/utils/__init__.py | 1 + 18 files changed, 325 insertions(+), 426 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index e3e2e06..d4c2679 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -30,7 +30,7 @@ jobs: If this is a 🐛 Bug Report, please provide a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us debug it. - If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results). + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tips_for_best_training_results/). ## Install diff --git a/README.md b/README.md index 40eb0d2..b6b2ce0 100644 --- a/README.md +++ b/README.md @@ -181,14 +181,14 @@ See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usag See [Pose Docs](https://docs.ultralytics.com/tasks/) for usage examples with these models. -| Model | size
(pixels) | mAPbox 50-95 | mAPpose 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | - | 49.7 | 131.8 | 1.18 | 3.3 | 9.2 |
| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | - | 59.2 | 233.2 | 1.42 | 11.6 | 30.2 |
| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | - | 63.6 | 456.3 | 2.00 | 26.4 | 81.0 |
| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | - | 67.0 | 784.5 | 2.59 | 44.4 | 168.6 |
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | - | 68.9 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | - | 71.5 | 4088.7 | 10.04 | 99.1 | 1066.4 |
+| Model | size (pixels) | mAPpose 50-95 | mAPpose 50 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | 49.7 | 79.7 | 131.8 | 1.18 | 3.3 | 9.2 |
| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | 59.2 | 85.8 | 233.2 | 1.42 | 11.6 | 30.2 |
| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | 63.6 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 |
| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | 67.0 | 89.9 | 784.5 | 2.59 | 44.4 | 168.6 |
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | 68.9 | 90.4 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | 71.5 | 91.3 | 4088.7 | 10.04 | 99.1 | 1066.4 |

- **mAPval** values are for single-model single-scale on [COCO Keypoints val2017](http://cocodataset.org) dataset.
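To sanity-check the new mAPpose columns locally, here is a minimal validation sketch; it assumes `ultralytics` 8.0.72 is installed and substitutes the tiny `coco8-pose.yaml` dataset for full COCO Keypoints, so the scores will not match the table:

```python
from ultralytics import YOLO

# Load the smallest pretrained pose checkpoint from the table above
model = YOLO("yolov8n-pose.pt")

# coco8-pose is an 8-image sanity set; use data="coco-pose.yaml" on a GPU
# instance to reproduce the published mAP numbers
metrics = model.val(data="coco8-pose.yaml")
print(metrics.box.map)    # mAP50-95, mirroring the docs' own val example
print(metrics.box.map50)  # mAP50
```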
diff --git a/README.zh-CN.md b/README.zh-CN.md
index bf7f0ef..81253be 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -1,266 +1,170 @@
-[English](README.md) | [简体中文](README.zh-CN.md)
-Ultralytics CI | YOLOv8 Citation | Docker Pulls
-Run on Gradient | Open In Colab | Open In Kaggle
-[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model developed by [Ultralytics](https://ultralytics.com). Building on the success of previous YOLO versions, it introduces new features and improvements that further boost performance and flexibility. YOLOv8 is designed to be fast, accurate and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks.
-To request an Enterprise License, please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
-## Documentation
-See the [YOLOv8 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See the quickstart examples below.
-Install
-
-Pip install the ultralytics package, including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt), in a [**Python>=3.7**](https://www.python.org/) environment with [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
+# YOLOv8 Pose Models

-```bash
-pip install ultralytics
-```

Pose estimation is a task that involves identifying the location of specific points in an image, usually referred
to as keypoints. The keypoints can represent various parts of the object, such as joints, landmarks or other distinctive
features. The locations of the keypoints are usually represented as a set of 2D `[x, y]` or 3D `[x, y, visible]`
coordinates.

-Usage

The output of a pose estimation model is a set of points that represent the keypoints on an object in the image, usually
along with the confidence scores for each point. Pose estimation is a good choice when you need to identify specific
parts of an object in a scene and their location in relation to each other.

-YOLOv8 can be used directly from the command-line interface (CLI) with the `yolo` command:

**Pro Tip:** YOLOv8 _pose_ models use the `-pose` suffix, i.e. `yolov8n-pose.pt`. These models are trained on the [COCO keypoints](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco-pose.yaml) dataset and are suitable for a variety of pose estimation tasks.

-```bash
-yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'
-```

## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8)

YOLOv8 pretrained Pose models are shown here. Detect, Segment and Pose models are pretrained on
the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify
models are pretrained on
the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset.

[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest
Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.

| Model | size (pixels) | mAPpose 50-95 | mAPpose 50 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | 49.7 | 79.7 | 131.8 | 1.18 | 3.3 | 9.2 |
| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | 59.2 | 85.8 | 233.2 | 1.42 | 11.6 | 30.2 |
| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | 63.6 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 |
| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | 67.0 | 89.9 | 784.5 | 2.59 | 44.4 | 168.6 |
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | 68.9 | 90.4 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | 71.5 | 91.3 | 4088.7 | 10.04 | 99.1 | 1066.4 |

- **mAPval** values are for single-model single-scale on [COCO Keypoints val2017](http://cocodataset.org)
  dataset. Reproduce by `yolo val pose data=coco-pose.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/)
  instance. Reproduce by `yolo val pose data=coco8-pose.yaml batch=1 device=0|cpu`

## Train

Train a YOLOv8-pose model on the COCO128-pose dataset.

### Python

```python
from ultralytics import YOLO

# Load a model
model = YOLO("yolov8n-pose.yaml")  # build a new model from YAML
model = YOLO("yolov8n-pose.pt")  # load a pretrained model (recommended for training)
model = YOLO("yolov8n-pose.yaml").load(
    "yolov8n-pose.pt"
)  # build from YAML and transfer weights

# Train the model
model.train(data="coco8-pose.yaml", epochs=100, imgsz=640)
```

-The `yolo` command can be used for a variety of tasks and modes and accepts additional arguments, e.g. `imgsz=640`. See the YOLOv8 [Docs](https://docs.ultralytics.com) for a full list of available `yolo` [arguments](https://docs.ultralytics.com/usage/cfg/).

### CLI

-```bash
-yolo task=detect mode=train model=yolov8n.pt args...
-      classify       predict       yolov8n-cls.yaml  args...
-      segment        val           yolov8n-seg.yaml  args...
-      export         yolov8n.pt    format=onnx       args...
-```

```bash
# Build a new model from YAML and start training from scratch
yolo pose train data=coco8-pose.yaml model=yolov8n-pose.yaml epochs=100 imgsz=640

# Start training from a pretrained *.pt model
yolo pose train data=coco8-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640

# Build a new model from YAML, transfer pretrained weights to it and start training
yolo pose train data=coco8-pose.yaml model=yolov8n-pose.yaml pretrained=yolov8n-pose.pt epochs=100 imgsz=640
```

-YOLOv8 can also be used directly in a Python environment, and it accepts the same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above:

## Val

Validate trained YOLOv8n-pose model accuracy on the COCO128-pose dataset. No arguments need to be passed, as the `model`
retains its training `data` and arguments as model attributes.

### Python

```python
from ultralytics import YOLO

-# Load the model
-model = YOLO("yolov8n.yaml")  # build a new model from scratch
-model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
+# Load a model
+model = YOLO("yolov8n-pose.pt")  # load an official model
+model = YOLO("path/to/best.pt")  # load a custom model

-# Use the model
-results = model.train(data="coco128.yaml", epochs=3)  # train the model
-results = model.val()  # evaluate model performance on the validation set
-results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
-success = model.export(format="onnx")  # export the model to ONNX format
+# Validate the model
+metrics = model.val()  # no arguments needed, dataset and settings remembered
+metrics.box.map  # map50-95
+metrics.box.map50  # map50
+metrics.box.map75  # map75
+metrics.box.maps  # a list containing map50-95 for each category
```

-[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the
-latest Ultralytics [release](https://github.com/ultralytics/ultralytics/releases).

### CLI

```bash
yolo pose val model=yolov8n-pose.pt # val official model
yolo pose val model=path/to/best.pt # val custom model
```

-## Models

-All YOLOv8 pretrained models can be found here. Detect and Segment models are pretrained on the COCO dataset, while Classify models are pretrained on the ImageNet dataset.

## Predict

-On first use, [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically
-from the latest Ultralytics [release](https://github.com/ultralytics/ultralytics/releases).

Use a trained YOLOv8n-pose model to run predictions on images.
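Before the full Python and CLI examples below, here is a minimal sketch of reading the predicted keypoints back out of the results, assuming the `keypoints` attribute that `Results` objects expose for pose models in this release:

```python
from ultralytics import YOLO

model = YOLO("yolov8n-pose.pt")
results = model("https://ultralytics.com/images/bus.jpg")

# One keypoints tensor per image: (num_detections, num_keypoints, 2 or 3),
# i.e. [x, y] or [x, y, visible] per keypoint, matching the intro above
for r in results:
    print(r.keypoints.shape)
```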
-Detection

### Python

-| Model | size (pixels) | mAPval 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 |
| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 |
| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 |
| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |

- **mAPval** values are for single-model single-scale on the [COCO val2017](http://cocodataset.org) dataset.
  Reproduce by `yolo val detect data=coco.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
  Reproduce by `yolo val detect data=coco128.yaml batch=1 device=0|cpu`

```python
from ultralytics import YOLO

# Load a model
model = YOLO("yolov8n-pose.pt") # load an official model
model = YOLO("path/to/best.pt") # load a custom model
+# Predict with the model +results = model("https://ultralytics.com/images/bus.jpg") # predict on an image +``` -
-Instance Segmentation

-| Model | size (pixels) | mAPbox 50-95 | mAPmask 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 |
| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 |
| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 |
| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |

- **mAPval** values are for single-model single-scale on the [COCO val2017](http://cocodataset.org) dataset.
  Reproduce by `yolo val segment data=coco.yaml device=0`
- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
  Reproduce by `yolo val segment data=coco128-seg.yaml batch=1 device=0|cpu`

See full `predict` mode details in the [Predict](https://docs.ultralytics.com/modes/predict/) page.
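Complementing the single-image examples, a hedged sketch of the generator-style streaming mode (the `stream=True` flag documented on the Predict page; the video path is a placeholder):

```python
from ultralytics import YOLO

model = YOLO("yolov8n-pose.pt")

# stream=True yields one Results object per frame, keeping memory bounded on long videos
for result in model("path/to/video.mp4", stream=True):
    print(result.boxes.xyxy)  # per-frame detection boxes
```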
+## Export -
-Classification

### Python

-| Model | size (pixels) | acc top1 | acc top5 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) at 640 |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 |
| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 |
| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 |
| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |

- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset, single-model single-scale.
  Reproduce by `yolo val classify data=path/to/ImageNet device=0`
- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance.
  Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`

```python
from ultralytics import YOLO

# Load a model
model = YOLO("yolov8n-pose.pt") # load an official model
model = YOLO("path/to/best.pt") # load a custom trained model

# Export the model
model.export(format="onnx")
```

### CLI

```bash
yolo export model=yolov8n-pose.pt format=onnx # export official model
yolo export model=path/to/best.pt format=onnx # export custom trained model
```

-Pose

See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples with these models.

| Model | size (pixels) | mAPbox 50-95 | mAPpose 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | - | 49.7 | 131.8 | 1.18 | 3.3 | 9.2 |
| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | - | 59.2 | 233.2 | 1.42 | 11.6 | 30.2 |
| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | - | 63.6 | 456.3 | 2.00 | 26.4 | 81.0 |
| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | - | 67.0 | 784.5 | 2.59 | 44.4 | 168.6 |
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | - | 68.9 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | - | 71.5 | 4088.7 | 10.04 | 99.1 | 1066.4 |

- **mAPval** values are for single-model single-scale on [COCO Keypoints val2017](http://cocodataset.org)
  dataset.
Reproduce by `yolo val pose data=coco-pose.yaml device=0` -- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) - instance. -
Reproduce by `yolo val pose data=coco8-pose.yaml batch=1 device=0|cpu`
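Rounding out the Export section, a small round-trip sketch; it assumes the ONNX extras install on demand and that `export()` returns the path of the exported file, as in recent `ultralytics` releases:

```python
from ultralytics import YOLO

# Export the PyTorch pose checkpoint to ONNX...
path = YOLO("yolov8n-pose.pt").export(format="onnx")  # e.g. 'yolov8n-pose.onnx'

# ...then predict or validate directly on the exported model
onnx_model = YOLO(path)
results = onnx_model("https://ultralytics.com/images/bus.jpg")
```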

-## Integrations


| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
| :---: | :---: | :---: | :---: |
| Label and export your custom datasets directly to YOLOv8 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv8 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov8-readme-comet) lets you save YOLOv8 models, resume training and interactively visualize and debug predictions | Run YOLOv8 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |

## Ultralytics HUB

[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv8 🚀 models, and deploy to the real world in a seamless experience. Get started for **free** now! You can also run YOLOv8 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)!

## Contributing

We love your input! We want to make contributing to YOLOv8 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md), and fill out the [survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you to all our contributors!

## License

YOLOv8 is available under two different licenses:

- **GPL-3.0 License**: See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for details.
- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license).

## Contact

For YOLOv8 bug reports and feature requests, please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues)
or the [Ultralytics Community Forum](https://community.ultralytics.com).
- - - - - - - - - - - - - - - - - -
+Available YOLOv8-pose export formats are in the table below. You can predict or validate directly on exported models, +i.e. `yolo predict model=yolov8n-pose.onnx`. Usage examples are shown for your model after export completes. + +| Format | `format` Argument | Model | Metadata | +| ------------------------------------------------------------------ | ----------------- | ------------------------------ | -------- | +| [PyTorch](https://pytorch.org/) | - | `yolov8n-pose.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-pose.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-pose.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-pose_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-pose.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-pose.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-pose_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-pose.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-pose.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-pose_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-pose_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-pose_paddle_model/` | ✅ | + +See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. diff --git a/docs/modes/train.md b/docs/modes/train.md index 5074aa3..88f6ff4 100644 --- a/docs/modes/train.md +++ b/docs/modes/train.md @@ -75,7 +75,7 @@ task. | `image_weights` | `False` | use weighted image selection for training | | `rect` | `False` | rectangular training with each batch collated for minimum padding | | `cos_lr` | `False` | use cosine learning rate scheduler | -| `close_mosaic` | `10` | disable mosaic augmentation for final 10 epochs | +| `close_mosaic` | `0` | (int) disable mosaic augmentation for final epochs | | `resume` | `False` | resume training from last checkpoint | | `amp` | `True` | Automatic Mixed Precision (AMP) training, choices=[True, False] | | `lr0` | `0.01` | initial learning rate (i.e. SGD=1E-2, Adam=1E-3) | diff --git a/docs/tasks/pose.md b/docs/tasks/pose.md index fb56208..b6e89ac 100644 --- a/docs/tasks/pose.md +++ b/docs/tasks/pose.md @@ -23,14 +23,14 @@ the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/ [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. -| Model | size
(pixels) | mAPbox 50-95 | mAPpose 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
|------|------|------|------|------|------|------|------|
| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | - | 49.7 | 131.8 | 1.18 | 3.3 | 9.2 |
| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | - | 59.2 | 233.2 | 1.42 | 11.6 | 30.2 |
| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | - | 63.6 | 456.3 | 2.00 | 26.4 | 81.0 |
| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | - | 67.0 | 784.5 | 2.59 | 44.4 | 168.6 |
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | - | 68.9 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | - | 71.5 | 4088.7 | 10.04 | 99.1 | 1066.4 |
+| Model | size (pixels) | mAPpose 50-95 | mAPpose 50 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
|------|------|------|------|------|------|------|------|
| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | 49.7 | 79.7 | 131.8 | 1.18 | 3.3 | 9.2 |
| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | 59.2 | 85.8 | 233.2 | 1.42 | 11.6 | 30.2 |
| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | 63.6 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 |
| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | 67.0 | 89.9 | 784.5 | 2.59 | 44.4 | 168.6 |
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | 68.9 | 90.4 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | 71.5 | 91.3 | 4088.7 | 10.04 | 99.1 | 1066.4 |

- **mAPval** values are for single-model single-scale on [COCO Keypoints val2017](http://cocodataset.org) dataset.

diff --git a/docs/usage/cfg.md b/docs/usage/cfg.md
index ec648a8..590820a 100644
--- a/docs/usage/cfg.md
+++ b/docs/usage/cfg.md
@@ -97,7 +97,7 @@ The training settings for YOLO models encompass various hyperparameters and conf
 | `image_weights` | `False` | use weighted image selection for training |
 | `rect` | `False` | rectangular training with each batch collated for minimum padding |
 | `cos_lr` | `False` | use cosine learning rate scheduler |
-| `close_mosaic` | `10` | disable mosaic augmentation for final 10 epochs |
+| `close_mosaic` | `0` | (int) disable mosaic augmentation for final epochs |
 | `resume` | `False` | resume training from last checkpoint |
 | `amp` | `True` | Automatic Mixed Precision (AMP) training, choices=[True, False] |
 | `lr0` | `0.01` | initial learning rate (i.e. SGD=1E-2, Adam=1E-3) |

diff --git a/docs/yolov5/train_custom_data.md b/docs/yolov5/train_custom_data.md
index e249861..e0ae263 100644
--- a/docs/yolov5/train_custom_data.md
+++ b/docs/yolov5/train_custom_data.md
@@ -25,7 +25,7 @@ Creating a custom model to detect your objects is an iterative process of collec

YOLOv5 models must be trained on labelled data in order to learn classes of objects in that data. There are two options for creating your dataset before you start training:
-Use Roboflow to manage your dataset in YOLO format +Use Roboflow to create your dataset in YOLO format ### 1.1 Collect Images diff --git a/examples/tutorial.ipynb b/examples/tutorial.ipynb index 92897c6..950b371 100644 --- a/examples/tutorial.ipynb +++ b/examples/tutorial.ipynb @@ -59,21 +59,21 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "ea235da2-8fb5-4094-9dc2-8523d0800a22" + "outputId": "2ea6e0b9-1a62-4355-c246-5e8b7b1dafff" }, "source": [ "%pip install ultralytics\n", "import ultralytics\n", "ultralytics.checks()" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "Ultralytics YOLOv8.0.57 🚀 Python-3.9.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15102MiB)\n", - "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 25.9/166.8 GB disk)\n" + "Ultralytics YOLOv8.0.71 🚀 Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 23.3/166.8 GB disk)\n" ] } ] @@ -96,24 +96,28 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "fe0a5a26-3bcc-4c1f-e688-cae00ee5b958" + "outputId": "c578afbd-47cd-4d11-beec-8b5c31fcfba8" }, "source": [ "# Run inference on an image with YOLOv8n\n", "!yolo predict model=yolov8n.pt source='https://ultralytics.com/images/zidane.jpg'" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "Ultralytics YOLOv8.0.57 🚀 Python-3.9.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15102MiB)\n", + "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt to yolov8n.pt...\n", + "100% 6.23M/6.23M [00:00<00:00, 195MB/s]\n", + "Ultralytics YOLOv8.0.71 🚀 Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", "YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n", "\n", - "Found https://ultralytics.com/images/zidane.jpg locally at zidane.jpg\n", - "image 1/1 /content/zidane.jpg: 384x640 2 persons, 1 tie, 14.3ms\n", - "Speed: 0.5ms preprocess, 14.3ms inference, 1.8ms postprocess per image at shape (1, 3, 640, 640)\n" + "Downloading https://ultralytics.com/images/zidane.jpg to zidane.jpg...\n", + "100% 165k/165k [00:00<00:00, 51.7MB/s]\n", + "image 1/1 /content/zidane.jpg: 384x640 2 persons, 1 tie, 60.9ms\n", + "Speed: 0.6ms preprocess, 60.9ms inference, 301.3ms postprocess per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/detect/predict\u001b[0m\n" ] } ] @@ -156,7 +160,7 @@ "cell_type": "code", "metadata": { "id": "X58w8JLpMnjH", - "outputId": "ae2040df-0f95-4701-c680-8bbb7be92bcd", + "outputId": "3e5a9c48-8eba-45eb-d92f-8456cf94b60e", "colab": { "base_uri": "https://localhost:8080/" } @@ -165,26 +169,26 @@ "# Validate YOLOv8n on COCO128 val\n", "!yolo val model=yolov8n.pt data=coco128.yaml" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "Ultralytics YOLOv8.0.57 🚀 Python-3.9.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15102MiB)\n", + "Ultralytics YOLOv8.0.71 🚀 Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", "YOLOv8n summary (fused): 168 layers, 3151904 parameters, 0 gradients, 8.7 GFLOPs\n", "\n", "Dataset 'coco128.yaml' images not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to /content/datasets/coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 87.2MB/s]\n", + "100% 6.66M/6.66M [00:01<00:00, 6.80MB/s]\n", "Unzipping 
/content/datasets/coco128.zip to /content/datasets...\n", - "Dataset download success ✅ (0.4s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "Dataset download success ✅ (2.2s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 16.9MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 2007.12it/s]\n", + "100% 755k/755k [00:00<00:00, 107MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 80 corrupt: 100% 128/128 [00:00<00:00, 1183.28it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95): 100% 8/8 [00:08<00:00, 1.04s/it]\n", + " Class Images Instances Box(P R mAP50 mAP50-95): 100% 8/8 [00:12<00:00, 1.54s/it]\n", " all 128 929 0.64 0.537 0.605 0.446\n", " person 128 254 0.797 0.677 0.764 0.538\n", " bicycle 128 6 0.514 0.333 0.315 0.264\n", @@ -257,7 +261,7 @@ " scissors 128 1 1 0 0.249 0.0746\n", " teddy bear 128 21 0.877 0.333 0.591 0.394\n", " toothbrush 128 5 0.743 0.6 0.638 0.374\n", - "Speed: 2.9ms preprocess, 6.2ms inference, 0.0ms loss, 5.1ms postprocess per image\n", + "Speed: 5.3ms preprocess, 20.1ms inference, 0.0ms loss, 11.7ms postprocess per image\n", "Results saved to \u001b[1mruns/detect/val\u001b[0m\n" ] } @@ -271,7 +275,7 @@ "source": [ "# 3. Train\n", "\n", - "
\n", + "
\n", "\n", "Train YOLOv8 on [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/), [Classify](https://docs.ultralytics.com/tasks/classify/) and [Pose](https://docs.ultralytics.com/tasks/pose/) datasets. See [YOLOv8 Train Docs](https://docs.ultralytics.com/modes/train/) for more information." ] @@ -280,7 +284,7 @@ "cell_type": "code", "metadata": { "id": "1NcFxRcFdJ_O", - "outputId": "fcb5e3da-3766-4c72-97e1-73c1bd2ccbef", + "outputId": "b60a1f74-8035-4f9e-b4b0-604f9cf76231", "colab": { "base_uri": "https://localhost:8080/" } @@ -289,14 +293,14 @@ "# Train YOLOv8n on COCO128 for 3 epochs\n", "!yolo train model=yolov8n.pt data=coco128.yaml epochs=3 imgsz=640" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "Ultralytics YOLOv8.0.57 🚀 Python-3.9.16 torch-1.13.1+cu116 CUDA:0 (Tesla T4, 15102MiB)\n", - "\u001b[34m\u001b[1myolo/engine/trainer: \u001b[0mtask=detect, mode=train, model=yolov8n.pt, data=coco128.yaml, epochs=3, patience=50, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=None, exist_ok=False, pretrained=False, optimizer=SGD, verbose=True, seed=0, deterministic=True, single_cls=False, image_weights=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, show=False, save_txt=False, save_conf=False, save_crop=False, hide_labels=False, hide_conf=False, vid_stride=1, line_thickness=3, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, boxes=True, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, cfg=None, v5loader=False, tracker=botsort.yaml, save_dir=runs/detect/train\n", + "Ultralytics YOLOv8.0.71 🚀 Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", + "\u001b[34m\u001b[1myolo/engine/trainer: \u001b[0mtask=detect, mode=train, model=yolov8n.pt, data=coco128.yaml, epochs=3, patience=50, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=None, exist_ok=False, pretrained=False, optimizer=SGD, verbose=True, seed=0, deterministic=True, single_cls=False, image_weights=False, rect=False, cos_lr=False, close_mosaic=0, resume=False, amp=True, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, show=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, vid_stride=1, line_thickness=3, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, boxes=True, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, 
dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, cfg=None, v5loader=False, tracker=botsort.yaml, save_dir=runs/detect/train\n", "\n", " from n params module arguments \n", " 0 -1 1 464 ultralytics.nn.modules.Conv [3, 16, 3, 2] \n", @@ -325,18 +329,13 @@ "Model summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs\n", "\n", "Transferred 355/355 items from pretrained weights\n", - "2023-03-26 14:57:47.224672: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n", - "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "2023-03-26 14:57:48.209047: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/lib/python3.9/dist-packages/cv2/../../lib64:/usr/local/lib/python3.9/dist-packages/cv2/../../lib64:/usr/lib64-nvidia\n", - "2023-03-26 14:57:48.209179: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/lib/python3.9/dist-packages/cv2/../../lib64:/usr/local/lib/python3.9/dist-packages/cv2/../../lib64:/usr/lib64-nvidia\n", - "2023-03-26 14:57:48.209199: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/detect/train', view at http://localhost:6006/\n", "\u001b[34m\u001b[1mAMP: \u001b[0mrunning Automatic Mixed Precision (AMP) checks with YOLOv8n...\n", "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 64 weight(decay=0.0005), 63 bias\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00(pixels) | mAPbox
50-95 | mAPpose
50-95 | Speed
CPU ONNX
(ms) | Speed
A100 TensorRT
(ms) | params
(M) | FLOPs
(B) | -| ---------------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | -| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | - | 49.7 | 131.8 | 1.18 | 3.3 | 9.2 | -| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | - | 59.2 | 233.2 | 1.42 | 11.6 | 30.2 | -| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | - | 63.6 | 456.3 | 2.00 | 26.4 | 81.0 | -| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | - | 67.0 | 784.5 | 2.59 | 44.4 | 168.6 | -| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | - | 68.9 | 1607.1 | 3.73 | 69.4 | 263.2 | -| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | - | 71.5 | 4088.7 | 10.04 | 99.1 | 1066.4 | +| Model | size
| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
| [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | 49.7 | 79.7 | 131.8 | 1.18 | 3.3 | 9.2 |
| [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | 59.2 | 85.8 | 233.2 | 1.42 | 11.6 | 30.2 |
| [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | 63.6 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 |
| [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | 67.0 | 89.9 | 784.5 | 2.59 | 44.4 | 168.6 |
| [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | 68.9 | 90.4 | 1607.1 | 3.73 | 69.4 | 263.2 |
| [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | 71.5 | 91.3 | 4088.7 | 10.04 | 99.1 | 1066.4 |
diff --git a/ultralytics/tracker/utils/gmc.py b/ultralytics/tracker/utils/gmc.py index fec09a3..4f268e1 100644 --- a/ultralytics/tracker/utils/gmc.py +++ b/ultralytics/tracker/utils/gmc.py @@ -3,7 +3,6 @@ import copy import cv2 -import matplotlib.pyplot as plt import numpy as np from ultralytics.yolo.utils import LOGGER @@ -205,24 +204,25 @@ class GMC: currPoints = np.array(currPoints) # Draw the keypoint matches on the output image - if 0: - matches_img = np.hstack((self.prevFrame, frame)) - matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR) - W = np.size(self.prevFrame, 1) - for m in goodMatches: - prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_) - curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_) - curr_pt[0] += W - color = np.random.randint(0, 255, 3) - color = (int(color[0]), int(color[1]), int(color[2])) - - matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA) - matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1) - matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1) - - plt.figure() - plt.imshow(matches_img) - plt.show() + # if False: + # import matplotlib.pyplot as plt + # matches_img = np.hstack((self.prevFrame, frame)) + # matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR) + # W = np.size(self.prevFrame, 1) + # for m in goodMatches: + # prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_) + # curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_) + # curr_pt[0] += W + # color = np.random.randint(0, 255, 3) + # color = (int(color[0]), int(color[1]), int(color[2])) + # + # matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA) + # matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1) + # matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1) + # + # plt.figure() + # plt.imshow(matches_img) + # plt.show() # Find rigid matrix if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): diff --git a/ultralytics/yolo/cfg/default.yaml b/ultralytics/yolo/cfg/default.yaml index 302d651..658a64f 100644 --- a/ultralytics/yolo/cfg/default.yaml +++ b/ultralytics/yolo/cfg/default.yaml @@ -28,7 +28,7 @@ single_cls: False # train multi-class data as single-class image_weights: False # use weighted image selection for training rect: False # rectangular training if mode='train' or rectangular validation if mode='val' cos_lr: False # use cosine learning rate scheduler -close_mosaic: 10 # disable mosaic augmentation for final 10 epochs +close_mosaic: 0 # (int) disable mosaic augmentation for final epochs resume: False # resume training from last checkpoint amp: True # Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check # Segmentation diff --git a/ultralytics/yolo/data/build.py b/ultralytics/yolo/data/build.py index b5d3efd..4dd0748 100644 --- a/ultralytics/yolo/data/build.py +++ b/ultralytics/yolo/data/build.py @@ -162,7 +162,18 @@ def check_source(source): def load_inference_source(source=None, transforms=None, imgsz=640, vid_stride=1, stride=32, auto=True): """ - TODO: docs + Loads an inference source for object detection and applies necessary transformations. + + Args: + source (str, Path, Tensor, PIL.Image, np.ndarray): The input source for inference. + transforms (callable, optional): Custom transformations to be applied to the input source. + imgsz (int, optional): The size of the image for inference. Default is 640. 
+ vid_stride (int, optional): The frame interval for video sources. Default is 1. + stride (int, optional): The model stride. Default is 32. + auto (bool, optional): Automatically apply pre-processing. Default is True. + + Returns: + dataset: A dataset object for the specified input source. """ source, webcam, screenshot, from_img, in_memory, tensor = check_source(source) source_type = source.source_type if in_memory else SourceTypes(webcam, screenshot, from_img, tensor) @@ -179,7 +190,6 @@ def load_inference_source(source=None, transforms=None, imgsz=640, vid_stride=1, auto=auto, transforms=transforms, vid_stride=vid_stride) - elif screenshot: dataset = LoadScreenshots(source, imgsz=imgsz, stride=stride, auto=auto, transforms=transforms) elif from_img: @@ -192,6 +202,7 @@ def load_inference_source(source=None, transforms=None, imgsz=640, vid_stride=1, transforms=transforms, vid_stride=vid_stride) - setattr(dataset, 'source_type', source_type) # attach source types + # Attach source types to the dataset + setattr(dataset, 'source_type', source_type) return dataset diff --git a/ultralytics/yolo/data/dataset.py b/ultralytics/yolo/data/dataset.py index 0363859..98c03ad 100644 --- a/ultralytics/yolo/data/dataset.py +++ b/ultralytics/yolo/data/dataset.py @@ -77,7 +77,6 @@ class YOLODataset(BaseDataset): nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f'{self.prefix}Scanning {path.parent / path.stem}...' total = len(self.im_files) - nc = len(self.data['names']) nkpt, ndim = self.data.get('kpt_shape', (0, 0)) if self.use_keypoints and (nkpt <= 0 or ndim not in (2, 3)): raise ValueError("'kpt_shape' in data.yaml missing or incorrect. Should be a list with [number of " diff --git a/ultralytics/yolo/engine/model.py b/ultralytics/yolo/engine/model.py index 73fa49b..3469c2b 100644 --- a/ultralytics/yolo/engine/model.py +++ b/ultralytics/yolo/engine/model.py @@ -253,10 +253,10 @@ class YOLO: source (str, optional): The input source for object tracking. Can be a file path or a video stream. stream (bool, optional): Whether the input source is a video stream. Defaults to False. persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False. - **kwargs: Additional keyword arguments for the tracking process. + **kwargs (optional): Additional keyword arguments for the tracking process. Returns: - object: The tracking results. + (List[ultralytics.yolo.engine.results.Results]): The tracking results. """ if not hasattr(self.predictor, 'trackers'): diff --git a/ultralytics/yolo/engine/results.py b/ultralytics/yolo/engine/results.py index fb51304..b50ddc0 100644 --- a/ultralytics/yolo/engine/results.py +++ b/ultralytics/yolo/engine/results.py @@ -244,8 +244,7 @@ class Boxes(BaseTensor): orig_shape (tuple): Original image size, in the format (height, width). Attributes: - boxes (torch.Tensor) or (numpy.ndarray): A tensor or numpy array containing the detection boxes, - with shape (num_boxes, 6). + boxes (torch.Tensor) or (numpy.ndarray): The detection boxes with shape (num_boxes, 6). orig_shape (torch.Tensor) or (numpy.ndarray): Original image size, in the format (height, width). is_track (bool): True if the boxes also include track IDs, False otherwise. 
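The `n in (6, 7)` assertion in this hunk encodes the box layout: `xyxy, conf, cls` for plain detections and `xyxy, track_id, conf, cls` when track IDs are present. A minimal sketch of what that means for `is_track`, using hypothetical values and assuming the `Boxes` API of this release:

```python
import torch

from ultralytics.yolo.engine.results import Boxes

# 7 columns -> xyxy, track_id, conf, cls, so is_track becomes True
data = torch.tensor([[10.0, 20.0, 110.0, 220.0, 5.0, 0.91, 0.0]])
boxes = Boxes(data, orig_shape=(480, 640))
print(boxes.is_track)  # True
print(boxes.id)        # tensor([5.]) -- the track-ID column
```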
@@ -272,7 +271,6 @@ class Boxes(BaseTensor): boxes = boxes[None, :] n = boxes.shape[-1] assert n in (6, 7), f'expected `n` in [6, 7], but got {n}' # xyxy, (track_id), conf, cls - # TODO self.is_track = n == 7 self.boxes = boxes self.orig_shape = torch.as_tensor(orig_shape, device=boxes.device) if isinstance(boxes, torch.Tensor) \ diff --git a/ultralytics/yolo/utils/__init__.py b/ultralytics/yolo/utils/__init__.py index 4c527a4..aec7e97 100644 --- a/ultralytics/yolo/utils/__init__.py +++ b/ultralytics/yolo/utils/__init__.py @@ -102,6 +102,7 @@ np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' # for deterministic training +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab class SimpleClass:
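Finally, tying the `model.track()` docstring change above to usage, a hedged sketch of the list-of-`Results` return type; the video path is a placeholder:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# track() returns a list of Results; boxes gain a track-ID column (is_track == True)
results = model.track(source="path/to/video.mp4", tracker="botsort.yaml", persist=True)
for r in results:
    print(r.boxes.id)  # per-object track IDs, or None before IDs are assigned
```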