From 3fd317edfd2fdd925f919b4e736af5f96aee27e3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 May 2023 22:20:05 +0200 Subject: [PATCH] `ultralytics 8.0.91` tracker fix and docs comments (#2343) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- .github/workflows/ci.yaml | 28 +--- .github/workflows/codeql.yaml | 41 +++++ .github/workflows/greetings.yml | 2 +- .github/workflows/links.yml | 40 ++--- CONTRIBUTING.md | 4 +- docs/app.md | 4 + docs/help/FAQ.md | 4 + docs/help/code_of_conduct.md | 4 + docs/help/contributing.md | 4 + docs/help/index.md | 4 + docs/help/minimum_reproducible_example.md | 4 + docs/hub.md | 4 + docs/index.md | 4 + docs/inference_api.md | 4 + docs/models/index.md | 156 ++---------------- docs/models/sam.md | 36 ++++ docs/models/yolov3.md | 7 + docs/models/yolov5.md | 41 +++++ docs/models/yolov8.md | 67 ++++++++ docs/modes/benchmark.md | 4 + docs/modes/export.md | 4 + docs/modes/index.md | 4 + docs/modes/predict.md | 4 + docs/modes/track.md | 4 + docs/modes/train.md | 8 + docs/modes/val.md | 4 + docs/overrides/partials/comments.html | 50 ++++++ docs/quickstart.md | 4 + docs/tasks/classify.md | 4 + docs/tasks/detect.md | 4 + docs/tasks/index.md | 4 + docs/tasks/pose.md | 4 + docs/tasks/segment.md | 4 + docs/usage/callbacks.md | 6 +- docs/usage/cfg.md | 4 + docs/usage/cli.md | 4 + docs/usage/engine.md | 4 + docs/usage/hyperparameter_tuning.md | 4 + docs/usage/python.md | 4 + .../environments/aws_quickstart_tutorial.md | 4 + .../docker_image_quickstart_tutorial.md | 4 + .../google_cloud_quickstart_tutorial.md | 4 + docs/yolov5/index.md | 6 +- docs/yolov5/quickstart_tutorial.md | 4 + .../tutorials/architecture_description.md | 4 + .../tutorials/clearml_logging_integration.md | 4 + .../tutorials/comet_logging_integration.md | 4 + .../tutorials/hyperparameter_evolution.md | 4 + docs/yolov5/tutorials/model_ensembling.md | 4 + docs/yolov5/tutorials/model_export.md | 4 + .../tutorials/model_pruning_and_sparsity.md | 4 + docs/yolov5/tutorials/multi_gpu_training.md | 4 + .../neural_magic_pruning_quantization.md | 8 +- .../tutorials/pytorch_hub_model_loading.md | 4 + .../roboflow_datasets_integration.md | 4 + .../tutorials/running_on_jetson_nano.md | 4 + .../tutorials/test_time_augmentation.md | 4 + .../tips_for_best_training_results.md | 4 + docs/yolov5/tutorials/train_custom_data.md | 4 + .../transfer_learning_with_frozen_layers.md | 4 + mkdocs.yml | 142 ++++++++-------- tests/test_python.py | 10 ++ ultralytics/__init__.py | 2 +- ultralytics/tracker/track.py | 3 +- 65 files changed, 583 insertions(+), 256 deletions(-) create mode 100644 .github/workflows/codeql.yaml create mode 100644 docs/models/sam.md create mode 100644 docs/models/yolov3.md create mode 100644 docs/models/yolov5.md create mode 100644 docs/models/yolov8.md create mode 100644 docs/overrides/partials/comments.html diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 632cec3..d611ca7 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -62,7 +62,7 @@ body: label: Minimal Reproducible Example description: > When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. 
- This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + This is referred to by community members as creating a [minimal reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/). placeholder: | ``` # Code to reproduce your issue here diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 43972f6..04717be 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -56,14 +56,6 @@ jobs: hub.reset_model(model_id) model = YOLO('https://hub.ultralytics.com/models/' + model_id) model.train() - - name: Notify on failure - if: failure() && github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push') - uses: slackapi/slack-github-action@v1.23.0 - with: - payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_HUBWEB }} Benchmarks: runs-on: ${{ matrix.os }} @@ -124,14 +116,6 @@ jobs: run: | cat benchmarks.log echo "$(cat benchmarks.log)" >> $GITHUB_STEP_SUMMARY - - name: Notify on failure - if: failure() && github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push') - uses: slackapi/slack-github-action@v1.23.0 - with: - payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} Tests: timeout-minutes: 60 @@ -209,11 +193,17 @@ jobs: - name: Pytest tests shell: bash # for Windows compatibility run: pytest tests - - name: Notify on failure - if: failure() && github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push') + + Summary: + runs-on: ubuntu-latest + needs: [HUB, Benchmarks, Tests] # Add job names that you want to check for failure + if: always() # This ensures the job runs even if previous jobs fail + steps: + - name: Check for failure and notify + if: (needs.HUB.result == 'failure' || needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure') && github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push') uses: slackapi/slack-github-action@v1.23.0 with: payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} + {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} diff --git a/.github/workflows/codeql.yaml 
b/.github/workflows/codeql.yaml new file mode 100644 index 0000000..526f5e4 --- /dev/null +++ b/.github/workflows/codeql.yaml @@ -0,0 +1,41 @@ +# Ultralytics YOLO πŸš€, AGPL-3.0 license + +name: "CodeQL" + +on: + schedule: + - cron: '0 0 1 * *' + +jobs: + analyze: + name: Analyze + runs-on: ${{ 'ubuntu-latest' }} + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: security-extended,security-and-quality + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 37d08f3..9813f33 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -28,7 +28,7 @@ jobs: issue-message: | πŸ‘‹ Hello @${{ github.actor }}, thank you for your interest in YOLOv8 πŸš€! We recommend a visit to the [YOLOv8 Docs](https://docs.ultralytics.com) for new users where you can find many [Python](https://docs.ultralytics.com/usage/python/) and [CLI](https://docs.ultralytics.com/usage/cli/) usage examples and where many of the most common questions may already be answered. - If this is a πŸ› Bug Report, please provide a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us debug it. + If this is a πŸ› Bug Report, please provide a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us debug it. If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results/). 
diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 983c94e..7d3d4c4 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -1,13 +1,10 @@ # Ultralytics YOLO πŸš€, AGPL-3.0 license -# YOLO Continuous Integration (CI) GitHub Actions tests +# YOLO Continuous Integration (CI) GitHub Actions tests broken link checker +# Accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) name: Check Broken links on: - push: - branches: [main] - pull_request: - branches: [main] workflow_dispatch: schedule: - cron: '0 0 * * *' # runs at 00:00 UTC every day @@ -18,21 +15,26 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Test Markdown and HTML links - uses: lycheeverse/lychee-action@v1.7.0 + - name: Download and install lychee + run: | + LYCHEE_URL=$(curl -s https://api.github.com/repos/lycheeverse/lychee/releases/latest | grep "browser_download_url" | grep "x86_64-unknown-linux-gnu.tar.gz" | cut -d '"' -f 4) + curl -L $LYCHEE_URL -o lychee.tar.gz + tar xzf lychee.tar.gz + sudo mv lychee /usr/local/bin + + - name: Test Markdown and HTML links with retry + uses: nick-invision/retry@v2 with: - fail: true - # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci.yaml' --exclude-mail './**/*.md' './**/*.html' - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + timeout_minutes: 5 + retry_wait_seconds: 60 + max_attempts: 3 + command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' - - name: Test Markdown, HTML, YAML, Python and Notebook links + - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' - uses: lycheeverse/lychee-action@v1.7.0 + uses: nick-invision/retry@v2 with: - fail: true - # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci.yaml' --exclude-mail './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + timeout_minutes: 5 + retry_wait_seconds: 60 + max_attempts: 3 + command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 773f4ed..9326906 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,7 +88,7 @@ short guidelines below to help users provide what we need in order to get starte When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating -a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces +a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/). 
Your code that reproduces the problem should be: - βœ… **Minimal** – Use as little code as possible that still produces the same problem @@ -106,7 +106,7 @@ should be: If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the πŸ› **Bug Report** [template](https://github.com/ultralytics/ultralytics/issues/new/choose) and providing -a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better +a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us better understand and diagnose your problem. ## License diff --git a/docs/app.md b/docs/app.md index 8aaf686..b95d338 100644 --- a/docs/app.md +++ b/docs/app.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Ultralytics HUB App for YOLOv8 diff --git a/docs/help/FAQ.md b/docs/help/FAQ.md index e2aff1f..e4caa83 100644 --- a/docs/help/FAQ.md +++ b/docs/help/FAQ.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Ultralytics YOLO Frequently Asked Questions (FAQ) This FAQ section addresses some common questions and issues users might encounter while working with Ultralytics YOLO repositories. diff --git a/docs/help/code_of_conduct.md b/docs/help/code_of_conduct.md index b88a5ad..ba574f1 100644 --- a/docs/help/code_of_conduct.md +++ b/docs/help/code_of_conduct.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Ultralytics Contributor Covenant Code of Conduct ## Our Pledge diff --git a/docs/help/contributing.md b/docs/help/contributing.md index cf1abd1..4aced9b 100644 --- a/docs/help/contributing.md +++ b/docs/help/contributing.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Contributing to Ultralytics Open-Source YOLO Repositories First of all, thank you for your interest in contributing to Ultralytics open-source YOLO repositories! Your contributions will help improve the project and benefit the community. This document provides guidelines and best practices for contributing to Ultralytics YOLO repositories. diff --git a/docs/help/index.md b/docs/help/index.md index c859d07..ed4ab10 100644 --- a/docs/help/index.md +++ b/docs/help/index.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + Welcome to the Ultralytics Help page! We are committed to providing you with comprehensive resources to make your experience with Ultralytics YOLO repositories as smooth and enjoyable as possible. On this page, you'll find essential links to guides and documents that will help you navigate through common tasks and address any questions you might have while using our repositories. - [Frequently Asked Questions (FAQ)](FAQ.md): Find answers to common questions and issues faced by users and contributors of Ultralytics YOLO repositories. diff --git a/docs/help/minimum_reproducible_example.md b/docs/help/minimum_reproducible_example.md index 813194c..b547f1b 100644 --- a/docs/help/minimum_reproducible_example.md +++ b/docs/help/minimum_reproducible_example.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Creating a Minimum Reproducible Example for Bug Reports in Ultralytics YOLO Repositories When submitting a bug report for Ultralytics YOLO repositories, it's essential to provide a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) (MRE). An MRE is a small, self-contained piece of code that demonstrates the problem you're experiencing. Providing an MRE helps maintainers and contributors understand the issue and work on a fix more efficiently. 
This guide explains how to create an MRE when submitting bug reports to Ultralytics YOLO repositories. diff --git a/docs/hub.md b/docs/hub.md index 2688836..d93696f 100644 --- a/docs/hub.md +++ b/docs/hub.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Ultralytics HUB diff --git a/docs/index.md b/docs/index.md index ca2b3c2..e45b022 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,3 +1,7 @@ +--- +comments: true +--- +

diff --git a/docs/inference_api.md b/docs/inference_api.md index 9ce5f8a..5a4d146 100644 --- a/docs/inference_api.md +++ b/docs/inference_api.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # YOLO Inference API (UNDER CONSTRUCTION) The YOLO Inference API allows you to access the YOLOv8 object detection capabilities via a RESTful API. This enables you to run object detection on images without the need to install and set up the YOLOv8 environment locally. diff --git a/docs/models/index.md b/docs/models/index.md index 8a2e3e3..a10ea2e 100644 --- a/docs/models/index.md +++ b/docs/models/index.md @@ -1,14 +1,27 @@ -Ultralytics supports many models and architectures with more to come in the future. What to add your model architecture? [Here's](../help/contributing.md) how you can contribute +--- +comments: true +--- + +# Models + +Ultralytics supports many models and architectures with more to come in the future. Want to add your model architecture? [Here's](../help/contributing.md) how you can contribute. + +In this documentation, we provide information on four major models: + +1. [YOLOv3](./yolov3.md): The third iteration of the YOLO model family, known for its efficient real-time object detection capabilities. +2. [YOLOv5](./yolov5.md): An improved version of the YOLO architecture, offering better performance and speed tradeoffs compared to previous versions. +3. [YOLOv8](./yolov8.md): The latest version of the YOLO family, featuring enhanced capabilities such as instance segmentation, pose/keypoints estimation, and classification. +4. [Segment Anything Model (SAM)](./sam.md): Meta's Segment Anything Model (SAM). +You can use these models directly in the Command Line Interface (CLI) or in a Python environment. Below are examples of how to use the models with CLI and Python: -## YOLO -Model *.yaml files may be used directly in the Command Line Interface (CLI) with a yolo command: +## CLI Example ```bash yolo task=detect mode=train model=yolov8n.yaml data=coco128.yaml epochs=100 ``` -They may also be used directly in a Python environment, and accepts the same arguments as in the CLI example above: +## Python Example ```python from ultralytics import YOLO @@ -19,137 +32,4 @@ model.info() # display model information model.train(data="coco128.yaml", epochs=100) # train the model ``` -## YOLOv8 - -### About - - -### Supported Tasks - -| Model Type | Pre-trained Weights | Task | -|-------------|------------------------------------------------------------------------------------------------------------------|-----------------------| -| YOLOv8 | `yolov8n.pt`, `yolov8s.pt`, `yolov8m.pt`, `yolov8l.pt`, `yolov8x.pt` | Detection | -| YOLOv8-seg | `yolov8n-seg.pt`, `yolov8s-seg.pt`, `yolov8m-seg.pt`, `yolov8l-seg.pt`, `yolov8x-seg.pt` | Instance Segmentation | -| YOLOv8-pose | `yolov8n-pose.pt`, `yolov8s-pose.pt`, `yolov8m-pose.pt`, `yolov8l-pose.pt`, `yolov8x-pose.pt` ,`yolov8x-pose-p6` | Pose/Keypoints | -| YOLOv8-cls | `yolov8n-cls.pt`, `yolov8s-cls.pt`, `yolov8m-cls.pt`, `yolov8l-cls.pt`, `yolov8x-cls.pt` | Classification | - -### Supported Modes -| Mode | Supported | -|------------|--------------------| -| Inference | :heavy_check_mark: | -| Validation | :heavy_check_mark: | -| Training | :heavy_check_mark: | - -??? Performance - - === "Detection" - - | Model | size
(pixels) | mAPval 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) | - | ------------------------------------------------------------------------------------ | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | - | [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 | - | [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 | - | [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 | - | [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 | - | [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 | - - === "Segmentation" - - | Model | size
(pixels) | mAPbox 50-95 | mAPmask 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) | - | -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | - | [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 | - | [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 | - | [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 | - | [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 | - | [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 | - - === "Classification" - - | Model | size
(pixels) | acc top1 | acc top5 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) at 640 | - | -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ | - | [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 | - | [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 | - | [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 | - | [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 | - | [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 | - - === "Pose" - - | Model | size
(pixels) | mAPpose 50-95 | mAPpose 50 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) | - | ---------------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | - | [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | 50.4 | 80.1 | 131.8 | 1.18 | 3.3 | 9.2 | - | [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | 60.0 | 86.2 | 233.2 | 1.42 | 11.6 | 30.2 | - | [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | 65.0 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 | - | [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | 67.6 | 90.0 | 784.5 | 2.59 | 44.4 | 168.6 | - | [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 | - | [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 | - - -## YOLOv5u - -### About -Anchor-free YOLOv5 models with improved accuracy-speed tradeoff. - -### Supported Tasks - -| Model Type | Pre-trained Weights | Task | -|------------|-----------------------------------------------------------------------------------------------------------------------------|-----------| -| YOLOv5u | `yolov5nu`, `yolov5su`, `yolov5mu`, `yolov5lu`, `yolov5xu`, `yolov5n6u`, `yolov5s6u`, `yolov5m6u`, `yolov5l6u`, `yolov5x6u` | Detection | - -### Supported Modes -| Mode | Supported | -|------------|--------------------| -| Inference | :heavy_check_mark: | -| Validation | :heavy_check_mark: | -| Training | :heavy_check_mark: | - - -??? Performance - - - === "Detection" - - | Model | size
(pixels) | mAPval 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) | - | ---------------------------------------------------------------------------------------- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | - | [YOLOv5nu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5nu.pt) | 640 | 34.3 | 73.6 | 1.06 | 2.6 | 7.7 | - | [YOLOv5su](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5su.pt) | 640 | 43.0 | 120.7 | 1.27 | 9.1 | 24.0 | - | [YOLOv5mu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5mu.pt) | 640 | 49.0 | 233.9 | 1.86 | 25.1 | 64.2 | - | [YOLOv5lu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5lu.pt) | 640 | 52.2 | 408.4 | 2.50 | 53.2 | 135.0 | - | [YOLOv5xu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5xu.pt) | 640 | 53.2 | 763.2 | 3.81 | 97.2 | 246.4 | - | | | | | | | | - | [YOLOv5n6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5n6u.pt) | 1280 | 42.1 | - | - | 4.3 | 7.8 | - | [YOLOv5s6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5s6u.pt) | 1280 | 48.6 | - | - | 15.3 | 24.6 | - | [YOLOv5m6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5m6u.pt) | 1280 | 53.6 | - | - | 41.2 | 65.7 | - | [YOLOv5l6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5l6u.pt) | 1280 | 55.7 | - | - | 86.1 | 137.4 | - | [YOLOv5x6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5x6u.pt) | 1280 | 56.8 | - | - | 155.4 | 250.7 | - ---- -## Vision Transformers -Vit models currently support Python environment: - -```python -from ultralytics.vit import SAM -# from ultralytics.vit import MODEL_TYPe - -model = SAM("sam_b.pt") -model.info() # display model information -model.predict(...) # train the model -``` - -## Segment Anything - -### About - - -### Supported Tasks - -| Model Type | Pre-trained Weights | Tasks Supported | -|------------|---------------------|-----------------------| -| sam base | `sam_b.pt` | Instance Segmentation | -| sam large | `sam_l.pt` | Instance Segmentation | - -### Supported Modes -| Mode | Supported | -|------------|--------------------| -| Inference | :heavy_check_mark: | -| Validation | :x: | -| Training | :x: | +For more details on each model, their supported tasks, modes, and performance, please visit their respective documentation pages linked above. \ No newline at end of file diff --git a/docs/models/sam.md b/docs/models/sam.md new file mode 100644 index 0000000..e503fe9 --- /dev/null +++ b/docs/models/sam.md @@ -0,0 +1,36 @@ +--- +comments: true +--- + +# Vision Transformers + +Vit models currently support Python environment: + +```python +from ultralytics.vit import SAM + +# from ultralytics.vit import MODEL_TYPe + +model = SAM("sam_b.pt") +model.info() # display model information +model.predict(...) 
# train the model +``` + +# Segment Anything + +## About + +## Supported Tasks + +| Model Type | Pre-trained Weights | Tasks Supported | +|------------|---------------------|-----------------------| +| sam base | `sam_b.pt` | Instance Segmentation | +| sam large | `sam_l.pt` | Instance Segmentation | + +## Supported Modes + +| Mode | Supported | +|------------|--------------------| +| Inference | :heavy_check_mark: | +| Validation | :x: | +| Training | :x: | diff --git a/docs/models/yolov3.md b/docs/models/yolov3.md new file mode 100644 index 0000000..7b7f76d --- /dev/null +++ b/docs/models/yolov3.md @@ -0,0 +1,7 @@ +--- +comments: true +--- + +# 🚧Page Under Construction βš’ + +This page is currently under construction!οΈπŸ‘·Please check back later for updates. πŸ˜ƒπŸ”œ diff --git a/docs/models/yolov5.md b/docs/models/yolov5.md new file mode 100644 index 0000000..5419025 --- /dev/null +++ b/docs/models/yolov5.md @@ -0,0 +1,41 @@ +--- +comments: true +--- + +# YOLOv5u + +## About + +Anchor-free YOLOv5 models with improved accuracy-speed tradeoff. + +## Supported Tasks + +| Model Type | Pre-trained Weights | Task | +|------------|-----------------------------------------------------------------------------------------------------------------------------|-----------| +| YOLOv5u | `yolov5nu`, `yolov5su`, `yolov5mu`, `yolov5lu`, `yolov5xu`, `yolov5n6u`, `yolov5s6u`, `yolov5m6u`, `yolov5l6u`, `yolov5x6u` | Detection | + +## Supported Modes + +| Mode | Supported | +|------------|--------------------| +| Inference | :heavy_check_mark: | +| Validation | :heavy_check_mark: | +| Training | :heavy_check_mark: | + +??? Performance + + === "Detection" + + | Model | size
(pixels) | mAPval 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) | + | ---------------------------------------------------------------------------------------- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | + | [YOLOv5nu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5nu.pt) | 640 | 34.3 | 73.6 | 1.06 | 2.6 | 7.7 | + | [YOLOv5su](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5su.pt) | 640 | 43.0 | 120.7 | 1.27 | 9.1 | 24.0 | + | [YOLOv5mu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5mu.pt) | 640 | 49.0 | 233.9 | 1.86 | 25.1 | 64.2 | + | [YOLOv5lu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5lu.pt) | 640 | 52.2 | 408.4 | 2.50 | 53.2 | 135.0 | + | [YOLOv5xu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5xu.pt) | 640 | 53.2 | 763.2 | 3.81 | 97.2 | 246.4 | + | | | | | | | | + | [YOLOv5n6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5n6u.pt) | 1280 | 42.1 | - | - | 4.3 | 7.8 | + | [YOLOv5s6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5s6u.pt) | 1280 | 48.6 | - | - | 15.3 | 24.6 | + | [YOLOv5m6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5m6u.pt) | 1280 | 53.6 | - | - | 41.2 | 65.7 | + | [YOLOv5l6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5l6u.pt) | 1280 | 55.7 | - | - | 86.1 | 137.4 | + | [YOLOv5x6u](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5x6u.pt) | 1280 | 56.8 | - | - | 155.4 | 250.7 | diff --git a/docs/models/yolov8.md b/docs/models/yolov8.md new file mode 100644 index 0000000..86faf57 --- /dev/null +++ b/docs/models/yolov8.md @@ -0,0 +1,67 @@ +--- +comments: true +--- + +# YOLOv8 + +## About + +## Supported Tasks + +| Model Type | Pre-trained Weights | Task | +|-------------|------------------------------------------------------------------------------------------------------------------|-----------------------| +| YOLOv8 | `yolov8n.pt`, `yolov8s.pt`, `yolov8m.pt`, `yolov8l.pt`, `yolov8x.pt` | Detection | +| YOLOv8-seg | `yolov8n-seg.pt`, `yolov8s-seg.pt`, `yolov8m-seg.pt`, `yolov8l-seg.pt`, `yolov8x-seg.pt` | Instance Segmentation | +| YOLOv8-pose | `yolov8n-pose.pt`, `yolov8s-pose.pt`, `yolov8m-pose.pt`, `yolov8l-pose.pt`, `yolov8x-pose.pt` ,`yolov8x-pose-p6` | Pose/Keypoints | +| YOLOv8-cls | `yolov8n-cls.pt`, `yolov8s-cls.pt`, `yolov8m-cls.pt`, `yolov8l-cls.pt`, `yolov8x-cls.pt` | Classification | + +## Supported Modes + +| Mode | Supported | +|------------|--------------------| +| Inference | :heavy_check_mark: | +| Validation | :heavy_check_mark: | +| Training | :heavy_check_mark: | + +??? Performance + + === "Detection" + + | Model | size
(pixels) | mAPval 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) | + | ------------------------------------------------------------------------------------ | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | + | [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 | + | [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 | + | [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 | + | [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 | + | [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 | + + === "Segmentation" + + | Model | size
(pixels) | mAPbox 50-95 | mAPmask 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) | + | -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | + | [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 | + | [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 | + | [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 | + | [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 | + | [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 | + + === "Classification" + + | Model | size
(pixels) | acc top1 | acc top5 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) at 640 | + | -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ | + | [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 | + | [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 | + | [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 | + | [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 | + | [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 | + + === "Pose" + + | Model | size
(pixels) | mAPpose 50-95 | mAPpose 50 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs
(B) | + | ---------------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- | + | [YOLOv8n-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-pose.pt) | 640 | 50.4 | 80.1 | 131.8 | 1.18 | 3.3 | 9.2 | + | [YOLOv8s-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-pose.pt) | 640 | 60.0 | 86.2 | 233.2 | 1.42 | 11.6 | 30.2 | + | [YOLOv8m-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-pose.pt) | 640 | 65.0 | 88.8 | 456.3 | 2.00 | 26.4 | 81.0 | + | [YOLOv8l-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-pose.pt) | 640 | 67.6 | 90.0 | 784.5 | 2.59 | 44.4 | 168.6 | + | [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 | + | [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 | diff --git a/docs/modes/benchmark.md b/docs/modes/benchmark.md index cc938ed..2a9ac9b 100644 --- a/docs/modes/benchmark.md +++ b/docs/modes/benchmark.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + **Benchmark mode** is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks diff --git a/docs/modes/export.md b/docs/modes/export.md index f454466..a6b1b79 100644 --- a/docs/modes/export.md +++ b/docs/modes/export.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + **Export mode** is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the diff --git a/docs/modes/index.md b/docs/modes/index.md index 1ca2383..8292fdd 100644 --- a/docs/modes/index.md +++ b/docs/modes/index.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Ultralytics YOLOv8 Modes diff --git a/docs/modes/predict.md b/docs/modes/predict.md index b252d43..f997bf2 100644 --- a/docs/modes/predict.md +++ b/docs/modes/predict.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + YOLOv8 **predict mode** can generate predictions for various tasks, returning either a list of `Results` objects or a diff --git a/docs/modes/track.md b/docs/modes/track.md index 8058f38..a0d7999 100644 --- a/docs/modes/track.md +++ b/docs/modes/track.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + Object tracking is a task that involves identifying the location and class of objects, then assigning a unique ID to diff --git a/docs/modes/train.md b/docs/modes/train.md index d175c5f..a5c9de1 100644 --- a/docs/modes/train.md +++ b/docs/modes/train.md @@ -1,3 +1,11 @@ +--- +comments: true +--- + +--- +comments: true +--- + **Train mode** is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the diff --git a/docs/modes/val.md b/docs/modes/val.md index b0a866d..dee4344 100644 --- a/docs/modes/val.md +++ b/docs/modes/val.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + **Val mode** is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a diff --git a/docs/overrides/partials/comments.html b/docs/overrides/partials/comments.html new file mode 100644 index 0000000..ff1455b --- /dev/null +++ b/docs/overrides/partials/comments.html @@ -0,0 +1,50 @@ +{% if page.meta.comments %} +

<h2 id="__comments">{{ lang.t("meta.comments") }}</h2>

+ + + + + + +{% endif %} diff --git a/docs/quickstart.md b/docs/quickstart.md index af00fcd..d31972d 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + ## Install Install YOLOv8 via the `ultralytics` pip package for the latest stable release or by cloning diff --git a/docs/tasks/classify.md b/docs/tasks/classify.md index 35d5149..4ce4bb8 100644 --- a/docs/tasks/classify.md +++ b/docs/tasks/classify.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + Image classification is the simplest of the three tasks and involves classifying an entire image into one of a set of predefined classes. diff --git a/docs/tasks/detect.md b/docs/tasks/detect.md index 5e19be9..2e8952a 100644 --- a/docs/tasks/detect.md +++ b/docs/tasks/detect.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + Object detection is a task that involves identifying the location and class of objects in an image or video stream. diff --git a/docs/tasks/index.md b/docs/tasks/index.md index 47bbd39..2077118 100644 --- a/docs/tasks/index.md +++ b/docs/tasks/index.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Ultralytics YOLOv8 Tasks YOLOv8 is an AI framework that supports multiple computer vision **tasks**. The framework can be used to diff --git a/docs/tasks/pose.md b/docs/tasks/pose.md index bfd463f..275870a 100644 --- a/docs/tasks/pose.md +++ b/docs/tasks/pose.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + Pose estimation is a task that involves identifying the location of specific points in an image, usually referred to as keypoints. The keypoints can represent various parts of the object such as joints, landmarks, or other distinctive features. The locations of the keypoints are usually represented as a set of 2D `[x, y]` or 3D `[x, y, visible]` diff --git a/docs/tasks/segment.md b/docs/tasks/segment.md index e9ec13e..5612a8c 100644 --- a/docs/tasks/segment.md +++ b/docs/tasks/segment.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + Instance segmentation goes a step further than object detection and involves identifying individual objects in an image and segmenting them from the rest of the image. diff --git a/docs/usage/callbacks.md b/docs/usage/callbacks.md index bea6ece..1a11fcc 100644 --- a/docs/usage/callbacks.md +++ b/docs/usage/callbacks.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + ## Callbacks Ultralytics framework supports callbacks as entry points in strategic stages of train, val, export, and predict modes. @@ -13,7 +17,7 @@ In this example, we want to return the original frame with each result object. H ```python def on_predict_batch_end(predictor): # Retrieve the batch data - _, _, im0s, _, _ = predictor.batch + _, im0s, _, _ = predictor.batch # Ensure that im0s is a list im0s = im0s if isinstance(im0s, list) else [im0s] diff --git a/docs/usage/cfg.md b/docs/usage/cfg.md index 7134631..faeb197 100644 --- a/docs/usage/cfg.md +++ b/docs/usage/cfg.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + YOLO settings and hyperparameters play a critical role in the model's performance, speed, and accuracy. These settings and hyperparameters can affect the model's behavior at various stages of the model development process, including training, validation, and prediction. diff --git a/docs/usage/cli.md b/docs/usage/cli.md index 79183d4..ebfab8f 100644 --- a/docs/usage/cli.md +++ b/docs/usage/cli.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Command Line Interface Usage The YOLO command line interface (CLI) allows for simple single-line commands without the need for a Python environment. 
diff --git a/docs/usage/engine.md b/docs/usage/engine.md index 205ebc0..1bb823f 100644 --- a/docs/usage/engine.md +++ b/docs/usage/engine.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + Both the Ultralytics YOLO command-line and python interfaces are simply a high-level abstraction on the base engine executors. Let's take a look at the Trainer engine. diff --git a/docs/usage/hyperparameter_tuning.md b/docs/usage/hyperparameter_tuning.md index e261785..c69f574 100644 --- a/docs/usage/hyperparameter_tuning.md +++ b/docs/usage/hyperparameter_tuning.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Hyperparameter Tuning with Ray Tune and YOLOv8 Hyperparameter tuning (or hyperparameter optimization) is the process of determining the right combination of hyperparameters that maximizes model performance. It works by running multiple trials in a single training process, evaluating the performance of each trial, and selecting the best hyperparameter values based on the evaluation results. diff --git a/docs/usage/python.md b/docs/usage/python.md index 867835e..25f2448 100644 --- a/docs/usage/python.md +++ b/docs/usage/python.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Python Usage Welcome to the YOLOv8 Python Usage documentation! This guide is designed to help you seamlessly integrate YOLOv8 into diff --git a/docs/yolov5/environments/aws_quickstart_tutorial.md b/docs/yolov5/environments/aws_quickstart_tutorial.md index 9a37406..72dc714 100644 --- a/docs/yolov5/environments/aws_quickstart_tutorial.md +++ b/docs/yolov5/environments/aws_quickstart_tutorial.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # YOLOv5 πŸš€ on AWS Deep Learning Instance: A Comprehensive Guide This guide will help new users run YOLOv5 on an Amazon Web Services (AWS) Deep Learning instance. AWS offers a [Free Tier](https://aws.amazon.com/free/) and a [credit program](https://aws.amazon.com/activate/) for a quick and affordable start. diff --git a/docs/yolov5/environments/docker_image_quickstart_tutorial.md b/docs/yolov5/environments/docker_image_quickstart_tutorial.md index f423b51..0e408f5 100644 --- a/docs/yolov5/environments/docker_image_quickstart_tutorial.md +++ b/docs/yolov5/environments/docker_image_quickstart_tutorial.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Get Started with YOLOv5 πŸš€ in Docker This tutorial will guide you through the process of setting up and running YOLOv5 in a Docker container. diff --git a/docs/yolov5/environments/google_cloud_quickstart_tutorial.md b/docs/yolov5/environments/google_cloud_quickstart_tutorial.md index b0ea582..bec7526 100644 --- a/docs/yolov5/environments/google_cloud_quickstart_tutorial.md +++ b/docs/yolov5/environments/google_cloud_quickstart_tutorial.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # Run YOLOv5 πŸš€ on Google Cloud Platform (GCP) Deep Learning Virtual Machine (VM) ⭐ This tutorial will guide you through the process of setting up and running YOLOv5 on a GCP Deep Learning VM. New GCP users are eligible for a [$300 free credit offer](https://cloud.google.com/free/docs/gcp-free-tier#free-trial). diff --git a/docs/yolov5/index.md b/docs/yolov5/index.md index 0e5c2db..92ef9cd 100644 --- a/docs/yolov5/index.md +++ b/docs/yolov5/index.md @@ -1,4 +1,8 @@ -# YOLOv5 Docs +--- +comments: true +--- + +# Ultralytics YOLOv5

diff --git a/docs/yolov5/quickstart_tutorial.md b/docs/yolov5/quickstart_tutorial.md index ff9211a..01bc539 100644 --- a/docs/yolov5/quickstart_tutorial.md +++ b/docs/yolov5/quickstart_tutorial.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # YOLOv5 Quickstart See below for quickstart examples. diff --git a/docs/yolov5/tutorials/architecture_description.md b/docs/yolov5/tutorials/architecture_description.md index 9b9e655..3781d0f 100644 --- a/docs/yolov5/tutorials/architecture_description.md +++ b/docs/yolov5/tutorials/architecture_description.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + ## 1. Model Structure YOLOv5 (v6.0/6.1) consists of: diff --git a/docs/yolov5/tutorials/clearml_logging_integration.md b/docs/yolov5/tutorials/clearml_logging_integration.md index 5c24ca4..306e565 100644 --- a/docs/yolov5/tutorials/clearml_logging_integration.md +++ b/docs/yolov5/tutorials/clearml_logging_integration.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # ClearML Integration Clear|MLClear|ML diff --git a/docs/yolov5/tutorials/comet_logging_integration.md b/docs/yolov5/tutorials/comet_logging_integration.md index 2a08906..5f7fd08 100644 --- a/docs/yolov5/tutorials/comet_logging_integration.md +++ b/docs/yolov5/tutorials/comet_logging_integration.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # YOLOv5 with Comet diff --git a/docs/yolov5/tutorials/hyperparameter_evolution.md b/docs/yolov5/tutorials/hyperparameter_evolution.md index 02c7225..bcbbefa 100644 --- a/docs/yolov5/tutorials/hyperparameter_evolution.md +++ b/docs/yolov5/tutorials/hyperparameter_evolution.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + πŸ“š This guide explains **hyperparameter evolution** for YOLOv5 πŸš€. Hyperparameter evolution is a method of [Hyperparameter Optimization](https://en.wikipedia.org/wiki/Hyperparameter_optimization) using a [Genetic Algorithm](https://en.wikipedia.org/wiki/Genetic_algorithm) (GA) for optimization. UPDATED 25 September 2022. Hyperparameters in ML control various aspects of training, and finding optimal values for them can be a challenge. Traditional methods like grid searches can quickly become intractable due to 1) the high dimensional search space 2) unknown correlations among the dimensions, and 3) expensive nature of evaluating the fitness at each point, making GA a suitable candidate for hyperparameter searches. diff --git a/docs/yolov5/tutorials/model_ensembling.md b/docs/yolov5/tutorials/model_ensembling.md index c75a73b..caf7a2f 100644 --- a/docs/yolov5/tutorials/model_ensembling.md +++ b/docs/yolov5/tutorials/model_ensembling.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + πŸ“š This guide explains how to use YOLOv5 πŸš€ **model ensembling** during testing and inference for improved mAP and Recall. UPDATED 25 September 2022. diff --git a/docs/yolov5/tutorials/model_export.md b/docs/yolov5/tutorials/model_export.md index c01816a..1c562dd 100644 --- a/docs/yolov5/tutorials/model_export.md +++ b/docs/yolov5/tutorials/model_export.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + # TFLite, ONNX, CoreML, TensorRT Export πŸ“š This guide explains how to export a trained YOLOv5 πŸš€ model from PyTorch to ONNX and TorchScript formats. 
diff --git a/docs/yolov5/tutorials/model_pruning_and_sparsity.md b/docs/yolov5/tutorials/model_pruning_and_sparsity.md index f763808..1848ebe 100644 --- a/docs/yolov5/tutorials/model_pruning_and_sparsity.md +++ b/docs/yolov5/tutorials/model_pruning_and_sparsity.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + πŸ“š This guide explains how to apply **pruning** to YOLOv5 πŸš€ models. UPDATED 25 September 2022. diff --git a/docs/yolov5/tutorials/multi_gpu_training.md b/docs/yolov5/tutorials/multi_gpu_training.md index f9d9b05..4f81d27 100644 --- a/docs/yolov5/tutorials/multi_gpu_training.md +++ b/docs/yolov5/tutorials/multi_gpu_training.md @@ -1,3 +1,7 @@ +--- +comments: true +--- + πŸ“š This guide explains how to properly use **multiple** GPUs to train a dataset with YOLOv5 πŸš€ on single or multiple machine(s). UPDATED 25 December 2022. diff --git a/docs/yolov5/tutorials/neural_magic_pruning_quantization.md b/docs/yolov5/tutorials/neural_magic_pruning_quantization.md index 23283f7..b709652 100644 --- a/docs/yolov5/tutorials/neural_magic_pruning_quantization.md +++ b/docs/yolov5/tutorials/neural_magic_pruning_quantization.md @@ -1,3 +1,7 @@ +--- +comments: true +--- +