From 20f5efd40afce24576d932b697c711896c555999 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 23 Jul 2023 16:03:34 +0200
Subject: [PATCH] `ultralytics 8.0.141` create new SettingsManager (#3790)

---
 .pre-commit-config.yaml | 2 +-
 README.md | 8 +-
 README.zh-CN.md | 8 +-
 docs/CNAME | 2 +-
 docs/README.md | 2 +-
 docs/SECURITY.md | 2 +-
 docs/build_reference.py | 31 ++--
 docs/datasets/classify/caltech101.md | 6 +-
 docs/datasets/classify/caltech256.md | 6 +-
 docs/datasets/classify/cifar10.md | 6 +-
 docs/datasets/classify/cifar100.md | 6 +-
 docs/datasets/classify/fashion-mnist.md | 6 +-
 docs/datasets/classify/imagenet.md | 6 +-
 docs/datasets/classify/imagenet10.md | 6 +-
 docs/datasets/classify/imagenette.md | 14 +-
 docs/datasets/classify/imagewoof.md | 6 +-
 docs/datasets/classify/index.md | 8 +-
 docs/datasets/classify/mnist.md | 6 +-
 docs/datasets/detect/argoverse.md | 6 +-
 docs/datasets/detect/coco.md | 8 +-
 docs/datasets/detect/coco8.md | 8 +-
 docs/datasets/detect/globalwheat2020.md | 6 +-
 docs/datasets/detect/index.md | 8 +-
 docs/datasets/detect/objects365.md | 6 +-
 docs/datasets/detect/sku-110k.md | 6 +-
 docs/datasets/detect/visdrone.md | 10 +-
 docs/datasets/detect/voc.md | 8 +-
 docs/datasets/detect/xview.md | 8 +-
 docs/datasets/index.md | 2 +-
 docs/datasets/pose/coco.md | 8 +-
 docs/datasets/pose/coco8-pose.md | 8 +-
 docs/datasets/pose/index.md | 8 +-
 docs/datasets/segment/coco.md | 8 +-
 docs/datasets/segment/coco8-seg.md | 8 +-
 docs/datasets/segment/index.md | 8 +-
 docs/datasets/track/index.md | 8 +-
 docs/help/CI.md | 2 +-
 docs/help/CLA.md | 2 +-
 docs/help/FAQ.md | 2 +-
 docs/help/code_of_conduct.md | 2 +-
 docs/help/contributing.md | 2 +-
 docs/help/environmental-health-safety.md | 2 +-
 docs/help/index.md | 2 +-
 docs/help/minimum_reproducible_example.md | 2 +-
 docs/hub/app/android.md | 2 +-
 docs/hub/app/index.md | 2 +-
 docs/hub/app/ios.md | 2 +-
 docs/hub/datasets.md | 2 +-
 docs/hub/index.md | 2 +-
 docs/hub/inference_api.md | 50 +++----
 docs/hub/models.md | 2 +-
 docs/hub/projects.md | 2 +-
 docs/index.md | 2 +-
 docs/models/fast-sam.md | 2 +-
 docs/models/index.md | 2 +-
 docs/models/mobile-sam.md | 2 +-
 docs/models/rtdetr.md | 2 +-
 docs/models/sam.md | 18 +--
 docs/models/yolo-nas.md | 2 +-
 docs/models/yolov3.md | 2 +-
 docs/models/yolov4.md | 4 +-
 docs/models/yolov5.md | 2 +-
 docs/models/yolov6.md | 4 +-
 docs/models/yolov7.md | 2 +-
 docs/models/yolov8.md | 2 +-
 docs/modes/benchmark.md | 6 +-
 docs/modes/export.md | 10 +-
 docs/modes/index.md | 2 +-
 docs/modes/predict.md | 136 ++++++++---------
 docs/modes/track.md | 30 ++--
 docs/modes/train.md | 34 ++---
 docs/modes/val.md | 10 +-
 docs/quickstart.md | 123 +++++++++++++---
 docs/reference/cfg/__init__.md | 16 +-
 docs/reference/data/annotator.md | 2 +-
 docs/reference/data/augment.md | 2 +-
 docs/reference/data/base.md | 2 +-
 docs/reference/data/build.md | 2 +-
 docs/reference/data/converter.md | 2 +-
 docs/reference/data/dataset.md | 2 +-
 docs/reference/data/loaders.md | 2 +-
 docs/reference/data/utils.md | 2 +-
 docs/reference/engine/exporter.md | 2 +-
 docs/reference/engine/model.md | 2 +-
 docs/reference/engine/predictor.md | 2 +-
 docs/reference/engine/results.md | 2 +-
 docs/reference/engine/trainer.md | 2 +-
 docs/reference/engine/validator.md | 2 +-
 docs/reference/hub/__init__.md | 2 +-
 docs/reference/hub/auth.md | 2 +-
 docs/reference/hub/session.md | 2 +-
 docs/reference/hub/utils.md | 2 +-
 docs/reference/models/fastsam/model.md | 2 +-
 docs/reference/models/fastsam/predict.md | 2 +-
 docs/reference/models/fastsam/prompt.md | 2 +-
 docs/reference/models/fastsam/utils.md | 2 +-
 docs/reference/models/fastsam/val.md | 2 +-
 docs/reference/models/nas/model.md | 2 +-
 docs/reference/models/nas/predict.md | 2 +-
 docs/reference/models/nas/val.md | 2 +-
 docs/reference/models/rtdetr/model.md | 2 +-
 docs/reference/models/rtdetr/predict.md | 2 +-
 docs/reference/models/rtdetr/train.md | 2 +-
 docs/reference/models/rtdetr/val.md | 2 +-
 docs/reference/models/sam/amg.md | 2 +-
 docs/reference/models/sam/build.md | 2 +-
 docs/reference/models/sam/model.md | 2 +-
 docs/reference/models/sam/modules/decoders.md | 2 +-
 docs/reference/models/sam/modules/encoders.md | 2 +-
 docs/reference/models/sam/modules/sam.md | 2 +-
 .../models/sam/modules/tiny_encoder.md | 2 +-
 .../models/sam/modules/transformer.md | 2 +-
 docs/reference/models/sam/predict.md | 2 +-
 docs/reference/models/utils/loss.md | 2 +-
 docs/reference/models/utils/ops.md | 2 +-
 .../reference/models/yolo/classify/predict.md | 2 +-
 docs/reference/models/yolo/classify/train.md | 2 +-
 docs/reference/models/yolo/classify/val.md | 2 +-
 docs/reference/models/yolo/detect/predict.md | 2 +-
 docs/reference/models/yolo/detect/train.md | 2 +-
 docs/reference/models/yolo/detect/val.md | 2 +-
 docs/reference/models/yolo/pose/predict.md | 2 +-
 docs/reference/models/yolo/pose/train.md | 2 +-
 docs/reference/models/yolo/pose/val.md | 2 +-
 docs/reference/models/yolo/segment/predict.md | 2 +-
 docs/reference/models/yolo/segment/train.md | 2 +-
 docs/reference/models/yolo/segment/val.md | 2 +-
 docs/reference/nn/autobackend.md | 2 +-
 docs/reference/nn/modules/block.md | 2 +-
 docs/reference/nn/modules/conv.md | 2 +-
 docs/reference/nn/modules/head.md | 2 +-
 docs/reference/nn/modules/transformer.md | 2 +-
 docs/reference/nn/modules/utils.md | 2 +-
 docs/reference/nn/tasks.md | 7 +-
 docs/reference/trackers/basetrack.md | 2 +-
 docs/reference/trackers/bot_sort.md | 2 +-
 docs/reference/trackers/byte_tracker.md | 2 +-
 docs/reference/trackers/track.md | 2 +-
 docs/reference/trackers/utils/gmc.md | 2 +-
 .../reference/trackers/utils/kalman_filter.md | 2 +-
 docs/reference/trackers/utils/matching.md | 2 +-
 docs/reference/utils/__init__.md | 16 +-
 docs/reference/utils/autobatch.md | 2 +-
 docs/reference/utils/benchmarks.md | 2 +-
 docs/reference/utils/callbacks/base.md | 2 +-
 docs/reference/utils/callbacks/clearml.md | 2 +-
 docs/reference/utils/callbacks/comet.md | 2 +-
 docs/reference/utils/callbacks/dvc.md | 2 +-
 docs/reference/utils/callbacks/hub.md | 2 +-
 docs/reference/utils/callbacks/mlflow.md | 2 +-
 docs/reference/utils/callbacks/neptune.md | 2 +-
 docs/reference/utils/callbacks/raytune.md | 2 +-
 docs/reference/utils/callbacks/tensorboard.md | 2 +-
 docs/reference/utils/callbacks/wb.md | 2 +-
 docs/reference/utils/checks.md | 2 +-
 docs/reference/utils/dist.md | 2 +-
 docs/reference/utils/downloads.md | 2 +-
 docs/reference/utils/errors.md | 2 +-
 docs/reference/utils/files.md | 7 +-
 docs/reference/utils/instance.md | 2 +-
 docs/reference/utils/loss.md | 2 +-
 docs/reference/utils/metrics.md | 2 +-
 docs/reference/utils/ops.md | 2 +-
 docs/reference/utils/patches.md | 2 +-
 docs/reference/utils/plotting.md | 2 +-
 docs/reference/utils/tal.md | 2 +-
 docs/reference/utils/torch_utils.md | 2 +-
 docs/reference/utils/tuner.md | 2 +-
 docs/stylesheets/style.css | 2 +-
 docs/tasks/classify.md | 32 ++--
 docs/tasks/detect.md | 34 ++---
 docs/tasks/index.md | 2 +-
 docs/tasks/pose.md | 34 ++---
 docs/tasks/segment.md | 34 ++---
 docs/usage/callbacks.md | 6 +-
 docs/usage/cfg.md | 26 ++--
 docs/usage/cli.md | 4 +-
 docs/usage/engine.md | 2 +-
 docs/usage/hyperparameter_tuning.md | 6 +-
 docs/usage/python.md | 30 ++--
 .../environments/aws_quickstart_tutorial.md | 2 +-
 .../docker_image_quickstart_tutorial.md | 2 +-
 .../google_cloud_quickstart_tutorial.md | 2 +-
 docs/yolov5/index.md | 2 +-
 docs/yolov5/quickstart_tutorial.md | 2 +-
 .../tutorials/architecture_description.md | 14 +-
 .../tutorials/clearml_logging_integration.md | 2 +-
 .../tutorials/comet_logging_integration.md | 2 +-
 .../tutorials/hyperparameter_evolution.md | 10 +-
 docs/yolov5/tutorials/model_ensembling.md | 14 +-
 docs/yolov5/tutorials/model_export.md | 10 +-
 .../tutorials/model_pruning_and_sparsity.md | 8 +-
 docs/yolov5/tutorials/multi_gpu_training.md | 10 +-
 .../neural_magic_pruning_quantization.md | 4 +-
 .../tutorials/pytorch_hub_model_loading.md | 8 +-
 .../roboflow_datasets_integration.md | 4 +-
 .../tutorials/running_on_jetson_nano.md | 14 +-
 .../tutorials/test_time_augmentation.md | 10 +-
 .../tips_for_best_training_results.md | 4 +-
 docs/yolov5/tutorials/train_custom_data.md | 8 +-
 .../transfer_learning_with_frozen_layers.md | 92 ++++++------
 ultralytics/__init__.py | 5 +-
 ultralytics/cfg/__init__.py | 83 ++++++-----
 ultralytics/hub/auth.py | 4 +-
 ultralytics/utils/__init__.py | 137 ++++++++++++------
 ultralytics/utils/callbacks/clearml.py | 3 +-
 ultralytics/utils/callbacks/comet.py | 3 +-
 ultralytics/utils/callbacks/dvc.py | 3 +-
 ultralytics/utils/callbacks/hub.py | 4 +-
 ultralytics/utils/callbacks/mlflow.py | 3 +-
 ultralytics/utils/callbacks/neptune.py | 3 +-
 ultralytics/utils/callbacks/raytune.py | 4 +
 ultralytics/utils/callbacks/tensorboard.py | 3 +-
 ultralytics/utils/callbacks/wb.py | 5 +-
 ultralytics/utils/files.py | 2 +-
 215 files changed, 917 insertions(+), 749 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 192a0ff..dc9d3c9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -60,7 +60,7 @@ repos:
     hooks:
       - id: codespell
         args:
-          - --ignore-words-list=crate,nd,strack,dota
+          - --ignore-words-list=crate,nd,strack,dota,ane
 
 # - repo: https://github.com/asottile/yesqa
 #   rev: v1.4.0
diff --git a/README.md b/README.md
index ca91b05..1d63f17 100644
--- a/README.md
+++ b/README.md
@@ -234,14 +234,14 @@ We love your input! YOLOv5 and YOLOv8 would not be possible without help from ou
 
 ## <div align="center">License</div>
 
-YOLOv8 is available under two different licenses:
+Ultralytics offers two licensing options to accommodate diverse use cases:
 
-- **AGPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for details.
-- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of AGPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license).
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
+- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).
 
 ## <div align="center">Contact</div>
 
-For YOLOv8 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues), and join our [Discord](https://discord.gg/2wNGbc6g9X) community for questions and discussions!
+For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues), and join our [Discord](https://discord.gg/2wNGbc6g9X) community for questions and discussions!
 
 <br>
 <div align="center">
diff --git a/README.zh-CN.md b/README.zh-CN.md
index c112ad3..ab9c063 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -233,14 +233,14 @@ success = model.export(format="onnx")  # 将模型导出为 ONNX 格式
 
 ## <div align="center">许可证</div>
 
-YOLOv8 提供两种不同的许可证:
+Ultralytics 提供两种许可证选项以适应各种使用场景:
 
-- **AGPL-3.0 许可证**:详细信息请参阅 [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) 文件。
-- **企业许可证**:为商业产品开发提供更大的灵活性,无需遵循 AGPL-3.0 的开源要求。典型的用例是将 Ultralytics 软件和 AI 模型嵌入商业产品和应用中。在 [Ultralytics 授权](https://ultralytics.com/license) 处申请企业许可证。
+- **AGPL-3.0 许可证**:这个[OSI 批准](https://opensource.org/licenses/)的开源许可证非常适合学生和爱好者,可以推动开放的协作和知识分享。请查看[LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) 文件以了解更多细节。
+- **企业许可证**:专为商业用途设计,该许可证允许将 Ultralytics 的软件和 AI 模型无缝集成到商业产品和服务中,从而绕过 AGPL-3.0 的开源要求。如果您的场景涉及将我们的解决方案嵌入到商业产品中,请通过 [Ultralytics Licensing](https://ultralytics.com/license)与我们联系。
 
 ## <div align="center">联系方式</div>
 
-对于 YOLOv8 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/ultralytics/issues),并加入我们的 [Discord](https://discord.gg/2wNGbc6g9X) 社区进行问题和讨论!
+对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/ultralytics/issues),并加入我们的 [Discord](https://discord.gg/2wNGbc6g9X) 社区进行问题和讨论!
 
 <br>
 <div align="center">
diff --git a/docs/CNAME b/docs/CNAME
index 773aac8..339382a 100644
--- a/docs/CNAME
+++ b/docs/CNAME
@@ -1 +1 @@
-docs.ultralytics.com
\ No newline at end of file
+docs.ultralytics.com
diff --git a/docs/README.md b/docs/README.md
index df1e554..fcd4540 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -87,4 +87,4 @@ for your repository and updating the "Custom domain" field in the "GitHub Pages"
 ![196814117-fc16e711-d2be-4722-9536-b7c6d78fd167](https://user-images.githubusercontent.com/26833433/210150206-9e86dcd7-10af-43e4-9eb2-9518b3799eac.png)
 
 For more information on deploying your MkDocs documentation site, see
-the [MkDocs documentation](https://www.mkdocs.org/user-guide/deploying-your-docs/).
\ No newline at end of file
+the [MkDocs documentation](https://www.mkdocs.org/user-guide/deploying-your-docs/).
diff --git a/docs/SECURITY.md b/docs/SECURITY.md
index 1126b84..b7320e3 100644
--- a/docs/SECURITY.md
+++ b/docs/SECURITY.md
@@ -23,4 +23,4 @@ In addition to our Snyk scans, we also use GitHub's [CodeQL](https://docs.github
 
 If you suspect or discover a security vulnerability in any of our repositories, please let us know immediately. You can reach out to us directly via our [contact form](https://ultralytics.com/contact) or via [security@ultralytics.com](mailto:security@ultralytics.com). Our security team will investigate and respond as soon as possible.
 
-We appreciate your help in keeping all Ultralytics open-source projects secure and safe for everyone.
\ No newline at end of file
+We appreciate your help in keeping all Ultralytics open-source projects secure and safe for everyone.
diff --git a/docs/build_reference.py b/docs/build_reference.py
index f945310..273c5a2 100644
--- a/docs/build_reference.py
+++ b/docs/build_reference.py
@@ -21,8 +21,8 @@ def extract_classes_and_functions(filepath):
     with open(filepath, 'r') as file:
         content = file.read()
 
-    class_pattern = r"(?:^|\n)class\s(\w+)(?:\(|:)"
-    func_pattern = r"(?:^|\n)def\s(\w+)\("
+    class_pattern = r'(?:^|\n)class\s(\w+)(?:\(|:)'
+    func_pattern = r'(?:^|\n)def\s(\w+)\('
 
     classes = re.findall(class_pattern, content)
     functions = re.findall(func_pattern, content)
@@ -34,18 +34,21 @@ def create_markdown(py_filepath, module_path, classes, functions):
     md_filepath = py_filepath.with_suffix('.md')
 
     # Read existing content and keep header content between first two ---
-    header_content = ""
+    header_content = ''
     if md_filepath.exists():
         with open(md_filepath, 'r') as file:
             existing_content = file.read()
-        header_parts = existing_content.split('---', 2)
-        if 'description:' in header_parts or 'comments:' in header_parts and len(header_parts) >= 3:
-            header_content = f"{header_parts[0]}---{header_parts[1]}---\n\n"
+        header_parts = existing_content.split('---')
+        for part in header_parts:
+            if 'description:' in part or 'comments:' in part:
+                header_content += f'---{part}---\n\n'
 
     module_path = module_path.replace('.__init__', '')
-    md_content = [f"## {class_name}\n---\n### ::: {module_path}.{class_name}\n<br><br>\n" for class_name in classes]
-    md_content.extend(f"## {func_name}\n---\n### ::: {module_path}.{func_name}\n<br><br>\n" for func_name in functions)
-    md_content = header_content + "\n".join(md_content)
+    md_content = [f'## {class_name}\n---\n### ::: {module_path}.{class_name}\n<br><br>\n' for class_name in classes]
+    md_content.extend(f'## {func_name}\n---\n### ::: {module_path}.{func_name}\n<br><br>\n' for func_name in functions)
+    md_content = header_content + '\n'.join(md_content)
+    if not md_content.endswith('\n'):
+        md_content += '\n'
 
     os.makedirs(os.path.dirname(md_filepath), exist_ok=True)
     with open(md_filepath, 'w') as file:
@@ -81,11 +84,11 @@ def create_nav_menu_yaml(nav_items):
     nav_tree_sorted = sort_nested_dict(nav_tree)
 
     def _dict_to_yaml(d, level=0):
-        yaml_str = ""
-        indent = "  " * level
+        yaml_str = ''
+        indent = '  ' * level
         for k, v in d.items():
             if isinstance(v, dict):
-                yaml_str += f"{indent}- {k}:\n{_dict_to_yaml(v, level + 1)}"
+                yaml_str += f'{indent}- {k}:\n{_dict_to_yaml(v, level + 1)}'
             else:
                 yaml_str += f"{indent}- {k}: {str(v).replace('docs/', '')}\n"
         return yaml_str
@@ -99,7 +102,7 @@ def main():
     nav_items = []
     for root, _, files in os.walk(CODE_DIR):
         for file in files:
-            if file.endswith(".py"):
+            if file.endswith('.py'):
                 py_filepath = Path(root) / file
                 classes, functions = extract_classes_and_functions(py_filepath)
 
@@ -113,5 +116,5 @@ def main():
     create_nav_menu_yaml(nav_items)
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     main()
diff --git a/docs/datasets/classify/caltech101.md b/docs/datasets/classify/caltech101.md
index b6aa130..765c100 100644
--- a/docs/datasets/classify/caltech101.md
+++ b/docs/datasets/classify/caltech101.md
@@ -34,10 +34,10 @@ To train a YOLO model on the Caltech-101 dataset for 100 epochs, you can use the
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='caltech101', epochs=100, imgsz=416)
         ```
@@ -74,4 +74,4 @@ If you use the Caltech-101 dataset in your research or development work, please
         }
         ```
 
-We would like to acknowledge Li Fei-Fei, Rob Fergus, and Pietro Perona for creating and maintaining the Caltech-101 dataset as a valuable resource for the machine learning and computer vision research community. For more information about the Caltech-101 dataset and its creators, visit the [Caltech-101 dataset website](https://data.caltech.edu/records/mzrjq-6wc02).
\ No newline at end of file
+We would like to acknowledge Li Fei-Fei, Rob Fergus, and Pietro Perona for creating and maintaining the Caltech-101 dataset as a valuable resource for the machine learning and computer vision research community. For more information about the Caltech-101 dataset and its creators, visit the [Caltech-101 dataset website](https://data.caltech.edu/records/mzrjq-6wc02).
diff --git a/docs/datasets/classify/caltech256.md b/docs/datasets/classify/caltech256.md
index 593831b..5830e0b 100644
--- a/docs/datasets/classify/caltech256.md
+++ b/docs/datasets/classify/caltech256.md
@@ -34,10 +34,10 @@ To train a YOLO model on the Caltech-256 dataset for 100 epochs, you can use the
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='caltech256', epochs=100, imgsz=416)
         ```
@@ -71,4 +71,4 @@ If you use the Caltech-256 dataset in your research or development work, please
 
 We would like to acknowledge Gregory Griffin, Alex Holub, and Pietro Perona for creating and maintaining the Caltech-256
 dataset as a valuable resource for the machine learning and computer vision research community. For more information about the
-Caltech-256 dataset and its creators, visit the [Caltech-256 dataset website](https://data.caltech.edu/records/nyy15-4j048).
\ No newline at end of file
+Caltech-256 dataset and its creators, visit the [Caltech-256 dataset website](https://data.caltech.edu/records/nyy15-4j048).
diff --git a/docs/datasets/classify/cifar10.md b/docs/datasets/classify/cifar10.md
index a7ecfd1..4292564 100644
--- a/docs/datasets/classify/cifar10.md
+++ b/docs/datasets/classify/cifar10.md
@@ -37,10 +37,10 @@ To train a YOLO model on the CIFAR-10 dataset for 100 epochs with an image size
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='cifar10', epochs=100, imgsz=32)
         ```
@@ -73,4 +73,4 @@ If you use the CIFAR-10 dataset in your research or development work, please cit
         }
         ```
 
-We would like to acknowledge Alex Krizhevsky for creating and maintaining the CIFAR-10 dataset as a valuable resource for the machine learning and computer vision research community. For more information about the CIFAR-10 dataset and its creator, visit the [CIFAR-10 dataset website](https://www.cs.toronto.edu/~kriz/cifar.html).
\ No newline at end of file
+We would like to acknowledge Alex Krizhevsky for creating and maintaining the CIFAR-10 dataset as a valuable resource for the machine learning and computer vision research community. For more information about the CIFAR-10 dataset and its creator, visit the [CIFAR-10 dataset website](https://www.cs.toronto.edu/~kriz/cifar.html).
diff --git a/docs/datasets/classify/cifar100.md b/docs/datasets/classify/cifar100.md
index 50ebe28..c5c9b7d 100644
--- a/docs/datasets/classify/cifar100.md
+++ b/docs/datasets/classify/cifar100.md
@@ -37,10 +37,10 @@ To train a YOLO model on the CIFAR-100 dataset for 100 epochs with an image size
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='cifar100', epochs=100, imgsz=32)
         ```
@@ -73,4 +73,4 @@ If you use the CIFAR-100 dataset in your research or development work, please ci
         }
         ```
 
-We would like to acknowledge Alex Krizhevsky for creating and maintaining the CIFAR-100 dataset as a valuable resource for the machine learning and computer vision research community. For more information about the CIFAR-100 dataset and its creator, visit the [CIFAR-100 dataset website](https://www.cs.toronto.edu/~kriz/cifar.html).
\ No newline at end of file
+We would like to acknowledge Alex Krizhevsky for creating and maintaining the CIFAR-100 dataset as a valuable resource for the machine learning and computer vision research community. For more information about the CIFAR-100 dataset and its creator, visit the [CIFAR-100 dataset website](https://www.cs.toronto.edu/~kriz/cifar.html).
diff --git a/docs/datasets/classify/fashion-mnist.md b/docs/datasets/classify/fashion-mnist.md
index f1e6f7f..876bed2 100644
--- a/docs/datasets/classify/fashion-mnist.md
+++ b/docs/datasets/classify/fashion-mnist.md
@@ -51,10 +51,10 @@ To train a CNN model on the Fashion-MNIST dataset for 100 epochs with an image s
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='fashion-mnist', epochs=100, imgsz=28)
         ```
@@ -76,4 +76,4 @@ The example showcases the variety and complexity of the images in the Fashion-MN
 
 ## Acknowledgments
 
-If you use the Fashion-MNIST dataset in your research or development work, please acknowledge the dataset by linking to the [GitHub repository](https://github.com/zalandoresearch/fashion-mnist). This dataset was made available by Zalando Research.
\ No newline at end of file
+If you use the Fashion-MNIST dataset in your research or development work, please acknowledge the dataset by linking to the [GitHub repository](https://github.com/zalandoresearch/fashion-mnist). This dataset was made available by Zalando Research.
diff --git a/docs/datasets/classify/imagenet.md b/docs/datasets/classify/imagenet.md
index 9ac8d15..ba7da29 100644
--- a/docs/datasets/classify/imagenet.md
+++ b/docs/datasets/classify/imagenet.md
@@ -37,10 +37,10 @@ To train a deep learning model on the ImageNet dataset for 100 epochs with an im
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='imagenet', epochs=100, imgsz=224)
         ```
@@ -76,4 +76,4 @@ If you use the ImageNet dataset in your research or development work, please cit
         }
         ```
 
-We would like to acknowledge the ImageNet team, led by Olga Russakovsky, Jia Deng, and Li Fei-Fei, for creating and maintaining the ImageNet dataset as a valuable resource for the machine learning and computer vision research community. For more information about the ImageNet dataset and its creators, visit the [ImageNet website](https://www.image-net.org/).
\ No newline at end of file
+We would like to acknowledge the ImageNet team, led by Olga Russakovsky, Jia Deng, and Li Fei-Fei, for creating and maintaining the ImageNet dataset as a valuable resource for the machine learning and computer vision research community. For more information about the ImageNet dataset and its creators, visit the [ImageNet website](https://www.image-net.org/).
diff --git a/docs/datasets/classify/imagenet10.md b/docs/datasets/classify/imagenet10.md
index 806ef2a..3520f4c 100644
--- a/docs/datasets/classify/imagenet10.md
+++ b/docs/datasets/classify/imagenet10.md
@@ -33,10 +33,10 @@ To test a deep learning model on the ImageNet10 dataset with an image size of 22
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='imagenet10', epochs=5, imgsz=224)
         ```
@@ -71,4 +71,4 @@ If you use the ImageNet10 dataset in your research or development work, please c
         }
         ```
 
-We would like to acknowledge the ImageNet team, led by Olga Russakovsky, Jia Deng, and Li Fei-Fei, for creating and maintaining the ImageNet dataset. The ImageNet10 dataset, while a compact subset, is a valuable resource for quick testing and debugging in the machine learning and computer vision research community. For more information about the ImageNet dataset and its creators, visit the [ImageNet website](https://www.image-net.org/).
\ No newline at end of file
+We would like to acknowledge the ImageNet team, led by Olga Russakovsky, Jia Deng, and Li Fei-Fei, for creating and maintaining the ImageNet dataset. The ImageNet10 dataset, while a compact subset, is a valuable resource for quick testing and debugging in the machine learning and computer vision research community. For more information about the ImageNet dataset and its creators, visit the [ImageNet website](https://www.image-net.org/).
diff --git a/docs/datasets/classify/imagenette.md b/docs/datasets/classify/imagenette.md
index 9790904..968e279 100644
--- a/docs/datasets/classify/imagenette.md
+++ b/docs/datasets/classify/imagenette.md
@@ -35,10 +35,10 @@ To train a model on the ImageNette dataset for 100 epochs with a standard image
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='imagenette', epochs=100, imgsz=224)
         ```
@@ -70,10 +70,10 @@ To use these datasets, simply replace 'imagenette' with 'imagenette160' or 'imag
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model with ImageNette160
         model.train(data='imagenette160', epochs=100, imgsz=160)
         ```
@@ -91,10 +91,10 @@ To use these datasets, simply replace 'imagenette' with 'imagenette160' or 'imag
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model with ImageNette320
         model.train(data='imagenette320', epochs=100, imgsz=320)
         ```
@@ -110,4 +110,4 @@ These smaller versions of the dataset allow for rapid iterations during the deve
 
 ## Citations and Acknowledgments
 
-If you use the ImageNette dataset in your research or development work, please acknowledge it appropriately. For more information about the ImageNette dataset, visit the [ImageNette dataset GitHub page](https://github.com/fastai/imagenette).
\ No newline at end of file
+If you use the ImageNette dataset in your research or development work, please acknowledge it appropriately. For more information about the ImageNette dataset, visit the [ImageNette dataset GitHub page](https://github.com/fastai/imagenette).
diff --git a/docs/datasets/classify/imagewoof.md b/docs/datasets/classify/imagewoof.md
index ba046a7..86e51f7 100644
--- a/docs/datasets/classify/imagewoof.md
+++ b/docs/datasets/classify/imagewoof.md
@@ -32,10 +32,10 @@ To train a CNN model on the ImageWoof dataset for 100 epochs with an image size
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='imagewoof', epochs=100, imgsz=224)
         ```
@@ -81,4 +81,4 @@ The example showcases the subtle differences and similarities among the differen
 
 If you use the ImageWoof dataset in your research or development work, please make sure to acknowledge the creators of the dataset by linking to the [official dataset repository](https://github.com/fastai/imagenette). As of my knowledge cutoff in September 2021, there is no official publication specifically about ImageWoof for citation.
 
-We would like to acknowledge the FastAI team for creating and maintaining the ImageWoof dataset as a valuable resource for the machine learning and computer vision research community. For more information about the ImageWoof dataset, visit the [ImageWoof dataset repository](https://github.com/fastai/imagenette).
\ No newline at end of file
+We would like to acknowledge the FastAI team for creating and maintaining the ImageWoof dataset as a valuable resource for the machine learning and computer vision research community. For more information about the ImageWoof dataset, visit the [ImageWoof dataset repository](https://github.com/fastai/imagenette).
diff --git a/docs/datasets/classify/index.md b/docs/datasets/classify/index.md
index 1b4e497..220d3aa 100644
--- a/docs/datasets/classify/index.md
+++ b/docs/datasets/classify/index.md
@@ -83,10 +83,10 @@ In this example, the `train` directory contains subdirectories for each class in
 !!! example ""
 
     === "Python"
-    
+
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
@@ -94,7 +94,7 @@ In this example, the `train` directory contains subdirectories for each class in
         model.train(data='path/to/dataset', epochs=100, imgsz=640)
         ```
     === "CLI"
-    
+
         ```bash
         # Start training from a pretrained *.pt model
         yolo detect train data=path/to/data model=yolov8n-cls.pt epochs=100 imgsz=640
         ```
@@ -117,4 +117,4 @@ Ultralytics supports the following datasets with automatic download:
 
 ### Adding your own dataset
 
-If you have your own dataset and would like to use it for training classification models with Ultralytics, ensure that it follows the format specified above under "Dataset format" and then point your `data` argument to the dataset directory.
\ No newline at end of file
+If you have your own dataset and would like to use it for training classification models with Ultralytics, ensure that it follows the format specified above under "Dataset format" and then point your `data` argument to the dataset directory.
diff --git a/docs/datasets/classify/mnist.md b/docs/datasets/classify/mnist.md
index 3b439f7..9dac46b 100644
--- a/docs/datasets/classify/mnist.md
+++ b/docs/datasets/classify/mnist.md
@@ -40,10 +40,10 @@ To train a CNN model on the MNIST dataset for 100 epochs with an image size of 3
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='mnist', epochs=100, imgsz=32)
         ```
@@ -79,4 +79,4 @@ research or development work, please cite the following paper:
         }
         ```
 
-We would like to acknowledge Yann LeCun, Corinna Cortes, and Christopher J.C. Burges for creating and maintaining the MNIST dataset as a valuable resource for the machine learning and computer vision research community. For more information about the MNIST dataset and its creators, visit the [MNIST dataset website](http://yann.lecun.com/exdb/mnist/).
\ No newline at end of file
+We would like to acknowledge Yann LeCun, Corinna Cortes, and Christopher J.C. Burges for creating and maintaining the MNIST dataset as a valuable resource for the machine learning and computer vision research community. For more information about the MNIST dataset and its creators, visit the [MNIST dataset website](http://yann.lecun.com/exdb/mnist/).
diff --git a/docs/datasets/detect/argoverse.md b/docs/datasets/detect/argoverse.md
index a0ab7c9..8baa1e5 100644
--- a/docs/datasets/detect/argoverse.md
+++ b/docs/datasets/detect/argoverse.md
@@ -47,10 +47,10 @@ To train a YOLOv8n model on the Argoverse dataset for 100 epochs with an image s
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='Argoverse.yaml', epochs=100, imgsz=640)
         ```
@@ -86,4 +86,4 @@ If you use the Argoverse dataset in your research or development work, please ci
         }
         ```
 
-We would like to acknowledge Argo AI for creating and maintaining the Argoverse dataset as a valuable resource for the autonomous driving research community. For more information about the Argoverse dataset and its creators, visit the [Argoverse dataset website](https://www.argoverse.org/).
\ No newline at end of file
+We would like to acknowledge Argo AI for creating and maintaining the Argoverse dataset as a valuable resource for the autonomous driving research community. For more information about the Argoverse dataset and its creators, visit the [Argoverse dataset website](https://www.argoverse.org/).
diff --git a/docs/datasets/detect/coco.md b/docs/datasets/detect/coco.md
index 9f6a270..9406869 100644
--- a/docs/datasets/detect/coco.md
+++ b/docs/datasets/detect/coco.md
@@ -47,10 +47,10 @@ To train a YOLOv8n model on the COCO dataset for 100 epochs with an image size o
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='coco.yaml', epochs=100, imgsz=640)
         ```
@@ -78,7 +78,7 @@ If you use the COCO dataset in your research or development work, please cite th
 
         ```bibtex
         @misc{lin2015microsoft,
-              title={Microsoft COCO: Common Objects in Context}, 
+              title={Microsoft COCO: Common Objects in Context},
               author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
               year={2015},
               eprint={1405.0312},
@@ -87,4 +87,4 @@ If you use the COCO dataset in your research or development work, please cite th
         }
         ```
 
-We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
\ No newline at end of file
+We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
diff --git a/docs/datasets/detect/coco8.md b/docs/datasets/detect/coco8.md
index 703d087..7fc71fb 100644
--- a/docs/datasets/detect/coco8.md
+++ b/docs/datasets/detect/coco8.md
@@ -37,10 +37,10 @@ To train a YOLOv8n model on the COCO8 dataset for 100 epochs with an image size
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='coco8.yaml', epochs=100, imgsz=640)
         ```
@@ -68,7 +68,7 @@ If you use the COCO dataset in your research or development work, please cite th
 
         ```bibtex
         @misc{lin2015microsoft,
-              title={Microsoft COCO: Common Objects in Context}, 
+              title={Microsoft COCO: Common Objects in Context},
               author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
               year={2015},
               eprint={1405.0312},
@@ -77,4 +77,4 @@ If you use the COCO dataset in your research or development work, please cite th
         }
         ```
 
-We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
\ No newline at end of file
+We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
diff --git a/docs/datasets/detect/globalwheat2020.md b/docs/datasets/detect/globalwheat2020.md
index 27fb6fa..3a60aa0 100644
--- a/docs/datasets/detect/globalwheat2020.md
+++ b/docs/datasets/detect/globalwheat2020.md
@@ -46,10 +46,10 @@ To train a YOLOv8n model on the Global Wheat Head Dataset for 100 epochs with an
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='GlobalWheat2020.yaml', epochs=100, imgsz=640)
         ```
@@ -84,4 +84,4 @@ If you use the Global Wheat Head Dataset in your research or development work, p
         }
         ```
 
-We would like to acknowledge the researchers and institutions that contributed to the creation and maintenance of the Global Wheat Head Dataset as a valuable resource for the plant phenotyping and crop management research community. For more information about the dataset and its creators, visit the [Global Wheat Head Dataset website](http://www.global-wheat.com/).
\ No newline at end of file
+We would like to acknowledge the researchers and institutions that contributed to the creation and maintenance of the Global Wheat Head Dataset as a valuable resource for the plant phenotyping and crop management research community. For more information about the dataset and its creators, visit the [Global Wheat Head Dataset website](http://www.global-wheat.com/).
diff --git a/docs/datasets/detect/index.md b/docs/datasets/detect/index.md
index 9a7b3b6..e76be2b 100644
--- a/docs/datasets/detect/index.md
+++ b/docs/datasets/detect/index.md
@@ -51,10 +51,10 @@ Here's how you can use these formats to train your model:
 !!! example ""
 
     === "Python"
-    
+
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
@@ -62,7 +62,7 @@ Here's how you can use these formats to train your model:
         model.train(data='coco128.yaml', epochs=100, imgsz=640)
         ```
     === "CLI"
-    
+
         ```bash
         # Start training from a pretrained *.pt model
         yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640
         ```
@@ -100,4 +100,4 @@ convert_coco(labels_dir='../coco/annotations/')
 
 This conversion tool can be used to convert the COCO dataset or any dataset in the COCO format to the Ultralytics YOLO format.
 
-Remember to double-check if the dataset you want to use is compatible with your model and follows the necessary format conventions. Properly formatted datasets are crucial for training successful object detection models.
\ No newline at end of file
+Remember to double-check if the dataset you want to use is compatible with your model and follows the necessary format conventions. Properly formatted datasets are crucial for training successful object detection models.
diff --git a/docs/datasets/detect/objects365.md b/docs/datasets/detect/objects365.md
index e47d83a..0254b47 100644
--- a/docs/datasets/detect/objects365.md
+++ b/docs/datasets/detect/objects365.md
@@ -46,10 +46,10 @@ To train a YOLOv8n model on the Objects365 dataset for 100 epochs with an image
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='Objects365.yaml', epochs=100, imgsz=640)
         ```
@@ -85,4 +85,4 @@ If you use the Objects365 dataset in your research or development work, please c
         }
         ```
 
-We would like to acknowledge the team of researchers who created and maintain the Objects365 dataset as a valuable resource for the computer vision research community. For more information about the Objects365 dataset and its creators, visit the [Objects365 dataset website](https://www.objects365.org/).
\ No newline at end of file
+We would like to acknowledge the team of researchers who created and maintain the Objects365 dataset as a valuable resource for the computer vision research community. For more information about the Objects365 dataset and its creators, visit the [Objects365 dataset website](https://www.objects365.org/).
diff --git a/docs/datasets/detect/sku-110k.md b/docs/datasets/detect/sku-110k.md
index 270dad9..ac56f2b 100644
--- a/docs/datasets/detect/sku-110k.md
+++ b/docs/datasets/detect/sku-110k.md
@@ -48,10 +48,10 @@ To train a YOLOv8n model on the SKU-110K dataset for 100 epochs with an image si
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='SKU-110K.yaml', epochs=100, imgsz=640)
         ```
@@ -86,4 +86,4 @@ If you use the SKU-110k dataset in your research or development work, please cit
         }
         ```
 
-We would like to acknowledge Eran Goldman et al. for creating and maintaining the SKU-110k dataset as a valuable resource for the computer vision research community. For more information about the SKU-110k dataset and its creators, visit the [SKU-110k dataset GitHub repository](https://github.com/eg4000/SKU110K_CVPR19).
\ No newline at end of file
+We would like to acknowledge Eran Goldman et al. for creating and maintaining the SKU-110k dataset as a valuable resource for the computer vision research community. For more information about the SKU-110k dataset and its creators, visit the [SKU-110k dataset GitHub repository](https://github.com/eg4000/SKU110K_CVPR19).
diff --git a/docs/datasets/detect/visdrone.md b/docs/datasets/detect/visdrone.md
index fc2218d..5468d57 100644
--- a/docs/datasets/detect/visdrone.md
+++ b/docs/datasets/detect/visdrone.md
@@ -44,10 +44,10 @@ To train a YOLOv8n model on the VisDrone dataset for 100 epochs with an image si
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='VisDrone.yaml', epochs=100, imgsz=640)
         ```
@@ -76,8 +76,8 @@ If you use the VisDrone dataset in your research or development work, please cit
         ```bibtex
         @ARTICLE{9573394,
           author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Fan, Heng and Hu, Qinghua and Ling, Haibin},
-          journal={IEEE Transactions on Pattern Analysis and Machine Intelligence}, 
-          title={Detection and Tracking Meet Drones Challenge}, 
+          journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
+          title={Detection and Tracking Meet Drones Challenge},
           year={2021},
           volume={},
           number={},
@@ -85,4 +85,4 @@ If you use the VisDrone dataset in your research or development work, please cit
           doi={10.1109/TPAMI.2021.3119563}}
         ```
 
-We would like to acknowledge the AISKYEYE team at the Lab of Machine Learning and Data Mining, Tianjin University, China, for creating and maintaining the VisDrone dataset as a valuable resource for the drone-based computer vision research community. For more information about the VisDrone dataset and its creators, visit the [VisDrone Dataset GitHub repository](https://github.com/VisDrone/VisDrone-Dataset).
\ No newline at end of file
+We would like to acknowledge the AISKYEYE team at the Lab of Machine Learning and Data Mining, Tianjin University, China, for creating and maintaining the VisDrone dataset as a valuable resource for the drone-based computer vision research community. For more information about the VisDrone dataset and its creators, visit the [VisDrone Dataset GitHub repository](https://github.com/VisDrone/VisDrone-Dataset).
diff --git a/docs/datasets/detect/voc.md b/docs/datasets/detect/voc.md
index 5a42524..d29da7d 100644
--- a/docs/datasets/detect/voc.md
+++ b/docs/datasets/detect/voc.md
@@ -47,10 +47,10 @@ To train a YOLOv8n model on the VOC dataset for 100 epochs with an image size of
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='VOC.yaml', epochs=100, imgsz=640)
         ```
@@ -79,7 +79,7 @@ If you use the VOC dataset in your research or development work, please cite the
 
         ```bibtex
         @misc{everingham2010pascal,
-              title={The PASCAL Visual Object Classes (VOC) Challenge}, 
+              title={The PASCAL Visual Object Classes (VOC) Challenge},
               author={Mark Everingham and Luc Van Gool and Christopher K. I. Williams and John Winn and Andrew Zisserman},
               year={2010},
               eprint={0909.5206},
@@ -88,4 +88,4 @@ If you use the VOC dataset in your research or development work, please cite the
         }
         ```
 
-We would like to acknowledge the PASCAL VOC Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the VOC dataset and its creators, visit the [PASCAL VOC dataset website](http://host.robots.ox.ac.uk/pascal/VOC/).
\ No newline at end of file
+We would like to acknowledge the PASCAL VOC Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the VOC dataset and its creators, visit the [PASCAL VOC dataset website](http://host.robots.ox.ac.uk/pascal/VOC/).
diff --git a/docs/datasets/detect/xview.md b/docs/datasets/detect/xview.md
index 9bdb55b..9da4ca8 100644
--- a/docs/datasets/detect/xview.md
+++ b/docs/datasets/detect/xview.md
@@ -50,10 +50,10 @@ To train a model on the xView dataset for 100 epochs with an image size of 640,
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='xView.yaml', epochs=100, imgsz=640)
         ```
@@ -81,7 +81,7 @@ If you use the xView dataset in your research or development work, please cite t
 
         ```bibtex
         @misc{lam2018xview,
-              title={xView: Objects in Context in Overhead Imagery}, 
+              title={xView: Objects in Context in Overhead Imagery},
               author={Darius Lam and Richard Kuzma and Kevin McGee and Samuel Dooley and Michael Laielli and Matthew Klaric and Yaroslav Bulatov and Brendan McCord},
               year={2018},
               eprint={1802.07856},
@@ -90,4 +90,4 @@ If you use the xView dataset in your research or development work, please cite t
         }
         ```
 
-We would like to acknowledge the [Defense Innovation Unit](https://www.diu.mil/) (DIU) and the creators of the xView dataset for their valuable contribution to the computer vision research community. For more information about the xView dataset and its creators, visit the [xView dataset website](http://xviewdataset.org/).
\ No newline at end of file
+We would like to acknowledge the [Defense Innovation Unit](https://www.diu.mil/) (DIU) and the creators of the xView dataset for their valuable contribution to the computer vision research community. For more information about the xView dataset and its creators, visit the [xView dataset website](http://xviewdataset.org/).
diff --git a/docs/datasets/index.md b/docs/datasets/index.md
index 8f19a1c..680ffb2 100644
--- a/docs/datasets/index.md
+++ b/docs/datasets/index.md
@@ -56,4 +56,4 @@ Image classification is a computer vision task that involves categorizing an ima
 Multi-object tracking is a computer vision technique that involves detecting and tracking multiple objects over time in a video sequence.
 
 * [Argoverse](detect/argoverse.md): A dataset containing 3D tracking and motion forecasting data from urban environments with rich annotations for multi-object tracking tasks.
-* [VisDrone](detect/visdrone.md): A dataset containing object detection and multi-object tracking data from drone-captured imagery with over 10K images and video sequences.
\ No newline at end of file
+* [VisDrone](detect/visdrone.md): A dataset containing object detection and multi-object tracking data from drone-captured imagery with over 10K images and video sequences.
diff --git a/docs/datasets/pose/coco.md b/docs/datasets/pose/coco.md
index 6466e6e..1549fd3 100644
--- a/docs/datasets/pose/coco.md
+++ b/docs/datasets/pose/coco.md
@@ -48,10 +48,10 @@ To train a YOLOv8n-pose model on the COCO-Pose dataset for 100 epochs with an im
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-pose.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='coco-pose.yaml', epochs=100, imgsz=640)
         ```
@@ -79,7 +79,7 @@ If you use the COCO-Pose dataset in your research or development work, please ci
 
         ```bibtex
         @misc{lin2015microsoft,
-              title={Microsoft COCO: Common Objects in Context}, 
+              title={Microsoft COCO: Common Objects in Context},
               author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
               year={2015},
               eprint={1405.0312},
@@ -88,4 +88,4 @@ If you use the COCO-Pose dataset in your research or development work, please ci
         }
         ```
 
-We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO-Pose dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
\ No newline at end of file
+We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO-Pose dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
diff --git a/docs/datasets/pose/coco8-pose.md b/docs/datasets/pose/coco8-pose.md
index c0e2996..ed6cfb6 100644
--- a/docs/datasets/pose/coco8-pose.md
+++ b/docs/datasets/pose/coco8-pose.md
@@ -37,10 +37,10 @@ To train a YOLOv8n-pose model on the COCO8-Pose dataset for 100 epochs with an i
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-pose.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='coco8-pose.yaml', epochs=100, imgsz=640)
         ```
@@ -68,7 +68,7 @@ If you use the COCO dataset in your research or development work, please cite th
 
         ```bibtex
         @misc{lin2015microsoft,
-              title={Microsoft COCO: Common Objects in Context}, 
+              title={Microsoft COCO: Common Objects in Context},
               author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
               year={2015},
               eprint={1405.0312},
@@ -77,4 +77,4 @@ If you use the COCO dataset in your research or development work, please cite th
         }
         ```
 
-We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
\ No newline at end of file
+We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
diff --git a/docs/datasets/pose/index.md b/docs/datasets/pose/index.md
index ff0f8ce..18cb134 100644
--- a/docs/datasets/pose/index.md
+++ b/docs/datasets/pose/index.md
@@ -70,10 +70,10 @@ For example if we assume five keypoints of facial landmark: [left eye, right eye
 !!! example ""
 
     === "Python"
-    
+
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-pose.pt')  # load a pretrained model (recommended for training)
@@ -81,7 +81,7 @@ For example if we assume five keypoints of facial landmark: [left eye, right eye
         model.train(data='coco128-pose.yaml', epochs=100, imgsz=640)
         ```
     === "CLI"
-    
+
         ```bash
         # Start training from a pretrained *.pt model
         yolo detect train data=coco128-pose.yaml model=yolov8n-pose.pt epochs=100 imgsz=640
         ```
@@ -125,4 +125,4 @@ from ultralytics.data.converter import convert_coco
 
 convert_coco(labels_dir='../coco/annotations/', use_keypoints=True)
 ```
 
-This conversion tool can be used to convert the COCO dataset or any dataset in the COCO format to the Ultralytics YOLO format. The `use_keypoints` parameter specifies whether to include keypoints (for pose estimation) in the converted labels.
\ No newline at end of file
+This conversion tool can be used to convert the COCO dataset or any dataset in the COCO format to the Ultralytics YOLO format. The `use_keypoints` parameter specifies whether to include keypoints (for pose estimation) in the converted labels.
diff --git a/docs/datasets/segment/coco.md b/docs/datasets/segment/coco.md
index a1a102d..2738703 100644
--- a/docs/datasets/segment/coco.md
+++ b/docs/datasets/segment/coco.md
@@ -47,10 +47,10 @@ To train a YOLOv8n-seg model on the COCO-Seg dataset for 100 epochs with an imag
 
         ```python
         from ultralytics import YOLO
-        
+
         # Load a model
         model = YOLO('yolov8n-seg.pt')  # load a pretrained model (recommended for training)
-        
+
         # Train the model
         model.train(data='coco-seg.yaml', epochs=100, imgsz=640)
         ```
@@ -78,7 +78,7 @@ If you use the COCO-Seg dataset in your research or development work, please cit
 
         ```bibtex
         @misc{lin2015microsoft,
-              title={Microsoft COCO: Common Objects in Context}, 
+              title={Microsoft COCO: Common Objects in Context},
               author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár},
               year={2015},
               eprint={1405.0312},
@@ -87,4 +87,4 @@ If you use the COCO-Seg dataset in your research or development work, please cit
         }
         ```
 
-We extend our thanks to the COCO Consortium for creating and maintaining this invaluable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
\ No newline at end of file
+We extend our thanks to the COCO Consortium for creating and maintaining this invaluable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home).
diff --git a/docs/datasets/segment/coco8-seg.md b/docs/datasets/segment/coco8-seg.md index 32fa157..d27e22d 100644 --- a/docs/datasets/segment/coco8-seg.md +++ b/docs/datasets/segment/coco8-seg.md @@ -37,10 +37,10 @@ To train a YOLOv8n-seg model on the COCO8-Seg dataset for 100 epochs with an ima ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-seg.pt') # load a pretrained model (recommended for training) - + # Train the model model.train(data='coco8-seg.yaml', epochs=100, imgsz=640) ``` @@ -68,7 +68,7 @@ If you use the COCO dataset in your research or development work, please cite th ```bibtex @misc{lin2015microsoft, - title={Microsoft COCO: Common Objects in Context}, + title={Microsoft COCO: Common Objects in Context}, author={Tsung-Yi Lin and Michael Maire and Serge Belongie and Lubomir Bourdev and Ross Girshick and James Hays and Pietro Perona and Deva Ramanan and C. Lawrence Zitnick and Piotr Dollár}, year={2015}, eprint={1405.0312}, @@ -77,4 +77,4 @@ If you use the COCO dataset in your research or development work, please cite th } ``` -We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home). \ No newline at end of file +We would like to acknowledge the COCO Consortium for creating and maintaining this valuable resource for the computer vision community. For more information about the COCO dataset and its creators, visit the [COCO dataset website](https://cocodataset.org/#home). diff --git a/docs/datasets/segment/index.md b/docs/datasets/segment/index.md index 0dd8459..b5e279c 100644 --- a/docs/datasets/segment/index.md +++ b/docs/datasets/segment/index.md @@ -71,10 +71,10 @@ The `train` and `val` fields specify the paths to the directories containing the !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-seg.pt') # load a pretrained model (recommended for training) @@ -82,7 +82,7 @@ The `train` and `val` fields specify the paths to the directories containing the model.train(data='coco128-seg.yaml', epochs=100, imgsz=640) ``` === "CLI" - + ```bash # Start training from a pretrained *.pt model yolo detect train data=coco128-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 @@ -137,4 +137,4 @@ auto_annotate(data="path/to/images", det_model="yolov8x.pt", sam_model='sam_b.pt The `auto_annotate` function takes the path to your images, along with optional arguments for specifying the pre-trained detection and [SAM segmentation models](https://docs.ultralytics.com/models/sam), the device to run the models on, and the output directory for saving the annotated results. -By leveraging the power of pre-trained models, auto-annotation can significantly reduce the time and effort required for creating high-quality segmentation datasets. This feature is particularly useful for researchers and developers working with large image collections, as it allows them to focus on model development and evaluation rather than manual annotation. \ No newline at end of file +By leveraging the power of pre-trained models, auto-annotation can significantly reduce the time and effort required for creating high-quality segmentation datasets. 
This feature is particularly useful for researchers and developers working with large image collections, as it allows them to focus on model development and evaluation rather than manual annotation. diff --git a/docs/datasets/track/index.md b/docs/datasets/track/index.md index 9ae4979..c25119e 100644 --- a/docs/datasets/track/index.md +++ b/docs/datasets/track/index.md @@ -16,15 +16,15 @@ Support for training trackers alone is coming soon !!! example "" === "Python" - + ```python from ultralytics import YOLO model = YOLO('yolov8n.pt') - results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5, show=True) + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5, show=True) ``` === "CLI" - + ```bash yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" conf=0.3 iou=0.5 show - ``` \ No newline at end of file + ``` diff --git a/docs/help/CI.md b/docs/help/CI.md index 3fea95d..fdfd468 100644 --- a/docs/help/CI.md +++ b/docs/help/CI.md @@ -32,4 +32,4 @@ If you notice a test failing, it would be a great help if you could report it th Remember, a successful CI test does not mean that everything is perfect. It is always recommended to manually review the code before deployment or merging changes. -Happy coding! \ No newline at end of file +Happy coding! diff --git a/docs/help/CLA.md b/docs/help/CLA.md index 10a7ca6..6edc4e3 100644 --- a/docs/help/CLA.md +++ b/docs/help/CLA.md @@ -67,4 +67,4 @@ that any of the provisions of this Agreement shall be held by a court or other t to be unenforceable, the remaining portions hereof shall remain in full force and effect. **Assignment.** You agree that Ultralytics may assign this Agreement, and all of its rights, obligations and licenses -hereunder. \ No newline at end of file +hereunder. diff --git a/docs/help/FAQ.md b/docs/help/FAQ.md index 94c2f78..8e4430a 100644 --- a/docs/help/FAQ.md +++ b/docs/help/FAQ.md @@ -36,4 +36,4 @@ Improving the accuracy of a YOLO model may involve several strategies, such as: Remember that there's often a trade-off between accuracy and inference speed, so finding the right balance is crucial for your specific application. -If you have any more questions or need assistance, don't hesitate to consult the Ultralytics documentation or reach out to the community through GitHub Issues or the official discussion forum. \ No newline at end of file +If you have any more questions or need assistance, don't hesitate to consult the Ultralytics documentation or reach out to the community through GitHub Issues or the official discussion forum. diff --git a/docs/help/code_of_conduct.md b/docs/help/code_of_conduct.md index c23efdd..cad1dae 100644 --- a/docs/help/code_of_conduct.md +++ b/docs/help/code_of_conduct.md @@ -131,4 +131,4 @@ For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. -[homepage]: https://www.contributor-covenant.org \ No newline at end of file +[homepage]: https://www.contributor-covenant.org diff --git a/docs/help/contributing.md b/docs/help/contributing.md index 0069fad..8e6395b 100644 --- a/docs/help/contributing.md +++ b/docs/help/contributing.md @@ -72,4 +72,4 @@ def example_function(arg1: int, arg2: str) -> bool: ### GitHub Actions CI Tests -Before your pull request can be merged, all GitHub Actions Continuous Integration (CI) tests must pass.
These tests include linting, unit tests, and other checks to ensure that your changes meet the quality standards of the project. Make sure to review the output of the GitHub Actions and fix any issues \ No newline at end of file +Before your pull request can be merged, all GitHub Actions Continuous Integration (CI) tests must pass. These tests include linting, unit tests, and other checks to ensure that your changes meet the quality standards of the project. Make sure to review the output of the GitHub Actions and fix any issues. diff --git a/docs/help/environmental-health-safety.md b/docs/help/environmental-health-safety.md index 006f864..9fee240 100644 --- a/docs/help/environmental-health-safety.md +++ b/docs/help/environmental-health-safety.md @@ -34,4 +34,4 @@ At Ultralytics, we recognize that the long-term success of our company relies no This policy reflects our commitment to minimizing our environmental footprint, ensuring the safety and well-being of our employees, and continuously improving our performance. -Please remember that the implementation of an effective EHS policy requires the involvement and commitment of everyone working at or with Ultralytics. We encourage you to take personal responsibility for your safety and the safety of others, and to take care of the environment in which we live and work. \ No newline at end of file +Please remember that the implementation of an effective EHS policy requires the involvement and commitment of everyone working at or with Ultralytics. We encourage you to take personal responsibility for your safety and the safety of others, and to take care of the environment in which we live and work. diff --git a/docs/help/index.md b/docs/help/index.md index 00c20af..468a8d5 100644 --- a/docs/help/index.md +++ b/docs/help/index.md @@ -15,4 +15,4 @@ Welcome to the Ultralytics Help page! We are committed to providing you with com - [Environmental, Health and Safety (EHS) Policy](environmental-health-safety.md): Explore Ultralytics' dedicated approach towards maintaining a sustainable, safe, and healthy work environment for all our stakeholders. - [Security Policy](../SECURITY.md): Understand our security practices and how to report security vulnerabilities responsibly. -We highly recommend going through these guides to make the most of your collaboration with the Ultralytics community. Our goal is to maintain a welcoming and supportive environment for all users and contributors. If you need further assistance, don't hesitate to reach out to us through GitHub Issues or the official discussion forum. Happy coding! \ No newline at end of file +We highly recommend going through these guides to make the most of your collaboration with the Ultralytics community. Our goal is to maintain a welcoming and supportive environment for all users and contributors. If you need further assistance, don't hesitate to reach out to us through GitHub Issues or the official discussion forum. Happy coding! diff --git a/docs/help/minimum_reproducible_example.md b/docs/help/minimum_reproducible_example.md index 0dba0e6..47a0cdf 100644 --- a/docs/help/minimum_reproducible_example.md +++ b/docs/help/minimum_reproducible_example.md @@ -75,4 +75,4 @@ RuntimeError: Expected input[1, 0, 640, 640] to have 3 channels, but got 0 chann In this example, the MRE demonstrates the issue with a minimal amount of code, uses a public model ('yolov8n.pt'), includes all necessary dependencies, and provides a clear description of the problem along with the error message.
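Building on that description, a minimal sketch of what the code portion of such an MRE could look like, assuming the tiny public `coco8.yaml` dataset keeps the report reproducible:

```python
from ultralytics import YOLO

# Public weights and a tiny public dataset keep the example reproducible for maintainers
model = YOLO('yolov8n.pt')

# The smallest run that still triggers the behavior being reported
results = model.train(data='coco8.yaml', epochs=1, imgsz=64)
```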
-By following these guidelines, you'll help the maintainers and contributors of Ultralytics YOLO repositories to understand and resolve your issue more efficiently. \ No newline at end of file +By following these guidelines, you'll help the maintainers and contributors of Ultralytics YOLO repositories to understand and resolve your issue more efficiently. diff --git a/docs/hub/app/android.md b/docs/hub/app/android.md index 1177db5..20ab090 100644 --- a/docs/hub/app/android.md +++ b/docs/hub/app/android.md @@ -63,4 +63,4 @@ To get started with the Ultralytics Android App, follow these steps: 6. Explore the app's settings to adjust the detection threshold, enable or disable specific object classes, and more. -With the Ultralytics Android App, you now have the power of real-time object detection using YOLO models right at your fingertips. Enjoy exploring the app's features and optimizing its settings to suit your specific use cases. \ No newline at end of file +With the Ultralytics Android App, you now have the power of real-time object detection using YOLO models right at your fingertips. Enjoy exploring the app's features and optimizing its settings to suit your specific use cases. diff --git a/docs/hub/app/index.md b/docs/hub/app/index.md index 4f87e7a..af20578 100644 --- a/docs/hub/app/index.md +++ b/docs/hub/app/index.md @@ -49,4 +49,4 @@ Welcome to the Ultralytics HUB App! We are excited to introduce this powerful mo - [**iOS**](./ios.md): Learn about YOLO CoreML models accelerated on Apple's Neural Engine for iPhones and iPads. - [**Android**](./android.md): Explore TFLite acceleration on Android mobile devices. -Get started today by downloading the Ultralytics HUB App on your mobile device and unlock the potential of YOLOv5 and YOLOv8 models on-the-go. Don't forget to check out our comprehensive [HUB Docs](../) for more information on training, deploying, and using your custom models with the Ultralytics HUB platform. \ No newline at end of file +Get started today by downloading the Ultralytics HUB App on your mobile device and unlock the potential of YOLOv5 and YOLOv8 models on-the-go. Don't forget to check out our comprehensive [HUB Docs](../) for more information on training, deploying, and using your custom models with the Ultralytics HUB platform. diff --git a/docs/hub/app/ios.md b/docs/hub/app/ios.md index 09c5792..c202eec 100644 --- a/docs/hub/app/ios.md +++ b/docs/hub/app/ios.md @@ -53,4 +53,4 @@ To get started with the Ultralytics iOS App, follow these steps: 6. Explore the app's settings to adjust the detection threshold, enable or disable specific object classes, and more. -With the Ultralytics iOS App, you can now leverage the power of YOLO models for real-time object detection on your iPhone or iPad, powered by the Apple Neural Engine and optimized with FP16 or INT8 quantization. \ No newline at end of file +With the Ultralytics iOS App, you can now leverage the power of YOLO models for real-time object detection on your iPhone or iPad, powered by the Apple Neural Engine and optimized with FP16 or INT8 quantization. diff --git a/docs/hub/datasets.md b/docs/hub/datasets.md index 7ca5c5a..e2ac465 100644 --- a/docs/hub/datasets.md +++ b/docs/hub/datasets.md @@ -156,4 +156,4 @@ Navigate to the Dataset page of the dataset you want to delete, open the dataset If you change your mind, you can restore the dataset from the [Trash](https://hub.ultralytics.com/trash) page. 
- ![Ultralytics HUB screenshot of the Trash page with an arrow pointing to the Restore option of one of the datasets](https://raw.githubusercontent.com/ultralytics/assets/main/docs/hub/datasets/hub_delete_dataset_3.jpg) \ No newline at end of file + ![Ultralytics HUB screenshot of the Trash page with an arrow pointing to the Restore option of one of the datasets](https://raw.githubusercontent.com/ultralytics/assets/main/docs/hub/datasets/hub_delete_dataset_3.jpg) diff --git a/docs/hub/index.md b/docs/hub/index.md index 790bfae..26dec78 100644 --- a/docs/hub/index.md +++ b/docs/hub/index.md @@ -39,4 +39,4 @@ We hope that the resources here will help you get the most out of HUB. Please br - [**Ultralytics HUB App**](./app/index.md). Learn about the Ultralytics App for iOS and Android, which allows you to run models directly on your mobile device. * [**iOS**](./app/ios.md). Learn about YOLO CoreML models accelerated on Apple's Neural Engine on iPhones and iPads. * [**Android**](./app/android.md). Explore TFLite acceleration on mobile devices. -- [**Inference API**](./inference_api.md). Understand how to use the Inference API for running your trained models in the cloud to generate predictions. \ No newline at end of file +- [**Inference API**](./inference_api.md). Understand how to use the Inference API for running your trained models in the cloud to generate predictions. diff --git a/docs/hub/inference_api.md b/docs/hub/inference_api.md index ca623d1..4cc62bf 100644 --- a/docs/hub/inference_api.md +++ b/docs/hub/inference_api.md @@ -111,7 +111,7 @@ YOLO detection models, such as `yolov8n.pt`, can return JSON responses from loca === "Local" ```python from ultralytics import YOLO - + # Load model model = YOLO('yolov8n.pt') @@ -119,12 +119,12 @@ YOLO detection models, such as `yolov8n.pt`, can return JSON responses from loca results = model('image.jpg') # Print image.jpg results in JSON format - print(results[0].tojson()) + print(results[0].tojson()) ``` === "CLI API" ```bash - curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \ + curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \ -H "x-api-key: API_KEY" \ -F "image=@/path/to/image.jpg" \ -F "size=640" \ @@ -135,21 +135,21 @@ YOLO detection models, such as `yolov8n.pt`, can return JSON responses from loca === "Python API" ```python import requests - + # API URL, use actual MODEL_ID url = f"https://api.ultralytics.com/v1/predict/MODEL_ID" - + # Headers, use actual API_KEY headers = {"x-api-key": "API_KEY"} - + # Inference arguments (optional) data = {"size": 640, "confidence": 0.25, "iou": 0.45} - + # Load image and send request with open("path/to/image.jpg", "rb") as image_file: files = {"image": image_file} response = requests.post(url, headers=headers, files=files, data=data) - + print(response.json()) ``` @@ -205,7 +205,7 @@ YOLO segmentation models, such as `yolov8n-seg.pt`, can return JSON responses fr === "Local" ```python from ultralytics import YOLO - + # Load model model = YOLO('yolov8n-seg.pt') @@ -213,12 +213,12 @@ YOLO segmentation models, such as `yolov8n-seg.pt`, can return JSON responses fr results = model('image.jpg') # Print image.jpg results in JSON format - print(results[0].tojson()) + print(results[0].tojson()) ``` === "CLI API" ```bash - curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \ + curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \ -H "x-api-key: API_KEY" \ -F "image=@/path/to/image.jpg" \ -F "size=640" \ @@ -229,21 +229,21 @@ YOLO segmentation models, such as 
`yolov8n-seg.pt`, can return JSON responses fr === "Python API" ```python import requests - + # API URL, use actual MODEL_ID url = f"https://api.ultralytics.com/v1/predict/MODEL_ID" - + # Headers, use actual API_KEY headers = {"x-api-key": "API_KEY"} - + # Inference arguments (optional) data = {"size": 640, "confidence": 0.25, "iou": 0.45} - + # Load image and send request with open("path/to/image.jpg", "rb") as image_file: files = {"image": image_file} response = requests.post(url, headers=headers, files=files, data=data) - + print(response.json()) ``` @@ -342,7 +342,7 @@ YOLO pose models, such as `yolov8n-pose.pt`, can return JSON responses from loca === "Local" ```python from ultralytics import YOLO - + # Load model model = YOLO('yolov8n-pose.pt') @@ -350,12 +350,12 @@ YOLO pose models, such as `yolov8n-pose.pt`, can return JSON responses from loca results = model('image.jpg') # Print image.jpg results in JSON format - print(results[0].tojson()) + print(results[0].tojson()) ``` === "CLI API" ```bash - curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \ + curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \ -H "x-api-key: API_KEY" \ -F "image=@/path/to/image.jpg" \ -F "size=640" \ @@ -366,21 +366,21 @@ YOLO pose models, such as `yolov8n-pose.pt`, can return JSON responses from loca === "Python API" ```python import requests - + # API URL, use actual MODEL_ID url = f"https://api.ultralytics.com/v1/predict/MODEL_ID" - + # Headers, use actual API_KEY headers = {"x-api-key": "API_KEY"} - + # Inference arguments (optional) data = {"size": 640, "confidence": 0.25, "iou": 0.45} - + # Load image and send request with open("path/to/image.jpg", "rb") as image_file: files = {"image": image_file} response = requests.post(url, headers=headers, files=files, data=data) - + print(response.json()) ``` @@ -455,4 +455,4 @@ YOLO pose models, such as `yolov8n-pose.pt`, can return JSON responses from loca } ] } - ``` \ No newline at end of file + ``` diff --git a/docs/hub/models.md b/docs/hub/models.md index ccaae53..a8bb36a 100644 --- a/docs/hub/models.md +++ b/docs/hub/models.md @@ -210,4 +210,4 @@ Navigate to the Model page of the model you want to delete, open the model actio If you change your mind, you can restore the model from the [Trash](https://hub.ultralytics.com/trash) page. - ![Ultralytics HUB screenshot of the Trash page with an arrow pointing to the Restore option of one of the models](https://raw.githubusercontent.com/ultralytics/assets/main/docs/hub/models/hub_delete_model_3.jpg) \ No newline at end of file + ![Ultralytics HUB screenshot of the Trash page with an arrow pointing to the Restore option of one of the models](https://raw.githubusercontent.com/ultralytics/assets/main/docs/hub/models/hub_delete_model_3.jpg) diff --git a/docs/hub/projects.md b/docs/hub/projects.md index 8577936..41c7732 100644 --- a/docs/hub/projects.md +++ b/docs/hub/projects.md @@ -166,4 +166,4 @@ Navigate to the Project page of the project where the model you want to move is
-![Ultralytics HUB screenshot of the Transfer Model dialog with an arrow pointing to the dropdown and one to the Save button](https://raw.githubusercontent.com/ultralytics/assets/main/docs/hub/projects/hub_transfer_models_3.jpg) \ No newline at end of file +![Ultralytics HUB screenshot of the Transfer Model dialog with an arrow pointing to the dropdown and one to the Save button](https://raw.githubusercontent.com/ultralytics/assets/main/docs/hub/projects/hub_transfer_models_3.jpg) diff --git a/docs/index.md b/docs/index.md index f3e9ff5..b92aa9a 100644 --- a/docs/index.md +++ b/docs/index.md @@ -48,4 +48,4 @@ Ultralytics YOLO repositories like YOLOv3, YOLOv5, or YOLOv8 are available under - **AGPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for details. - **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of AGPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). -Please note our licensing approach ensures that any enhancements made to our open-source projects are shared back to the community. We firmly believe in the principles of open source, and we are committed to ensuring that our work can be used and improved upon in a manner that benefits everyone. \ No newline at end of file +Please note our licensing approach ensures that any enhancements made to our open-source projects are shared back to the community. We firmly believe in the principles of open source, and we are committed to ensuring that our work can be used and improved upon in a manner that benefits everyone. diff --git a/docs/models/fast-sam.md b/docs/models/fast-sam.md index fae0c8c..aaa8813 100644 --- a/docs/models/fast-sam.md +++ b/docs/models/fast-sam.md @@ -166,4 +166,4 @@ We would like to acknowledge the FastSAM authors for their significant contribut } ``` -The original FastSAM paper can be found on [arXiv](https://arxiv.org/abs/2306.12156). The authors have made their work publicly available, and the codebase can be accessed on [GitHub](https://github.com/CASIA-IVA-Lab/FastSAM). We appreciate their efforts in advancing the field and making their work accessible to the broader community. \ No newline at end of file +The original FastSAM paper can be found on [arXiv](https://arxiv.org/abs/2306.12156). The authors have made their work publicly available, and the codebase can be accessed on [GitHub](https://github.com/CASIA-IVA-Lab/FastSAM). We appreciate their efforts in advancing the field and making their work accessible to the broader community. diff --git a/docs/models/index.md b/docs/models/index.md index 04d7044..e683790 100644 --- a/docs/models/index.md +++ b/docs/models/index.md @@ -45,4 +45,4 @@ model.info() # display model information model.train(data="coco128.yaml", epochs=100) # train the model ``` -For more details on each model, their supported tasks, modes, and performance, please visit their respective documentation pages linked above. \ No newline at end of file +For more details on each model, their supported tasks, modes, and performance, please visit their respective documentation pages linked above. 
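To round out the loading and training calls in the snippet above, a brief inference sketch; the `results[0].boxes` accessor follows the Results API documented in docs/modes/predict.md below:

```python
from ultralytics import YOLO

# Load a pretrained model and run a single prediction
model = YOLO('yolov8n.pt')
results = model('https://ultralytics.com/images/bus.jpg')

# Inspect the detected bounding boxes of the first result
print(results[0].boxes)
```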
diff --git a/docs/models/mobile-sam.md b/docs/models/mobile-sam.md index 0bf7a65..66d2391 100644 --- a/docs/models/mobile-sam.md +++ b/docs/models/mobile-sam.md @@ -96,4 +96,4 @@ If you find MobileSAM useful in your research or development work, please consid journal={arXiv preprint arXiv:2306.14289}, year={2023} } -``` \ No newline at end of file +``` diff --git a/docs/models/rtdetr.md b/docs/models/rtdetr.md index 76b90ac..6c8b642 100644 --- a/docs/models/rtdetr.md +++ b/docs/models/rtdetr.md @@ -71,4 +71,4 @@ If you use Baidu's RT-DETR in your research or development work, please cite the We would like to acknowledge Baidu and the [PaddlePaddle](https://github.com/PaddlePaddle/PaddleDetection) team for creating and maintaining this valuable resource for the computer vision community. Their contribution to the field with the development of the Vision Transformers-based real-time object detector, RT-DETR, is greatly appreciated. -*Keywords: RT-DETR, Transformer, ViT, Vision Transformers, Baidu RT-DETR, PaddlePaddle, Paddle Paddle RT-DETR, real-time object detection, Vision Transformers-based object detection, pre-trained PaddlePaddle RT-DETR models, Baidu's RT-DETR usage, Ultralytics Python API* \ No newline at end of file +*Keywords: RT-DETR, Transformer, ViT, Vision Transformers, Baidu RT-DETR, PaddlePaddle, Paddle Paddle RT-DETR, real-time object detection, Vision Transformers-based object detection, pre-trained PaddlePaddle RT-DETR models, Baidu's RT-DETR usage, Ultralytics Python API* diff --git a/docs/models/sam.md b/docs/models/sam.md index ab47fa4..cd2d168 100644 --- a/docs/models/sam.md +++ b/docs/models/sam.md @@ -37,10 +37,10 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t Segment image with given prompts. === "Python" - + ```python from ultralytics import SAM - + # Load a model model = SAM('sam_b.pt') @@ -59,10 +59,10 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t Segment the whole image. === "Python" - + ```python from ultralytics import SAM - + # Load a model model = SAM('sam_b.pt') @@ -73,7 +73,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t model('path/to/image.jpg') ``` === "CLI" - + ```bash # Run inference with a SAM model yolo predict model=sam_b.pt source=path/to/image.jpg @@ -86,7 +86,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t This way you can set the image once and run prompt inference multiple times without running the image encoder each time. === "Prompt inference" - + ```python from ultralytics.models.sam import Predictor as SAMPredictor @@ -106,7 +106,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t Segment everything with additional args. === "Segment everything" - + ```python from ultralytics.models.sam import Predictor as SAMPredictor @@ -207,7 +207,7 @@ If you find SAM useful in your research or development work, please consider cit ```bibtex @misc{kirillov2023segment, - title={Segment Anything}, + title={Segment Anything}, author={Alexander Kirillov and Eric Mintun and Nikhila Ravi and Hanzi Mao and Chloe Rolland and Laura Gustafson and Tete Xiao and Spencer Whitehead and Alexander C.
Berg and Wan-Yen Lo and Piotr Dollár and Ross Girshick}, year={2023}, eprint={2304.02643}, @@ -218,4 +218,4 @@ If you find SAM useful in your research or development work, please consider cit We would like to express our gratitude to Meta AI for creating and maintaining this valuable resource for the computer vision community. -*keywords: Segment Anything, Segment Anything Model, SAM, Meta SAM, image segmentation, promptable segmentation, zero-shot performance, SA-1B dataset, advanced architecture, auto-annotation, Ultralytics, pre-trained models, SAM base, SAM large, instance segmentation, computer vision, AI, artificial intelligence, machine learning, data annotation, segmentation masks, detection model, YOLO detection model, bibtex, Meta AI.* \ No newline at end of file +*keywords: Segment Anything, Segment Anything Model, SAM, Meta SAM, image segmentation, promptable segmentation, zero-shot performance, SA-1B dataset, advanced architecture, auto-annotation, Ultralytics, pre-trained models, SAM base, SAM large, instance segmentation, computer vision, AI, artificial intelligence, machine learning, data annotation, segmentation masks, detection model, YOLO detection model, bibtex, Meta AI.* diff --git a/docs/models/yolo-nas.md b/docs/models/yolo-nas.md index e21574c..2dd834d 100644 --- a/docs/models/yolo-nas.md +++ b/docs/models/yolo-nas.md @@ -106,4 +106,4 @@ If you employ YOLO-NAS in your research or development work, please cite SuperGr We express our gratitude to Deci AI's [SuperGradients](https://github.com/Deci-AI/super-gradients/) team for their efforts in creating and maintaining this valuable resource for the computer vision community. We believe YOLO-NAS, with its innovative architecture and superior object detection capabilities, will become a critical tool for developers and researchers alike. -*Keywords: YOLO-NAS, Deci AI, object detection, deep learning, neural architecture search, Ultralytics Python API, YOLO model, SuperGradients, pre-trained models, quantization-friendly basic block, advanced training schemes, post-training quantization, AutoNAC optimization, COCO, Objects365, Roboflow 100* \ No newline at end of file +*Keywords: YOLO-NAS, Deci AI, object detection, deep learning, neural architecture search, Ultralytics Python API, YOLO model, SuperGradients, pre-trained models, quantization-friendly basic block, advanced training schemes, post-training quantization, AutoNAC optimization, COCO, Objects365, Roboflow 100* diff --git a/docs/models/yolov3.md b/docs/models/yolov3.md index 6315d1f..703829d 100644 --- a/docs/models/yolov3.md +++ b/docs/models/yolov3.md @@ -77,4 +77,4 @@ If you use YOLOv3 in your research, please cite the original YOLO papers and the } ``` -Thank you to Joseph Redmon and Ali Farhadi for developing the original YOLOv3. \ No newline at end of file +Thank you to Joseph Redmon and Ali Farhadi for developing the original YOLOv3. 
diff --git a/docs/models/yolov4.md b/docs/models/yolov4.md index ce78cf8..60d4527 100644 --- a/docs/models/yolov4.md +++ b/docs/models/yolov4.md @@ -55,7 +55,7 @@ We would like to acknowledge the YOLOv4 authors for their significant contributi ```bibtex @misc{bochkovskiy2020yolov4, - title={YOLOv4: Optimal Speed and Accuracy of Object Detection}, + title={YOLOv4: Optimal Speed and Accuracy of Object Detection}, author={Alexey Bochkovskiy and Chien-Yao Wang and Hong-Yuan Mark Liao}, year={2020}, eprint={2004.10934}, @@ -64,4 +64,4 @@ We would like to acknowledge the YOLOv4 authors for their significant contributi } ``` -The original YOLOv4 paper can be found on [arXiv](https://arxiv.org/pdf/2004.10934.pdf). The authors have made their work publicly available, and the codebase can be accessed on [GitHub](https://github.com/AlexeyAB/darknet). We appreciate their efforts in advancing the field and making their work accessible to the broader community. \ No newline at end of file +The original YOLOv4 paper can be found on [arXiv](https://arxiv.org/pdf/2004.10934.pdf). The authors have made their work publicly available, and the codebase can be accessed on [GitHub](https://github.com/AlexeyAB/darknet). We appreciate their efforts in advancing the field and making their work accessible to the broader community. diff --git a/docs/models/yolov5.md b/docs/models/yolov5.md index 231dad7..5ade9f4 100644 --- a/docs/models/yolov5.md +++ b/docs/models/yolov5.md @@ -86,4 +86,4 @@ If you use YOLOv5 or YOLOv5u in your research, please cite the Ultralytics YOLOv } ``` -Special thanks to Glenn Jocher and the Ultralytics team for their work on developing and maintaining the YOLOv5 and YOLOv5u models. \ No newline at end of file +Special thanks to Glenn Jocher and the Ultralytics team for their work on developing and maintaining the YOLOv5 and YOLOv5u models. diff --git a/docs/models/yolov6.md b/docs/models/yolov6.md index 1012c13..2f13a80 100644 --- a/docs/models/yolov6.md +++ b/docs/models/yolov6.md @@ -70,7 +70,7 @@ We would like to acknowledge the authors for their significant contributions in ```bibtex @misc{li2023yolov6, - title={YOLOv6 v3.0: A Full-Scale Reloading}, + title={YOLOv6 v3.0: A Full-Scale Reloading}, author={Chuyi Li and Lulu Li and Yifei Geng and Hongliang Jiang and Meng Cheng and Bo Zhang and Zaidan Ke and Xiaoming Xu and Xiangxiang Chu}, year={2023}, eprint={2301.05586}, @@ -79,4 +79,4 @@ We would like to acknowledge the authors for their significant contributions in } ``` -The original YOLOv6 paper can be found on [arXiv](https://arxiv.org/abs/2301.05586). The authors have made their work publicly available, and the codebase can be accessed on [GitHub](https://github.com/meituan/YOLOv6). We appreciate their efforts in advancing the field and making their work accessible to the broader community. \ No newline at end of file +The original YOLOv6 paper can be found on [arXiv](https://arxiv.org/abs/2301.05586). The authors have made their work publicly available, and the codebase can be accessed on [GitHub](https://github.com/meituan/YOLOv6). We appreciate their efforts in advancing the field and making their work accessible to the broader community. diff --git a/docs/models/yolov7.md b/docs/models/yolov7.md index 20ae129..1350bbb 100644 --- a/docs/models/yolov7.md +++ b/docs/models/yolov7.md @@ -58,4 +58,4 @@ We would like to acknowledge the YOLOv7 authors for their significant contributi } ``` -The original YOLOv7 paper can be found on [arXiv](https://arxiv.org/pdf/2207.02696.pdf). 
The authors have made their work publicly available, and the codebase can be accessed on [GitHub](https://github.com/WongKinYiu/yolov7). We appreciate their efforts in advancing the field and making their work accessible to the broader community. \ No newline at end of file +The original YOLOv7 paper can be found on [arXiv](https://arxiv.org/pdf/2207.02696.pdf). The authors have made their work publicly available, and the codebase can be accessed on [GitHub](https://github.com/WongKinYiu/yolov7). We appreciate their efforts in advancing the field and making their work accessible to the broader community. diff --git a/docs/models/yolov8.md b/docs/models/yolov8.md index 02225c5..240be83 100644 --- a/docs/models/yolov8.md +++ b/docs/models/yolov8.md @@ -112,4 +112,4 @@ If you use the YOLOv8 model or any other software from this repository in your w } ``` -Please note that the DOI is pending and will be added to the citation once it is available. The usage of the software is in accordance with the AGPL-3.0 license. \ No newline at end of file +Please note that the DOI is pending and will be added to the citation once it is available. The usage of the software is in accordance with the AGPL-3.0 license. diff --git a/docs/modes/benchmark.md b/docs/modes/benchmark.md index b4fc6b0..d8bab15 100644 --- a/docs/modes/benchmark.md +++ b/docs/modes/benchmark.md @@ -25,15 +25,15 @@ full list of export arguments. !!! example "" === "Python" - + ```python from ultralytics.utils.benchmarks import benchmark - + # Benchmark on GPU benchmark(model='yolov8n.pt', data='coco8.yaml', imgsz=640, half=False, device=0) ``` === "CLI" - + ```bash yolo benchmark model=yolov8n.pt data='coco8.yaml' imgsz=640 half=False device=0 ``` diff --git a/docs/modes/export.md b/docs/modes/export.md index 9f76d36..3e83c9e 100644 --- a/docs/modes/export.md +++ b/docs/modes/export.md @@ -23,19 +23,19 @@ export arguments. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom trained model - + # Export the model model.export(format='onnx') ``` === "CLI" - + ```bash yolo export model=yolov8n.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model @@ -85,4 +85,4 @@ i.e. `format='onnx'` or `format='engine'`. | [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` | | [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz` | | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz` | -| [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half` | \ No newline at end of file +| [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half` | diff --git a/docs/modes/index.md b/docs/modes/index.md index 9cfe6dc..1d0e7c4 100644 --- a/docs/modes/index.md +++ b/docs/modes/index.md @@ -65,4 +65,4 @@ or `accuracy_top5` metrics (for classification), and the inference time in milli formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy.
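As a small variation on the documented benchmark call above, a sketch of the same benchmark run on CPU; only the `device` value differs from the GPU example in docs/modes/benchmark.md:

```python
from ultralytics.utils.benchmarks import benchmark

# Benchmark on CPU: identical to the documented GPU call, with device switched to 'cpu'
benchmark(model='yolov8n.pt', data='coco8.yaml', imgsz=640, half=False, device='cpu')
```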
-[Benchmark Examples](benchmark.md){ .md-button .md-button--primary} \ No newline at end of file +[Benchmark Examples](benchmark.md){ .md-button .md-button--primary} diff --git a/docs/modes/predict.md b/docs/modes/predict.md index 02117c3..bd3d55c 100644 --- a/docs/modes/predict.md +++ b/docs/modes/predict.md @@ -21,7 +21,7 @@ passing `stream=True` in the predictor's call method. # Run batched inference on a list of images results = model(['im1.jpg', 'im2.jpg']) # return a list of Results objects - + # Process results list for result in results: boxes = result.boxes # Boxes object for bbox outputs @@ -39,7 +39,7 @@ passing `stream=True` in the predictor's call method. # Run batched inference on a list of images results = model(['im1.jpg', 'im2.jpg'], stream=True) # return a generator of Results objects - + # Process results generator for result in results: boxes = result.boxes # Boxes object for bbox outputs @@ -65,7 +65,7 @@ YOLOv8 can process different types of input sources for inference, as shown in t | OpenCV | `cv2.imread('im.jpg')` | `np.ndarray` of `uint8 (0-255)` | HWC format with BGR channels. | | numpy | `np.zeros((640,1280,3))` | `np.ndarray` of `uint8 (0-255)` | HWC format with BGR channels. | | torch | `torch.zeros(16,3,320,640)` | `torch.Tensor` of `float32 (0.0-1.0)` | BCHW format with RGB channels. | -| CSV | `'sources.csv'` | `str` or `Path` | CSV file containing paths to images, videos, or directories. | +| CSV | `'sources.csv'` | `str` or `Path` | CSV file containing paths to images, videos, or directories. | | video ✅ | `'video.mp4'` | `str` or `Path` | Video file in formats like MP4, AVI, etc. | | directory ✅ | `'path/'` | `str` or `Path` | Path to a directory containing images or videos. | | glob ✅ | `'path/*.jpg'` | `str` | Glob pattern to match multiple files. Use the `*` character as a wildcard. | @@ -77,204 +77,204 @@ Below are code examples for using each source type: !!! example "Prediction sources" === "image" - Run inference on an image file. + Run inference on an image file. ```python from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Define path to the image file source = 'path/to/image.jpg' - + # Run inference on the source results = model(source) # list of Results objects ``` - + === "screenshot" Run inference on the current screen content as a screenshot. ```python from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Define current screenshot as source source = 'screen' - + # Run inference on the source results = model(source) # list of Results objects ``` - + === "URL" Run inference on an image or video hosted remotely via URL. ```python from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Define remote image or video URL source = 'https://ultralytics.com/images/bus.jpg' - + # Run inference on the source results = model(source) # list of Results objects ``` - + === "PIL" Run inference on an image opened with Python Imaging Library (PIL). ```python from PIL import Image from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Open an image using PIL source = Image.open('path/to/image.jpg') - + # Run inference on the source results = model(source) # list of Results objects ``` - + === "OpenCV" Run inference on an image read with OpenCV. 
```python import cv2 from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Read an image using OpenCV source = cv2.imread('path/to/image.jpg') - + # Run inference on the source results = model(source) # list of Results objects ``` - + === "numpy" Run inference on an image represented as a numpy array. ```python import numpy as np from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Create a random numpy array of HWC shape (640, 640, 3) with values in range [0, 255] and type uint8 source = np.random.randint(low=0, high=255, size=(640, 640, 3), dtype='uint8') - + # Run inference on the source results = model(source) # list of Results objects ``` - + === "torch" Run inference on an image represented as a PyTorch tensor. ```python import torch from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Create a random torch tensor of BCHW shape (1, 3, 640, 640) with values in range [0, 1] and type float32 source = torch.rand(1, 3, 640, 640, dtype=torch.float32) - + # Run inference on the source results = model(source) # list of Results objects ``` - + === "CSV" Run inference on a collection of images, URLs, videos and directories listed in a CSV file. ```python from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Define a path to a CSV file with images, URLs, videos and directories source = 'path/to/file.csv' - + # Run inference on the source results = model(source) # list of Results objects ``` - + === "video" Run inference on a video file. By using `stream=True`, you can create a generator of Results objects to reduce memory usage. ```python from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Define path to video file source = 'path/to/video.mp4' - + # Run inference on the source results = model(source, stream=True) # generator of Results objects ``` - + === "directory" Run inference on all images and videos in a directory. To also capture images and videos in subdirectories use a glob pattern, i.e. `path/to/dir/**/*`. ```python from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Define path to directory containing images and videos for inference source = 'path/to/dir' - + # Run inference on the source results = model(source, stream=True) # generator of Results objects ``` - + === "glob" Run inference on all images and videos that match a glob expression with `*` characters. ```python from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Define a glob search for all JPG files in a directory source = 'path/to/dir/*.jpg' - + # OR define a recursive glob search for all JPG files including subdirectories source = 'path/to/dir/**/*.jpg' - + # Run inference on the source results = model(source, stream=True) # generator of Results objects ``` - + === "YouTube" Run inference on a YouTube video. By using `stream=True`, you can create a generator of Results objects to reduce memory usage for long videos.
```python from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Define source as YouTube video URL source = 'https://youtu.be/Zgi9g1ksQHc' - + # Run inference on the source results = model(source, stream=True) # generator of Results objects ``` - + === "Stream" Run inference on remote streaming sources using RTSP, RTMP, and IP address protocols. ```python from ultralytics import YOLO - + # Load a pretrained YOLOv8n model model = YOLO('yolov8n.pt') - + # Define source as RTSP, RTMP or IP streaming address source = 'rtsp://example.com/media.mp4' - + # Run inference on the source results = model(source, stream=True) # generator of Results objects ``` @@ -417,7 +417,7 @@ operations are cached, meaning they're only calculated once per object, and thos masks = results[0].masks # Masks object masks.xy # x, y segments (pixels), List[segment] * N masks.xyn # x, y segments (normalized), List[segment] * N - masks.data # raw masks tensor, (N, H, W) or masks.masks + masks.data # raw masks tensor, (N, H, W) or masks.masks ``` ### Keypoints @@ -432,7 +432,7 @@ operations are cached, meaning they're only calculated once per object, and thos keypoints.xy # x, y keypoints (pixels), (num_dets, num_kpts, 2/3), the last dimension can be 2 or 3, depending on the model. keypoints.xyn # x, y keypoints (normalized), (num_dets, num_kpts, 2/3) keypoints.conf # confidence score (num_dets, num_kpts) of each keypoint if the last dimension is 3. - keypoints.data # raw keypoints tensor, (num_dets, num_kpts, 2/3) + keypoints.data # raw keypoints tensor, (num_dets, num_kpts, 2/3) ``` ### probs @@ -448,7 +448,7 @@ operations are cached, meaning they're only calculated once per object, and thos probs.top1 # The top1 indices of classification, a value with Int type. probs.top5conf # The top5 scores of classification, a tensor with shape (5, ). probs.top1conf # The top1 scores of classification, a value with torch.tensor type. - keypoints.data # raw probs tensor, (num_class, ) + probs.data # raw probs tensor, (num_class, ) ``` Class reference documentation for `Results` module and its components can be found [here](../reference/engine/results.md) @@ -489,37 +489,37 @@ Here's a Python script using OpenCV (cv2) and YOLOv8 to run inference on video f ```python import cv2 from ultralytics import YOLO - + # Load the YOLOv8 model model = YOLO('yolov8n.pt') - + # Open the video file video_path = "path/to/your/video/file.mp4" cap = cv2.VideoCapture(video_path) - + # Loop through the video frames while cap.isOpened(): # Read a frame from the video success, frame = cap.read() - + if success: # Run YOLOv8 inference on the frame results = model(frame) - + # Visualize the results on the frame annotated_frame = results[0].plot() - + # Display the annotated frame cv2.imshow("YOLOv8 Inference", annotated_frame) - + # Break the loop if 'q' is pressed if cv2.waitKey(1) & 0xFF == ord("q"): break else: # Break the loop if the end of the video is reached break - + # Release the video capture object and close the display window cap.release() cv2.destroyAllWindows() - ``` \ No newline at end of file + ``` diff --git a/docs/modes/track.md b/docs/modes/track.md index 9c50d85..bc99826 100644 --- a/docs/modes/track.md +++ b/docs/modes/track.md @@ -27,21 +27,21 @@ Use a trained YOLOv8n/YOLOv8n-seg model to run tracker on video streams. !!!
example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.pt') # load an official detection model model = YOLO('yolov8n-seg.pt') # load an official segmentation model model = YOLO('path/to/best.pt') # load a custom model - + # Track with the model - results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True) - results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True, tracker="bytetrack.yaml") + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True) + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True, tracker="bytetrack.yaml") ``` === "CLI" - + ```bash yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" # official detection model yolo track model=yolov8n-seg.pt source=... # official segmentation model @@ -62,15 +62,15 @@ to [predict page](https://docs.ultralytics.com/modes/predict/). !!! example "" === "Python" - + ```python from ultralytics import YOLO - + model = YOLO('yolov8n.pt') - results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5, show=True) + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5, show=True) ``` === "CLI" - + ```bash yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" conf=0.3, iou=0.5 show @@ -84,18 +84,18 @@ any configurations(expect the `tracker_type`) you need to. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + model = YOLO('yolov8n.pt') - results = model.track(source="https://youtu.be/Zgi9g1ksQHc", tracker='custom_tracker.yaml') + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", tracker='custom_tracker.yaml') ``` === "CLI" - + ```bash yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" tracker='custom_tracker.yaml' ``` Please refer to [ultralytics/cfg/trackers](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/trackers) -page \ No newline at end of file +page diff --git a/docs/modes/train.md b/docs/modes/train.md index 89b1b9c..d853495 100644 --- a/docs/modes/train.md +++ b/docs/modes/train.md @@ -21,20 +21,20 @@ Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. See Argum Device is determined automatically. If a GPU is available then it will be used, otherwise training will start on CPU. === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.yaml') # build a new model from YAML model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) model = YOLO('yolov8n.yaml').load('yolov8n.pt') # build from YAML and transfer weights - + # Train the model model.train(data='coco128.yaml', epochs=100, imgsz=640) ``` === "CLI" - + ```bash # Build a new model from YAML and start training from scratch yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640 @@ -53,18 +53,18 @@ The training device can be specified using the `device` argument. If no argument !!! 
example "Multi-GPU Training Example" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) - + # Train the model with 2 GPUs model.train(data='coco128.yaml', epochs=100, imgsz=640, device=[0, 1]) ``` === "CLI" - + ```bash # Start training from a pretrained *.pt model using GPUs 0 and 1 yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 device=0,1 @@ -79,18 +79,18 @@ To enable training on Apple M1 and M2 chips, you should specify 'mps' as your de !!! example "MPS Training Example" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) - + # Train the model with 2 GPUs model.train(data='coco128.yaml', epochs=100, imgsz=640, device='mps') ``` === "CLI" - + ```bash # Start training from a pretrained *.pt model using GPUs 0 and 1 yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 device=mps @@ -111,18 +111,18 @@ Below is an example of how to resume an interrupted training using Python and vi !!! example "Resume Training Example" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('path/to/last.pt') # load a partially trained model - + # Resume training model.train(resume=True) ``` === "CLI" - + ```bash # Resume an interrupted training yolo train resume model=path/to/last.pt @@ -239,4 +239,4 @@ tensorboard --logdir ultralytics/runs # replace with 'runs' directory This will load TensorBoard and direct it to the directory where your training logs are saved. -After setting up your logger, you can then proceed with your model training. All training metrics will be automatically logged in your chosen platform, and you can access these logs to monitor your model's performance over time, compare different models, and identify areas for improvement. \ No newline at end of file +After setting up your logger, you can then proceed with your model training. All training metrics will be automatically logged in your chosen platform, and you can access these logs to monitor your model's performance over time, compare different models, and identify areas for improvement. diff --git a/docs/modes/val.md b/docs/modes/val.md index 8ec6fc5..cd8fd5f 100644 --- a/docs/modes/val.md +++ b/docs/modes/val.md @@ -19,14 +19,14 @@ Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom model - + # Validate the model metrics = model.val() # no arguments needed, dataset and settings remembered metrics.box.map # map50-95 @@ -35,7 +35,7 @@ Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need metrics.box.maps # a list contains map50-95 of each category ``` === "CLI" - + ```bash yolo detect val model=yolov8n.pt # val official model yolo detect val model=path/to/best.pt # val custom model @@ -61,4 +61,4 @@ Validation settings for YOLO models refer to the various hyperparameters and con | `plots` | `False` | show plots during training | | `rect` | `False` | rectangular val with each batch collated for minimum padding | | `split` | `val` | dataset split to use for validation, i.e. 
'val', 'test' or 'train' | -| \ No newline at end of file +| diff --git a/docs/quickstart.md b/docs/quickstart.md index a910983..bc50c32 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -19,7 +19,7 @@ Ultralytics provides various installation methods including pip, conda, and Dock # Install the ultralytics package using pip pip install ultralytics ``` - + === "Conda install" Conda is an alternative package manager to pip which may also be used for installation. Visit Anaconda for more details at [https://anaconda.org/conda-forge/ultralytics](https://anaconda.org/conda-forge/ultralytics). The Ultralytics feedstock repository for updating the conda package is at [https://github.com/conda-forge/ultralytics-feedstock/](https://github.com/conda-forge/ultralytics-feedstock/). ```bash # Install the ultralytics package using conda conda install ultralytics ``` - + === "Git clone" Clone the `ultralytics` repository if you are interested in contributing to the development or wish to experiment with the latest source code. After cloning, navigate into the directory and install the package in editable mode `-e` using pip. ```bash # Clone the ultralytics repository git clone https://github.com/ultralytics/ultralytics - + # Navigate to the cloned directory cd ultralytics - + # Install the package in editable mode for development pip install -e . ``` @@ -48,27 +48,27 @@ Ultralytics provides various installation methods including pip, conda, and Dock Utilize Docker to execute the `ultralytics` package in an isolated container. By employing the official `ultralytics` image from [Docker Hub](https://hub.docker.com/r/ultralytics/ultralytics), you can avoid local installation. Below are the commands to get the latest image and execute it: Docker Pulls - + ```bash # Set image name as a variable t=ultralytics/ultralytics:latest - + # Pull the latest ultralytics image from Docker Hub sudo docker pull $t - + # Run the ultralytics image in a container with GPU support sudo docker run -it --ipc=host --gpus all $t ``` - + The above command initializes a Docker container with the latest `ultralytics` image. The `-it` flag assigns a pseudo-TTY and maintains stdin open, enabling you to interact with the container. The `--ipc=host` flag sets the IPC (Inter-Process Communication) namespace to the host, which is essential for sharing memory between processes. The `--gpus all` flag enables access to all available GPUs inside the container, which is crucial for tasks that require GPU computation. - + Note: To work with files on your local machine within the container, use Docker volumes for mounting a local directory into the container: - + ```bash # Mount local directory to a directory inside the container sudo docker run -it --ipc=host --gpus all -v /path/on/host:/path/in/container $t ``` - + Replace `/path/on/host` with the directory path on your local machine, and `/path/in/container` with the desired path inside the Docker container for accessibility. See the `ultralytics` [requirements.txt](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) file for a list of dependencies. Note that all examples above install all required dependencies.
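After installing by any of the methods above, a quick sanity check, assuming the `ultralytics.checks()` helper used in the official notebooks is available:

```python
import ultralytics

# Print version, Python, torch and CUDA information to confirm the installation works
ultralytics.checks()
```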
@@ -160,24 +160,111 @@ For example, users can load a model, train it, evaluate its performance on a val ```python from ultralytics import YOLO - + # Create a new YOLO model from scratch model = YOLO('yolov8n.yaml') - + # Load a pretrained YOLO model (recommended for training) model = YOLO('yolov8n.pt') - + # Train the model using the 'coco128.yaml' dataset for 3 epochs results = model.train(data='coco128.yaml', epochs=3) - + # Evaluate the model's performance on the validation set results = model.val() - + # Perform object detection on an image using the model results = model('https://ultralytics.com/images/bus.jpg') - + # Export the model to ONNX format success = model.export(format='onnx') ``` -[Python Guide](usage/python.md){.md-button .md-button--primary} \ No newline at end of file +[Python Guide](usage/python.md){.md-button .md-button--primary} + +## Ultralytics Settings + +The Ultralytics library provides a powerful settings management system to enable fine-grained control over your experiments. By making use of the `SettingsManager` housed within the `ultralytics.utils` module, users can readily access and alter their settings. These are stored in a YAML file and can be viewed or modified either directly within the Python environment or via the Command-Line Interface (CLI). + +### Inspecting Settings + +To gain insight into the current configuration of your settings, you can view them directly: + +!!! example "View settings" + + === "Python" + You can use Python to view your settings. Start by importing the `settings` object from the `ultralytics` module. Print and return settings using the following commands: + ```python + from ultralytics import settings + + # View all settings + print(settings) + + # Return a specific setting + value = settings['runs_dir'] + ``` + + === "CLI" + Alternatively, the command-line interface allows you to check your settings with a simple command: + ```bash + yolo settings + ``` + +### Modifying Settings + +Ultralytics allows users to easily modify their settings. Changes can be performed in the following ways: + +!!! example "Update settings" + + === "Python" + Within the Python environment, call the `update` method on the `settings` object to change your settings: + ```python + from ultralytics import settings + + # Update a setting + settings.update({'runs_dir': '/path/to/runs'}) + + # Update multiple settings + settings.update({'runs_dir': '/path/to/runs', 'tensorboard': False}) + + # Reset settings to default values + settings.reset() + ``` + + === "CLI" + If you prefer using the command-line interface, the following command will allow you to modify your settings: + ```bash + # Update a setting + yolo settings runs_dir='/path/to/runs' + + # Update multiple settings + yolo settings runs_dir='/path/to/runs' tensorboard=False + + # Reset settings to default values + yolo settings reset + ``` + +### Understanding Settings + +The table below provides an overview of the settings available for adjustment within Ultralytics. Each setting is outlined along with an example value, the data type, and a brief description. 
+ +| Name | Example Value | Data Type | Description | +|--------------------|-----------------------|-----------|------------------------------------------------------------------------------------------------------------------| +| `settings_version` | `'0.0.4'` | `str` | Ultralytics _settings_ version (different from Ultralytics [pip](https://pypi.org/project/ultralytics/) version) | +| `datasets_dir` | `'/path/to/datasets'` | `str` | The directory where the datasets are stored | +| `weights_dir` | `'/path/to/weights'` | `str` | The directory where the model weights are stored | +| `runs_dir` | `'/path/to/runs'` | `str` | The directory where the experiment runs are stored | +| `uuid` | `'a1b2c3d4'` | `str` | The unique identifier for the current settings | +| `sync` | `True` | `bool` | Whether to sync analytics and crashes to HUB | +| `api_key` | `''` | `str` | Ultralytics HUB [API Key](https://hub.ultralytics.com/settings?tab=api+keys) | +| `clearml` | `True` | `bool` | Whether to use ClearML logging | +| `comet` | `True` | `bool` | Whether to use [Comet ML](https://bit.ly/yolov8-readme-comet) for experiment tracking and visualization | +| `dvc` | `True` | `bool` | Whether to use DVC for version control | +| `hub` | `True` | `bool` | Whether to use [Ultralytics HUB](https://hub.ultralytics.com) integration | +| `mlflow` | `True` | `bool` | Whether to use MLFlow for experiment tracking | +| `neptune` | `True` | `bool` | Whether to use Neptune for experiment tracking | +| `raytune` | `True` | `bool` | Whether to use Ray Tune for hyperparameter tuning | +| `tensorboard` | `True` | `bool` | Whether to use TensorBoard for visualization | +| `wandb` | `True` | `bool` | Whether to use Weights & Biases logging | + +As you navigate through your projects or experiments, be sure to revisit these settings to ensure that they are optimally configured for your needs. diff --git a/docs/reference/cfg/__init__.md b/docs/reference/cfg/__init__.md index e400d20..e96d0b0 100644 --- a/docs/reference/cfg/__init__.md +++ b/docs/reference/cfg/__init__.md @@ -18,9 +18,9 @@ keywords: Ultralytics, YOLO, Configuration, cfg2dict, handle_deprecation, merge_ ### ::: ultralytics.cfg._handle_deprecation

-## check_cfg_mismatch
+## check_dict_alignment
 ---
-### ::: ultralytics.cfg.check_cfg_mismatch
+### ::: ultralytics.cfg.check_dict_alignment

## merge_equals_args @@ -38,6 +38,16 @@ keywords: Ultralytics, YOLO, Configuration, cfg2dict, handle_deprecation, merge_ ### ::: ultralytics.cfg.handle_yolo_settings

+## parse_key_value_pair
+---
+### ::: ultralytics.cfg.parse_key_value_pair
+
+
+## smart_value
+---
+### ::: ultralytics.cfg.smart_value
+

+ ## entrypoint --- ### ::: ultralytics.cfg.entrypoint @@ -46,4 +56,4 @@ keywords: Ultralytics, YOLO, Configuration, cfg2dict, handle_deprecation, merge_ ## copy_default_cfg --- ### ::: ultralytics.cfg.copy_default_cfg -

\ No newline at end of file +

diff --git a/docs/reference/data/annotator.md b/docs/reference/data/annotator.md index 11fbd16..2cac116 100644 --- a/docs/reference/data/annotator.md +++ b/docs/reference/data/annotator.md @@ -6,4 +6,4 @@ keywords: Ultralytics, Auto-Annotate, Machine Learning, AI, Annotation, Data Pro ## auto_annotate --- ### ::: ultralytics.data.annotator.auto_annotate -

\ No newline at end of file +

diff --git a/docs/reference/data/augment.md b/docs/reference/data/augment.md index c6c6498..b59b226 100644 --- a/docs/reference/data/augment.md +++ b/docs/reference/data/augment.md @@ -96,4 +96,4 @@ keywords: Ultralytics, Data Augmentation, BaseTransform, MixUp, RandomHSV, Lette ## classify_albumentations --- ### ::: ultralytics.data.augment.classify_albumentations -

\ No newline at end of file +

diff --git a/docs/reference/data/base.md b/docs/reference/data/base.md index 7336a7a..ff786b0 100644 --- a/docs/reference/data/base.md +++ b/docs/reference/data/base.md @@ -6,4 +6,4 @@ keywords: Ultralytics, docs, BaseDataset, data manipulation, dataset creation ## BaseDataset --- ### ::: ultralytics.data.base.BaseDataset -

\ No newline at end of file +

diff --git a/docs/reference/data/build.md b/docs/reference/data/build.md index 705972a..a64fa81 100644 --- a/docs/reference/data/build.md +++ b/docs/reference/data/build.md @@ -36,4 +36,4 @@ keywords: Ultralytics, YOLO v3, Data build, DataLoader, InfiniteDataLoader, seed ## load_inference_source --- ### ::: ultralytics.data.build.load_inference_source -

\ No newline at end of file +

diff --git a/docs/reference/data/converter.md b/docs/reference/data/converter.md index 9fbefec..4600cff 100644 --- a/docs/reference/data/converter.md +++ b/docs/reference/data/converter.md @@ -31,4 +31,4 @@ keywords: Ultralytics, Data Converter, coco91_to_coco80_class, merge_multi_segme ## delete_dsstore --- ### ::: ultralytics.data.converter.delete_dsstore -

\ No newline at end of file +

diff --git a/docs/reference/data/dataset.md b/docs/reference/data/dataset.md index 015c934..fe94f61 100644 --- a/docs/reference/data/dataset.md +++ b/docs/reference/data/dataset.md @@ -16,4 +16,4 @@ keywords: Ultralytics, YOLO, YOLODataset, SemanticDataset, data handling, data m ## SemanticDataset --- ### ::: ultralytics.data.dataset.SemanticDataset -

\ No newline at end of file +

diff --git a/docs/reference/data/loaders.md b/docs/reference/data/loaders.md index e257bc1..2de372e 100644 --- a/docs/reference/data/loaders.md +++ b/docs/reference/data/loaders.md @@ -41,4 +41,4 @@ keywords: Ultralytics, data loaders, LoadStreams, LoadImages, LoadTensor, YOLO, ## get_best_youtube_url --- ### ::: ultralytics.data.loaders.get_best_youtube_url -

\ No newline at end of file +

diff --git a/docs/reference/data/utils.md b/docs/reference/data/utils.md index 99a8e76..fbfb97f 100644 --- a/docs/reference/data/utils.md +++ b/docs/reference/data/utils.md @@ -71,4 +71,4 @@ keywords: Ultralytics, data utils, YOLO, img2label_paths, exif_size, polygon2mas ## autosplit --- ### ::: ultralytics.data.utils.autosplit -

\ No newline at end of file +

diff --git a/docs/reference/engine/exporter.md b/docs/reference/engine/exporter.md index 7630669..04ffead 100644 --- a/docs/reference/engine/exporter.md +++ b/docs/reference/engine/exporter.md @@ -31,4 +31,4 @@ keywords: Ultralytics, Exporter, iOSDetectModel, Export Formats, Try export ## export --- ### ::: ultralytics.engine.exporter.export -

\ No newline at end of file +

diff --git a/docs/reference/engine/model.md b/docs/reference/engine/model.md index c86ba2e..6ce7bc3 100644 --- a/docs/reference/engine/model.md +++ b/docs/reference/engine/model.md @@ -6,4 +6,4 @@ keywords: Ultralytics, YOLO, engine model, documentation, guide, implementation, ## YOLO --- ### ::: ultralytics.engine.model.YOLO -

\ No newline at end of file +

diff --git a/docs/reference/engine/predictor.md b/docs/reference/engine/predictor.md index d474060..96a2fff 100644 --- a/docs/reference/engine/predictor.md +++ b/docs/reference/engine/predictor.md @@ -6,4 +6,4 @@ keywords: Ultralytics, BasePredictor, YOLO, prediction, engine ## BasePredictor --- ### ::: ultralytics.engine.predictor.BasePredictor -

\ No newline at end of file +

diff --git a/docs/reference/engine/results.md b/docs/reference/engine/results.md index 9348338..c997279 100644 --- a/docs/reference/engine/results.md +++ b/docs/reference/engine/results.md @@ -31,4 +31,4 @@ keywords: Ultralytics, engine, results, base tensor, boxes, keypoints ## Probs --- ### ::: ultralytics.engine.results.Probs -

\ No newline at end of file +

diff --git a/docs/reference/engine/trainer.md b/docs/reference/engine/trainer.md index 89cb689..5f272ac 100644 --- a/docs/reference/engine/trainer.md +++ b/docs/reference/engine/trainer.md @@ -6,4 +6,4 @@ keywords: Ultralytics, BaseTrainer, Machine Learning, Training Control, Python l ## BaseTrainer --- ### ::: ultralytics.engine.trainer.BaseTrainer -

\ No newline at end of file +

diff --git a/docs/reference/engine/validator.md b/docs/reference/engine/validator.md index 5dce001..036b554 100644 --- a/docs/reference/engine/validator.md +++ b/docs/reference/engine/validator.md @@ -6,4 +6,4 @@ keywords: Ultralytics, BaseValidator, Ultralytics engine, module, components ## BaseValidator --- ### ::: ultralytics.engine.validator.BaseValidator -

\ No newline at end of file +

diff --git a/docs/reference/hub/__init__.md b/docs/reference/hub/__init__.md index 0dc1c3c..c7ef3b7 100644 --- a/docs/reference/hub/__init__.md +++ b/docs/reference/hub/__init__.md @@ -41,4 +41,4 @@ keywords: Ultralytics, hub functions, model export, dataset check, reset model, ## check_dataset --- ### ::: ultralytics.hub.check_dataset -

\ No newline at end of file +

diff --git a/docs/reference/hub/auth.md b/docs/reference/hub/auth.md index 95375e7..d3fdabe 100644 --- a/docs/reference/hub/auth.md +++ b/docs/reference/hub/auth.md @@ -6,4 +6,4 @@ keywords: Ultralytics, Auth, API documentation, User Authentication, AI, Machine ## Auth --- ### ::: ultralytics.hub.auth.Auth -

\ No newline at end of file +

diff --git a/docs/reference/hub/session.md b/docs/reference/hub/session.md index 9982a03..6c348bb 100644 --- a/docs/reference/hub/session.md +++ b/docs/reference/hub/session.md @@ -6,4 +6,4 @@ keywords: Ultralytics, HUBTrainingSession, Documentation, Model Training, AI, Ma ## HUBTrainingSession --- ### ::: ultralytics.hub.session.HUBTrainingSession -

\ No newline at end of file +

diff --git a/docs/reference/hub/utils.md b/docs/reference/hub/utils.md index c876ab2..219e309 100644 --- a/docs/reference/hub/utils.md +++ b/docs/reference/hub/utils.md @@ -21,4 +21,4 @@ keywords: Ultralytics, Events, request_with_credentials, smart_request, Ultralyt ## smart_request --- ### ::: ultralytics.hub.utils.smart_request -

\ No newline at end of file +

diff --git a/docs/reference/models/fastsam/model.md b/docs/reference/models/fastsam/model.md index 8c9b805..f23f8f9 100644 --- a/docs/reference/models/fastsam/model.md +++ b/docs/reference/models/fastsam/model.md @@ -6,4 +6,4 @@ keywords: Ultralytics, FastSAM model, Model documentation, Efficient model train ## FastSAM --- ### ::: ultralytics.models.fastsam.model.FastSAM -

\ No newline at end of file +

diff --git a/docs/reference/models/fastsam/predict.md b/docs/reference/models/fastsam/predict.md index 4f0faf8..6feb2ff 100644 --- a/docs/reference/models/fastsam/predict.md +++ b/docs/reference/models/fastsam/predict.md @@ -6,4 +6,4 @@ keywords: Ultralytics, FastSAMPredictor, predictive modeling, AI optimization, m ## FastSAMPredictor --- ### ::: ultralytics.models.fastsam.predict.FastSAMPredictor -

\ No newline at end of file +

diff --git a/docs/reference/models/fastsam/prompt.md b/docs/reference/models/fastsam/prompt.md index 6c116c7..e0efa09 100644 --- a/docs/reference/models/fastsam/prompt.md +++ b/docs/reference/models/fastsam/prompt.md @@ -6,4 +6,4 @@ keywords: Ultralytics, FastSAMPrompt, machine learning, model, guide, documentat ## FastSAMPrompt --- ### ::: ultralytics.models.fastsam.prompt.FastSAMPrompt -

\ No newline at end of file +

diff --git a/docs/reference/models/fastsam/utils.md b/docs/reference/models/fastsam/utils.md index d6eeee5..cc88ff1 100644 --- a/docs/reference/models/fastsam/utils.md +++ b/docs/reference/models/fastsam/utils.md @@ -11,4 +11,4 @@ keywords: Ultralytics, bounding boxes, Bboxes, image borders, object detection, ## bbox_iou --- ### ::: ultralytics.models.fastsam.utils.bbox_iou -

\ No newline at end of file +

diff --git a/docs/reference/models/fastsam/val.md b/docs/reference/models/fastsam/val.md index e28968d..899da72 100644 --- a/docs/reference/models/fastsam/val.md +++ b/docs/reference/models/fastsam/val.md @@ -6,4 +6,4 @@ keywords: Ultralytics, FastSAMValidator, model, synthetic, AI, machine learning, ## FastSAMValidator --- ### ::: ultralytics.models.fastsam.val.FastSAMValidator -

\ No newline at end of file +

diff --git a/docs/reference/models/nas/model.md b/docs/reference/models/nas/model.md index d4b8e2c..7a872f4 100644 --- a/docs/reference/models/nas/model.md +++ b/docs/reference/models/nas/model.md @@ -6,4 +6,4 @@ keywords: Ultralytics, NAS model, NAS guide, machine learning, model documentati ## NAS --- ### ::: ultralytics.models.nas.model.NAS -

\ No newline at end of file +

diff --git a/docs/reference/models/nas/predict.md b/docs/reference/models/nas/predict.md index 2828009..6d9732b 100644 --- a/docs/reference/models/nas/predict.md +++ b/docs/reference/models/nas/predict.md @@ -6,4 +6,4 @@ keywords: NASPredictor, Ultralytics, Ultralytics model, model architecture, effi ## NASPredictor --- ### ::: ultralytics.models.nas.predict.NASPredictor -

\ No newline at end of file +

diff --git a/docs/reference/models/nas/val.md b/docs/reference/models/nas/val.md index daf8c01..a005d07 100644 --- a/docs/reference/models/nas/val.md +++ b/docs/reference/models/nas/val.md @@ -6,4 +6,4 @@ keywords: Ultralytics, NASValidator, models.nas.val.NASValidator, AI models, all ## NASValidator --- ### ::: ultralytics.models.nas.val.NASValidator -

\ No newline at end of file +

diff --git a/docs/reference/models/rtdetr/model.md b/docs/reference/models/rtdetr/model.md index 6495901..d9f5157 100644 --- a/docs/reference/models/rtdetr/model.md +++ b/docs/reference/models/rtdetr/model.md @@ -6,4 +6,4 @@ keywords: Ultralytics, RTDETR model, Ultralytics models, object detection, Ultra ## RTDETR --- ### ::: ultralytics.models.rtdetr.model.RTDETR -

\ No newline at end of file +

diff --git a/docs/reference/models/rtdetr/predict.md b/docs/reference/models/rtdetr/predict.md index b353eef..65ceb74 100644 --- a/docs/reference/models/rtdetr/predict.md +++ b/docs/reference/models/rtdetr/predict.md @@ -6,4 +6,4 @@ keywords: Ultralytics, RTDETRPredictor, model documentation, guide, real-time ob ## RTDETRPredictor --- ### ::: ultralytics.models.rtdetr.predict.RTDETRPredictor -

\ No newline at end of file +

diff --git a/docs/reference/models/rtdetr/train.md b/docs/reference/models/rtdetr/train.md index 4826c33..02540f1 100644 --- a/docs/reference/models/rtdetr/train.md +++ b/docs/reference/models/rtdetr/train.md @@ -11,4 +11,4 @@ keywords: Ultralytics, RTDETRTrainer, model training, Ultralytics models, PyTorc ## train --- ### ::: ultralytics.models.rtdetr.train.train -

\ No newline at end of file +

diff --git a/docs/reference/models/rtdetr/val.md b/docs/reference/models/rtdetr/val.md index dab038e..489efe2 100644 --- a/docs/reference/models/rtdetr/val.md +++ b/docs/reference/models/rtdetr/val.md @@ -11,4 +11,4 @@ keywords: Ultralytics, RTDETRDataset, RTDETRValidator, real-time object detectio ## RTDETRValidator --- ### ::: ultralytics.models.rtdetr.val.RTDETRValidator -

\ No newline at end of file +

diff --git a/docs/reference/models/sam/amg.md b/docs/reference/models/sam/amg.md index aa10d3a..844020d 100644 --- a/docs/reference/models/sam/amg.md +++ b/docs/reference/models/sam/amg.md @@ -86,4 +86,4 @@ keywords: Ultralytics, Mask Data, Transformation, Encoding, RLE encoding, Image ## batched_mask_to_box --- ### ::: ultralytics.models.sam.amg.batched_mask_to_box -

\ No newline at end of file +

diff --git a/docs/reference/models/sam/build.md b/docs/reference/models/sam/build.md index e6f9b03..c5a8035 100644 --- a/docs/reference/models/sam/build.md +++ b/docs/reference/models/sam/build.md @@ -31,4 +31,4 @@ keywords: Ultralytics, SAM, build sam, vision transformer, vits, build_sam_vit_l ## build_sam --- ### ::: ultralytics.models.sam.build.build_sam -

\ No newline at end of file +

diff --git a/docs/reference/models/sam/model.md b/docs/reference/models/sam/model.md index 22d9771..3bbc85e 100644 --- a/docs/reference/models/sam/model.md +++ b/docs/reference/models/sam/model.md @@ -6,4 +6,4 @@ keywords: Ultralytics, YOLO, SAM Model, Documentations, Machine Learning, AI, Co ## SAM --- ### ::: ultralytics.models.sam.model.SAM -

\ No newline at end of file +

diff --git a/docs/reference/models/sam/modules/decoders.md b/docs/reference/models/sam/modules/decoders.md index b256cd3..764e154 100644 --- a/docs/reference/models/sam/modules/decoders.md +++ b/docs/reference/models/sam/modules/decoders.md @@ -11,4 +11,4 @@ keywords: Ultralytics, MaskDecoder, SAM modules, decoders, MLP, YOLO, machine le ## MLP --- ### ::: ultralytics.models.sam.modules.decoders.MLP -

\ No newline at end of file +

diff --git a/docs/reference/models/sam/modules/encoders.md b/docs/reference/models/sam/modules/encoders.md index 9127c69..b57b0e7 100644 --- a/docs/reference/models/sam/modules/encoders.md +++ b/docs/reference/models/sam/modules/encoders.md @@ -51,4 +51,4 @@ keywords: Ultralytics, Encoders, Modules, Documentation, ImageEncoderViT, Positi ## add_decomposed_rel_pos --- ### ::: ultralytics.models.sam.modules.encoders.add_decomposed_rel_pos -

\ No newline at end of file +

diff --git a/docs/reference/models/sam/modules/sam.md b/docs/reference/models/sam/modules/sam.md index 719e5ed..353aaeb 100644 --- a/docs/reference/models/sam/modules/sam.md +++ b/docs/reference/models/sam/modules/sam.md @@ -6,4 +6,4 @@ keywords: Ultralytics, Sam module, deep learning, model training, Ultralytics do ## Sam --- ### ::: ultralytics.models.sam.modules.sam.Sam -

\ No newline at end of file +

diff --git a/docs/reference/models/sam/modules/tiny_encoder.md b/docs/reference/models/sam/modules/tiny_encoder.md index c0a45f5..0225b67 100644 --- a/docs/reference/models/sam/modules/tiny_encoder.md +++ b/docs/reference/models/sam/modules/tiny_encoder.md @@ -56,4 +56,4 @@ keywords: Ultralytics, Tiny Encoder, Conv2d_BN, MBConv, ConvLayer, Attention, Ba ## TinyViT --- ### ::: ultralytics.models.sam.modules.tiny_encoder.TinyViT -

\ No newline at end of file +

diff --git a/docs/reference/models/sam/modules/transformer.md b/docs/reference/models/sam/modules/transformer.md index 71f5262..62a0b1c 100644 --- a/docs/reference/models/sam/modules/transformer.md +++ b/docs/reference/models/sam/modules/transformer.md @@ -16,4 +16,4 @@ keywords: Ultralytics, TwoWayTransformer, Attention, AI models, transformers ## Attention --- ### ::: ultralytics.models.sam.modules.transformer.Attention -

\ No newline at end of file +

diff --git a/docs/reference/models/sam/predict.md b/docs/reference/models/sam/predict.md index ed9e190..2722f39 100644 --- a/docs/reference/models/sam/predict.md +++ b/docs/reference/models/sam/predict.md @@ -6,4 +6,4 @@ keywords: Ultralytics, predictor, models, sam.predict.Predictor, AI, machine lea ## Predictor --- ### ::: ultralytics.models.sam.predict.Predictor -

\ No newline at end of file +

diff --git a/docs/reference/models/utils/loss.md b/docs/reference/models/utils/loss.md index c3e51a4..41c8f39 100644 --- a/docs/reference/models/utils/loss.md +++ b/docs/reference/models/utils/loss.md @@ -11,4 +11,4 @@ keywords: Ultralytics, YOLO, Documentation, DETRLoss, Detection Loss, Loss funct ## RTDETRDetectionLoss --- ### ::: ultralytics.models.utils.loss.RTDETRDetectionLoss -

\ No newline at end of file +

diff --git a/docs/reference/models/utils/ops.md b/docs/reference/models/utils/ops.md index ecca6ba..290b237 100644 --- a/docs/reference/models/utils/ops.md +++ b/docs/reference/models/utils/ops.md @@ -16,4 +16,4 @@ keywords: Ultralytics, YOLO, HungarianMatcher, inverse_sigmoid, detection models ## inverse_sigmoid --- ### ::: ultralytics.models.utils.ops.inverse_sigmoid -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/classify/predict.md b/docs/reference/models/yolo/classify/predict.md index cc344a5..81a8d60 100644 --- a/docs/reference/models/yolo/classify/predict.md +++ b/docs/reference/models/yolo/classify/predict.md @@ -11,4 +11,4 @@ keywords: Ultralytics, classification predictor, predict, YOLO, AI models, model ## predict --- ### ::: ultralytics.models.yolo.classify.predict.predict -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/classify/train.md b/docs/reference/models/yolo/classify/train.md index 02034f6..567c22f 100644 --- a/docs/reference/models/yolo/classify/train.md +++ b/docs/reference/models/yolo/classify/train.md @@ -11,4 +11,4 @@ keywords: Ultralytics, YOLO, Classification Trainer, deep learning, training pro ## train --- ### ::: ultralytics.models.yolo.classify.train.train -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/classify/val.md b/docs/reference/models/yolo/classify/val.md index 18a3889..860b071 100644 --- a/docs/reference/models/yolo/classify/val.md +++ b/docs/reference/models/yolo/classify/val.md @@ -11,4 +11,4 @@ keywords: Ultralytics, YOLO, ClassificationValidator, model validation, model fi ## val --- ### ::: ultralytics.models.yolo.classify.val.val -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/detect/predict.md b/docs/reference/models/yolo/detect/predict.md index b7e8bf1..1c5c869 100644 --- a/docs/reference/models/yolo/detect/predict.md +++ b/docs/reference/models/yolo/detect/predict.md @@ -11,4 +11,4 @@ keywords: Ultralytics, YOLO, DetectionPredictor, detect, predict, object detecti ## predict --- ### ::: ultralytics.models.yolo.detect.predict.predict -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/detect/train.md b/docs/reference/models/yolo/detect/train.md index 1ef7ca8..efe2d8b 100644 --- a/docs/reference/models/yolo/detect/train.md +++ b/docs/reference/models/yolo/detect/train.md @@ -11,4 +11,4 @@ keywords: Ultralytics YOLO, YOLO, Detection Trainer, Model Training, Machine Lea ## train --- ### ::: ultralytics.models.yolo.detect.train.train -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/detect/val.md b/docs/reference/models/yolo/detect/val.md index 849d8ab..cab8dfb 100644 --- a/docs/reference/models/yolo/detect/val.md +++ b/docs/reference/models/yolo/detect/val.md @@ -11,4 +11,4 @@ keywords: Ultralytics, YOLO, Detection Validator, model valuation, precision, re ## val --- ### ::: ultralytics.models.yolo.detect.val.val -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/pose/predict.md b/docs/reference/models/yolo/pose/predict.md index c9952c6..5e6b50e 100644 --- a/docs/reference/models/yolo/pose/predict.md +++ b/docs/reference/models/yolo/pose/predict.md @@ -11,4 +11,4 @@ keywords: Ultralytics, YOLO, PosePredictor, machine learning, AI, predictive mod ## predict --- ### ::: ultralytics.models.yolo.pose.predict.predict -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/pose/train.md b/docs/reference/models/yolo/pose/train.md index fc53f7d..b3fb1b7 100644 --- a/docs/reference/models/yolo/pose/train.md +++ b/docs/reference/models/yolo/pose/train.md @@ -11,4 +11,4 @@ keywords: Ultralytics, YOLO, PoseTrainer, pose training, AI modeling, custom dat ## train --- ### ::: ultralytics.models.yolo.pose.train.train -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/pose/val.md b/docs/reference/models/yolo/pose/val.md index 6473ad9..1084a97 100644 --- a/docs/reference/models/yolo/pose/val.md +++ b/docs/reference/models/yolo/pose/val.md @@ -11,4 +11,4 @@ keywords: PoseValidator, Ultralytics, YOLO, Object detection, Pose validation ## val --- ### ::: ultralytics.models.yolo.pose.val.val -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/segment/predict.md b/docs/reference/models/yolo/segment/predict.md index 355cc02..5b23755 100644 --- a/docs/reference/models/yolo/segment/predict.md +++ b/docs/reference/models/yolo/segment/predict.md @@ -11,4 +11,4 @@ keywords: YOLO, Ultralytics, object detection, segmentation predictor ## predict --- ### ::: ultralytics.models.yolo.segment.predict.predict -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/segment/train.md b/docs/reference/models/yolo/segment/train.md index 7509214..ec4830b 100644 --- a/docs/reference/models/yolo/segment/train.md +++ b/docs/reference/models/yolo/segment/train.md @@ -11,4 +11,4 @@ keywords: Ultralytics, YOLO, SegmentationTrainer, image segmentation, object det ## train --- ### ::: ultralytics.models.yolo.segment.train.train -

\ No newline at end of file +

diff --git a/docs/reference/models/yolo/segment/val.md b/docs/reference/models/yolo/segment/val.md index 627c76f..eef0922 100644 --- a/docs/reference/models/yolo/segment/val.md +++ b/docs/reference/models/yolo/segment/val.md @@ -11,4 +11,4 @@ keywords: Ultralytics, YOLO, SegmentationValidator, model segmentation, image cl ## val --- ### ::: ultralytics.models.yolo.segment.val.val -

\ No newline at end of file +

diff --git a/docs/reference/nn/autobackend.md b/docs/reference/nn/autobackend.md index 4576dbf..6dc601d 100644 --- a/docs/reference/nn/autobackend.md +++ b/docs/reference/nn/autobackend.md @@ -11,4 +11,4 @@ keywords: Ultralytics, AutoBackend, check_class_names, YOLO, YOLO models, optimi ## check_class_names --- ### ::: ultralytics.nn.autobackend.check_class_names -

\ No newline at end of file +

diff --git a/docs/reference/nn/modules/block.md b/docs/reference/nn/modules/block.md index 91551aa..ee62389 100644 --- a/docs/reference/nn/modules/block.md +++ b/docs/reference/nn/modules/block.md @@ -86,4 +86,4 @@ keywords: YOLO, Ultralytics, neural network, nn.modules.block, Proto, HGBlock, S ## BottleneckCSP --- ### ::: ultralytics.nn.modules.block.BottleneckCSP -

\ No newline at end of file +

diff --git a/docs/reference/nn/modules/conv.md b/docs/reference/nn/modules/conv.md index c64faf6..66e3571 100644 --- a/docs/reference/nn/modules/conv.md +++ b/docs/reference/nn/modules/conv.md @@ -71,4 +71,4 @@ keywords: Ultralytics, Convolution Modules, Conv2, DWConv, ConvTranspose, GhostC ## autopad --- ### ::: ultralytics.nn.modules.conv.autopad -

\ No newline at end of file +

diff --git a/docs/reference/nn/modules/head.md b/docs/reference/nn/modules/head.md index 74a3a87..51565ea 100644 --- a/docs/reference/nn/modules/head.md +++ b/docs/reference/nn/modules/head.md @@ -26,4 +26,4 @@ keywords: Ultralytics, YOLO, Detection, Pose, RTDETRDecoder, nn modules, guides ## RTDETRDecoder --- ### ::: ultralytics.nn.modules.head.RTDETRDecoder -

\ No newline at end of file +

diff --git a/docs/reference/nn/modules/transformer.md b/docs/reference/nn/modules/transformer.md index ebdb8ee..8f5f724 100644 --- a/docs/reference/nn/modules/transformer.md +++ b/docs/reference/nn/modules/transformer.md @@ -51,4 +51,4 @@ keywords: Ultralytics, Ultralytics documentation, TransformerEncoderLayer, Trans ## DeformableTransformerDecoder --- ### ::: ultralytics.nn.modules.transformer.DeformableTransformerDecoder -

\ No newline at end of file +

diff --git a/docs/reference/nn/modules/utils.md b/docs/reference/nn/modules/utils.md index fabb9ba..e3212c2 100644 --- a/docs/reference/nn/modules/utils.md +++ b/docs/reference/nn/modules/utils.md @@ -26,4 +26,4 @@ keywords: Ultralytics, neural network, nn.modules.utils, bias_init_with_prob, in ## multi_scale_deformable_attn_pytorch --- ### ::: ultralytics.nn.modules.utils.multi_scale_deformable_attn_pytorch -

\ No newline at end of file +

diff --git a/docs/reference/nn/tasks.md b/docs/reference/nn/tasks.md
index d96af09..8285fa4 100644
--- a/docs/reference/nn/tasks.md
+++ b/docs/reference/nn/tasks.md
@@ -1,8 +1,3 @@
----
-description: Explore Ultralytics YOLO docs to understand task-specific models like DetectionModel, PoseModel, RTDETRDetectionModel and more. Plus, learn about ensemble, model loading.
-keywords: Ultralytics, YOLO docs, DetectionModel, SegmentationModel, ClassificationModel, Ensemble, torch_safe_load, yaml_model_load, guess_model_task
----
-
 ## BaseModel
 ---
 ### ::: ultralytics.nn.tasks.BaseModel
@@ -76,4 +71,4 @@ keywords: Ultralytics, YOLO docs, DetectionModel, SegmentationModel, Classificat
 ## guess_model_task
 ---
 ### ::: ultralytics.nn.tasks.guess_model_task
-

\ No newline at end of file +

diff --git a/docs/reference/trackers/basetrack.md b/docs/reference/trackers/basetrack.md index f157a78..323c5d7 100644 --- a/docs/reference/trackers/basetrack.md +++ b/docs/reference/trackers/basetrack.md @@ -11,4 +11,4 @@ keywords: Ultralytics, TrackState, BaseTrack, Ultralytics tracker, Ultralytics d ## BaseTrack --- ### ::: ultralytics.trackers.basetrack.BaseTrack -

\ No newline at end of file +

diff --git a/docs/reference/trackers/bot_sort.md b/docs/reference/trackers/bot_sort.md index 0e5410a..dae14e4 100644 --- a/docs/reference/trackers/bot_sort.md +++ b/docs/reference/trackers/bot_sort.md @@ -11,4 +11,4 @@ keywords: Ultralytics, BOTSORT, BOTrack, tracking system, official documentation ## BOTSORT --- ### ::: ultralytics.trackers.bot_sort.BOTSORT -

\ No newline at end of file +

diff --git a/docs/reference/trackers/byte_tracker.md b/docs/reference/trackers/byte_tracker.md index b7bac9d..cd9e1b6 100644 --- a/docs/reference/trackers/byte_tracker.md +++ b/docs/reference/trackers/byte_tracker.md @@ -11,4 +11,4 @@ keywords: STrack, Ultralytics, BYTETracker, documentation, Ultralytics tracker, ## BYTETracker --- ### ::: ultralytics.trackers.byte_tracker.BYTETracker -

\ No newline at end of file +

diff --git a/docs/reference/trackers/track.md b/docs/reference/trackers/track.md index 14cb5c7..9f517f8 100644 --- a/docs/reference/trackers/track.md +++ b/docs/reference/trackers/track.md @@ -16,4 +16,4 @@ keywords: Ultralytics, YOLO, on predict start, register tracker, prediction func ## register_tracker --- ### ::: ultralytics.trackers.track.register_tracker -

\ No newline at end of file +

diff --git a/docs/reference/trackers/utils/gmc.md b/docs/reference/trackers/utils/gmc.md index 63b56f0..9ca814a 100644 --- a/docs/reference/trackers/utils/gmc.md +++ b/docs/reference/trackers/utils/gmc.md @@ -6,4 +6,4 @@ keywords: Ultralytics, GMC utility, Ultralytics documentation, Ultralytics track ## GMC --- ### ::: ultralytics.trackers.utils.gmc.GMC -

\ No newline at end of file +

diff --git a/docs/reference/trackers/utils/kalman_filter.md b/docs/reference/trackers/utils/kalman_filter.md index cad52ba..b3ad228 100644 --- a/docs/reference/trackers/utils/kalman_filter.md +++ b/docs/reference/trackers/utils/kalman_filter.md @@ -11,4 +11,4 @@ keywords: Ultralytics, KalmanFilterXYAH, tracker, documentation, guide ## KalmanFilterXYWH --- ### ::: ultralytics.trackers.utils.kalman_filter.KalmanFilterXYWH -

\ No newline at end of file +

diff --git a/docs/reference/trackers/utils/matching.md b/docs/reference/trackers/utils/matching.md index 60e96d8..4b9a0bb 100644 --- a/docs/reference/trackers/utils/matching.md +++ b/docs/reference/trackers/utils/matching.md @@ -61,4 +61,4 @@ keywords: Ultralytics, Trackers Utils, Matching, merge_matches, linear_assignmen ## bbox_ious --- ### ::: ultralytics.trackers.utils.matching.bbox_ious -

\ No newline at end of file +

diff --git a/docs/reference/utils/__init__.md b/docs/reference/utils/__init__.md index ec01602..3835ef1 100644 --- a/docs/reference/utils/__init__.md +++ b/docs/reference/utils/__init__.md @@ -28,6 +28,11 @@ keywords: Ultralytics, Utils, utilitarian functions, colorstr, yaml_save, set_lo ### ::: ultralytics.utils.TryExcept

+## SettingsManager
+---
+### ::: ultralytics.utils.SettingsManager
+

+ ## plt_settings --- ### ::: ultralytics.utils.plt_settings @@ -148,14 +153,9 @@ keywords: Ultralytics, Utils, utilitarian functions, colorstr, yaml_save, set_lo ### ::: ultralytics.utils.set_sentry

-## get_settings
+## update_dict_recursive
 ---
-### ::: ultralytics.utils.get_settings
-
-
-## set_settings
----
-### ::: ultralytics.utils.set_settings
+### ::: ultralytics.utils.update_dict_recursive

## deprecation_warn @@ -171,4 +171,4 @@ keywords: Ultralytics, Utils, utilitarian functions, colorstr, yaml_save, set_lo ## url2file --- ### ::: ultralytics.utils.url2file -

\ No newline at end of file +

diff --git a/docs/reference/utils/autobatch.md b/docs/reference/utils/autobatch.md index d708303..a6c7be2 100644 --- a/docs/reference/utils/autobatch.md +++ b/docs/reference/utils/autobatch.md @@ -11,4 +11,4 @@ keywords: Ultralytics, check_train_batch_size, autobatch, utility, machine learn ## autobatch --- ### ::: ultralytics.utils.autobatch.autobatch -

\ No newline at end of file +

diff --git a/docs/reference/utils/benchmarks.md b/docs/reference/utils/benchmarks.md index bde28cd..672d373 100644 --- a/docs/reference/utils/benchmarks.md +++ b/docs/reference/utils/benchmarks.md @@ -11,4 +11,4 @@ keywords: Ultralytics, ProfileModels, benchmarks, model profiling, performance o ## benchmark --- ### ::: ultralytics.utils.benchmarks.benchmark -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/base.md b/docs/reference/utils/callbacks/base.md index e832a07..a491155 100644 --- a/docs/reference/utils/callbacks/base.md +++ b/docs/reference/utils/callbacks/base.md @@ -136,4 +136,4 @@ keywords: Ultralytics, Callbacks, On-train, On-validation, On-pretrain, On-predi ## add_integration_callbacks --- ### ::: ultralytics.utils.callbacks.base.add_integration_callbacks -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/clearml.md b/docs/reference/utils/callbacks/clearml.md index 7657c34..42f097c 100644 --- a/docs/reference/utils/callbacks/clearml.md +++ b/docs/reference/utils/callbacks/clearml.md @@ -36,4 +36,4 @@ keywords: Ultralytics, clearML, callbacks, pretrain routine start, validation en ## on_train_end --- ### ::: ultralytics.utils.callbacks.clearml.on_train_end -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/comet.md b/docs/reference/utils/callbacks/comet.md index 96f31b1..fa7af61 100644 --- a/docs/reference/utils/callbacks/comet.md +++ b/docs/reference/utils/callbacks/comet.md @@ -121,4 +121,4 @@ keywords: Ultralytics, Comet Callbacks, Training optimisation, Logging, Experime ## on_train_end --- ### ::: ultralytics.utils.callbacks.comet.on_train_end -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/dvc.md b/docs/reference/utils/callbacks/dvc.md index 638a085..7e32912 100644 --- a/docs/reference/utils/callbacks/dvc.md +++ b/docs/reference/utils/callbacks/dvc.md @@ -51,4 +51,4 @@ keywords: Ultralytics, YOLO, callbacks, logger, training, pretraining, machine l ## on_train_end --- ### ::: ultralytics.utils.callbacks.dvc.on_train_end -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/hub.md b/docs/reference/utils/callbacks/hub.md index eb16dd8..f8dba4d 100644 --- a/docs/reference/utils/callbacks/hub.md +++ b/docs/reference/utils/callbacks/hub.md @@ -41,4 +41,4 @@ keywords: Ultralytics, callbacks, on_pretrain_routine_end, on_model_save, on_tra ## on_export_start --- ### ::: ultralytics.utils.callbacks.hub.on_export_start -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/mlflow.md b/docs/reference/utils/callbacks/mlflow.md index 90d2dc0..a5f95c1 100644 --- a/docs/reference/utils/callbacks/mlflow.md +++ b/docs/reference/utils/callbacks/mlflow.md @@ -16,4 +16,4 @@ keywords: Ultralytics, MLflow, Callbacks, on_pretrain_routine_end, on_train_end, ## on_train_end --- ### ::: ultralytics.utils.callbacks.mlflow.on_train_end -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/neptune.md b/docs/reference/utils/callbacks/neptune.md index 0cdb4b5..92de1f5 100644 --- a/docs/reference/utils/callbacks/neptune.md +++ b/docs/reference/utils/callbacks/neptune.md @@ -41,4 +41,4 @@ keywords: Ultralytics, Neptune callbacks, on_train_epoch_end, on_val_end, _log_p ## on_train_end --- ### ::: ultralytics.utils.callbacks.neptune.on_train_end -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/raytune.md b/docs/reference/utils/callbacks/raytune.md index 61cb7a8..dc7a2a1 100644 --- a/docs/reference/utils/callbacks/raytune.md +++ b/docs/reference/utils/callbacks/raytune.md @@ -6,4 +6,4 @@ keywords: Ultralytics, YOLO, on_fit_epoch_end, callbacks, documentation, deep le ## on_fit_epoch_end --- ### ::: ultralytics.utils.callbacks.raytune.on_fit_epoch_end -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/tensorboard.md b/docs/reference/utils/callbacks/tensorboard.md index ba45245..d6b494d 100644 --- a/docs/reference/utils/callbacks/tensorboard.md +++ b/docs/reference/utils/callbacks/tensorboard.md @@ -21,4 +21,4 @@ keywords: Ultralytics, YOLO, documentation, callback utilities, log_scalars, on_ ## on_fit_epoch_end --- ### ::: ultralytics.utils.callbacks.tensorboard.on_fit_epoch_end -

\ No newline at end of file +

diff --git a/docs/reference/utils/callbacks/wb.md b/docs/reference/utils/callbacks/wb.md index e9eac55..17a8a92 100644 --- a/docs/reference/utils/callbacks/wb.md +++ b/docs/reference/utils/callbacks/wb.md @@ -26,4 +26,4 @@ keywords: Ultralytics, callbacks, _log_plots, on_fit_epoch_end, on_train_end ## on_train_end --- ### ::: ultralytics.utils.callbacks.wb.on_train_end -

\ No newline at end of file +

diff --git a/docs/reference/utils/checks.md b/docs/reference/utils/checks.md index 65a55c5..3161113 100644 --- a/docs/reference/utils/checks.md +++ b/docs/reference/utils/checks.md @@ -91,4 +91,4 @@ keywords: Ultralytics, utility checks, ASCII, check_version, pip_update, check_p ## print_args --- ### ::: ultralytics.utils.checks.print_args -

\ No newline at end of file +

diff --git a/docs/reference/utils/dist.md b/docs/reference/utils/dist.md index c3020a9..325c52c 100644 --- a/docs/reference/utils/dist.md +++ b/docs/reference/utils/dist.md @@ -21,4 +21,4 @@ keywords: Ultralytics, DDP, DDP utility functions, Distributed Data Processing, ## ddp_cleanup --- ### ::: ultralytics.utils.dist.ddp_cleanup -

\ No newline at end of file +

diff --git a/docs/reference/utils/downloads.md b/docs/reference/utils/downloads.md index dba1f57..9abfd03 100644 --- a/docs/reference/utils/downloads.md +++ b/docs/reference/utils/downloads.md @@ -36,4 +36,4 @@ keywords: Ultralytics, YOLO, download utilities, is_url, check_disk_space, get_g ## download --- ### ::: ultralytics.utils.downloads.download -

\ No newline at end of file +

diff --git a/docs/reference/utils/errors.md b/docs/reference/utils/errors.md index 6842c4e..2c21c5f 100644 --- a/docs/reference/utils/errors.md +++ b/docs/reference/utils/errors.md @@ -6,4 +6,4 @@ keywords: Ultralytics, HUBModelError, Machine Learning, Error troubleshooting, U ## HUBModelError --- ### ::: ultralytics.utils.errors.HUBModelError -

\ No newline at end of file +

diff --git a/docs/reference/utils/files.md b/docs/reference/utils/files.md index 31f63d7..677c14d 100644 --- a/docs/reference/utils/files.md +++ b/docs/reference/utils/files.md @@ -8,6 +8,11 @@ keywords: Ultralytics, utility functions, file operations, working directory, fi ### ::: ultralytics.utils.files.WorkingDirectory

+## spaces_in_path
+---
+### ::: ultralytics.utils.files.spaces_in_path
+

+ ## increment_path --- ### ::: ultralytics.utils.files.increment_path @@ -36,4 +41,4 @@ keywords: Ultralytics, utility functions, file operations, working directory, fi ## make_dirs --- ### ::: ultralytics.utils.files.make_dirs -

\ No newline at end of file +

diff --git a/docs/reference/utils/instance.md b/docs/reference/utils/instance.md index 64922ec..35d4eda 100644 --- a/docs/reference/utils/instance.md +++ b/docs/reference/utils/instance.md @@ -16,4 +16,4 @@ keywords: Ultralytics, Bboxes, _ntuple, utility, ultralytics utils.instance ## _ntuple --- ### ::: ultralytics.utils.instance._ntuple -

\ No newline at end of file +

diff --git a/docs/reference/utils/loss.md b/docs/reference/utils/loss.md index b55b881..b4aceb9 100644 --- a/docs/reference/utils/loss.md +++ b/docs/reference/utils/loss.md @@ -41,4 +41,4 @@ keywords: Ultralytics, Loss functions, VarifocalLoss, BboxLoss, v8DetectionLoss, ## v8ClassificationLoss --- ### ::: ultralytics.utils.loss.v8ClassificationLoss -

\ No newline at end of file +

diff --git a/docs/reference/utils/metrics.md b/docs/reference/utils/metrics.md index 788a34f..0fe138c 100644 --- a/docs/reference/utils/metrics.md +++ b/docs/reference/utils/metrics.md @@ -91,4 +91,4 @@ keywords: Ultralytics, YOLO, YOLOv3, YOLOv4, metrics, confusion matrix, detectio ## ap_per_class --- ### ::: ultralytics.utils.metrics.ap_per_class -

\ No newline at end of file +

diff --git a/docs/reference/utils/ops.md b/docs/reference/utils/ops.md index fa272d5..3e77626 100644 --- a/docs/reference/utils/ops.md +++ b/docs/reference/utils/ops.md @@ -141,4 +141,4 @@ keywords: Ultralytics YOLO, Utility Operations, segment2box, make_divisible, cli ## clean_str --- ### ::: ultralytics.utils.ops.clean_str -

\ No newline at end of file +

diff --git a/docs/reference/utils/patches.md b/docs/reference/utils/patches.md index fae2cc7..2dfdf9d 100644 --- a/docs/reference/utils/patches.md +++ b/docs/reference/utils/patches.md @@ -21,4 +21,4 @@ keywords: Ultralytics, Utils, Patches, imread, imshow, torch_save, image process ## torch_save --- ### ::: ultralytics.utils.patches.torch_save -

\ No newline at end of file +

diff --git a/docs/reference/utils/plotting.md b/docs/reference/utils/plotting.md index 9497689..f19bbc6 100644 --- a/docs/reference/utils/plotting.md +++ b/docs/reference/utils/plotting.md @@ -41,4 +41,4 @@ keywords: Ultralytics, plotting, utils, color annotation, label plotting, image ## feature_visualization --- ### ::: ultralytics.utils.plotting.feature_visualization -

\ No newline at end of file +

diff --git a/docs/reference/utils/tal.md b/docs/reference/utils/tal.md index 641ea97..b277c3e 100644 --- a/docs/reference/utils/tal.md +++ b/docs/reference/utils/tal.md @@ -31,4 +31,4 @@ keywords: Ultralytics, task aligned assigner, select highest overlaps, make anch ## bbox2dist --- ### ::: ultralytics.utils.tal.bbox2dist -

\ No newline at end of file +

diff --git a/docs/reference/utils/torch_utils.md b/docs/reference/utils/torch_utils.md index cf93c50..1a2188a 100644 --- a/docs/reference/utils/torch_utils.md +++ b/docs/reference/utils/torch_utils.md @@ -136,4 +136,4 @@ keywords: Ultralytics, Torch Utils, Model EMA, Early Stopping, Smart Inference, ## profile --- ### ::: ultralytics.utils.torch_utils.profile -

\ No newline at end of file +

diff --git a/docs/reference/utils/tuner.md b/docs/reference/utils/tuner.md index f5310cc..58c1f72 100644 --- a/docs/reference/utils/tuner.md +++ b/docs/reference/utils/tuner.md @@ -6,4 +6,4 @@ keywords: Ultralytics, run_ray_tune, machine learning tuning, machine learning e ## run_ray_tune --- ### ::: ultralytics.utils.tuner.run_ray_tune -

\ No newline at end of file +

diff --git a/docs/stylesheets/style.css b/docs/stylesheets/style.css index e5259c2..31a529b 100644 --- a/docs/stylesheets/style.css +++ b/docs/stylesheets/style.css @@ -36,4 +36,4 @@ th, td { div.highlight { max-height: 20rem; overflow-y: auto; /* for adding a scrollbar when needed */ -} \ No newline at end of file +} diff --git a/docs/tasks/classify.md b/docs/tasks/classify.md index ae0a424..0c28b78 100644 --- a/docs/tasks/classify.md +++ b/docs/tasks/classify.md @@ -49,15 +49,15 @@ see the [Configuration](../usage/cfg.md) page. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-cls.yaml') # build a new model from YAML model = YOLO('yolov8n-cls.pt') # load a pretrained model (recommended for training) model = YOLO('yolov8n-cls.yaml').load('yolov8n-cls.pt') # build from YAML and transfer weights - + # Train the model model.train(data='mnist160', epochs=100, imgsz=64) ``` @@ -87,21 +87,21 @@ it's training `data` and arguments as model attributes. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-cls.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom model - + # Validate the model metrics = model.val() # no arguments needed, dataset and settings remembered metrics.top1 # top1 accuracy metrics.top5 # top5 accuracy ``` === "CLI" - + ```bash yolo classify val model=yolov8n-cls.pt # val official model yolo classify val model=path/to/best.pt # val custom model @@ -114,19 +114,19 @@ Use a trained YOLOv8n-cls model to run predictions on images. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-cls.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom model - + # Predict with the model results = model('https://ultralytics.com/images/bus.jpg') # predict on an image ``` === "CLI" - + ```bash yolo classify predict model=yolov8n-cls.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo classify predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model @@ -141,19 +141,19 @@ Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-cls.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom trained - + # Export the model model.export(format='onnx') ``` === "CLI" - + ```bash yolo export model=yolov8n-cls.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model @@ -178,4 +178,4 @@ i.e. `yolo predict model=yolov8n-cls.onnx`. Usage examples are shown for your mo | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-cls_paddle_model/` | ✅ | `imgsz` | | [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n-cls_ncnn_model/` | ✅ | `imgsz`, `half` | -See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. \ No newline at end of file +See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. diff --git a/docs/tasks/detect.md b/docs/tasks/detect.md index e649872..df412a5 100644 --- a/docs/tasks/detect.md +++ b/docs/tasks/detect.md @@ -41,20 +41,20 @@ Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. For a ful !!! 
example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.yaml') # build a new model from YAML model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) model = YOLO('yolov8n.yaml').load('yolov8n.pt') # build from YAML and transfer weights - + # Train the model model.train(data='coco128.yaml', epochs=100, imgsz=640) ``` === "CLI" - + ```bash # Build a new model from YAML and start training from scratch yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640 @@ -77,14 +77,14 @@ Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom model - + # Validate the model metrics = model.val() # no arguments needed, dataset and settings remembered metrics.box.map # map50-95 @@ -93,7 +93,7 @@ Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need metrics.box.maps # a list contains map50-95 of each category ``` === "CLI" - + ```bash yolo detect val model=yolov8n.pt # val official model yolo detect val model=path/to/best.pt # val custom model @@ -106,19 +106,19 @@ Use a trained YOLOv8n model to run predictions on images. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom model - + # Predict with the model results = model('https://ultralytics.com/images/bus.jpg') # predict on an image ``` === "CLI" - + ```bash yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model @@ -133,19 +133,19 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom trained - + # Export the model model.export(format='onnx') ``` === "CLI" - + ```bash yolo export model=yolov8n.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model @@ -169,4 +169,4 @@ Available YOLOv8 export formats are in the table below. You can predict or valid | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz` | | [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half` | -See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. \ No newline at end of file +See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. diff --git a/docs/tasks/index.md b/docs/tasks/index.md index 111635a..43cdb37 100644 --- a/docs/tasks/index.md +++ b/docs/tasks/index.md @@ -48,4 +48,4 @@ video frame with high accuracy and speed. YOLOv8 supports multiple tasks, including detection, segmentation, classification, and keypoints detection. Each of these tasks has different objectives and use cases. By understanding the differences between these tasks, you can choose -the appropriate task for your computer vision application. \ No newline at end of file +the appropriate task for your computer vision application. 
diff --git a/docs/tasks/pose.md b/docs/tasks/pose.md index 062c554..4ea4428 100644 --- a/docs/tasks/pose.md +++ b/docs/tasks/pose.md @@ -52,20 +52,20 @@ Train a YOLOv8-pose model on the COCO128-pose dataset. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-pose.yaml') # build a new model from YAML model = YOLO('yolov8n-pose.pt') # load a pretrained model (recommended for training) model = YOLO('yolov8n-pose.yaml').load('yolov8n-pose.pt') # build from YAML and transfer weights - + # Train the model model.train(data='coco8-pose.yaml', epochs=100, imgsz=640) ``` === "CLI" - + ```bash # Build a new model from YAML and start training from scratch yolo pose train data=coco8-pose.yaml model=yolov8n-pose.yaml epochs=100 imgsz=640 @@ -90,14 +90,14 @@ training `data` and arguments as model attributes. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-pose.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom model - + # Validate the model metrics = model.val() # no arguments needed, dataset and settings remembered metrics.box.map # map50-95 @@ -106,7 +106,7 @@ training `data` and arguments as model attributes. metrics.box.maps # a list contains map50-95 of each category ``` === "CLI" - + ```bash yolo pose val model=yolov8n-pose.pt # val official model yolo pose val model=path/to/best.pt # val custom model @@ -119,19 +119,19 @@ Use a trained YOLOv8n-pose model to run predictions on images. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-pose.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom model - + # Predict with the model results = model('https://ultralytics.com/images/bus.jpg') # predict on an image ``` === "CLI" - + ```bash yolo pose predict model=yolov8n-pose.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo pose predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model @@ -146,19 +146,19 @@ Export a YOLOv8n Pose model to a different format like ONNX, CoreML, etc. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-pose.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom trained - + # Export the model model.export(format='onnx') ``` === "CLI" - + ```bash yolo export model=yolov8n-pose.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model @@ -183,4 +183,4 @@ i.e. `yolo predict model=yolov8n-pose.onnx`. Usage examples are shown for your m | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-pose_paddle_model/` | ✅ | `imgsz` | | [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n-pose_ncnn_model/` | ✅ | `imgsz`, `half` | -See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. \ No newline at end of file +See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. diff --git a/docs/tasks/segment.md b/docs/tasks/segment.md index 5fca146..696136a 100644 --- a/docs/tasks/segment.md +++ b/docs/tasks/segment.md @@ -49,20 +49,20 @@ arguments see the [Configuration](../usage/cfg.md) page. !!! 
example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-seg.yaml') # build a new model from YAML model = YOLO('yolov8n-seg.pt') # load a pretrained model (recommended for training) model = YOLO('yolov8n-seg.yaml').load('yolov8n.pt') # build from YAML and transfer weights - + # Train the model model.train(data='coco128-seg.yaml', epochs=100, imgsz=640) ``` === "CLI" - + ```bash # Build a new model from YAML and start training from scratch yolo segment train data=coco128-seg.yaml model=yolov8n-seg.yaml epochs=100 imgsz=640 @@ -86,14 +86,14 @@ retains it's training `data` and arguments as model attributes. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-seg.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom model - + # Validate the model metrics = model.val() # no arguments needed, dataset and settings remembered metrics.box.map # map50-95(B) @@ -106,7 +106,7 @@ retains it's training `data` and arguments as model attributes. metrics.seg.maps # a list contains map50-95(M) of each category ``` === "CLI" - + ```bash yolo segment val model=yolov8n-seg.pt # val official model yolo segment val model=path/to/best.pt # val custom model @@ -119,19 +119,19 @@ Use a trained YOLOv8n-seg model to run predictions on images. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-seg.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom model - + # Predict with the model results = model('https://ultralytics.com/images/bus.jpg') # predict on an image ``` === "CLI" - + ```bash yolo segment predict model=yolov8n-seg.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model yolo segment predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model @@ -146,19 +146,19 @@ Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc. !!! example "" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n-seg.pt') # load an official model model = YOLO('path/to/best.pt') # load a custom trained - + # Export the model model.export(format='onnx') ``` === "CLI" - + ```bash yolo export model=yolov8n-seg.pt format=onnx # export official model yolo export model=path/to/best.pt format=onnx # export custom trained model @@ -183,4 +183,4 @@ i.e. `yolo predict model=yolov8n-seg.onnx`. Usage examples are shown for your mo | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-seg_paddle_model/` | ✅ | `imgsz` | | [ncnn](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n-seg_ncnn_model/` | ✅ | `imgsz`, `half` | -See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. \ No newline at end of file +See full `export` details in the [Export](https://docs.ultralytics.com/modes/export/) page. diff --git a/docs/usage/callbacks.md b/docs/usage/callbacks.md index c379e23..e2e6885 100644 --- a/docs/usage/callbacks.md +++ b/docs/usage/callbacks.md @@ -20,10 +20,10 @@ In this example, we want to return the original frame with each result object. 
H def on_predict_batch_end(predictor): # Retrieve the batch data _, im0s, _, _ = predictor.batch - + # Ensure that im0s is a list im0s = im0s if isinstance(im0s, list) else [im0s] - + # Combine the prediction results with the corresponding frames predictor.results = zip(predictor.results, im0s) @@ -85,4 +85,4 @@ Here are all supported callbacks. See callbacks [source code](https://github.com | Callback | Description | |-------------------|------------------------------------------| | `on_export_start` | Triggered when the export process starts | -| `on_export_end` | Triggered when the export process ends | \ No newline at end of file +| `on_export_end` | Triggered when the export process ends | diff --git a/docs/usage/cfg.md b/docs/usage/cfg.md index 8c2957c..465d26c 100644 --- a/docs/usage/cfg.md +++ b/docs/usage/cfg.md @@ -13,19 +13,19 @@ YOLOv8 'yolo' CLI commands use the following syntax: !!! example "" === "CLI" - + ```bash yolo TASK MODE ARGS ``` === "Python" - + ```python from ultralytics import YOLO - + # Load a YOLOv8 model from a pre-trained weights file model = YOLO('yolov8n.pt') - + # Run MODE mode using the custom arguments ARGS (guess TASK) model.MODE(ARGS) ``` @@ -45,9 +45,9 @@ Where: YOLO models can be used for a variety of tasks, including detection, segmentation, classification and pose. These tasks differ in the type of output they produce and the specific problem they are designed to solve. -**Detect**: For identifying and localizing objects or regions of interest in an image or video. -**Segment**: For dividing an image or video into regions or pixels that correspond to different objects or classes. -**Classify**: For predicting the class label of an input image. +**Detect**: For identifying and localizing objects or regions of interest in an image or video. +**Segment**: For dividing an image or video into regions or pixels that correspond to different objects or classes. +**Classify**: For predicting the class label of an input image. **Pose**: For identifying objects and estimating their keypoints in an image or video. | Key | Value | Description | @@ -61,11 +61,11 @@ differ in the type of output they produce and the specific problem they are desi YOLO models can be used in different modes depending on the specific problem you are trying to solve. These modes include: -**Train**: For training a YOLOv8 model on a custom dataset. -**Val**: For validating a YOLOv8 model after it has been trained. -**Predict**: For making predictions using a trained YOLOv8 model on new images or videos. -**Export**: For exporting a YOLOv8 model to a format that can be used for deployment. -**Track**: For tracking objects in real-time using a YOLOv8 model. +**Train**: For training a YOLOv8 model on a custom dataset. +**Val**: For validating a YOLOv8 model after it has been trained. +**Predict**: For making predictions using a trained YOLOv8 model on new images or videos. +**Export**: For exporting a YOLOv8 model to a format that can be used for deployment. +**Track**: For tracking objects in real-time using a YOLOv8 model. **Benchmark**: For benchmarking YOLOv8 exports (ONNX, TensorRT, etc.) speed and accuracy. | Key | Value | Description | @@ -251,4 +251,4 @@ it easier to debug and optimize the training process. | `name` | `'exp'` | experiment name. `exp` gets automatically incremented if not specified, i.e, `exp`, `exp2` ... 
| | `exist_ok` | `False` | whether to overwrite existing experiment | | `plots` | `False` | save plots during train/val | -| `save` | `False` | save train checkpoints and predict results | \ No newline at end of file +| `save` | `False` | save train checkpoints and predict results | diff --git a/docs/usage/cli.md b/docs/usage/cli.md index 74fde4c..1974c61 100644 --- a/docs/usage/cli.md +++ b/docs/usage/cli.md @@ -74,7 +74,7 @@ Where: !!! warning "Warning" - Arguments must be passed as `arg=val` pairs, split by an equals `=` sign and delimited by spaces ` ` between pairs. Do not use `--` argument prefixes or commas `,` beteen arguments. + Arguments must be passed as `arg=val` pairs, split by an equals `=` sign and delimited by spaces ` ` between pairs. Do not use `--` argument prefixes or commas `,` between arguments. - `yolo predict model=yolov8n.pt imgsz=640 conf=0.25`   ✅ - `yolo predict model yolov8n.pt imgsz 640 conf 0.25`   ❌ @@ -88,7 +88,7 @@ the [Configuration](cfg.md) page. !!! example "Example" === "Train" - + Start training YOLOv8n on COCO128 for 100 epochs at image-size 640. ```bash yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 diff --git a/docs/usage/engine.md b/docs/usage/engine.md index 7aabaad..9acf801 100644 --- a/docs/usage/engine.md +++ b/docs/usage/engine.md @@ -84,4 +84,4 @@ To know more about Callback triggering events and entry point, checkout our [Cal ## Other engine components There are other components that can be customized similarly like `Validators` and `Predictors` -See Reference section for more information on these. \ No newline at end of file +See Reference section for more information on these. diff --git a/docs/usage/hyperparameter_tuning.md b/docs/usage/hyperparameter_tuning.md index df5e7ae..4d34753 100644 --- a/docs/usage/hyperparameter_tuning.md +++ b/docs/usage/hyperparameter_tuning.md @@ -29,7 +29,7 @@ To install the required packages, run: !!! tip "Installation" ```bash - # Install and update Ultralytics and Ray Tune pacakges + # Install and update Ultralytics and Ray Tune packages pip install -U ultralytics 'ray[tune]' # Optionally install W&B for logging @@ -99,7 +99,7 @@ In this example, we demonstrate how to use a custom search space for hyperparame ```python from ultralytics import YOLO - # Define a YOLO model + # Define a YOLO model model = YOLO("yolov8n.pt") # Run Ray Tune on the model @@ -166,4 +166,4 @@ plt.show() In this documentation, we covered common workflows to analyze the results of experiments run with Ray Tune using Ultralytics. The key steps include loading the experiment results from a directory, performing basic experiment-level and trial-level analysis and plotting metrics. -Explore further by looking into Ray Tune’s [Analyze Results](https://docs.ray.io/en/latest/tune/examples/tune_analyze_results.html) docs page to get the most out of your hyperparameter tuning experiments. \ No newline at end of file +Explore further by looking into Ray Tune’s [Analyze Results](https://docs.ray.io/en/latest/tune/examples/tune_analyze_results.html) docs page to get the most out of your hyperparameter tuning experiments. diff --git a/docs/usage/python.md b/docs/usage/python.md index b2dfc53..610d927 100644 --- a/docs/usage/python.md +++ b/docs/usage/python.md @@ -19,22 +19,22 @@ format with just a few lines of code. 
```python from ultralytics import YOLO - + # Create a new YOLO model from scratch model = YOLO('yolov8n.yaml') - + # Load a pretrained YOLO model (recommended for training) model = YOLO('yolov8n.pt') - + # Train the model using the 'coco128.yaml' dataset for 3 epochs results = model.train(data='coco128.yaml', epochs=3) - + # Evaluate the model's performance on the validation set results = model.val() - + # Perform object detection on an image using the model results = model('https://ultralytics.com/images/bus.jpg') - + # Export the model to ONNX format success = model.export(format='onnx') ``` @@ -135,7 +135,7 @@ predicts the classes and locations of objects in the input images or videos. === "Results usage" ```python # results would be a list of Results object including all the predictions by default - # but be careful as it could occupy a lot memory when there're many images, + # but be careful as it could occupy a lot memory when there're many images, # especially the task is segmentation. # 1. return as a list results = model.predict(source="folder") @@ -161,7 +161,7 @@ predicts the classes and locations of objects in the input images or videos. # Classification result.probs # cls prob, (num_class, ) - # Each result is composed of torch.Tensor by default, + # Each result is composed of torch.Tensor by default, # in which you can easily use following functionality: result = result.cuda() result = result.cpu() @@ -210,18 +210,18 @@ for applications such as surveillance systems or self-driving cars. !!! example "Track" === "Python" - + ```python from ultralytics import YOLO - + # Load a model model = YOLO('yolov8n.pt') # load an official detection model model = YOLO('yolov8n-seg.pt') # load an official segmentation model model = YOLO('path/to/best.pt') # load a custom model - + # Track with the model - results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True) - results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True, tracker="bytetrack.yaml") + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True) + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True, tracker="bytetrack.yaml") ``` [Track Examples](../modes/track.md){ .md-button .md-button--primary} @@ -237,11 +237,11 @@ their specific use case based on their requirements for speed and accuracy. !!! example "Benchmark" === "Python" - + Benchmark an official YOLOv8n model across all export formats. ```python from ultralytics.utils.benchmarks import benchmark - + # Benchmark benchmark(model='yolov8n.pt', data='coco8.yaml', imgsz=640, half=False, device=0) ``` diff --git a/docs/yolov5/environments/aws_quickstart_tutorial.md b/docs/yolov5/environments/aws_quickstart_tutorial.md index 42679bc..f0399e8 100644 --- a/docs/yolov5/environments/aws_quickstart_tutorial.md +++ b/docs/yolov5/environments/aws_quickstart_tutorial.md @@ -85,4 +85,4 @@ sudo swapon /swapfile free -h # check memory ``` -Now you have successfully set up and run YOLOv5 on an AWS Deep Learning instance. Enjoy training, testing, and deploying your object detection models! \ No newline at end of file +Now you have successfully set up and run YOLOv5 on an AWS Deep Learning instance. Enjoy training, testing, and deploying your object detection models! 
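The `docs/usage/python.md` hunk above warns that keeping every prediction in a list "could occupy a lot memory when there're many images". The alternative it names, returning results as a generator, is selected with `stream=True`; a minimal sketch of that pattern:

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')

# stream=True yields Results objects one at a time instead of building a
# list, keeping memory flat across long videos or large image folders
for result in model.predict(source='folder', stream=True):
    print(result.boxes.xyxy)  # (N, 4) tensor of xyxy boxes for this image
```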
diff --git a/docs/yolov5/environments/docker_image_quickstart_tutorial.md b/docs/yolov5/environments/docker_image_quickstart_tutorial.md index 27c2e9f..44dcb17 100644 --- a/docs/yolov5/environments/docker_image_quickstart_tutorial.md +++ b/docs/yolov5/environments/docker_image_quickstart_tutorial.md @@ -61,4 +61,4 @@ python detect.py --weights yolov5s.pt --source path/to/images # run inference o python export.py --weights yolov5s.pt --include onnx coreml tflite # export models to other formats ``` -


diff --git a/docs/yolov5/environments/google_cloud_quickstart_tutorial.md b/docs/yolov5/environments/google_cloud_quickstart_tutorial.md index 2fe407d..0b0e9ea 100644 --- a/docs/yolov5/environments/google_cloud_quickstart_tutorial.md +++ b/docs/yolov5/environments/google_cloud_quickstart_tutorial.md @@ -46,4 +46,4 @@ python detect.py --weights yolov5s.pt --source path/to/images # run inference o python export.py --weights yolov5s.pt --include onnx coreml tflite # export models to other formats ``` -GCP terminal \ No newline at end of file +GCP terminal diff --git a/docs/yolov5/index.md b/docs/yolov5/index.md index b1a40b1..1620422 100644 --- a/docs/yolov5/index.md +++ b/docs/yolov5/index.md @@ -87,4 +87,4 @@ This badge signifies that all [YOLOv5 GitHub Actions](https://github.com/ultraly -
diff --git a/docs/yolov5/quickstart_tutorial.md b/docs/yolov5/quickstart_tutorial.md index 7a6d801..7072f53 100644 --- a/docs/yolov5/quickstart_tutorial.md +++ b/docs/yolov5/quickstart_tutorial.md @@ -77,4 +77,4 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - yolov5x 16 ``` - \ No newline at end of file + diff --git a/docs/yolov5/tutorials/architecture_description.md b/docs/yolov5/tutorials/architecture_description.md index 7ba0cb8..418fe8d 100644 --- a/docs/yolov5/tutorials/architecture_description.md +++ b/docs/yolov5/tutorials/architecture_description.md @@ -160,9 +160,9 @@ The objectness losses of the three prediction layers (`P3`, `P4`, `P5`) are weig The YOLOv5 architecture makes some important changes to the box prediction strategy compared to earlier versions of YOLO. In YOLOv2 and YOLOv3, the box coordinates were directly predicted using the activation of the last layer. -![b_x](https://latex.codecogs.com/svg.image?b_x=\sigma(t_x)+c_x) -![b_y](https://latex.codecogs.com/svg.image?b_y=\sigma(t_y)+c_y) -![b_w](https://latex.codecogs.com/svg.image?b_w=p_w\cdot&space;e^{t_w}) +![b_x](https://latex.codecogs.com/svg.image?b_x=\sigma(t_x)+c_x) +![b_y](https://latex.codecogs.com/svg.image?b_y=\sigma(t_y)+c_y) +![b_w](https://latex.codecogs.com/svg.image?b_w=p_w\cdot&space;e^{t_w}) ![b_h](https://latex.codecogs.com/svg.image?b_h=p_h\cdot&space;e^{t_h}) @@ -171,9 +171,9 @@ However, in YOLOv5, the formula for predicting the box coordinates has been upda The revised formulas for calculating the predicted bounding box are as follows: -![bx](https://latex.codecogs.com/svg.image?b_x=(2\cdot\sigma(t_x)-0.5)+c_x) -![by](https://latex.codecogs.com/svg.image?b_y=(2\cdot\sigma(t_y)-0.5)+c_y) -![bw](https://latex.codecogs.com/svg.image?b_w=p_w\cdot(2\cdot\sigma(t_w))^2) +![bx](https://latex.codecogs.com/svg.image?b_x=(2\cdot\sigma(t_x)-0.5)+c_x) +![by](https://latex.codecogs.com/svg.image?b_y=(2\cdot\sigma(t_y)-0.5)+c_y) +![bw](https://latex.codecogs.com/svg.image?b_w=p_w\cdot(2\cdot\sigma(t_w))^2) ![bh](https://latex.codecogs.com/svg.image?b_h=p_h\cdot(2\cdot\sigma(t_h))^2) Compare the center point offset before and after scaling. The center point offset range is adjusted from (0, 1) to (-0.5, 1.5). @@ -221,4 +221,4 @@ This way, the build targets process ensures that each ground truth object is pro In conclusion, YOLOv5 represents a significant step forward in the development of real-time object detection models. By incorporating various new features, enhancements, and training strategies, it surpasses previous versions of the YOLO family in performance and efficiency. -The primary enhancements in YOLOv5 include the use of a dynamic architecture, an extensive range of data augmentation techniques, innovative training strategies, as well as important adjustments in computing losses and the process of building targets. All these innovations significantly improve the accuracy and efficiency of object detection while retaining a high degree of speed, which is the trademark of YOLO models. \ No newline at end of file +The primary enhancements in YOLOv5 include the use of a dynamic architecture, an extensive range of data augmentation techniques, innovative training strategies, as well as important adjustments in computing losses and the process of building targets. All these innovations significantly improve the accuracy and efficiency of object detection while retaining a high degree of speed, which is the trademark of YOLO models. 
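The revised box formulas in the `architecture_description.md` hunk above are easy to sanity-check numerically. A small illustrative sketch (the `decode_box` helper and its inputs are hypothetical, not actual YOLOv5 code):

```python
import torch

def decode_box(t, cx, cy, pw, ph):
    # YOLOv5 decoding as described above: center offsets are rescaled to
    # (-0.5, 1.5) and width/height gains are bounded to (0, 4) x anchor
    s = torch.sigmoid(t)
    bx = (2 * s[0] - 0.5) + cx
    by = (2 * s[1] - 0.5) + cy
    bw = pw * (2 * s[2]) ** 2
    bh = ph * (2 * s[3]) ** 2
    return bx, by, bw, bh

# t = 0 gives sigmoid(t) = 0.5, so the center lands mid-cell (offset 0.5)
# and the box takes exactly the anchor size: (10.5, 12.5, 3.0, 5.0)
print(decode_box(torch.zeros(4), cx=10.0, cy=12.0, pw=3.0, ph=5.0))
```

Replacing `e^{t_w}` with a squared sigmoid is what prevents the unbounded box dimensions the tutorial mentions: no prediction can exceed four times its anchor.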
diff --git a/docs/yolov5/tutorials/clearml_logging_integration.md b/docs/yolov5/tutorials/clearml_logging_integration.md index 6c1c5da..8e42507 100644 --- a/docs/yolov5/tutorials/clearml_logging_integration.md +++ b/docs/yolov5/tutorials/clearml_logging_integration.md @@ -240,4 +240,4 @@ ClearML comes with autoscalers too! This tool will automatically spin up new rem Check out the autoscalers getting started video below. -[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E) \ No newline at end of file +[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E) diff --git a/docs/yolov5/tutorials/comet_logging_integration.md b/docs/yolov5/tutorials/comet_logging_integration.md index 6ca7b17..f2c6ba8 100644 --- a/docs/yolov5/tutorials/comet_logging_integration.md +++ b/docs/yolov5/tutorials/comet_logging_integration.md @@ -261,4 +261,4 @@ comet optimizer -j utils/loggers/comet/hpo.py \ Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) -hyperparameter-yolo \ No newline at end of file +hyperparameter-yolo diff --git a/docs/yolov5/tutorials/hyperparameter_evolution.md b/docs/yolov5/tutorials/hyperparameter_evolution.md index e8a078f..25b2043 100644 --- a/docs/yolov5/tutorials/hyperparameter_evolution.md +++ b/docs/yolov5/tutorials/hyperparameter_evolution.md @@ -64,10 +64,10 @@ copy_paste: 0.0 # segment copy-paste (probability) Fitness is the value we seek to maximize. In YOLOv5 we define a default fitness function as a weighted combination of metrics: `mAP@0.5` contributes 10% of the weight and `mAP@0.5:0.95` contributes the remaining 90%, with [Precision `P` and Recall `R`](https://en.wikipedia.org/wiki/Precision_and_recall) absent. You may adjust these as you see fit or use the default fitness definition in utils/metrics.py (recommended). ```python -def fitness(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) ``` ## 3. Evolve @@ -163,4 +163,4 @@ YOLOv5 is designed to be run in the following up-to-date verified environments ( YOLOv5 CI -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/docs/yolov5/tutorials/model_ensembling.md b/docs/yolov5/tutorials/model_ensembling.md index 3e13435..a7e2645 100644 --- a/docs/yolov5/tutorials/model_ensembling.md +++ b/docs/yolov5/tutorials/model_ensembling.md @@ -4,7 +4,7 @@ description: Learn how to ensemble YOLOv5 models for improved mAP and Recall! Cl keywords: YOLOv5, object detection, ensemble learning, mAP, Recall --- -📚 This guide explains how to use YOLOv5 🚀 **model ensembling** during testing and inference for improved mAP and Recall. +📚 This guide explains how to use YOLOv5 🚀 **model ensembling** during testing and inference for improved mAP and Recall. UPDATED 25 September 2022. From [https://en.wikipedia.org/wiki/Ensemble_learning](https://en.wikipedia.org/wiki/Ensemble_learning): @@ -34,7 +34,7 @@ Output: val: data=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) -Fusing layers... +Fusing layers... Model Summary: 476 layers, 87730285 parameters, 0 gradients val: Scanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2846.03it/s] @@ -76,9 +76,9 @@ Output: val: data=./data/coco.yaml, weights=['yolov5x.pt', 'yolov5l6.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) -Fusing layers... +Fusing layers... Model Summary: 476 layers, 87730285 parameters, 0 gradients # Model 1 -Fusing layers... +Fusing layers... Model Summary: 501 layers, 77218620 parameters, 0 gradients # Model 2 Ensemble created with ['yolov5x.pt', 'yolov5l6.pt'] # Ensemble notice @@ -117,9 +117,9 @@ Output: detect: weights=['yolov5x.pt', 'yolov5l6.pt'], source=data/images, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_width=3, hide_labels=False, hide_conf=False, half=False YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) -Fusing layers... +Fusing layers... Model Summary: 476 layers, 87730285 parameters, 0 gradients -Fusing layers... +Fusing layers... 
Model Summary: 501 layers, 77218620 parameters, 0 gradients Ensemble created with ['yolov5x.pt', 'yolov5l6.pt'] @@ -144,4 +144,4 @@ YOLOv5 is designed to be run in the following up-to-date verified environments ( YOLOv5 CI -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/docs/yolov5/tutorials/model_export.md b/docs/yolov5/tutorials/model_export.md index 77869c9..e8bf262 100644 --- a/docs/yolov5/tutorials/model_export.md +++ b/docs/yolov5/tutorials/model_export.md @@ -6,7 +6,7 @@ keywords: Ultralytics, YOLOv5, model export, PyTorch, TorchScript, ONNX, OpenVIN # TFLite, ONNX, CoreML, TensorRT Export -📚 This guide explains how to export a trained YOLOv5 🚀 model from PyTorch to ONNX and TorchScript formats. +📚 This guide explains how to export a trained YOLOv5 🚀 model from PyTorch to ONNX and TorchScript formats. UPDATED 8 December 2022. ## Before You Start @@ -116,7 +116,7 @@ YOLOv5 🚀 v6.2-104-ge3e5122 Python-3.7.13 torch-1.12.1+cu113 CPU Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt... 100% 14.1M/14.1M [00:00<00:00, 274MB/s] -Fusing layers... +Fusing layers... YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients PyTorch: starting from yolov5s.pt with output shape (1, 25200, 85) (14.1 MB) @@ -129,8 +129,8 @@ ONNX: export success ✅ 2.3s, saved as yolov5s.onnx (28.0 MB) Export complete (5.5s) Results saved to /content/yolov5 -Detect: python detect.py --weights yolov5s.onnx -Validate: python val.py --weights yolov5s.onnx +Detect: python detect.py --weights yolov5s.onnx +Validate: python val.py --weights yolov5s.onnx PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.onnx') Visualize: https://netron.app/ ``` @@ -243,4 +243,4 @@ YOLOv5 is designed to be run in the following up-to-date verified environments ( YOLOv5 CI -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/docs/yolov5/tutorials/model_pruning_and_sparsity.md b/docs/yolov5/tutorials/model_pruning_and_sparsity.md index e3fb1c3..9b8882d 100644 --- a/docs/yolov5/tutorials/model_pruning_and_sparsity.md +++ b/docs/yolov5/tutorials/model_pruning_and_sparsity.md @@ -4,7 +4,7 @@ description: Improve YOLOv5 model efficiency by pruning with Ultralytics. Unders keywords: YOLOv5, YOLO, Ultralytics, model pruning, PyTorch, machine learning, deep learning, computer vision, object detection --- -📚 This guide explains how to apply **pruning** to YOLOv5 🚀 models. +📚 This guide explains how to apply **pruning** to YOLOv5 🚀 models. UPDATED 25 September 2022. ## Before You Start @@ -31,7 +31,7 @@ Output: val: data=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False YOLOv5 🚀 v6.0-224-g4c40933 torch 1.10.0+cu111 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB) -Fusing layers... +Fusing layers... Model Summary: 444 layers, 86705005 parameters, 0 gradients val: Scanning '/content/datasets/coco/val2017.cache' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00YOLOv5 CI -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/docs/yolov5/tutorials/multi_gpu_training.md b/docs/yolov5/tutorials/multi_gpu_training.md index 91d010e..8802193 100644 --- a/docs/yolov5/tutorials/multi_gpu_training.md +++ b/docs/yolov5/tutorials/multi_gpu_training.md @@ -4,7 +4,7 @@ description: Learn how to train datasets on single or multiple GPUs using YOLOv5 keywords: YOLOv5, multi-GPU Training, YOLOv5 training, deep learning, machine learning, object detection, Ultralytics --- -📚 This guide explains how to properly use **multiple** GPUs to train a dataset with YOLOv5 🚀 on single or multiple machine(s). +📚 This guide explains how to properly use **multiple** GPUs to train a dataset with YOLOv5 🚀 on single or multiple machine(s). UPDATED 25 December 2022. ## Before You Start @@ -136,9 +136,9 @@ cd .. && rm -rf app && git clone https://github.com/ultralytics/yolov5 -b master cp data/coco.yaml data/coco_profile.yaml # profile -python train.py --batch-size 16 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0 -python -m torch.distributed.run --nproc_per_node 2 train.py --batch-size 32 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0,1 -python -m torch.distributed.run --nproc_per_node 4 train.py --batch-size 64 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0,1,2,3 +python train.py --batch-size 16 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0 +python -m torch.distributed.run --nproc_per_node 2 train.py --batch-size 32 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0,1 +python -m torch.distributed.run --nproc_per_node 4 train.py --batch-size 64 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0,1,2,3 python -m torch.distributed.run --nproc_per_node 8 train.py --batch-size 128 --data coco_profile.yaml --weights yolov5l.pt --epochs 1 --device 0,1,2,3,4,5,6,7 ``` @@ -188,4 +188,4 @@ If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralyti ## Credits -I would like to thank @MagicFrogSJTU, who did all the heavy lifting, and @glenn-jocher for guiding us along the way. \ No newline at end of file +I would like to thank @MagicFrogSJTU, who did all the heavy lifting, and @glenn-jocher for guiding us along the way. diff --git a/docs/yolov5/tutorials/neural_magic_pruning_quantization.md b/docs/yolov5/tutorials/neural_magic_pruning_quantization.md index efddec2..d06fc74 100644 --- a/docs/yolov5/tutorials/neural_magic_pruning_quantization.md +++ b/docs/yolov5/tutorials/neural_magic_pruning_quantization.md @@ -145,7 +145,7 @@ An example request, using Python's `requests` package: import requests, json # list of images for inference (local files on client side) -path = ['basilica.jpg'] +path = ['basilica.jpg'] files = [('request', open(img, 'rb')) for img in path] # send request over HTTP to /predict/from_files endpoint @@ -268,4 +268,4 @@ deepsparse.benchmark zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned35 ## Get Started With DeepSparse -**Research or Testing?** DeepSparse Community is free for research and testing. 
Get started with our [Documentation](https://docs.neuralmagic.com/). \ No newline at end of file +**Research or Testing?** DeepSparse Community is free for research and testing. Get started with our [Documentation](https://docs.neuralmagic.com/). diff --git a/docs/yolov5/tutorials/pytorch_hub_model_loading.md b/docs/yolov5/tutorials/pytorch_hub_model_loading.md index 4463848..377bad8 100644 --- a/docs/yolov5/tutorials/pytorch_hub_model_loading.md +++ b/docs/yolov5/tutorials/pytorch_hub_model_loading.md @@ -4,7 +4,7 @@ description: Detailed guide on loading YOLOv5 from PyTorch Hub. Includes example keywords: Ultralytics, YOLOv5, PyTorch, loading YOLOv5, PyTorch Hub, inference, multi-GPU inference, training --- -📚 This guide explains how to load YOLOv5 🚀 from PyTorch Hub at [https://pytorch.org/hub/ultralytics_yolov5](https://pytorch.org/hub/ultralytics_yolov5). +📚 This guide explains how to load YOLOv5 🚀 from PyTorch Hub at [https://pytorch.org/hub/ultralytics_yolov5](https://pytorch.org/hub/ultralytics_yolov5). UPDATED 26 March 2023. ## Before You Start @@ -65,7 +65,7 @@ im2 = cv2.imread('bus.jpg')[..., ::-1] # OpenCV image (BGR to RGB) results = model([im1, im2], size=640) # batch of images # Results -results.print() +results.print() results.save() # or .show() results.xyxy[0] # im1 predictions (tensor) @@ -301,7 +301,7 @@ model = torch.hub.load('path/to/yolov5', 'custom', path='path/to/best.pt', sourc PyTorch Hub supports inference on most YOLOv5 export formats, including custom trained models. See [TFLite, ONNX, CoreML, TensorRT Export tutorial](https://docs.ultralytics.com/yolov5/tutorials/model_export) for details on exporting models. -💡 ProTip: **TensorRT** may be up to 2-5X faster than PyTorch on [**GPU benchmarks**](https://github.com/ultralytics/yolov5/pull/6963) +💡 ProTip: **TensorRT** may be up to 2-5X faster than PyTorch on [**GPU benchmarks**](https://github.com/ultralytics/yolov5/pull/6963) 💡 ProTip: **ONNX** and **OpenVINO** may be up to 2-3X faster than PyTorch on [**CPU benchmarks**](https://github.com/ultralytics/yolov5/pull/6613) ```python @@ -328,4 +328,4 @@ YOLOv5 is designed to be run in the following up-to-date verified environments ( YOLOv5 CI -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
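The PyTorch Hub hunks above index detections from the raw `results.xyxy` tensor; the same `Detections` object also exposes a pandas view, which is often more convenient for filtering. A short sketch (the 0.5 threshold is arbitrary):

```python
import torch

# Load YOLOv5s from PyTorch Hub (weights download on first use)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

results = model('https://ultralytics.com/images/zidane.jpg')

# One row per detection: xmin, ymin, xmax, ymax, confidence, class, name
df = results.pandas().xyxy[0]
print(df[df.confidence > 0.5])
```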
diff --git a/docs/yolov5/tutorials/roboflow_datasets_integration.md b/docs/yolov5/tutorials/roboflow_datasets_integration.md index 6114543..f495337 100644 --- a/docs/yolov5/tutorials/roboflow_datasets_integration.md +++ b/docs/yolov5/tutorials/roboflow_datasets_integration.md @@ -6,7 +6,7 @@ keywords: Ultralytics, YOLOv5, Roboflow, data organization, data labelling, data # Roboflow Datasets -You can now use Roboflow to organize, label, prepare, version, and host your datasets for training YOLOv5 🚀 models. Roboflow is free to use with YOLOv5 if you make your workspace public. +You can now use Roboflow to organize, label, prepare, version, and host your datasets for training YOLOv5 🚀 models. Roboflow is free to use with YOLOv5 if you make your workspace public. UPDATED 7 June 2023. !!! warning @@ -50,4 +50,4 @@ We have released a custom training tutorial demonstrating all of the above capab The real world is messy and your model will invariably encounter situations your dataset didn't anticipate. Using [active learning](https://blog.roboflow.com/what-is-active-learning/) is an important strategy to iteratively improve your dataset and model. With the Roboflow and YOLOv5 integration, you can quickly make improvements on your model deployments by using a battle tested machine learning pipeline. -

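The Roboflow integration above stays in prose; for orientation, the usual download flow with the `roboflow` package looks roughly like the sketch below, where the API key, workspace, project and version identifiers are all placeholders:

```python
from roboflow import Roboflow  # pip install roboflow

rf = Roboflow(api_key='YOUR_API_KEY')
project = rf.workspace('my-workspace').project('my-project')

# Export dataset version 1 in YOLOv5 format; the download contains
# images, labels and a data.yaml that train.py can consume directly
dataset = project.version(1).download('yolov5')
print(dataset.location)
```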

diff --git a/docs/yolov5/tutorials/running_on_jetson_nano.md b/docs/yolov5/tutorials/running_on_jetson_nano.md index bc5570f..f5eeeed 100644 --- a/docs/yolov5/tutorials/running_on_jetson_nano.md +++ b/docs/yolov5/tutorials/running_on_jetson_nano.md @@ -6,7 +6,7 @@ keywords: TensorRT, NVIDIA Jetson, DeepStream SDK, deployment, Ultralytics, YOLO # Deploy on NVIDIA Jetson using TensorRT and DeepStream SDK -📚 This guide explains how to deploy a trained model into NVIDIA Jetson Platform and perform inference using TensorRT and DeepStream SDK. Here we use TensorRT to maximize the inference performance on the Jetson platform. +📚 This guide explains how to deploy a trained model into NVIDIA Jetson Platform and perform inference using TensorRT and DeepStream SDK. Here we use TensorRT to maximize the inference performance on the Jetson platform. UPDATED 18 November 2022. ## Hardware Verification @@ -117,7 +117,7 @@ pip3 install torch-1.10.0-cp36-cp36m-linux_aarch64.whl sudo apt install -y libjpeg-dev zlib1g-dev git clone --branch v0.11.1 https://github.com/pytorch/vision torchvision cd torchvision -sudo python3 setup.py install +sudo python3 setup.py install ``` Here a list of the corresponding torchvision version that you need to install according to the PyTorch version: @@ -310,11 +310,11 @@ The following table summarizes how different models perform on **Jetson Xavier N | Model Name | Precision | Inference Size | Inference Time (ms) | FPS | |------------|-----------|----------------|---------------------|-----| -| YOLOv5s | FP32 | 320x320 | 16.66 | 60 | -| | FP32 | 640x640 | 33.33 | 30 | -| | INT8 | 640x640 | 16.66 | 60 | -| YOLOv5n | FP32 | 640x640 | 16.66 | 60 | +| YOLOv5s | FP32 | 320x320 | 16.66 | 60 | +| | FP32 | 640x640 | 33.33 | 30 | +| | INT8 | 640x640 | 16.66 | 60 | +| YOLOv5n | FP32 | 640x640 | 16.66 | 60 | ### Additional -This tutorial is written by our friends at seeed @lakshanthad and Elaine \ No newline at end of file +This tutorial is written by our friends at seeed @lakshanthad and Elaine diff --git a/docs/yolov5/tutorials/test_time_augmentation.md b/docs/yolov5/tutorials/test_time_augmentation.md index 30aadcb..e2c19ca 100644 --- a/docs/yolov5/tutorials/test_time_augmentation.md +++ b/docs/yolov5/tutorials/test_time_augmentation.md @@ -6,7 +6,7 @@ keywords: YOLOv5, Ultralytics, Test-Time Augmentation, TTA, mAP, Recall, model p # Test-Time Augmentation (TTA) -📚 This guide explains how to use Test Time Augmentation (TTA) during testing and inference for improved mAP and Recall with YOLOv5 🚀. +📚 This guide explains how to use Test Time Augmentation (TTA) during testing and inference for improved mAP and Recall with YOLOv5 🚀. UPDATED 25 September 2022. ## Before You Start @@ -33,7 +33,7 @@ Output: val: data=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) -Fusing layers... +Fusing layers... 
Model Summary: 476 layers, 87730285 parameters, 0 gradients val: Scanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2846.03it/s] @@ -72,7 +72,7 @@ Output: val: data=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=832, conf_thres=0.001, iou_thres=0.6, task=val, device=, single_cls=False, augment=True, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16280.875MB) -Fusing layers... +Fusing layers... /usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /pytorch/c10/core/TensorImpl.h:1156.) return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode) Model Summary: 476 layers, 87730285 parameters, 0 gradients @@ -115,7 +115,7 @@ YOLOv5 🚀 v5.0-267-g6a3ee7c torch 1.9.0+cu102 CUDA:0 (Tesla P100-PCIE-16GB, 16 Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt to yolov5s.pt... 100% 14.1M/14.1M [00:00<00:00, 81.9MB/s] -Fusing layers... +Fusing layers... Model Summary: 224 layers, 7266973 parameters, 0 gradients image 1/2 /content/yolov5/data/images/bus.jpg: 832x640 4 persons, 1 bus, 1 fire hydrant, Done. (0.029s) image 2/2 /content/yolov5/data/images/zidane.jpg: 480x832 3 persons, 3 ties, Done. (0.024s) @@ -162,4 +162,4 @@ YOLOv5 is designed to be run in the following up-to-date verified environments ( YOLOv5 CI -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
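The TTA tutorial above enables augmented inference from the CLI via `--augment`; PyTorch Hub models accept the same flag per forward call. A minimal sketch:

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# augment=True runs inference on flipped and scaled copies of the image
# and merges the detections, trading extra latency for mAP and Recall
results = model('https://ultralytics.com/images/zidane.jpg', augment=True)
results.print()
```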
diff --git a/docs/yolov5/tutorials/tips_for_best_training_results.md b/docs/yolov5/tutorials/tips_for_best_training_results.md index cc2dc2d..b2488f7 100644 --- a/docs/yolov5/tutorials/tips_for_best_training_results.md +++ b/docs/yolov5/tutorials/tips_for_best_training_results.md @@ -4,7 +4,7 @@ description: Our comprehensive guide provides insights on how to train your YOLO keywords: Ultralytics, YOLOv5, Training guide, dataset preparation, model selection, training settings, mAP results, Machine Learning, Object Detection --- -📚 This guide explains how to produce the best mAP and training results with YOLOv5 🚀. +📚 This guide explains how to produce the best mAP and training results with YOLOv5 🚀. UPDATED 25 May 2022. Most of the time good results can be obtained with no changes to the models or training settings, **provided your dataset is sufficiently large and well labelled**. If at first you don't get good results, there are steps you might be able to take to improve, but we always recommend users **first train with all default settings** before considering any changes. This helps establish a performance baseline and spot areas for improvement. @@ -63,4 +63,4 @@ Before modifying anything, **first train with default settings to establish a pe If you'd like to know more, a good place to start is Karpathy's 'Recipe for Training Neural Networks', which has great ideas for training that apply broadly across all ML domains: [http://karpathy.github.io/2019/04/25/recipe/](http://karpathy.github.io/2019/04/25/recipe/) -Good luck 🍀 and let us know if you have any other questions! \ No newline at end of file +Good luck 🍀 and let us know if you have any other questions! diff --git a/docs/yolov5/tutorials/train_custom_data.md b/docs/yolov5/tutorials/train_custom_data.md index ee8ca2d..2f8a6ae 100644 --- a/docs/yolov5/tutorials/train_custom_data.md +++ b/docs/yolov5/tutorials/train_custom_data.md @@ -4,7 +4,7 @@ description: Learn how to train your data on custom datasets using YOLOv5. Simpl keywords: YOLOv5, train on custom dataset, image collection, model training, object detection, image labelling, Ultralytics, PyTorch, machine learning --- -📚 This guide explains how to train your own **custom dataset** with [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. +📚 This guide explains how to train your own **custom dataset** with [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. UPDATED 7 June 2023. ## Before You Start @@ -152,11 +152,11 @@ python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt !!! tip "Tip" - 💡 Add `--cache ram` or `--cache disk` to speed up training (requires significant RAM/disk resources). + 💡 Add `--cache ram` or `--cache disk` to speed up training (requires significant RAM/disk resources). !!! tip "Tip" - 💡 Always train from a local dataset. Mounted or network drives like Google Drive will be very slow. + 💡 Always train from a local dataset. Mounted or network drives like Google Drive will be very slow. All training results are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc. For more details see the Training section of our tutorial notebook. Open In Colab Open In Kaggle @@ -234,4 +234,4 @@ YOLOv5 is designed to be run in the following up-to-date verified environments ( YOLOv5 CI -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/docs/yolov5/tutorials/transfer_learning_with_frozen_layers.md b/docs/yolov5/tutorials/transfer_learning_with_frozen_layers.md index 12b73dd..7a3f752 100644 --- a/docs/yolov5/tutorials/transfer_learning_with_frozen_layers.md +++ b/docs/yolov5/tutorials/transfer_learning_with_frozen_layers.md @@ -4,7 +4,7 @@ description: Learn to freeze YOLOv5 layers for efficient transfer learning. Opti keywords: YOLOv5, freeze layers, transfer learning, model retraining, Ultralytics --- -📚 This guide explains how to **freeze** YOLOv5 🚀 layers when **transfer learning**. Transfer learning is a useful way to quickly retrain a model on new data without having to retrain the entire network. Instead, part of the initial weights are frozen in place, and the rest of the weights are used to compute loss and are updated by the optimizer. This requires less resources than normal training and allows for faster training times, though it may also result in reductions to final trained accuracy. +📚 This guide explains how to **freeze** YOLOv5 🚀 layers when **transfer learning**. Transfer learning is a useful way to quickly retrain a model on new data without having to retrain the entire network. Instead, part of the initial weights are frozen in place, and the rest of the weights are used to compute loss and are updated by the optimizer. This requires less resources than normal training and allows for faster training times, though it may also result in reductions to final trained accuracy. UPDATED 25 September 2022. ## Before You Start @@ -22,13 +22,13 @@ pip install -r requirements.txt # install All layers that match the train.py `freeze` list in train.py will be frozen by setting their gradients to zero before training starts. ```python - # Freeze - freeze = [f'model.{x}.' for x in range(freeze)] # layers to freeze - for k, v in model.named_parameters(): - v.requires_grad = True # train all layers - if any(x in k for x in freeze): - print(f'freezing {k}') - v.requires_grad = False + # Freeze + freeze = [f'model.{x}.' 
for x in range(freeze)] # layers to freeze + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + if any(x in k for x in freeze): + print(f'freezing {k}') + v.requires_grad = False ``` To see a list of module names: @@ -60,43 +60,43 @@ model.24.m.2.bias Looking at the model architecture we can see that the model backbone is layers 0-9: ```yaml -# YOLOv5 backbone - backbone: - # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, BottleneckCSP, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, BottleneckCSP, [1024, False]], # 9 - ] - - # YOLOv5 head - head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, BottleneckCSP, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] +# YOLOv5 backbone + backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, BottleneckCSP, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, BottleneckCSP, [1024, False]], # 9 + ] + + # YOLOv5 head + head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, BottleneckCSP, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] ``` so we can define the freeze list to contain all modules with 'model.0.' - 'model.9.' in their names: @@ -152,4 +152,4 @@ YOLOv5 is designed to be run in the following up-to-date verified environments ( YOLOv5 CI -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. \ No newline at end of file +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py index b512491..617e12e 100644 --- a/ultralytics/__init__.py +++ b/ultralytics/__init__.py @@ -1,13 +1,14 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -__version__ = '8.0.140' +__version__ = '8.0.141' from ultralytics.engine.model import YOLO from ultralytics.hub import start from ultralytics.models import RTDETR, SAM from ultralytics.models.fastsam import FastSAM from ultralytics.models.nas import NAS +from ultralytics.utils import SETTINGS as settings from ultralytics.utils.checks import check_yolo as checks from ultralytics.utils.downloads import download -__all__ = '__version__', 'YOLO', 'NAS', 'SAM', 'FastSAM', 'RTDETR', 'checks', 'download', 'start' # allow simpler import +__all__ = '__version__', 'YOLO', 'NAS', 'SAM', 'FastSAM', 'RTDETR', 'checks', 'download', 'start', 'settings' # allow simpler import diff --git a/ultralytics/cfg/__init__.py b/ultralytics/cfg/__init__.py index 65aae08..05797e7 100644 --- a/ultralytics/cfg/__init__.py +++ b/ultralytics/cfg/__init__.py @@ -9,9 +9,9 @@ from pathlib import Path from types import SimpleNamespace from typing import Dict, List, Union -from ultralytics.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_PATH, LOGGER, ROOT, USER_CONFIG_DIR, - IterableSimpleNamespace, __version__, checks, colorstr, deprecation_warn, get_settings, - yaml_load, yaml_print) +from ultralytics.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_PATH, LOGGER, ROOT, SETTINGS, SETTINGS_YAML, + IterableSimpleNamespace, __version__, checks, colorstr, deprecation_warn, yaml_load, + yaml_print) # Define valid tasks and modes MODES = 'train', 'val', 'predict', 'export', 'track', 'benchmark' @@ -28,7 +28,6 @@ TASK2METRIC = { 'classify': 'metrics/accuracy_top1', 'pose': 'metrics/mAP50-95(P)'} - CLI_HELP_MSG = \ f""" Arguments received: {str(['yolo'] + sys.argv[1:])}. 
Ultralytics 'yolo' commands use the following syntax: @@ -111,7 +110,7 @@ def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, ove # Merge overrides if overrides: overrides = cfg2dict(overrides) - check_cfg_mismatch(cfg, overrides) + check_dict_alignment(cfg, overrides) cfg = {**cfg, **overrides} # merge cfg and overrides dicts (prefer overrides) # Special handling for numeric project/name @@ -147,9 +146,7 @@ def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, ove def _handle_deprecation(custom): - """ - Hardcoded function to handle deprecated config keys - """ + """Hardcoded function to handle deprecated config keys""" for key in custom.copy().keys(): if key == 'hide_labels': @@ -165,7 +162,7 @@ def _handle_deprecation(custom): return custom -def check_cfg_mismatch(base: Dict, custom: Dict, e=None): +def check_dict_alignment(base: Dict, custom: Dict, e=None): """ This function checks for any mismatched keys between a custom configuration list and a base configuration list. If any mismatched keys are found, the function prints out similar keys from the base list and exits the program. @@ -175,13 +172,13 @@ def check_cfg_mismatch(base: Dict, custom: Dict, e=None): base (dict): a dictionary of base configuration options """ custom = _handle_deprecation(custom) - base, custom = (set(x.keys()) for x in (base, custom)) - mismatched = [x for x in custom if x not in base] + base_keys, custom_keys = (set(x.keys()) for x in (base, custom)) + mismatched = [k for k in custom_keys if k not in base_keys] if mismatched: string = '' for x in mismatched: - matches = get_close_matches(x, base) # key list - matches = [f'{k}={DEFAULT_CFG_DICT[k]}' if DEFAULT_CFG_DICT.get(k) is not None else k for k in matches] + matches = get_close_matches(x, base_keys) # key list + matches = [f'{k}={base[k]}' if base.get(k) is not None else k for k in matches] match_str = f'Similar arguments are i.e. {matches}.' if matches else '' string += f"'{colorstr('red', 'bold', x)}' is not a valid YOLO argument. 
{match_str}\n" raise SyntaxError(string + CLI_HELP_MSG) from e @@ -251,12 +248,39 @@ def handle_yolo_settings(args: List[str]) -> None: Example: python my_script.py yolo settings reset """ - path = USER_CONFIG_DIR / 'settings.yaml' # get SETTINGS YAML file path - if any(args) and args[0] == 'reset': - path.unlink() # delete the settings file - get_settings() # create new settings - LOGGER.info('Settings reset successfully') # inform the user that settings have been reset - yaml_print(path) # print the current settings + if any(args): + if args[0] == 'reset': + SETTINGS_YAML.unlink() # delete the settings file + SETTINGS.reset() # create new settings + LOGGER.info('Settings reset successfully') # inform the user that settings have been reset + else: + new = dict(parse_key_value_pair(a) for a in args) + check_dict_alignment(SETTINGS, new) + SETTINGS.update(new) + + yaml_print(SETTINGS_YAML) # print the current settings + + +def parse_key_value_pair(pair): + """Parse one 'key=value' pair and return key and value.""" + re.sub(r' *= *', '=', pair) # remove spaces around equals sign + k, v = pair.split('=', 1) # split on first '=' sign + assert v, f"missing '{k}' value" + return k, smart_value(v) + + +def smart_value(v): + """Convert a string to an underlying type such as int, float, bool, etc.""" + if v.lower() == 'none': + return None + elif v.lower() == 'true': + return True + elif v.lower() == 'false': + return False + else: + with contextlib.suppress(Exception): + return eval(v) + return v def entrypoint(debug=''): @@ -305,25 +329,14 @@ def entrypoint(debug=''): a = a[:-1] if '=' in a: try: - re.sub(r' *= *', '=', a) # remove spaces around equals sign - k, v = a.split('=', 1) # split on first '=' sign - assert v, f"missing '{k}' value" + k, v = parse_key_value_pair(a) if k == 'cfg': # custom.yaml passed LOGGER.info(f'Overriding {DEFAULT_CFG_PATH} with {v}') overrides = {k: val for k, val in yaml_load(checks.check_yaml(v)).items() if k != 'cfg'} else: - if v.lower() == 'none': - v = None - elif v.lower() == 'true': - v = True - elif v.lower() == 'false': - v = False - else: - with contextlib.suppress(Exception): - v = eval(v) overrides[k] = v except (NameError, SyntaxError, ValueError, AssertionError) as e: - check_cfg_mismatch(full_args_dict, {a: ''}, e) + check_dict_alignment(full_args_dict, {a: ''}, e) elif a in TASKS: overrides['task'] = a @@ -338,13 +351,13 @@ def entrypoint(debug=''): raise SyntaxError(f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign " f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}") else: - check_cfg_mismatch(full_args_dict, {a: ''}) + check_dict_alignment(full_args_dict, {a: ''}) # Check keys - check_cfg_mismatch(full_args_dict, overrides) + check_dict_alignment(full_args_dict, overrides) # Mode - mode = overrides.get('mode', None) + mode = overrides.get('mode') if mode is None: mode = DEFAULT_CFG.mode or 'predict' LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. 
diff --git a/ultralytics/hub/auth.py b/ultralytics/hub/auth.py
index d8d728f..4da163f 100644
--- a/ultralytics/hub/auth.py
+++ b/ultralytics/hub/auth.py
@@ -3,7 +3,7 @@
 import requests
 
 from ultralytics.hub.utils import HUB_API_ROOT, PREFIX, request_with_credentials
-from ultralytics.utils import LOGGER, SETTINGS, emojis, is_colab, set_settings
+from ultralytics.utils import LOGGER, SETTINGS, emojis, is_colab
 
 API_KEY_URL = 'https://hub.ultralytics.com/settings?tab=api+keys'
 
@@ -45,7 +45,7 @@ class Auth:
 
         # Update SETTINGS with the new API key after successful authentication
         if success:
-            set_settings({'api_key': self.api_key})
+            SETTINGS.update({'api_key': self.api_key})
             # Log that the new login was successful
             if verbose:
                 LOGGER.info(f'{PREFIX}New authentication successful ✅')
diff --git a/ultralytics/utils/__init__.py b/ultralytics/utils/__init__.py
index 44559a6..1f391c8 100644
--- a/ultralytics/utils/__init__.py
+++ b/ultralytics/utils/__init__.py
@@ -713,62 +713,105 @@ def set_sentry():
                 logging.getLogger(logger).setLevel(logging.CRITICAL)
 
 
-def get_settings(file=SETTINGS_YAML, version='0.0.3'):
+def update_dict_recursive(d, u):
     """
-    Loads a global Ultralytics settings YAML file or creates one with default values if it does not exist.
+    Recursively updates the dictionary `d` with the key-value pairs from the dictionary `u` without overwriting
+    entire sub-dictionaries. Note that function recursion is intended and not a problem, as this allows for updating
+    nested dictionaries at any arbitrary depth.
 
     Args:
-        file (Path): Path to the Ultralytics settings YAML file. Defaults to 'settings.yaml' in the USER_CONFIG_DIR.
-        version (str): Settings version. If min settings version not met, new default settings will be saved.
+        d (dict): The dictionary to be updated.
+        u (dict): The dictionary to update `d` with.
 
     Returns:
-        (dict): Dictionary of settings key-value pairs.
+        (dict): The recursively updated dictionary.
     """
-    import hashlib
+    for k, v in u.items():
+        d[k] = update_dict_recursive(d.get(k, {}), v) if isinstance(v, dict) else v
+    return d
 
-    from ultralytics.utils.checks import check_version
-    from ultralytics.utils.torch_utils import torch_distributed_zero_first
 
-    git_dir = get_git_dir()
-    root = git_dir or Path()
-    datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve()
-    defaults = {
-        'datasets_dir': str(datasets_root / 'datasets'),  # default datasets directory.
-        'weights_dir': str(root / 'weights'),  # default weights directory.
-        'runs_dir': str(root / 'runs'),  # default runs directory.
-        'uuid': hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(),  # SHA-256 anonymized UUID hash
-        'sync': True,  # sync analytics to help with YOLO development
-        'api_key': '',  # Ultralytics HUB API key (https://hub.ultralytics.com/)
-        'settings_version': version}  # Ultralytics settings version
-
-    with torch_distributed_zero_first(RANK):
-        if not file.exists():
-            yaml_save(file, defaults)
-
-        settings = yaml_load(file)
-
-        # Check that settings keys and types match defaults
-        correct = \
-            settings \
-            and settings.keys() == defaults.keys() \
-            and all(type(a) == type(b) for a, b in zip(settings.values(), defaults.values())) \
-            and check_version(settings['settings_version'], version)
-        if not correct:
-            LOGGER.warning('WARNING ⚠️ Ultralytics settings reset to defaults. This is normal and may be due to a '
-                           'recent ultralytics package update, but may have overwritten previous settings. '
' - f"\nView and update settings with 'yolo settings' or at '{file}'") - settings = defaults # merge **defaults with **settings (prefer **settings) - yaml_save(file, settings) # save updated defaults - - return settings +class SettingsManager(dict): + """ + Manages Ultralytics settings stored in a YAML file. + Args: + file (str | Path): Path to the Ultralytics settings YAML file. Default is USER_CONFIG_DIR / 'settings.yaml'. + version (str): Settings version. In case of local version mismatch, new default settings will be saved. + """ + + def __init__(self, file=SETTINGS_YAML, version='0.0.4'): + import copy + import hashlib + + from ultralytics.utils.checks import check_version + from ultralytics.utils.torch_utils import torch_distributed_zero_first + + git_dir = get_git_dir() + root = git_dir or Path() + datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve() + + self.file = Path(file) + self.version = version + self.defaults = { + 'settings_version': version, + 'datasets_dir': str(datasets_root / 'datasets'), + 'weights_dir': str(root / 'weights'), + 'runs_dir': str(root / 'runs'), + 'uuid': hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), + 'sync': True, + 'api_key': '', + 'clearml': True, # integrations + 'comet': True, + 'dvc': True, + 'hub': True, + 'mlflow': True, + 'neptune': True, + 'raytune': True, + 'tensorboard': True, + 'wandb': True} + + super().__init__(copy.deepcopy(self.defaults)) + + with torch_distributed_zero_first(RANK): + if not self.file.exists(): + self.save() + + self.load() + correct_keys = self.keys() == self.defaults.keys() + correct_types = all(type(a) == type(b) for a, b in zip(self.values(), self.defaults.values())) + correct_version = check_version(self['settings_version'], self.version) + if not (correct_keys and correct_types and correct_version): + LOGGER.warning( + 'WARNING ⚠️ Ultralytics settings reset to default values. This may be due to a possible problem ' + 'with your settings or a recent ultralytics package update. ' + f"\nView settings with 'yolo settings' or at '{self.file}'" + "\nUpdate settings with 'yolo settings key=value', i.e. 'yolo settings runs_dir=path/to/dir'.") + self.reset() + + def load(self): + """Loads settings from the YAML file.""" + self.update(yaml_load(self.file)) + + def save(self): + """Saves the current settings to the YAML file.""" + yaml_save(self.file, dict(self)) + + def update(self, *args, **kwargs): + """Updates a setting value in the current settings and saves the settings.""" + new = dict(*args, **kwargs) + if any(isinstance(v, dict) for v in new.values()): + update_dict_recursive(self, new) + else: + # super().update(*args, **kwargs) + super().update(new) + self.save() -def set_settings(kwargs, file=SETTINGS_YAML): - """ - Function that runs on a first-time ultralytics package installation to set up global settings and create necessary - directories. 
- """ - SETTINGS.update(kwargs) - yaml_save(file, SETTINGS) + def reset(self): + """Resets the settings to default and saves them.""" + self.clear() + self.update(self.defaults) + self.save() def deprecation_warn(arg, new_arg, version=None): @@ -794,7 +837,7 @@ def url2file(url): # Check first-install steps PREFIX = colorstr('Ultralytics: ') -SETTINGS = get_settings() +SETTINGS = SettingsManager() # initialize settings DATASETS_DIR = Path(SETTINGS['datasets_dir']) # global datasets directory ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \ 'Docker' if is_docker() else platform.system() diff --git a/ultralytics/utils/callbacks/clearml.py b/ultralytics/utils/callbacks/clearml.py index a5a01d5..b13af40 100644 --- a/ultralytics/utils/callbacks/clearml.py +++ b/ultralytics/utils/callbacks/clearml.py @@ -5,7 +5,7 @@ import re import matplotlib.image as mpimg import matplotlib.pyplot as plt -from ultralytics.utils import LOGGER, TESTS_RUNNING +from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING from ultralytics.utils.torch_utils import model_info_for_loggers try: @@ -16,6 +16,7 @@ try: assert hasattr(clearml, '__version__') # verify package is not directory assert not TESTS_RUNNING # do not log pytest + assert SETTINGS['clearml'] is True # verify integration is enabled except (ImportError, AssertionError): clearml = None diff --git a/ultralytics/utils/callbacks/comet.py b/ultralytics/utils/callbacks/comet.py index 0366442..a6f6c40 100644 --- a/ultralytics/utils/callbacks/comet.py +++ b/ultralytics/utils/callbacks/comet.py @@ -3,7 +3,7 @@ import os from pathlib import Path -from ultralytics.utils import LOGGER, RANK, TESTS_RUNNING, ops +from ultralytics.utils import LOGGER, RANK, SETTINGS, TESTS_RUNNING, ops from ultralytics.utils.torch_utils import model_info_for_loggers try: @@ -11,6 +11,7 @@ try: assert not TESTS_RUNNING # do not log pytest assert hasattr(comet_ml, '__version__') # verify package is not directory + assert SETTINGS['comet'] is True # verify integration is enabled except (ImportError, AssertionError): comet_ml = None diff --git a/ultralytics/utils/callbacks/dvc.py b/ultralytics/utils/callbacks/dvc.py index 16042d1..c81c492 100644 --- a/ultralytics/utils/callbacks/dvc.py +++ b/ultralytics/utils/callbacks/dvc.py @@ -3,7 +3,7 @@ import os import pkg_resources as pkg -from ultralytics.utils import LOGGER, TESTS_RUNNING +from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING from ultralytics.utils.torch_utils import model_info_for_loggers try: @@ -12,6 +12,7 @@ try: import dvclive assert not TESTS_RUNNING # do not log pytest + assert SETTINGS['dvc'] is True # verify integration is enabled ver = version('dvclive') if pkg.parse_version(ver) < pkg.parse_version('2.11.0'): diff --git a/ultralytics/utils/callbacks/hub.py b/ultralytics/utils/callbacks/hub.py index edb8078..3215e45 100644 --- a/ultralytics/utils/callbacks/hub.py +++ b/ultralytics/utils/callbacks/hub.py @@ -4,7 +4,7 @@ import json from time import time from ultralytics.hub.utils import PREFIX, events -from ultralytics.utils import LOGGER +from ultralytics.utils import LOGGER, SETTINGS from ultralytics.utils.torch_utils import model_info_for_loggers @@ -84,4 +84,4 @@ callbacks = { 'on_train_start': on_train_start, 'on_val_start': on_val_start, 'on_predict_start': on_predict_start, - 'on_export_start': on_export_start} + 'on_export_start': on_export_start} if SETTINGS['hub'] is True else {} # verify enabled diff --git a/ultralytics/utils/callbacks/mlflow.py 
diff --git a/ultralytics/utils/callbacks/mlflow.py b/ultralytics/utils/callbacks/mlflow.py
index 76bc0c1..70ddb73 100644
--- a/ultralytics/utils/callbacks/mlflow.py
+++ b/ultralytics/utils/callbacks/mlflow.py
@@ -4,13 +4,14 @@ import os
 import re
 from pathlib import Path
 
-from ultralytics.utils import LOGGER, TESTS_RUNNING, colorstr
+from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr
 
 try:
     import mlflow
 
     assert not TESTS_RUNNING  # do not log pytest
     assert hasattr(mlflow, '__version__')  # verify package is not directory
+    assert SETTINGS['mlflow'] is True  # verify integration is enabled
 except (ImportError, AssertionError):
     mlflow = None
diff --git a/ultralytics/utils/callbacks/neptune.py b/ultralytics/utils/callbacks/neptune.py
index da5ccf4..f72a63b 100644
--- a/ultralytics/utils/callbacks/neptune.py
+++ b/ultralytics/utils/callbacks/neptune.py
@@ -3,7 +3,7 @@
 import matplotlib.image as mpimg
 import matplotlib.pyplot as plt
 
-from ultralytics.utils import LOGGER, TESTS_RUNNING
+from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING
 from ultralytics.utils.torch_utils import model_info_for_loggers
 
 try:
@@ -12,6 +12,7 @@ try:
 
     assert not TESTS_RUNNING  # do not log pytest
     assert hasattr(neptune, '__version__')
+    assert SETTINGS['neptune'] is True  # verify integration is enabled
 except (ImportError, AssertionError):
     neptune = None
diff --git a/ultralytics/utils/callbacks/raytune.py b/ultralytics/utils/callbacks/raytune.py
index 1f53225..0ca1d2b 100644
--- a/ultralytics/utils/callbacks/raytune.py
+++ b/ultralytics/utils/callbacks/raytune.py
@@ -1,9 +1,13 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
+from ultralytics.utils import SETTINGS
+
 try:
     import ray
     from ray import tune
     from ray.air import session
+
+    assert SETTINGS['raytune'] is True  # verify integration is enabled
 except (ImportError, AssertionError):
     tune = None
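# One consolidated view (not part of the patch) of the guarded-import pattern
# repeated in each callback module above; 'somelogger' is a placeholder, not a
# real package or settings key. A failed import or any failed assert nulls the
# module handle, and the rest of the callback file registers hooks only when
# the handle is truthy.
from ultralytics.utils import SETTINGS, TESTS_RUNNING

try:
    import somelogger  # placeholder for clearml, comet_ml, mlflow, neptune, wb, ...
    assert not TESTS_RUNNING  # do not log pytest runs
    assert SETTINGS['somelogger'] is True  # a disabled toggle raises AssertionError here
except (ImportError, AssertionError):
    somelogger = None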
diff --git a/ultralytics/utils/callbacks/tensorboard.py b/ultralytics/utils/callbacks/tensorboard.py
index bd2300b..696a1b4 100644
--- a/ultralytics/utils/callbacks/tensorboard.py
+++ b/ultralytics/utils/callbacks/tensorboard.py
@@ -1,11 +1,12 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-from ultralytics.utils import LOGGER, TESTS_RUNNING, colorstr
+from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr
 
 try:
     from torch.utils.tensorboard import SummaryWriter
 
     assert not TESTS_RUNNING  # do not log pytest
+    assert SETTINGS['tensorboard'] is True  # verify integration is enabled
     # TypeError for handling 'Descriptors cannot not be created directly.' protobuf errors in Windows
 except (ImportError, AssertionError, TypeError):
diff --git a/ultralytics/utils/callbacks/wb.py b/ultralytics/utils/callbacks/wb.py
index 605a228..bff080f 100644
--- a/ultralytics/utils/callbacks/wb.py
+++ b/ultralytics/utils/callbacks/wb.py
@@ -1,5 +1,5 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-from ultralytics.utils import TESTS_RUNNING
+from ultralytics.utils import SETTINGS, TESTS_RUNNING
 from ultralytics.utils.torch_utils import model_info_for_loggers
 
 try:
@@ -7,6 +7,7 @@ try:
 
     assert hasattr(wb, '__version__')
     assert not TESTS_RUNNING  # do not log pytest
+    assert SETTINGS['wandb'] is True  # verify integration is enabled
 except (ImportError, AssertionError):
     wb = None
 
@@ -16,7 +17,7 @@ _processed_plots = {}
 
 def _log_plots(plots, step):
     for name, params in plots.items():
         timestamp = params['timestamp']
-        if _processed_plots.get(name, None) != timestamp:
+        if _processed_plots.get(name) != timestamp:
             wb.run.log({name.stem: wb.Image(str(name))}, step=step)
             _processed_plots[name] = timestamp
diff --git a/ultralytics/utils/files.py b/ultralytics/utils/files.py
index 90c8d28..d6fe7b1 100644
--- a/ultralytics/utils/files.py
+++ b/ultralytics/utils/files.py
@@ -38,7 +38,7 @@ def spaces_in_path(path):
         path (str | Path): The original path.
 
     Yields:
-        Path: Temporary path with spaces replaced by underscores if spaces were present, otherwise the original path.
+        (Path): Temporary path with spaces replaced by underscores if spaces were present, otherwise the original path.
 
     Examples:
         with spaces_in_path('/path/with spaces') as new_path: