From c340f84ce9325de720fbd9ada6523a28fc432651 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 12 Jun 2023 20:14:46 +0200
Subject: [PATCH] `ultralytics 8.0.117` NAS export, classify and tasks banner URL fixes (#3145)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 README.md                           |  4 ++--
 README.zh-CN.md                     |  4 ++--
 docs/tasks/index.md                 |  2 +-
 examples/tutorial.ipynb             |  2 +-
 mkdocs.yml                          |  2 +-
 ultralytics/__init__.py             |  2 +-
 ultralytics/yolo/engine/exporter.py |  6 ++++--
 ultralytics/yolo/nas/model.py       | 12 ++++++++++--
 8 files changed, 22 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index ccf8aa3..5c3c0ea 100644
--- a/README.md
+++ b/README.md
@@ -102,9 +102,9 @@ path = model.export(format="onnx")  # export the model to ONNX format

 ## Models

-YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect), [Segment](https://docs.ultralytics.com/tasks/segment) and [Pose](https://docs.ultralytics.com/tasks/pose) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/modes/classify) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet) dataset. [Track](https://docs.ultralytics.com/modes/track) mode is available for all Detect, Segment and Pose models.
+YOLOv8 [Detect](https://docs.ultralytics.com/tasks/detect), [Segment](https://docs.ultralytics.com/tasks/segment) and [Pose](https://docs.ultralytics.com/tasks/pose) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco) dataset are available here, as well as YOLOv8 [Classify](https://docs.ultralytics.com/tasks/classify) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet) dataset. [Track](https://docs.ultralytics.com/modes/track) mode is available for all Detect, Segment and Pose models.

-
+

 All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.
diff --git a/README.zh-CN.md b/README.zh-CN.md
index 3cde911..e401710 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -102,9 +102,9 @@ success = model.export(format="onnx")  # 将模型导出为 ONNX 格式

 ## 模型

-在[COCO](https://docs.ultralytics.com/datasets/detect/coco)数据集上预训练的YOLOv8 [检测](https://docs.ultralytics.com/tasks/detect),[分割](https://docs.ultralytics.com/tasks/segment)和[姿态](https://docs.ultralytics.com/tasks/pose)模型可以在这里找到,以及在[ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet)数据集上预训练的YOLOv8 [分类](https://docs.ultralytics.com/modes/classify)模型。所有的检测,分割和姿态模型都支持[追踪](https://docs.ultralytics.com/modes/track)模式。
+在[COCO](https://docs.ultralytics.com/datasets/detect/coco)数据集上预训练的YOLOv8 [检测](https://docs.ultralytics.com/tasks/detect),[分割](https://docs.ultralytics.com/tasks/segment)和[姿态](https://docs.ultralytics.com/tasks/pose)模型可以在这里找到,以及在[ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet)数据集上预训练的YOLOv8 [分类](https://docs.ultralytics.com/tasks/classify)模型。所有的检测,分割和姿态模型都支持[追踪](https://docs.ultralytics.com/modes/track)模式。

-
+

 所有[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models)在首次使用时会自动从最新的Ultralytics [发布版本](https://github.com/ultralytics/assets/releases)下载。
diff --git a/docs/tasks/index.md b/docs/tasks/index.md
index 2534431..982bb62 100644
--- a/docs/tasks/index.md
+++ b/docs/tasks/index.md
@@ -10,7 +10,7 @@ perform [detection](detect.md), [segmentation](segment.md), [classification](cla
 and [pose](pose.md) estimation. Each of these tasks has a different objective and use case.

-
+

 ## [Detection](detect.md)
diff --git a/examples/tutorial.ipynb b/examples/tutorial.ipynb
index 9842bf7..50a6dac 100644
--- a/examples/tutorial.ipynb
+++ b/examples/tutorial.ipynb
@@ -548,7 +548,7 @@
        "\n",
        "YOLOv8 can train, val, predict and export models for the most common tasks in vision AI: [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/), [Classify](https://docs.ultralytics.com/tasks/classify/) and [Pose](https://docs.ultralytics.com/tasks/pose/). See [YOLOv8 Tasks Docs](https://docs.ultralytics.com/tasks/) for more information.\n",
        "\n",
-        "\n"
+        "\n"
      ],
      "metadata": {
        "id": "Phm9ccmOKye5"
      }
    },
diff --git a/mkdocs.yml b/mkdocs.yml
index c1f2049..e18ed6b 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -17,7 +17,7 @@ theme:
   icon:
     repo: fontawesome/brands/github
   font:
-    text: Roboto
+    text: Helvetica
     code: Roboto Mono

   palette:
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index 95febf1..94be8f8 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = '8.0.116'
+__version__ = '8.0.117'

 from ultralytics.hub import start
 from ultralytics.vit.rtdetr import RTDETR
diff --git a/ultralytics/yolo/engine/exporter.py b/ultralytics/yolo/engine/exporter.py
index d63c22c..8017a2f 100644
--- a/ultralytics/yolo/engine/exporter.py
+++ b/ultralytics/yolo/engine/exporter.py
@@ -172,7 +172,8 @@ class Exporter:

         # Input
         im = torch.zeros(self.args.batch, 3, *self.imgsz).to(self.device)
-        file = Path(getattr(model, 'pt_path', None) or getattr(model, 'yaml_file', None) or model.yaml['yaml_file'])
+        file = Path(
+            getattr(model, 'pt_path', None) or getattr(model, 'yaml_file', None) or model.yaml.get('yaml_file', ''))
         if file.suffix == '.yaml':
             file = Path(file.name)
@@ -207,7 +208,8 @@ class Exporter:
         self.im = im
         self.model = model
         self.file = file
-        self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else tuple(tuple(x.shape) for x in y)
+        self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else \
+            tuple(tuple(x.shape if isinstance(x, torch.Tensor) else []) for x in y)
         self.pretty_name = Path(self.model.yaml.get('yaml_file', self.file)).stem.replace('yolo', 'YOLO')
         trained_on = f'trained on {Path(self.args.data).name}' if self.args.data else '(untrained)'
         description = f'Ultralytics {self.pretty_name} model {trained_on}'
diff --git a/ultralytics/yolo/nas/model.py b/ultralytics/yolo/nas/model.py
index f375dac..bfe7dcd 100644
--- a/ultralytics/yolo/nas/model.py
+++ b/ultralytics/yolo/nas/model.py
@@ -1,6 +1,12 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """
-# NAS model interface
+YOLO-NAS model interface.
+
+Usage - Predict:
+    from ultralytics import NAS
+
+    model = NAS('yolo_nas_s')
+    results = model.predict('ultralytics/assets/bus.jpg')
 """

 from pathlib import Path
@@ -33,11 +39,13 @@ class NAS:
         self.model.args = DEFAULT_CFG_DICT  # attach args to model

         # Standardize model
-        self.model.fuse = lambda verbose: self.model
+        self.model.fuse = lambda verbose=True: self.model
         self.model.stride = torch.tensor([32])
         self.model.names = dict(enumerate(self.model._class_names))
         self.model.is_fused = lambda: False  # for info()
         self.model.yaml = {}  # for info()
+        self.model.pt_path = model  # for export()
+        self.model.task = 'detect'  # for export()
         self.info()

     @smart_inference_mode()
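
Usage sketch (illustrative, assuming the `yolo_nas_s` weights and the NAS predict/export interface referenced in the diff above): the `pt_path` and `task` attributes added in ultralytics/yolo/nas/model.py, together with the exporter's new `yaml.get('yaml_file', '')` fallback and tensor-safe output-shape handling, are what allow a YOLO-NAS model to pass through the standard Ultralytics export path.

    from ultralytics import NAS

    # Load a pretrained YOLO-NAS-s model (weight name taken from the new docstring above)
    model = NAS('yolo_nas_s')

    # Inference as shown in the new module docstring
    results = model.predict('ultralytics/assets/bus.jpg')

    # Export to ONNX; the pt_path/task attributes set in NAS.__init__ and the
    # exporter fallbacks in this patch are what make this step work for NAS models
    path = model.export(format='onnx')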