From 0a36b83e7addd5e21438d32a614bbdf3f330e6d9 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 26 Apr 2023 17:00:08 +0200
Subject: [PATCH] `ultralytics 8.0.88` Pose TFLite model fix (#2261)

---
 docs/index.md             |  2 +-
 mkdocs.yml                |  2 +-
 ultralytics/__init__.py   |  2 +-
 ultralytics/hub/utils.py  |  6 ++++--
 ultralytics/nn/modules.py | 23 +++++++++++++++--------
 5 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/docs/index.md b/docs/index.md
index bd0c542..ca2b3c2 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -31,6 +31,6 @@ Explore the YOLOv8 Docs, a comprehensive resource designed to help you understan
 - [YOLOv3](https://pjreddie.com/media/files/papers/YOLOv3.pdf), launched in 2018, further enhanced the model's performance using a more efficient backbone network, multiple anchors and spatial pyramid pooling.
 - [YOLOv4](https://arxiv.org/abs/2004.10934) was released in 2020, introducing innovations like Mosaic data augmentation, a new anchor-free detection head, and a new loss function.
 - [YOLOv5](https://github.com/ultralytics/yolov5) further improved the model's performance and added new features such as hyperparameter optimization, integrated experiment tracking and automatic export to popular export formats.
-- [YOLOv6](https://github.com/meituan/YOLOv6) was open-sourced by [Meituan](https://about.meituan.com/en) in 2022 and is in use in many of the company's autonomous delivery robots.
+- [YOLOv6](https://github.com/meituan/YOLOv6) was open-sourced by [Meituan](https://about.meituan.com/) in 2022 and is in use in many of the company's autonomous delivery robots.
 - [YOLOv7](https://github.com/WongKinYiu/yolov7) added additional tasks such as pose estimation on the COCO keypoints dataset.
 - [YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of YOLO by Ultralytics. As a cutting-edge, state-of-the-art (SOTA) model, YOLOv8 builds on the success of previous versions, introducing new features and improvements for enhanced performance, flexibility, and efficiency. YOLOv8 supports a full range of vision AI tasks, including [detection](tasks/detect.md), [segmentation](tasks/segment.md), [pose estimation](tasks/pose.md), [tracking](modes/track.md), and [classification](tasks/classify.md). This versatility allows users to leverage YOLOv8's capabilities across diverse applications and domains.
diff --git a/mkdocs.yml b/mkdocs.yml
index 615a0be..990f007 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -127,7 +127,7 @@ plugins:
 # Primary navigation
 nav:
   - Home:
-    - index.md
+    - Home: index.md
   - Quickstart: quickstart.md
   - Modes:
     - modes/index.md
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index 4455e26..e2e0eff 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = '8.0.87'
+__version__ = '8.0.88'

 from ultralytics.hub import start
 from ultralytics.yolo.engine.model import YOLO
diff --git a/ultralytics/hub/utils.py b/ultralytics/hub/utils.py
index e2492ee..26f560d 100644
--- a/ultralytics/hub/utils.py
+++ b/ultralytics/hub/utils.py
@@ -161,7 +161,7 @@ class Events:
         Initializes the Events object with default values for events, rate_limit, and metadata.
""" self.events = [] # events list - self.rate_limit = 10.0 # rate limit (seconds) + self.rate_limit = 60.0 # rate limit (seconds) self.t = 0.0 # rate limit timer (seconds) self.metadata = { 'cli': Path(sys.argv[0]).name == 'yolo', @@ -204,7 +204,9 @@ class Events: # Time is over rate limiter, send now data = {'client_id': SETTINGS['uuid'], 'events': self.events} # SHA-256 anonymized UUID hash and events list - smart_request('post', self.url, json=data, retry=0, code=3) # equivalent to requests.post(self.url, json=data) + + # POST equivalent to requests.post(self.url, json=data) + smart_request('post', self.url, json=data, retry=0, verbose=False) # Reset events and rate limit timer self.events = [] diff --git a/ultralytics/nn/modules.py b/ultralytics/nn/modules.py index 4c198dc..d913f60 100644 --- a/ultralytics/nn/modules.py +++ b/ultralytics/nn/modules.py @@ -541,18 +541,25 @@ class Pose(Detect): x = self.detect(self, x) if self.training: return x, kpt - pred_kpt = self.kpts_decode(kpt) + pred_kpt = self.kpts_decode(bs, kpt) return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt)) - def kpts_decode(self, kpts): + def kpts_decode(self, bs, kpts): """Decodes keypoints.""" ndim = self.kpt_shape[1] - y = kpts.clone() - if ndim == 3: - y[:, 2::3].sigmoid_() # inplace sigmoid - y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides - y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides - return y + if self.export: # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug + y = kpts.view(bs, *self.kpt_shape, -1) + a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides + if ndim == 3: + a = torch.cat((a, y[:, :, 1:2].sigmoid()), 2) + return a.view(bs, self.nk, -1) + else: + y = kpts.clone() + if ndim == 3: + y[:, 2::3].sigmoid_() # inplace sigmoid + y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides + y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides + return y class Classify(nn.Module):