`ultralytics 8.0.54` TFLite export improvements and fixes (#1447)

Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Branch: single_channel
Authored by Glenn Jocher 2 years ago, committed by GitHub
parent 30fc4b537f
commit 701fba4770

@@ -79,7 +79,7 @@ pip install ultralytics
YOLOv8 may be used directly in the Command Line Interface (CLI) with a `yolo` command:
```bash
-yolo predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg"
+yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'
```
`yolo` can be used for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See the YOLOv8

@@ -71,7 +71,7 @@ pip install ultralytics
YOLOv8 can be used directly in the Command Line Interface (CLI) with the `yolo` command:
```bash
-yolo predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg"
+yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'
```
`yolo` can be used for a variety of tasks and modes and accepts additional arguments, e.g. `imgsz=640`. See the YOLOv8 [documentation](https://docs.ultralytics.com)

@@ -22,11 +22,11 @@ export arguments.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom trained
+model = YOLO('yolov8n.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom trained

# Export the model
-model.export(format="onnx")
+model.export(format='onnx')
```
=== "CLI"

@@ -26,9 +26,9 @@ Use a trained YOLOv8n/YOLOv8n-seg model to run tracker on video streams.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official detection model
-model = YOLO("yolov8n-seg.pt")  # load an official segmentation model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n.pt')  # load an official detection model
+model = YOLO('yolov8n-seg.pt')  # load an official segmentation model
+model = YOLO('path/to/best.pt')  # load a custom model

# Track with the model
results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True)
@@ -60,7 +60,7 @@ to [predict page](https://docs.ultralytics.com/modes/predict/).
```python
from ultralytics import YOLO

-model = YOLO("yolov8n.pt")
+model = YOLO('yolov8n.pt')
results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5, show=True)
```
=== "CLI"
@@ -82,7 +82,7 @@ any configurations(expect the `tracker_type`) you need to.
```python
from ultralytics import YOLO

-model = YOLO("yolov8n.pt")
+model = YOLO('yolov8n.pt')
results = model.track(source="https://youtu.be/Zgi9g1ksQHc", tracker='custom_tracker.yaml')
```
=== "CLI"

@@ -21,16 +21,24 @@ training arguments.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.yaml")  # build a new model from scratch
-model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
+model = YOLO('yolov8n.yaml')  # build a new model from YAML
+model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)
+model = YOLO('yolov8n.yaml').load('yolov8n.pt')  # build from YAML and transfer weights

# Train the model
-model.train(data="coco128.yaml", epochs=100, imgsz=640)
+model.train(data='coco128.yaml', epochs=100, imgsz=640)
```
=== "CLI"

```bash
+# Build a new model from YAML and start training from scratch
+yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640
+
+# Start training from a pretrained *.pt model
yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640
+
+# Build a new model from YAML, transfer pretrained weights to it and start training
+yolo detect train data=coco128.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640
```

## Arguments

@@ -21,8 +21,8 @@ training `data` and arguments as model attributes. See Arguments section below f
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom model

# Validate the model
metrics = model.val()  # no arguments needed, dataset and settings remembered

@@ -60,14 +60,14 @@ classification into their Python projects using YOLOv8.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.yaml")  # build a new model from scratch
-model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
+model = YOLO('yolov8n.yaml')  # build a new model from scratch
+model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)

# Use the model
-results = model.train(data="coco128.yaml", epochs=3)  # train the model
+results = model.train(data='coco128.yaml', epochs=3)  # train the model
results = model.val()  # evaluate model performance on the validation set
-results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
-success = model.export(format="onnx")  # export the model to ONNX format
+results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image
+success = model.export(format='onnx')  # export the model to ONNX format
```

[Python Guide](usage/python.md){.md-button .md-button--primary}

@@ -26,11 +26,11 @@ see the [Configuration](../usage/cfg.md) page.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n-cls.yaml")  # build a new model from scratch
-model = YOLO("yolov8n-cls.pt")  # load a pretrained model (recommended for training)
+model = YOLO('yolov8n-cls.yaml')  # build a new model from scratch
+model = YOLO('yolov8n-cls.pt')  # load a pretrained model (recommended for training)

# Train the model
-model.train(data="mnist160", epochs=100, imgsz=64)
+model.train(data='mnist160', epochs=100, imgsz=64)
```
=== "CLI"
@@ -51,8 +51,8 @@ it's training `data` and arguments as model attributes.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n-cls.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n-cls.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom model

# Validate the model
metrics = model.val()  # no arguments needed, dataset and settings remembered
@@ -78,17 +78,17 @@ Use a trained YOLOv8n-cls model to run predictions on images.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n-cls.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n-cls.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom model

# Predict with the model
-results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
+results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image
```
=== "CLI"
```bash
-yolo classify predict model=yolov8n-cls.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
-yolo classify predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
+yolo classify predict model=yolov8n-cls.pt source='https://ultralytics.com/images/bus.jpg'  # predict with official model
+yolo classify predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg'  # predict with custom model
```
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/modes/predict/) page.
@@ -105,11 +105,11 @@ Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n-cls.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom trained
+model = YOLO('yolov8n-cls.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom trained

# Export the model
-model.export(format="onnx")
+model.export(format='onnx')
```
=== "CLI"

@@ -26,11 +26,11 @@ the [Configuration](../usage/cfg.md) page.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.yaml")  # build a new model from scratch
-model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
+model = YOLO('yolov8n.yaml')  # build a new model from scratch
+model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)

# Train the model
-model.train(data="coco128.yaml", epochs=100, imgsz=640)
+model.train(data='coco128.yaml', epochs=100, imgsz=640)
```
=== "CLI"
@@ -51,8 +51,8 @@ training `data` and arguments as model attributes.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom model

# Validate the model
metrics = model.val()  # no arguments needed, dataset and settings remembered
@@ -80,17 +80,17 @@ Use a trained YOLOv8n model to run predictions on images.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom model

# Predict with the model
-results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
+results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image
```
=== "CLI"
```bash
-yolo detect predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
-yolo detect predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
+yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'  # predict with official model
+yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg'  # predict with custom model
```
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/modes/predict/) page.
@@ -107,11 +107,11 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom trained
+model = YOLO('yolov8n.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom trained

# Export the model
-model.export(format="onnx")
+model.export(format='onnx')
```
=== "CLI"

@@ -28,11 +28,11 @@ train an OpenPose model on a custom dataset, see the OpenPose Training page.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.yaml")  # build a new model from scratch
-model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
+model = YOLO('yolov8n.yaml')  # build a new model from scratch
+model = YOLO('yolov8n.pt')  # load a pretrained model (recommended for training)

# Train the model
-model.train(data="coco128.yaml", epochs=100, imgsz=640)
+model.train(data='coco128.yaml', epochs=100, imgsz=640)
```
=== "CLI"
@@ -53,8 +53,8 @@ training `data` and arguments as model attributes.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom model

# Validate the model
metrics = model.val()  # no arguments needed, dataset and settings remembered
@@ -82,17 +82,17 @@ Use a trained YOLOv8n model to run predictions on images.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom model

# Predict with the model
-results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
+results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image
```
=== "CLI"
```bash
-yolo detect predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
-yolo detect predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
+yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'  # predict with official model
+yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg'  # predict with custom model
```
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/modes/predict/) page.
@@ -109,11 +109,11 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom trained
+model = YOLO('yolov8n.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom trained

# Export the model
-model.export(format="onnx")
+model.export(format='onnx')
```
=== "CLI"

@@ -26,11 +26,11 @@ arguments see the [Configuration](../usage/cfg.md) page.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n-seg.yaml")  # build a new model from scratch
-model = YOLO("yolov8n-seg.pt")  # load a pretrained model (recommended for training)
+model = YOLO('yolov8n-seg.yaml')  # build a new model from scratch
+model = YOLO('yolov8n-seg.pt')  # load a pretrained model (recommended for training)

# Train the model
-model.train(data="coco128-seg.yaml", epochs=100, imgsz=640)
+model.train(data='coco128-seg.yaml', epochs=100, imgsz=640)
```
=== "CLI"
@@ -51,8 +51,8 @@ retains it's training `data` and arguments as model attributes.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n-seg.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n-seg.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom model

# Validate the model
metrics = model.val()  # no arguments needed, dataset and settings remembered
@@ -84,17 +84,17 @@ Use a trained YOLOv8n-seg model to run predictions on images.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n-seg.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom model
+model = YOLO('yolov8n-seg.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom model

# Predict with the model
-results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
+results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image
```
=== "CLI"
```bash
-yolo segment predict model=yolov8n-seg.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
-yolo segment predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
+yolo segment predict model=yolov8n-seg.pt source='https://ultralytics.com/images/bus.jpg'  # predict with official model
+yolo segment predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg'  # predict with custom model
```
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/modes/predict/) page.
@@ -111,11 +111,11 @@ Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc.
from ultralytics import YOLO

# Load a model
-model = YOLO("yolov8n-seg.pt")  # load an official model
-model = YOLO("path/to/best.pt")  # load a custom trained
+model = YOLO('yolov8n-seg.pt')  # load an official model
+model = YOLO('path/to/best.pt')  # load a custom trained

# Export the model
-model.export(format="onnx")
+model.export(format='onnx')
```
=== "CLI"

@@ -17,7 +17,7 @@ def on_predict_batch_end(predictor):
    im0s = im0s if isinstance(im0s, list) else [im0s]
    predictor.results = zip(predictor.results, im0s)

-model = YOLO(f"yolov8n.pt")
+model = YOLO(f'yolov8n.pt')
model.add_callback("on_predict_batch_end", on_predict_batch_end)

for (result, frame) in model.track/predict():
    pass

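The `model.track/predict()` line in the snippet above is a docs placeholder meaning "either `model.track(...)` or `model.predict(...)`". A runnable sketch of the same callback pattern is below; the source URL, `stream=True`, and the assumption that `predictor.batch` carries the original frames at index 2 are illustrative choices, not taken from this diff.

```python
from ultralytics import YOLO

def on_predict_batch_end(predictor):
    # pair each Results object with its original frame, as in the docs snippet above
    im0s = predictor.batch[2]  # assumption: original images sit at this position in the batch tuple
    im0s = im0s if isinstance(im0s, list) else [im0s]
    predictor.results = zip(predictor.results, im0s)

model = YOLO('yolov8n.pt')
model.add_callback('on_predict_batch_end', on_predict_batch_end)

# stream=True makes predict() a generator, so the loop receives (result, frame) pairs
for result, frame in model.predict(source='https://ultralytics.com/images/bus.jpg', stream=True):
    pass  # post-process each pair here
```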
@@ -59,8 +59,8 @@ Use a trained YOLOv8n model to run predictions on images.
!!! example ""

```bash
-yolo detect predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
-yolo detect predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
+yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg'  # predict with official model
+yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg'  # predict with custom model
```

## Export

@@ -6,7 +6,7 @@ The simplest way of simply using YOLOv8 directly in a Python environment.
```python
from ultralytics import YOLO

-model = YOLO("yolov8n.pt")  # pass any model type
+model = YOLO('yolov8n.pt')  # pass any model type
model.train(epochs=5)
```
@@ -14,8 +14,8 @@ The simplest way of simply using YOLOv8 directly in a Python environment.
```python
from ultralytics import YOLO

-model = YOLO("yolov8n.yaml")
-model.train(data="coco128.yaml", epochs=5)
+model = YOLO('yolov8n.yaml')
+model.train(data='coco128.yaml', epochs=5)
```
=== "Resume"
@@ -31,8 +31,8 @@ The simplest way of simply using YOLOv8 directly in a Python environment.
```python
from ultralytics import YOLO

-model = YOLO("yolov8n.yaml")
-model.train(data="coco128.yaml", epochs=5)
+model = YOLO('yolov8n.yaml')
+model.train(data='coco128.yaml', epochs=5)
model.val()  # It'll automatically evaluate the data you trained.
```
@@ -44,7 +44,7 @@ The simplest way of simply using YOLOv8 directly in a Python environment.
# It'll use the data yaml file in model.pt if you don't set data.
model.val()
# or you can set the data you want to val
-model.val(data="coco128.yaml")
+model.val(data='coco128.yaml')
```

!!! example "Predict"

@@ -3,7 +3,7 @@
# Base ----------------------------------------
matplotlib>=3.2.2
-numpy>=1.18.5
+numpy>=1.21.6
opencv-python>=4.6.0
Pillow>=7.1.2
PyYAML>=5.3.1

@@ -207,9 +207,9 @@ def test_predict_callback_and_setup():
def test_result():
    model = YOLO('yolov8n-seg.pt')
    res = model([SOURCE, SOURCE])
-    res[0].cpu().numpy()
    res[0].plot(show_conf=False)
-    print(res[0].path)
+    res[0] = res[0].cpu().numpy()
+    print(res[0].path, res[0].masks.masks)

    model = YOLO('yolov8n.pt')
    res = model(SOURCE)

@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, GPL-3.0 license

-__version__ = '8.0.53'
+__version__ = '8.0.54'

from ultralytics.yolo.engine.model import YOLO
from ultralytics.yolo.utils.checks import check_yolo as checks

@@ -411,12 +411,12 @@ class Detect(nn.Module):
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape

-        if self.export and self.format == 'edgetpu':  # FlexSplitV ops issue
-            x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
+        x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
+        if self.export and self.format in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs'):  # avoid TF FlexSplitV ops
            box = x_cat[:, :self.reg_max * 4]
            cls = x_cat[:, self.reg_max * 4:]
        else:
-            box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).split((self.reg_max * 4, self.nc), 1)
+            box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
        y = torch.cat((dbox, cls.sigmoid()), 1)
        return y if self.export else (y, x)

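The hunk above moves the `torch.cat` out of the branch and, for all TensorFlow-family export formats, slices the concatenated tensor instead of calling `split()`, since the converter turns `split()` into a FlexSplitV op that plain TFLite/EdgeTPU runtimes cannot run. A small self-contained sketch of the two equivalent decode paths, using dummy shapes rather than the real Detect feature maps:

```python
import torch

reg_max, nc = 16, 80
x_cat = torch.randn(1, reg_max * 4 + nc, 8400)  # (batch, no, anchors), stand-in for the concatenated head output

# TF export path: plain slicing, which converts without FlexSplitV ops
box_tf = x_cat[:, :reg_max * 4]
cls_tf = x_cat[:, reg_max * 4:]

# Default path: split() gives the same tensors and a cleaner ONNX graph
box, cls = x_cat.split((reg_max * 4, nc), 1)

assert torch.equal(box, box_tf) and torch.equal(cls, cls_tf)
```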
@@ -11,8 +11,8 @@ import torch.nn as nn
from ultralytics.nn.modules import (C1, C2, C3, C3TR, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, Classify,
                                    Concat, Conv, ConvTranspose, Detect, DWConv, DWConvTranspose2d, Ensemble, Focus,
                                    GhostBottleneck, GhostConv, Segment)
-from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, colorstr, emojis, yaml_load
-from ultralytics.yolo.utils.checks import check_requirements, check_yaml
+from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, colorstr, emojis, yaml_load
+from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_yaml
from ultralytics.yolo.utils.torch_utils import (fuse_conv_and_bn, fuse_deconv_and_bn, initialize_weights,
                                                intersect_dicts, make_divisible, model_info, scale_img, time_sync)
@@ -151,15 +151,19 @@ class BaseModel(nn.Module):
            m.strides = fn(m.strides)
        return self

-    def load(self, weights):
-        """
-        This function loads the weights of the model from a file
+    def load(self, weights, verbose=True):
+        """Load the weights into the model.

        Args:
-            weights (str): The weights to load into the model.
+            weights (dict) or (torch.nn.Module): The pre-trained weights to be loaded.
+            verbose (bool, optional): Whether to log the transfer progress. Defaults to True.
        """
-        # Force all tasks to implement this function
-        raise NotImplementedError('This function needs to be implemented by derived classes!')
+        model = weights['model'] if isinstance(weights, dict) else weights  # torchvision models are not dicts
+        csd = model.float().state_dict()  # checkpoint state_dict as FP32
+        csd = intersect_dicts(csd, self.state_dict())  # intersect
+        self.load_state_dict(csd, strict=False)  # load
+        if verbose:
+            LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights')


class DetectionModel(BaseModel):
@@ -234,13 +238,6 @@ class DetectionModel(BaseModel):
            y[-1] = y[-1][..., i:]  # small
        return y

-    def load(self, weights, verbose=True):
-        csd = weights.float().state_dict()  # checkpoint state_dict as FP32
-        csd = intersect_dicts(csd, self.state_dict())  # intersect
-        self.load_state_dict(csd, strict=False)  # load
-        if verbose and RANK == -1:
-            LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights')


class SegmentationModel(DetectionModel):
    # YOLOv8 segmentation model
@@ -293,12 +290,6 @@ class ClassificationModel(BaseModel):
            self.names = {i: f'{i}' for i in range(self.yaml['nc'])}  # default names dict
        self.info()

-    def load(self, weights):
-        model = weights['model'] if isinstance(weights, dict) else weights  # torchvision models are not dicts
-        csd = model.float().state_dict()
-        csd = intersect_dicts(csd, self.state_dict())  # intersect
-        self.load_state_dict(csd, strict=False)  # load

    @staticmethod
    def reshape_outputs(model, nc):
        # Update a TorchVision classification model to class count 'n' if required
@@ -338,6 +329,7 @@ def torch_safe_load(weight):
    """
    from ultralytics.yolo.utils.downloads import attempt_download_asset

+    check_suffix(file=weight, suffix='.pt')
    file = attempt_download_asset(weight)  # search online if missing locally
    try:
        return torch.load(file, map_location='cpu'), file  # load

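The per-task `load()` overrides are consolidated into a single `BaseModel.load()` that intersects two state dicts on matching names and shapes and then loads with `strict=False`. A standalone sketch of that idea using a hypothetical helper (`intersect_state_dicts` here is illustrative, not the library's own `intersect_dicts`):

```python
import torch

def intersect_state_dicts(da, db):
    # keep only entries whose names exist in both dicts and whose tensor shapes match
    return {k: v for k, v in da.items() if k in db and v.shape == db[k].shape}

# assumed toy example: transfer weights between two identically shaped modules
src = torch.nn.Linear(10, 5)
dst = torch.nn.Linear(10, 5)

csd = intersect_state_dicts(src.state_dict(), dst.state_dict())
dst.load_state_dict(csd, strict=False)  # strict=False tolerates missing or extra keys
print(f'Transferred {len(csd)}/{len(dst.state_dict())} items from pretrained weights')
```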
@@ -54,11 +54,10 @@ CFG_FRACTION_KEYS = ('dropout', 'iou', 'lr0', 'lrf', 'momentum', 'weight_decay',
                     'fliplr', 'mosaic', 'mixup', 'copy_paste', 'conf', 'iou')  # fractional floats limited to 0.0 - 1.0
CFG_INT_KEYS = ('epochs', 'patience', 'batch', 'workers', 'seed', 'close_mosaic', 'mask_ratio', 'max_det', 'vid_stride',
                'line_thickness', 'workspace', 'nbs', 'save_period')
-CFG_BOOL_KEYS = ('save', 'exist_ok', 'pretrained', 'verbose', 'deterministic', 'single_cls', 'image_weights', 'rect',
-                 'cos_lr', 'overlap_mask', 'val', 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show',
-                 'save_txt', 'save_conf', 'save_crop', 'hide_labels', 'hide_conf', 'visualize', 'augment',
-                 'agnostic_nms', 'retina_masks', 'boxes', 'keras', 'optimize', 'int8', 'dynamic', 'simplify', 'nms',
-                 'v5loader')
+CFG_BOOL_KEYS = ('save', 'exist_ok', 'verbose', 'deterministic', 'single_cls', 'image_weights', 'rect', 'cos_lr',
+                 'overlap_mask', 'val', 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show', 'save_txt',
+                 'save_conf', 'save_crop', 'hide_labels', 'hide_conf', 'visualize', 'augment', 'agnostic_nms',
+                 'retina_masks', 'boxes', 'keras', 'optimize', 'int8', 'dynamic', 'simplify', 'nms', 'v5loader')

# Define valid tasks and modes
MODES = 'train', 'val', 'predict', 'export', 'track', 'benchmark'
@@ -290,6 +289,8 @@ def entrypoint(debug=''):
        from ultralytics.yolo.engine.model import YOLO
        overrides['model'] = model
        model = YOLO(model, task=task)
+    if isinstance(overrides.get('pretrained'), str):
+        model.load(overrides['pretrained'])

    # Task Update
    if task != model.task:

@@ -188,7 +188,7 @@ class Exporter:
                m.dynamic = self.args.dynamic
                m.export = True
                m.format = self.args.format
-            elif isinstance(m, C2f) and not edgetpu:
+            elif isinstance(m, C2f) and not any((saved_model, pb, tflite, edgetpu, tfjs)):
                # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
                m.forward = m.forward_split

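For context, a hedged usage sketch of the TFLite export path this PR improves. The exact output file name depends on the export settings, so the name used below is an assumption; the diff itself notes that exported formats support the predict and val modes.

```python
from ultralytics import YOLO

# Export a detection model to TFLite; with this PR the TF-family formats
# (saved_model, pb, tflite, edgetpu, tfjs) keep the concatenated Detect head
# and the default C2f forward, avoiding FlexSplitV ops in the converted graph.
model = YOLO('yolov8n.pt')
model.export(format='tflite')

# The exported file can then be loaded back for prediction
# (file name below is hypothetical and depends on export settings).
tflite_model = YOLO('yolov8n_float32.tflite')
results = tflite_model('https://ultralytics.com/images/bus.jpg')
```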
@@ -8,8 +8,8 @@ from ultralytics.nn.tasks import (ClassificationModel, DetectionModel, Segmentat
                                  guess_model_task, nn)
from ultralytics.yolo.cfg import get_cfg
from ultralytics.yolo.engine.exporter import Exporter
-from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, ONLINE, RANK, ROOT,
-                                    callbacks, is_git_dir, is_pip_package, yaml_load)
+from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, ROOT, callbacks,
+                                    is_git_dir, yaml_load)
from ultralytics.yolo.utils.checks import check_file, check_imgsz, check_pip_update_available, check_yaml
from ultralytics.yolo.utils.downloads import GITHUB_ASSET_STEMS
from ultralytics.yolo.utils.torch_utils import smart_inference_mode
@@ -153,16 +153,10 @@ class YOLO:
                f"'yolo export model=yolov8n.pt', but exported formats like ONNX, TensorRT etc. only "
                f"support 'predict' and 'val' modes, i.e. 'yolo predict model=yolov8n.onnx'.")

-    def _check_pip_update(self):
-        """
-        Inform user of ultralytics package update availability
-        """
-        if ONLINE and is_pip_package():
-            check_pip_update_available()
-
-    def reset(self):
+    @smart_inference_mode()
+    def reset_weights(self):
        """
-        Resets the model modules.
+        Resets the model modules parameters to randomly initialized values, losing all training information.
        """
        self._check_is_pytorch_model()
        for m in self.model.modules():
@@ -170,6 +164,18 @@ class YOLO:
                m.reset_parameters()
        for p in self.model.parameters():
            p.requires_grad = True
+        return self
+
+    @smart_inference_mode()
+    def load(self, weights='yolov8n.pt'):
+        """
+        Transfers parameters with matching names and shapes from 'weights' to model.
+        """
+        self._check_is_pytorch_model()
+        if isinstance(weights, (str, Path)):
+            weights, self.ckpt = attempt_load_one_weight(weights)
+        self.model.load(weights)
+        return self

    def info(self, verbose=False):
        """
@@ -299,7 +305,7 @@ class YOLO:
            **kwargs (Any): Any number of arguments representing the training configuration.
        """
        self._check_is_pytorch_model()
-        self._check_pip_update()
+        check_pip_update_available()
        overrides = self.overrides.copy()
        overrides.update(kwargs)
        if kwargs.get('cfg'):

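Putting the new `YOLO.load()` and the renamed `reset_weights()` together, usage matches the docs example earlier in this diff:

```python
from ultralytics import YOLO

# Build a model from YAML and transfer matching pretrained weights into it
model = YOLO('yolov8n.yaml').load('yolov8n.pt')

# Or reset an existing model back to randomly initialized weights
model = YOLO('yolov8n.pt')
model.reset_weights()
```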
@@ -48,7 +48,7 @@ class Results:
        self.probs = probs if probs is not None else None
        self.names = names
        self.path = path
-        self._keys = [k for k in ('boxes', 'masks', 'probs') if getattr(self, k) is not None]
+        self._keys = ('boxes', 'masks', 'probs')

    def pandas(self):
        pass
@@ -56,7 +56,7 @@ class Results:
    def __getitem__(self, idx):
        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
-        for k in self._keys:
+        for k in self.keys:
            setattr(r, k, getattr(self, k)[idx])
        return r
@@ -70,30 +70,30 @@ class Results:
    def cpu(self):
        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
-        for k in self._keys:
+        for k in self.keys:
            setattr(r, k, getattr(self, k).cpu())
        return r

    def numpy(self):
        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
-        for k in self._keys:
+        for k in self.keys:
            setattr(r, k, getattr(self, k).numpy())
        return r

    def cuda(self):
        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
-        for k in self._keys:
+        for k in self.keys:
            setattr(r, k, getattr(self, k).cuda())
        return r

    def to(self, *args, **kwargs):
        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
-        for k in self._keys:
+        for k in self.keys:
            setattr(r, k, getattr(self, k).to(*args, **kwargs))
        return r

    def __len__(self):
-        for k in self._keys:
+        for k in self.keys:
            return len(getattr(self, k))

    def __str__(self):
@@ -107,6 +107,10 @@ class Results:
        name = self.__class__.__name__
        raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")

+    @property
+    def keys(self):
+        return [k for k in self._keys if getattr(self, k) is not None]
+
    def plot(self, show_conf=True, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
        """
        Plots the detection results on an input RGB image. Accepts a numpy array (cv2) or a PIL Image.

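A short sketch of how the new `keys` property behaves, mirroring the updated `test_result()` earlier in this diff; the printed list is illustrative and depends on which fields the model populates:

```python
from ultralytics import YOLO

# Device/dtype conversions now only touch the fields that are actually set
# (boxes, masks, probs), thanks to the keys property above.
model = YOLO('yolov8n-seg.pt')
results = model('https://ultralytics.com/images/bus.jpg')

r = results[0].cpu().numpy()  # converts boxes and masks together
print(r.keys)                 # e.g. ['boxes', 'masks'] for a segmentation model
print(r.path, r.masks.masks)
```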
@@ -46,14 +46,14 @@ HELP_MSG = \
        from ultralytics import YOLO

        # Load a model
-        model = YOLO("yolov8n.yaml")  # build a new model from scratch
+        model = YOLO('yolov8n.yaml')  # build a new model from scratch
        model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)

        # Use the model
        results = model.train(data="coco128.yaml", epochs=3)  # train the model
        results = model.val()  # evaluate model performance on the validation set
-        results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
-        success = model.export(format="onnx")  # export the model to ONNX format
+        results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image
+        success = model.export(format='onnx')  # export the model to ONNX format

    3. Use the command line interface (CLI):

@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
"""
-AutoBatch utils
+Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch.
"""

from copy import deepcopy
@@ -13,18 +13,35 @@ from ultralytics.yolo.utils.torch_utils import profile

def check_train_batch_size(model, imgsz=640, amp=True):
-    # Check YOLOv5 training batch size
+    """
+    Check YOLO training batch size using the autobatch() function.
+
+    Args:
+        model (torch.nn.Module): YOLO model to check batch size for.
+        imgsz (int): Image size used for training.
+        amp (bool): If True, use automatic mixed precision (AMP) for training.
+
+    Returns:
+        int: Optimal batch size computed using the autobatch() function.
+    """
    with torch.cuda.amp.autocast(amp):
        return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size


-def autobatch(model, imgsz=640, fraction=0.7, batch_size=16):
-    # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory
-    # Usage:
-    #     import torch
-    #     from utils.autobatch import autobatch
-    #     model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
-    #     print(autobatch(model))
+def autobatch(model, imgsz=640, fraction=0.67, batch_size=16):
+    """
+    Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory.
+
+    Args:
+        model: YOLO model to compute batch size for.
+        imgsz (int, optional): The image size used as input for the YOLO model. Defaults to 640.
+        fraction (float, optional): The fraction of available CUDA memory to use. Defaults to 0.67.
+        batch_size (int, optional): The default batch size to use if an error is detected. Defaults to 16.
+
+    Returns:
+        int: The optimal batch size.
+    """
    # Check device
    prefix = colorstr('AutoBatch: ')

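A minimal usage sketch of the functions documented above, assuming a CUDA device is available and that the module lives at `ultralytics.yolo.utils.autobatch` (consistent with the imports shown in this hunk):

```python
from ultralytics import YOLO
from ultralytics.yolo.utils.autobatch import check_train_batch_size

# Estimate the largest batch size that fits in ~67% of free CUDA memory
model = YOLO('yolov8n.pt').model.cuda()  # underlying nn.Module
batch = check_train_batch_size(model, imgsz=640, amp=True)
print(f'Estimated optimal batch size: {batch}')
```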
@@ -1,5 +1,5 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
-from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
+from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING, colorstr

try:
    from torch.utils.tensorboard import SummaryWriter
@@ -18,9 +18,12 @@ def _log_scalars(scalars, step=0):

def on_pretrain_routine_start(trainer):
-    global writer
-    try:
+    if SummaryWriter:
+        try:
+            global writer
            writer = SummaryWriter(str(trainer.save_dir))
+            prefix = colorstr('TensorBoard: ')
+            LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
        except Exception as e:
            LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}')

@@ -20,8 +20,8 @@ import requests
import torch
from matplotlib import font_manager

-from ultralytics.yolo.utils import (AUTOINSTALL, LOGGER, ROOT, USER_CONFIG_DIR, TryExcept, colorstr, downloads, emojis,
-                                    is_colab, is_docker, is_jupyter, is_online)
+from ultralytics.yolo.utils import (AUTOINSTALL, LOGGER, ONLINE, ROOT, USER_CONFIG_DIR, TryExcept, colorstr, downloads,
+                                    emojis, is_colab, is_docker, is_jupyter, is_online, is_pip_package)


def is_ascii(s) -> bool:
@@ -141,6 +141,8 @@ def check_pip_update_available():
    Returns:
        bool: True if an update is available, False otherwise.
    """
+    if ONLINE and is_pip_package():
+        with contextlib.suppress(ConnectionError):
            from ultralytics import __version__
            latest = check_latest_pypi_version()
            if pkg.parse_version(__version__) < pkg.parse_version(latest):  # update is available
@@ -235,11 +237,11 @@ def check_suffix(file='yolov8n.pt', suffix='.pt', msg=''):
    # Check file(s) for acceptable suffix
    if file and suffix:
        if isinstance(suffix, str):
-            suffix = [suffix]
+            suffix = (suffix, )
        for f in file if isinstance(file, (list, tuple)) else [file]:
            s = Path(f).suffix.lower()  # file suffix
            if len(s):
-                assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}'
+                assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}, not {s}'


def check_yolov5u_filename(file: str, verbose: bool = True):

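A small sketch of the stricter suffix check that `torch_safe_load` now applies before downloading or loading a checkpoint; the printed message is paraphrased from the assertion text above:

```python
from ultralytics.yolo.utils.checks import check_suffix

check_suffix(file='yolov8n.pt', suffix='.pt')      # passes silently
try:
    check_suffix(file='yolov8n.onnx', suffix='.pt')
except AssertionError as e:
    print(e)  # e.g. "yolov8n.onnx acceptable suffix is ('.pt',), not .onnx"
```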
@@ -76,7 +76,7 @@ class DetectionPredictor(BasePredictor):
        if self.args.save_crop:
            save_one_box(d.xyxy,
                         imc,
-                         file=self.save_dir / 'crops' / self.model.model.names[c] / f'{self.data_path.stem}.jpg',
+                         file=self.save_dir / 'crops' / self.model.names[c] / f'{self.data_path.stem}.jpg',
                         BGR=True)
        return log_string

@@ -58,10 +58,9 @@ class DetectionTrainer(BaseTrainer):
        # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc

    def get_model(self, cfg=None, weights=None, verbose=True):
-        model = DetectionModel(cfg, ch=3, nc=self.data['nc'], verbose=verbose and RANK == -1)
+        model = DetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1)
        if weights:
            model.load(weights)
        return model

    def get_validator(self):

@@ -90,7 +90,7 @@ class SegmentationPredictor(DetectionPredictor):
        if self.args.save_crop:
            save_one_box(d.xyxy,
                         imc,
-                         file=self.save_dir / 'crops' / self.model.model.names[c] / f'{self.data_path.stem}.jpg',
+                         file=self.save_dir / 'crops' / self.model.names[c] / f'{self.data_path.stem}.jpg',
                         BGR=True)
        return log_string
