Docs URL fixes and YAML updates (#1383)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Huijae Lee <46982469+ZeroAct@users.noreply.github.com>
This commit is contained in:
Glenn Jocher
2023-03-13 00:40:11 +01:00
committed by GitHub
parent f921e1ac21
commit 9e58c32c15
32 changed files with 87 additions and 93 deletions

View File

@@ -2,7 +2,7 @@
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI
# Example usage: yolo train data=Argoverse.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── Argoverse ← downloads here (31.3 GB)

View File

@@ -2,7 +2,7 @@
# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan
# Example usage: yolo train data=GlobalWheat2020.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── GlobalWheat2020 ← downloads here (7.0 GB)

View File

@@ -3,7 +3,7 @@
# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels
# Example usage: yolo train task=classify data=imagenet
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── imagenet ← downloads here (144 GB)

View File

@@ -2,7 +2,7 @@
# Objects365 dataset https://www.objects365.org/ by Megvii
# Example usage: yolo train data=Objects365.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── Objects365 ← downloads here (712 GB = 367G data + 345G zips)

View File

@@ -2,7 +2,7 @@
# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
# Example usage: yolo train data=SKU-110K.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── SKU-110K ← downloads here (13.6 GB)

View File

@@ -2,7 +2,7 @@
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
# Example usage: yolo train data=VOC.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── VOC ← downloads here (2.8 GB)

View File

@@ -2,7 +2,7 @@
# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University
# Example usage: yolo train data=VisDrone.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── VisDrone ← downloads here (2.3 GB)

View File

@@ -2,7 +2,7 @@
# COCO 2017 dataset http://cocodataset.org by Microsoft
# Example usage: yolo train data=coco.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── coco ← downloads here (20.1 GB)

View File

@@ -2,7 +2,7 @@
# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: yolo train data=coco128.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── coco128-seg ← downloads here (7 MB)

View File

@@ -2,7 +2,7 @@
# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
# Example usage: yolo train data=coco128.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── coco128 ← downloads here (7 MB)

View File

@@ -2,7 +2,7 @@
# COCO8-seg dataset (first 8 images from COCO train2017) by Ultralytics
# Example usage: yolo train data=coco8-seg.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── coco8-seg ← downloads here (1 MB)

View File

@@ -2,7 +2,7 @@
# COCO8 dataset (first 8 images from COCO train2017) by Ultralytics
# Example usage: yolo train data=coco8.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── coco8 ← downloads here (1 MB)

View File

@@ -3,7 +3,7 @@
# -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! --------
# Example usage: yolo train data=xView.yaml
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── xView ← downloads here (20.7 GB)

View File

@@ -24,7 +24,7 @@ yolo task=detect mode=train model=yolov8n.yaml data=coco128.yaml epochs=100
```
They may also be used directly in a Python environment, and accepts the same
-[arguments](https://docs.ultralytics.com/cfg/) as in the CLI example above:
+[arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above:
```python
from ultralytics import YOLO

View File

@@ -3,7 +3,7 @@
# Download COCO 2017 dataset http://cocodataset.org
# Example usage: bash data/scripts/get_coco.sh
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── coco ← downloads here

View File

@@ -3,7 +3,7 @@
# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
# Example usage: bash data/scripts/get_coco128.sh
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── coco128 ← downloads here

View File

@@ -3,7 +3,7 @@
# Download ILSVRC2012 ImageNet dataset https://image-net.org
# Example usage: bash data/scripts/get_imagenet.sh
# parent
-# ├── yolov5
+# ├── ultralytics
# └── datasets
# └── imagenet ← downloads here

View File

@@ -2,7 +2,7 @@
"""
Ultralytics Results, Boxes and Masks classes for handling inference results
-Usage: See https://docs.ultralytics.com/predict/
+Usage: See https://docs.ultralytics.com/modes/predict/
"""
import pprint

View File

@@ -63,15 +63,14 @@ class DetectionPredictor(BasePredictor):
# write
for d in reversed(det):
-cls, conf = d.cls.squeeze(), d.conf.squeeze()
+cls, conf, id = d.cls.squeeze(), d.conf.squeeze(), None if d.id is None else int(d.id.item())
if self.args.save_txt:  # Write to file
-line = (cls, *(d.xywhn.view(-1).tolist()), conf) \
-if self.args.save_conf else (cls, *(d.xywhn.view(-1).tolist()))  # label format
+line = (cls, *d.xywhn.view(-1)) + (conf, ) * self.args.save_conf + (() if id is None else (id, ))
with open(f'{self.txt_path}.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if self.args.save or self.args.save_crop or self.args.show:  # Add bbox to image
c = int(cls)  # integer class
-name = f'id:{int(d.id.item())} {self.model.names[c]}' if d.id is not None else self.model.names[c]
+name = ('' if id is None else f'id:{id} ') + self.model.names[c]
label = None if self.args.hide_labels else (name if self.args.hide_conf else f'{name} {conf:.2f}')
self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
if self.args.save_crop:

View File

@@ -76,17 +76,15 @@ class SegmentationPredictor(DetectionPredictor):
# Write results
for j, d in enumerate(reversed(det)):
-cls, conf = d.cls.squeeze(), d.conf.squeeze()
+cls, conf, id = d.cls.squeeze(), d.conf.squeeze(), None if d.id is None else int(d.id.item())
if self.args.save_txt:  # Write to file
-seg = mask.segments[len(det) - j - 1].copy()  # reversed mask.segments
-seg = seg.reshape(-1)  # (n,2) to (n*2)
-line = (cls, *seg, conf) if self.args.save_conf else (cls, *seg)  # label format
+seg = mask.segments[len(det) - j - 1].copy().reshape(-1)  # reversed mask.segments, (n,2) to (n*2)
+line = (cls, *seg) + (conf, ) * self.args.save_conf + (() if id is None else (id, ))
with open(f'{self.txt_path}.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if self.args.save or self.args.save_crop or self.args.show:  # Add bbox to image
c = int(cls)  # integer class
-name = f'id:{int(d.id.item())} {self.model.names[c]}' if d.id is not None else self.model.names[c]
+name = ('' if id is None else f'id:{id} ') + self.model.names[c]
label = None if self.args.hide_labels else (name if self.args.hide_conf else f'{name} {conf:.2f}')
self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True)) if self.args.boxes else None
if self.args.save_crop: