Omit `ultralytics/utils/callbacks` from coverage (#4345)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Glenn Jocher committed by GitHub
parent d47718c367
commit c940d29d4f

@@ -44,7 +44,7 @@ This example provides simple inference code for YOLO, SAM and RTDETR models. For
 model.info()

 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)

 # Run inference with the YOLOv8n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')

@@ -46,7 +46,7 @@ You can use RT-DETR for object detection tasks using the `ultralytics` pip packa
 model.info()

 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)

 # Run inference with the RT-DETR-l model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')

@@ -62,7 +62,7 @@ In this example we validate YOLO-NAS-s on the COCO8 dataset.
 model.info()

 # Validate the model on the COCO8 example dataset
-results model.val(data='coco8.yaml')
+results = model.val(data='coco8.yaml')

 # Run inference with the YOLO-NAS-s model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')

@@ -69,7 +69,7 @@ You can use YOLOv3 for object detection tasks using the Ultralytics repository.
 model.info()

 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)

 # Run inference with the YOLOv3n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')

@@ -74,7 +74,7 @@ You can use YOLOv5u for object detection tasks using the Ultralytics repository.
 model.info()

 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)

 # Run inference with the YOLOv5n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')

@@ -56,7 +56,7 @@ You can use YOLOv6 for object detection tasks using the Ultralytics pip package.
 model.info()

 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)

 # Run inference with the YOLOv6n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')

@@ -101,7 +101,7 @@ You can use YOLOv8 for object detection tasks using the Ultralytics pip package.
 model.info()

 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)

 # Run inference with the YOLOv8n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')

@@ -31,6 +31,14 @@ Ultralytics provides various installation methods including pip, conda, and Dock
         conda install -c conda-forge ultralytics
         ```

+    !!! note
+
+        If you are installing in a CUDA environment, best practice is to install `ultralytics`, `pytorch` and `pytorch-cuda` in the same command to allow the conda package manager to resolve any conflicts, or else to install `pytorch-cuda` last to allow it to override the CPU-specific `pytorch` package if necessary.
+        ```bash
+        # Install all packages together using conda
+        conda install -c conda-forge -c pytorch -c nvidia ultralytics pytorch torchvision pytorch-cuda=11.8
+        ```
+
 === "Git clone"
     Clone the `ultralytics` repository if you are interested in contributing to the development or wish to experiment with the latest source code. After cloning, navigate into the directory and install the package in editable mode `-e` using pip.
     ```bash

@@ -9,10 +9,6 @@ keywords: Ultralytics, YOLO, callbacks, logger, training, pretraining, machine l
 Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/dvc.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/dvc.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!

----
-## ::: ultralytics.utils.callbacks.dvc._logger_disabled
-<br><br>
-
 ---
 ## ::: ultralytics.utils.callbacks.dvc._log_images
 <br><br>

@@ -15,6 +15,15 @@ addopts =
     --doctest-modules
     --durations=25
     --color=yes
+    --cov=ultralytics/
+    --cov-report=xml
+    --no-cov-on-fail
+
+[coverage:run]
+source = ultralytics/
+data_file = tests/.coverage
+omit =
+    ultralytics/utils/callbacks/*

 [flake8]
 max-line-length = 120
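For context, the `--cov` flags above come from the `pytest-cov` plugin, which reads the new `[coverage:run]` section, including the `omit` pattern that excludes `ultralytics/utils/callbacks/*` per this commit's title. A rough sketch of what that configuration amounts to via the `coverage.py` API directly (illustrative only, not part of the commit):

```python
import coverage

# Roughly what pytest-cov does with the [coverage:run] settings above (sketch)
cov = coverage.Coverage(
    data_file='tests/.coverage',             # where raw coverage data is written
    source=['ultralytics'],                  # measure only the ultralytics package
    omit=['ultralytics/utils/callbacks/*'],  # skip callback integrations, per this commit
)
cov.start()
# ... run the test suite here ...
cov.stop()
cov.save()
cov.xml_report()  # equivalent of --cov-report=xml
```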

@@ -53,9 +53,9 @@ def test_predict(task, model, data):
 @pytest.mark.parametrize('task,model,data', TASK_ARGS)
 def test_predict_online(task, model, data):
     mode = 'track' if task in ('detect', 'segment', 'pose') else 'predict'  # mode for video inference
-    run(f'yolo predict model={WEIGHT_DIR / model}.pt source=https://ultralytics.com/images/bus.jpg imgsz=32')
-    run(f'yolo {mode} model={WEIGHT_DIR / model}.pt source=https://ultralytics.com/assets/decelera_landscape_min.mov imgsz=32'
-        )
+    model = WEIGHT_DIR / model
+    run(f'yolo predict model={model}.pt source=https://ultralytics.com/images/bus.jpg imgsz=32')
+    run(f'yolo {mode} model={model}.pt source=https://ultralytics.com/assets/decelera_landscape_min.mov imgsz=32')

     # Run Python YouTube tracking because CLI is broken. TODO: fix CLI YouTube
     # run(f'yolo {mode} model={model}.pt source=https://youtu.be/G17sBkb38XQ imgsz=32 tracker=bytetrack.yaml')
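The commented-out TODO above refers to running the tracker from Python instead of the broken CLI path. A minimal Python sketch of that equivalent call, with the source URL and tracker taken from the comment (illustrative, not part of this diff):

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')  # any detect/segment/pose checkpoint supports track mode
# Python equivalent of the commented-out CLI YouTube command above (sketch)
results = model.track(source='https://youtu.be/G17sBkb38XQ', imgsz=32, tracker='bytetrack.yaml')
```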
@@ -74,7 +74,7 @@ def test_rtdetr(task='detect', model='yolov8n-rtdetr.yaml', data='coco8.yaml'):
     run(f"yolo predict {task} model={model} source={ROOT / 'assets/bus.jpg'} imgsz=640 save save_crop save_txt")


-def test_fastsam(task='segment', model='FastSAM-s.pt', data='coco8-seg.yaml'):
+def test_fastsam(task='segment', model=WEIGHT_DIR / 'FastSAM-s.pt', data='coco8-seg.yaml'):
     source = ROOT / 'assets/bus.jpg'

     run(f'yolo segment val {task} model={model} data={data} imgsz=32')
@@ -84,10 +84,10 @@ def test_fastsam(task='segment', model='FastSAM-s.pt', data='coco8-seg.yaml'):
     from ultralytics.models.fastsam import FastSAMPrompt

     # Create a FastSAM model
-    model = FastSAM('FastSAM-s.pt')  # or FastSAM-x.pt
+    sam_model = FastSAM(model)  # or FastSAM-x.pt

     # Run inference on an image
-    everything_results = model(source, device='cpu', retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)
+    everything_results = sam_model(source, device='cpu', retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)

     # Everything prompt
     prompt_process = FastSAMPrompt(source, everything_results, device='cpu')
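For readers unfamiliar with the prompt API, the `prompt_process` object created at the end of this hunk is typically used with the prompt methods of `FastSAMPrompt`. A brief sketch based on the Ultralytics FastSAM usage of this era (illustrative, not part of this commit; box and point coordinates are arbitrary):

```python
# Sketch: typical use of the FastSAMPrompt object created above
ann = prompt_process.everything_prompt()                    # keep all segmented regions
ann = prompt_process.box_prompt(bbox=[200, 200, 300, 300])  # region matching a box (x1, y1, x2, y2)
ann = prompt_process.text_prompt(text='a photo of a dog')   # regions matching a text description
ann = prompt_process.point_prompt(points=[[200, 200]], pointlabel=[1])  # region at a point (1 = foreground)
prompt_process.plot(annotations=ann, output='./')           # save the annotated result
```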
@@ -110,13 +110,19 @@ def test_mobilesam():
     from ultralytics import SAM

     # Load the model
-    model = SAM('mobile_sam.pt')
+    model = SAM(WEIGHT_DIR / 'mobile_sam.pt')
+
+    # Source
+    source = ROOT / 'assets/zidane.jpg'

     # Predict a segment based on a point prompt
-    model.predict(ROOT / 'assets/zidane.jpg', points=[900, 370], labels=[1])
+    model.predict(source, points=[900, 370], labels=[1])

     # Predict a segment based on a box prompt
-    model.predict(ROOT / 'assets/zidane.jpg', bboxes=[439, 437, 524, 709])
+    model.predict(source, bboxes=[439, 437, 524, 709])
+
+    # Predict all
+    # model(source)


 # Slow Tests

@@ -212,8 +212,8 @@ def test_results():
     for r in results:
         r = r.cpu().numpy()
         r = r.to(device='cpu', dtype=torch.float32)
-        r.save_txt(txt_file='label.txt', save_conf=True)
-        r.save_crop(save_dir='crops/')
+        r.save_txt(txt_file='runs/tests/label.txt', save_conf=True)
+        r.save_crop(save_dir='runs/tests/crops/')
         r.tojson(normalize=True)
         r.plot(pil=True)
         r.plot(conf=True, boxes=True)

@@ -318,6 +318,7 @@ class Results(SimpleClass):
             texts.append(('%g ' * len(line)).rstrip() % line)

         if texts:
+            Path(txt_file).parent.mkdir(parents=True, exist_ok=True)  # make directory
             with open(txt_file, 'a') as f:
                 f.writelines(text + '\n' for text in texts)
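This one-line addition is what lets the updated test above write to `runs/tests/label.txt`: `open(..., 'a')` does not create missing parent directories, so without the `mkdir` call a nested path raises `FileNotFoundError`. A standalone sketch of the pattern (the label line is illustrative):

```python
from pathlib import Path

txt_file = 'runs/tests/label.txt'  # nested path whose directories may not exist yet
Path(txt_file).parent.mkdir(parents=True, exist_ok=True)  # create runs/tests/ if missing
with open(txt_file, 'a') as f:
    f.write('0 0.5 0.5 0.25 0.25\n')  # illustrative YOLO-format label line
```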

@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """
-Check a model's accuracy on a test or val split of a dataset
+Check a model's accuracy on a test or val split of a dataset.

 Usage:
     $ yolo mode=val model=yolov8n.pt data=coco128.yaml imgsz=640

@@ -12,16 +12,18 @@ class FastSAM(Model):
     """
     FastSAM model interface.

-    Usage - Predict:
+    Example:
+        ```python
         from ultralytics import FastSAM

         model = FastSAM('last.pt')
         results = model.predict('ultralytics/assets/bus.jpg')
+        ```
     """

     def __init__(self, model='FastSAM-x.pt'):
         """Call the __init__ method of the parent class (YOLO) with the updated default model"""
-        if model == 'FastSAM.pt':
+        if str(model) == 'FastSAM.pt':
             model = 'FastSAM-x.pt'
         assert Path(model).suffix not in ('.yaml', '.yml'), 'FastSAM models only support pre-trained models.'
         super().__init__(model=model, task='segment')
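The `str(model)` cast matters because `test_fastsam` above now passes a `pathlib.Path` rather than a string, and a `Path` never compares equal to a `str`. A quick sketch of the behavior:

```python
from pathlib import Path

Path('FastSAM.pt') == 'FastSAM.pt'       # False: a Path and a str never compare equal
str(Path('FastSAM.pt')) == 'FastSAM.pt'  # True: the cast restores the intended check
```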

@@ -2,11 +2,13 @@
 """
 YOLO-NAS model interface.

-Usage - Predict:
+Example:
+    ```python
     from ultralytics import NAS

     model = NAS('yolo_nas_s')
     results = model.predict('ultralytics/assets/bus.jpg')
+    ```
 """

 from pathlib import Path

@@ -3,6 +3,8 @@
 SAM model interface
 """

+from pathlib import Path
+
 from ultralytics.engine.model import Model
 from ultralytics.utils.torch_utils import model_info

@@ -16,9 +18,8 @@ class SAM(Model):
     """

     def __init__(self, model='sam_b.pt') -> None:
-        if model and not model.endswith('.pt') and not model.endswith('.pth'):
-            # Should raise AssertionError instead?
-            raise NotImplementedError('Segment anything prediction requires pre-trained checkpoint')
+        if model and Path(model).suffix not in ('.pt', '.pth'):
+            raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.')
         super().__init__(model=model, task='segment')

     def _load(self, weights: str, task=None):
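As with FastSAM above, the `Path(model).suffix` check accepts both `str` and `Path` inputs, whereas the old `model.endswith(...)` would raise `AttributeError` once `test_mobilesam` started passing `WEIGHT_DIR / 'mobile_sam.pt'`. A small sketch of the accepted and rejected cases:

```python
from pathlib import Path

Path('sam_b.pt').suffix           # '.pt'   -> accepted by SAM.__init__
Path('weights/sam_b.pth').suffix  # '.pth'  -> accepted
Path('sam_b.yaml').suffix         # '.yaml' -> raises NotImplementedError
# Note: Path objects have no endswith() method, so the old check broke on Path inputs
```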

@@ -36,7 +36,7 @@ def merge_matches(m1, m2, shape):


 def _indices_to_matches(cost_matrix, indices, thresh):
-    """_indices_to_matches: Return matched and unmatched indices given a cost matrix, indices, and a threshold."""
+    """Return matched and unmatched indices given a cost matrix, indices, and a threshold."""
     matched_cost = cost_matrix[tuple(zip(*indices))]
     matched_mask = (matched_cost <= thresh)
@@ -81,8 +81,12 @@ def ious(atlbrs, btlbrs):
     ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
     if ious.size == 0:
         return ious

     ious = bbox_ious(np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32))
+    # TODO: replace bbox_ious() with numpy-capable update of utils.metrics.box_iou
+    # from ...utils.metrics import box_iou
+    # ious = box_iou()

     return ious
@@ -102,8 +106,7 @@ def iou_distance(atracks, btracks):
     else:
         atlbrs = [track.tlbr for track in atracks]
         btlbrs = [track.tlbr for track in btracks]
-    _ious = ious(atlbrs, btlbrs)
-    return 1 - _ious  # cost matrix
+    return 1 - ious(atlbrs, btlbrs)  # cost matrix


 def v_iou_distance(atracks, btracks):
@@ -122,8 +125,7 @@ def v_iou_distance(atracks, btracks):
     else:
         atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks]
         btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks]
-    _ious = ious(atlbrs, btlbrs)
-    return 1 - _ious  # cost matrix
+    return 1 - ious(atlbrs, btlbrs)  # cost matrix


 def embedding_distance(tracks, detections, metric='cosine'):
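The TODO added in `ious()` above asks for a numpy-capable replacement for the Cython `bbox_ious()`. One possible pure-numpy sketch of the same pairwise `1 - IoU` cost matrix for `(x1, y1, x2, y2)` boxes (the function name and epsilon are illustrative, not the library's API):

```python
import numpy as np

def iou_cost(atlbrs, btlbrs):
    """Sketch: pairwise 1 - IoU cost matrix for (x1, y1, x2, y2) boxes."""
    a = np.ascontiguousarray(atlbrs, dtype=np.float32)  # (N, 4)
    b = np.ascontiguousarray(btlbrs, dtype=np.float32)  # (M, 4)
    tl = np.maximum(a[:, None, :2], b[None, :, :2])     # (N, M, 2) intersection top-left
    br = np.minimum(a[:, None, 2:], b[None, :, 2:])     # (N, M, 2) intersection bottom-right
    wh = np.clip(br - tl, 0, None)                      # clamp non-overlapping pairs to zero
    inter = wh[..., 0] * wh[..., 1]                     # (N, M) intersection areas
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    union = area_a[:, None] + area_b[None, :] - inter
    return 1 - inter / np.maximum(union, 1e-9)          # cost = 1 - IoU
```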
