ultralytics 8.0.47 Docker and reformat updates (#1153)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Authored by Glenn Jocher on 2023-02-25 22:49:19 -08:00, committed by GitHub
parent d4be4cb24b
commit a58f766f94
41 changed files with 224 additions and 201 deletions


@@ -18,7 +18,6 @@ from typing import Union
import cv2
import numpy as np
import pandas as pd
import requests
import torch
import yaml
@@ -517,10 +516,7 @@ def set_sentry():
((is_pip_package() and not is_git_dir()) or
(get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git' and get_git_branch() == 'main')):
import hashlib
import sentry_sdk # noqa
sentry_sdk.init(
dsn='https://f805855f03bb4363bc1e16cb7d87b654@o4504521589325824.ingest.sentry.io/4504521592406016',
debug=False,
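
set_sentry() above only initializes reporting for official installs: a pip install that is not a git clone, or the official repository checked out on main. A minimal, self-contained sketch of that gate, using simplified stand-ins for the module's own git helpers:

```python
# Hedged sketch of the set_sentry() gate shown above: report only from pip
# installs that are not git clones, or from the official repo on 'main'.
# The git helpers here are simplified stand-ins, not the module's own.
import subprocess
from pathlib import Path


def _git(*args) -> str:
    try:
        return subprocess.check_output(['git', *args], text=True).strip()
    except Exception:
        return ''


def should_init_sentry(is_pip_package: bool) -> bool:
    official_pip = is_pip_package and not (Path('.') / '.git').is_dir()
    official_git = (_git('config', '--get', 'remote.origin.url') == 'https://github.com/ultralytics/ultralytics.git'
                    and _git('rev-parse', '--abbrev-ref', 'HEAD') == 'main')
    return official_pip or official_git
```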


@@ -30,14 +30,14 @@ import pandas as pd
from ultralytics import YOLO
from ultralytics.yolo.engine.exporter import export_formats
from ultralytics.yolo.utils import LOGGER, ROOT, SETTINGS
from ultralytics.yolo.utils import LINUX, LOGGER, ROOT, SETTINGS
from ultralytics.yolo.utils.checks import check_yolo
from ultralytics.yolo.utils.downloads import download
from ultralytics.yolo.utils.files import file_size
from ultralytics.yolo.utils.torch_utils import select_device
def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, half=False, device='cpu', hard_fail=0.30):
def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, half=False, device='cpu', hard_fail=False):
device = select_device(device, verbose=False)
if isinstance(model, (str, Path)):
model = YOLO(model)
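
The signature change above flips the hard_fail default from a 0.30 metric floor to False; a float can still be passed to enforce a minimum metric, as the last hunk of this file shows. Illustrative calls, with the import path assumed from this module:

```python
# Hypothetical usage of the updated signature; weights and device are
# placeholder choices.
from ultralytics.yolo.utils.benchmarks import benchmark

benchmark(model='yolov8n.pt', imgsz=160, device='cpu')                  # default: never hard-fail
benchmark(model='yolov8n.pt', imgsz=160, device='cpu', hard_fail=0.30)  # assert metric > 0.30
```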
@@ -45,11 +45,10 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, hal
y = []
t0 = time.time()
for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows(): # index, (name, format, suffix, CPU, GPU)
emoji = '❌' # indicates export failure
try:
assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported
assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML
assert i != 11 or model.task != 'classify', 'paddle-classify bug'
assert i != 11, 'paddle exports coming soon'
assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
if 'cpu' in device.type:
assert cpu, 'inference not supported on CPU'
if 'cuda' in device.type:
@@ -61,13 +60,16 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, hal
export = model # PyTorch format
else:
filename = model.export(imgsz=imgsz, format=format, half=half, device=device) # all others
export = YOLO(filename)
export = YOLO(filename, task=model.task)
assert suffix in str(filename), 'export failed'
emoji = '❎' # indicates export succeeded
# Predict
assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported
assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML
if not (ROOT / 'assets/bus.jpg').exists():
download(url='https://ultralytics.com/images/bus.jpg', dir=ROOT / 'assets')
export.predict(ROOT / 'assets/bus.jpg', imgsz=imgsz, device=device, half=half) # test
export.predict(ROOT / 'assets/bus.jpg', imgsz=imgsz, device=device, half=half)
# Validate
if model.task == 'detect':
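
Exports are now re-wrapped with the source model's task via YOLO(filename, task=model.task), so formats that cannot infer the task from the weights alone still load before the bus.jpg prediction test. A hedged round-trip sketch; the ONNX format and yolov8n.pt weights are illustrative choices, not the benchmark's fixed ones:

```python
# Export-then-predict round trip mirroring the hunk above.
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
filename = model.export(format='onnx', imgsz=160)   # e.g. 'yolov8n.onnx'
exported = YOLO(filename, task=model.task)          # pass the task explicitly
exported.predict('https://ultralytics.com/images/bus.jpg', imgsz=160)
```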
@@ -84,17 +86,16 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, hal
if hard_fail:
assert type(e) is AssertionError, f'Benchmark hard_fail for {name}: {e}'
LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
y.append([name, '❌', None, None, None]) # mAP, t_inference
y.append([name, emoji, None, None, None]) # mAP, t_inference
# Print results
check_yolo(device=device) # print system info
c = ['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)']
df = pd.DataFrame(y, columns=c)
df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'])
name = Path(model.ckpt_path).name
s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n'
LOGGER.info(s)
with open('benchmarks.log', 'a') as f:
with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
f.write(s)
if hard_fail and isinstance(hard_fail, float):
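
The float branch of hard_fail is cut off by the hunk above; a hypothetical completion of that floor check, where df and key stand for the DataFrame and metric column built earlier in benchmark():

```python
# Hypothetical completion of the float hard_fail branch: every collected
# metric must clear the floor. The 'mAP50-95' column name is illustrative.
import pandas as pd


def assert_metric_floor(df: pd.DataFrame, key: str, floor: float) -> None:
    metrics = df[key].array  # metric values collected per export format
    assert all(x > floor for x in metrics if pd.notna(x)), \
        f'Benchmark hard_fail: one or more metrics fell below floor {floor}'


assert_metric_floor(pd.DataFrame({'mAP50-95': [0.45, 0.44, None]}), key='mAP50-95', floor=0.30)
```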


@@ -1,5 +1,3 @@
from .base import add_integration_callbacks, default_callbacks
__all__ = [
'add_integration_callbacks',
'default_callbacks',]
__all__ = 'add_integration_callbacks', 'default_callbacks'
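
The simplification above relies on tuple packing: a bare comma-separated assignment already builds a tuple, so the bracketed list was redundant.

```python
# Tuple packing: both spellings export the same names from the package.
__all__ = 'add_integration_callbacks', 'default_callbacks'
assert __all__ == ('add_integration_callbacks', 'default_callbacks')
assert isinstance(__all__, tuple)
```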


@@ -137,7 +137,6 @@ def check_latest_pypi_version(package_name='ultralytics'):
def check_pip_update():
from ultralytics import __version__
latest = check_latest_pypi_version()
latest = '9.0.0'
if pkg.parse_version(__version__) < pkg.parse_version(latest):
LOGGER.info(f'New https://pypi.org/project/ultralytics/{latest} available 😃 '
f"Update with 'pip install -U ultralytics'")
@@ -239,7 +238,7 @@ def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=()
LOGGER.warning(f'{prefix}{e}')
def check_suffix(file='yolov8n.pt', suffix=('.pt',), msg=''):
def check_suffix(file='yolov8n.pt', suffix='.pt', msg=''):
# Check file(s) for acceptable suffix
if file and suffix:
if isinstance(suffix, str):
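
The new default accepts a bare '.pt' string, and the body (truncated above) normalizes a string suffix into a tuple before checking each file. A hedged sketch of that pattern:

```python
# Hedged sketch of the suffix check: a single string suffix is wrapped into a
# tuple, then every file's extension is validated against it.
from pathlib import Path


def check_suffix(file='yolov8n.pt', suffix='.pt', msg=''):
    if file and suffix:
        if isinstance(suffix, str):
            suffix = (suffix,)
        for f in (file if isinstance(file, (list, tuple)) else [file]):
            s = Path(f).suffix.lower().strip()
            if s:
                assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}, not {s}'


check_suffix('yolov8n.onnx', suffix=('.pt', '.onnx'))  # passes
```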


@@ -10,9 +10,8 @@ import numpy as np
from .ops import ltwh2xywh, ltwh2xyxy, resample_segments, xywh2ltwh, xywh2xyxy, xyxy2ltwh, xyxy2xywh
# From PyTorch internals
def _ntuple(n):
# From PyTorch internals
def parse(x):
return x if isinstance(x, abc.Iterable) else tuple(repeat(x, n))
@@ -26,7 +25,7 @@ to_4tuple = _ntuple(4)
# `ltwh` means left top and width, height(coco format)
_formats = ['xyxy', 'xywh', 'ltwh']
__all__ = ['Bboxes']
__all__ = 'Bboxes', # tuple or list
class Bboxes:
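
For context, _ntuple builds small helpers such as to_4tuple that broadcast a scalar into a fixed-length tuple while letting iterables pass through unchanged. A self-contained sketch:

```python
# Self-contained sketch of the _ntuple helper and the to_4tuple convenience
# built from it, as used by the module above.
from collections import abc
from itertools import repeat


def _ntuple(n):
    # From PyTorch internals
    def parse(x):
        return x if isinstance(x, abc.Iterable) else tuple(repeat(x, n))
    return parse


to_4tuple = _ntuple(4)
assert to_4tuple(640) == (640, 640, 640, 640)  # scalar broadcast to length 4
assert to_4tuple((640, 480)) == (640, 480)     # iterables pass through unchanged
```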


@@ -207,8 +207,7 @@ def plot_labels(boxes, cls, names=(), save_dir=Path('')):
def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):
# Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
xyxy = torch.Tensor(xyxy).view(-1, 4)
b = xyxy2xywh(xyxy) # boxes
b = xyxy2xywh(xyxy.view(-1, 4)) # boxes
if square:
b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
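
The crop math above converts the xyxy box to xywh, optionally squares it, then scales the width/height by gain and adds pad pixels. A standalone illustration of that arithmetic; expand_box_xyxy is a hypothetical helper, not the library's API:

```python
# Illustration of the crop-box arithmetic in save_one_box above.
import torch


def expand_box_xyxy(xyxy, gain=1.02, pad=10, square=False):
    b = torch.as_tensor(xyxy, dtype=torch.float32).view(-1, 4)
    # xyxy -> xywh (center x, center y, width, height)
    xywh = torch.cat(((b[:, :2] + b[:, 2:]) / 2, b[:, 2:] - b[:, :2]), dim=1)
    if square:
        xywh[:, 2:] = xywh[:, 2:].max(1, keepdim=True)[0]  # rectangle -> square
    xywh[:, 2:] = xywh[:, 2:] * gain + pad                 # box wh * gain + pad
    return xywh


print(expand_box_xyxy([100, 100, 200, 300], square=True))  # tensor([[150., 200., 214., 214.]])
```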


@@ -195,7 +195,7 @@ def get_flops(model, imgsz=640):
p = next(model.parameters())
stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride
im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format
flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs
flops = thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1E9 * 2 # stride GFLOPs
imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float
flops = flops * imgsz[0] / stride * imgsz[1] / stride # 640x640 GFLOPs
return flops
@@ -374,7 +374,7 @@ def profile(input, ops, n=10, device=None):
m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward
try:
flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs
flops = thop.profile(m, inputs=[x], verbose=False)[0] / 1E9 * 2 # GFLOPs
except Exception:
flops = 0
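
Both hunks above now pass the probe tensor to thop.profile as a list rather than a tuple; the estimate itself profiles at the model stride and scales quadratically to the target image size. A hedged, self-contained sketch with a toy Conv2d model (requires the thop package):

```python
# FLOPs estimate mirroring get_flops() above: profile a stride-sized probe,
# then scale to imgsz x imgsz. The Conv2d model is a toy stand-in.
from copy import deepcopy

import thop
import torch
import torch.nn as nn

model = nn.Conv2d(3, 16, 3, padding=1)
stride, imgsz = 32, 640
im = torch.empty(1, 3, stride, stride)                                           # BCHW probe input
flops = thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1E9 * 2   # GFLOPs at stride
flops *= (imgsz / stride) * (imgsz / stride)                                      # scale to imgsz x imgsz
print(f'{flops:.3f} GFLOPs at {imgsz}x{imgsz}')
```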