ultralytics 8.0.41
TF SavedModel and EdgeTPU export (#1034)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Noobtoss <96134731+Noobtoss@users.noreply.github.com>
Co-authored-by: Ayush Chaurasia <ayush.chaurarsia@gmail.com>
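Since this release adds TF SavedModel and Edge TPU export, here is a minimal usage sketch (not part of the diff; it assumes the public `YOLO.export` API and the `format` arguments listed in the benchmark table below):

from ultralytics import YOLO

model = YOLO('yolov8n.pt')
model.export(format='saved_model')  # writes yolov8n_saved_model/
model.export(format='edgetpu')      # writes yolov8n_edgetpu.tflite (Edge TPU compiler required)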
@@ -97,6 +97,7 @@ HELP_MSG = \
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
pd.options.display.max_columns = 10
pd.options.display.width = 120
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS)  # NumExpr max threads
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'  # for deterministic training
@@ -287,9 +288,7 @@ def is_pytest_running():
    Returns:
        (bool): True if pytest is running, False otherwise.
    """
    with contextlib.suppress(Exception):
        return 'pytest' in sys.modules
    return False
    return ('PYTEST_CURRENT_TEST' in os.environ) or ('pytest' in sys.modules) or ('pytest' in Path(sys.argv[0]).stem)


def is_github_actions_ci() -> bool:
@@ -530,8 +529,7 @@ def set_sentry():
    if SETTINGS['sync'] and \
            RANK in {-1, 0} and \
            Path(sys.argv[0]).name == 'yolo' and \
            not is_pytest_running() and \
            not is_github_actions_ci() and \
            not TESTS_RUNNING and \
            ((is_pip_package() and not is_git_dir()) or
             (get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git' and get_git_branch() == 'main')):

@@ -625,4 +623,5 @@ SETTINGS = get_settings()
DATASETS_DIR = Path(SETTINGS['datasets_dir'])  # global datasets directory
ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \
    'Docker' if is_docker() else platform.system()
TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
set_sentry()
ultralytics/yolo/utils/benchmarks.py (new file, 101 lines)
@@ -0,0 +1,101 @@
# Ultralytics YOLO 🚀, GPL-3.0 license
"""
Benchmark YOLO model formats for speed and accuracy

Usage:
    from ultralytics.yolo.utils.benchmarks import run_benchmarks
    run_benchmarks(model='yolov8n.pt', imgsz=160)

Format | `format=argument` | Model
--- | --- | ---
PyTorch | - | yolov8n.pt
TorchScript | `torchscript` | yolov8n.torchscript
ONNX | `onnx` | yolov8n.onnx
OpenVINO | `openvino` | yolov8n_openvino_model/
TensorRT | `engine` | yolov8n.engine
CoreML | `coreml` | yolov8n.mlmodel
TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/
TensorFlow GraphDef | `pb` | yolov8n.pb
TensorFlow Lite | `tflite` | yolov8n.tflite
TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite
TensorFlow.js | `tfjs` | yolov8n_web_model/
PaddlePaddle | `paddle` | yolov8n_paddle_model/
"""

import platform
import time
from pathlib import Path

import pandas as pd
import torch

from ultralytics import YOLO
from ultralytics.yolo.engine.exporter import export_formats
from ultralytics.yolo.utils import LOGGER, SETTINGS
from ultralytics.yolo.utils.checks import check_yolo
from ultralytics.yolo.utils.files import file_size


def run_benchmarks(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
                   imgsz=640,
                   half=False,
                   device='cpu',
                   hard_fail=False):
    device = torch.device(int(device) if device.isnumeric() else device)
    model = YOLO(model)

    y = []
    t0 = time.time()
    for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows():  # index, (name, format, suffix, CPU, GPU)
        try:
            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML

            if 'cpu' in device.type:
                assert cpu, 'inference not supported on CPU'
            if 'cuda' in device.type:
                assert gpu, 'inference not supported on GPU'

            # Export
            if format == '-':
                filename = model.ckpt_path
                export = model  # PyTorch format
            else:
                filename = model.export(imgsz=imgsz, format=format, half=half, device=device)  # all others
                export = YOLO(filename)
            assert suffix in str(filename), 'export failed'

            # Validate
            if model.task == 'detect':
                data, key = 'coco128.yaml', 'metrics/mAP50-95(B)'
            elif model.task == 'segment':
                data, key = 'coco128-seg.yaml', 'metrics/mAP50-95(M)'
            elif model.task == 'classify':
                data, key = 'imagenet100', 'metrics/accuracy_top5'

            results = export.val(data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, verbose=False)
            metric, speed = results.results_dict[key], results.speed['inference']
            y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
        except Exception as e:
            if hard_fail:
                assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
            LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
            y.append([name, '❌', None, None, None])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    check_yolo(device=device)  # print system info
    c = ['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'] if map else ['Format', 'Export', '', '']
    df = pd.DataFrame(y, columns=c)
    LOGGER.info(f'\nBenchmarks complete for {Path(model.ckpt_path).name} on {data} at imgsz={imgsz} '
                f'({time.time() - t0:.2f}s)')
    LOGGER.info(str(df if map else df.iloc[:, :2]))

    if hard_fail and isinstance(hard_fail, str):
        metrics = df[key].array  # values to compare to floor
        floor = eval(hard_fail)  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: metric < floor {floor}'


if __name__ == '__main__':
    run_benchmarks()
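Note that `hard_fail` doubles as a minimum-metric floor when passed as a string (it is `eval`ed and compared against each format's metric). A hedged usage sketch, with an illustrative threshold:

from ultralytics.yolo.utils.benchmarks import run_benchmarks

# Fail the run if any successfully benchmarked format scores at or below 0.26 mAP50-95 (threshold is illustrative)
run_benchmarks(model='yolov8n.pt', imgsz=160, hard_fail='0.26')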
@@ -1,5 +1,5 @@
# Ultralytics YOLO 🚀, GPL-3.0 license

from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params

try:
@@ -7,6 +7,7 @@ try:
    from clearml import Task

    assert clearml.__version__  # verify package is not directory
    assert not TESTS_RUNNING  # do not log pytest
except (ImportError, AssertionError):
    clearml = None

@@ -19,14 +20,16 @@ def _log_images(imgs_dict, group='', step=0):


def on_pretrain_routine_start(trainer):
    # TODO: reuse existing task
    task = Task.init(project_name=trainer.args.project or 'YOLOv8',
                     task_name=trainer.args.name,
                     tags=['YOLOv8'],
                     output_uri=True,
                     reuse_last_task_id=False,
                     auto_connect_frameworks={'pytorch': False})
    task.connect(vars(trainer.args), name='General')
    try:
        task = Task.init(project_name=trainer.args.project or 'YOLOv8',
                         task_name=trainer.args.name,
                         tags=['YOLOv8'],
                         output_uri=True,
                         reuse_last_task_id=False,
                         auto_connect_frameworks={'pytorch': False})
        task.connect(vars(trainer.args), name='General')
    except Exception as e:
        LOGGER.warning(f'WARNING ⚠️ ClearML not initialized correctly, not logging this run. {e}')


def on_train_epoch_end(trainer):
@@ -35,18 +38,19 @@ def on_train_epoch_end(trainer):


def on_fit_epoch_end(trainer):
    if trainer.epoch == 0:
    task = Task.current_task()
    if task and trainer.epoch == 0:
        model_info = {
            'Parameters': get_num_params(trainer.model),
            'GFLOPs': round(get_flops(trainer.model), 3),
            'Inference speed (ms/img)': round(trainer.validator.speed[1], 3)}
        Task.current_task().connect(model_info, name='Model')
        task.connect(model_info, name='Model')


def on_train_end(trainer):
    Task.current_task().update_output_model(model_path=str(trainer.best),
                                            model_name=trainer.args.name,
                                            auto_delete_file=False)
    task = Task.current_task()
    if task:
        task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False)


callbacks = {
@@ -1,41 +1,49 @@
# Ultralytics YOLO 🚀, GPL-3.0 license

from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING
from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params

try:
    import comet_ml

except ImportError:
    assert not TESTS_RUNNING  # do not log pytest
    assert comet_ml.__version__  # verify package is not directory
except (ImportError, AssertionError):
    comet_ml = None


def on_pretrain_routine_start(trainer):
    experiment = comet_ml.Experiment(project_name=trainer.args.project or 'YOLOv8')
    experiment.log_parameters(vars(trainer.args))
    try:
        experiment = comet_ml.Experiment(project_name=trainer.args.project or 'YOLOv8')
        experiment.log_parameters(vars(trainer.args))
    except Exception as e:
        LOGGER.warning(f'WARNING ⚠️ Comet not initialized correctly, not logging this run. {e}')


def on_train_epoch_end(trainer):
    experiment = comet_ml.get_global_experiment()
    experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1)
    if trainer.epoch == 1:
        for f in trainer.save_dir.glob('train_batch*.jpg'):
            experiment.log_image(f, name=f.stem, step=trainer.epoch + 1)
    if experiment:
        experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1)
        if trainer.epoch == 1:
            for f in trainer.save_dir.glob('train_batch*.jpg'):
                experiment.log_image(f, name=f.stem, step=trainer.epoch + 1)


def on_fit_epoch_end(trainer):
    experiment = comet_ml.get_global_experiment()
    experiment.log_metrics(trainer.metrics, step=trainer.epoch + 1)
    if trainer.epoch == 0:
        model_info = {
            'model/parameters': get_num_params(trainer.model),
            'model/GFLOPs': round(get_flops(trainer.model), 3),
            'model/speed(ms)': round(trainer.validator.speed[1], 3)}
        experiment.log_metrics(model_info, step=trainer.epoch + 1)
    if experiment:
        experiment.log_metrics(trainer.metrics, step=trainer.epoch + 1)
        if trainer.epoch == 0:
            model_info = {
                'model/parameters': get_num_params(trainer.model),
                'model/GFLOPs': round(get_flops(trainer.model), 3),
                'model/speed(ms)': round(trainer.validator.speed[1], 3)}
            experiment.log_metrics(model_info, step=trainer.epoch + 1)


def on_train_end(trainer):
    experiment = comet_ml.get_global_experiment()
    experiment.log_model('YOLOv8', file_or_folder=str(trainer.best), file_name='best.pt', overwrite=True)
    if experiment:
        experiment.log_model('YOLOv8', file_or_folder=str(trainer.best), file_name='best.pt', overwrite=True)


callbacks = {
@@ -4,11 +4,11 @@ import json
from time import time

from ultralytics.hub.utils import PREFIX, traces
from ultralytics.yolo.utils import LOGGER
from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING


def on_pretrain_routine_end(trainer):
    session = getattr(trainer, 'hub_session', None)
    session = not TESTS_RUNNING and getattr(trainer, 'hub_session', None)
    if session:
        # Start timer for upload rate limit
        LOGGER.info(f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀')
@@ -194,8 +194,12 @@ def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=()
        try:
            pkg.require(r)
        except (pkg.VersionConflict, pkg.DistributionNotFound):  # exception if requirements not met
            s += f'"{r}" '
            n += 1
            try:  # attempt to import (slower but more accurate)
                import importlib
                importlib.import_module(next(pkg.parse_requirements(r)).name)
            except ImportError:
                s += f'"{r}" '
                n += 1

    if s and install and AUTOINSTALL:  # check environment variable
        LOGGER.info(f"{prefix} YOLOv8 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...")
@@ -250,7 +254,7 @@ def check_file(file, suffix='', download=True):
        return file
    else:  # search
        files = []
        for d in 'models', 'yolo/data', 'tracker/cfg':  # search directories
        for d in 'models', 'datasets', 'tracker/cfg':  # search directories
            files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))  # find file
        if not files:
            raise FileNotFoundError(f"'{file}' does not exist")
@@ -280,7 +284,7 @@ def check_imshow(warn=False):
        return False


def check_yolo(verbose=True):
def check_yolo(verbose=True, device=''):
    from ultralytics.yolo.utils.torch_utils import select_device

    if is_colab():
@@ -298,7 +302,7 @@ def check_yolo(verbose=True):
    else:
        s = ''

    select_device(newline=False)
    select_device(device=device, newline=False)
    LOGGER.info(f'Setup complete ✅ {s}')
@@ -512,6 +512,7 @@ class DetMetrics:
        self.plot = plot
        self.names = names
        self.box = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}

    def process(self, tp, conf, pred_cls, target_cls):
        results = ap_per_class(tp, conf, pred_cls, target_cls, plot=self.plot, save_dir=self.save_dir,
@@ -554,6 +555,7 @@ class SegmentMetrics:
        self.names = names
        self.box = Metric()
        self.seg = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}

    def process(self, tp_m, tp_b, conf, pred_cls, target_cls):
        results_mask = ap_per_class(tp_m,
@@ -612,6 +614,7 @@ class ClassifyMetrics:
    def __init__(self) -> None:
        self.top1 = 0
        self.top5 = 0
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}

    def process(self, targets, pred):
        # target classes and predicted classes
@@ -154,7 +154,7 @@ class Annotator:

def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True):
    # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop
    xyxy = torch.tensor(xyxy).view(-1, 4)
    xyxy = torch.Tensor(xyxy).view(-1, 4)
    b = xyxy2xywh(xyxy)  # boxes
    if square:
        b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
@@ -223,7 +223,7 @@ def make_anchors(feats, strides, grid_cell_offset=0.5):

def dist2bbox(distance, anchor_points, xywh=True, dim=-1):
    """Transform distance(ltrb) to box(xywh or xyxy)."""
    lt, rb = torch.split(distance, 2, dim)
    lt, rb = distance.chunk(2, dim)
    x1y1 = anchor_points - lt
    x2y2 = anchor_points + rb
    if xywh:
@@ -235,5 +235,5 @@ def dist2bbox(distance, anchor_points, xywh=True, dim=-1):

def bbox2dist(anchor_points, bbox, reg_max):
    """Transform bbox(xyxy) to dist(ltrb)."""
    x1y1, x2y2 = torch.split(bbox, 2, -1)
    x1y1, x2y2 = bbox.chunk(2, -1)
    return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp(0, reg_max - 0.01)  # dist (lt, rb)
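As a quick sanity check on the ltrb-to-box conversion above, a toy example with a single anchor point (values are illustrative):

import torch

anchor = torch.tensor([[10., 10.]])
distance = torch.tensor([[2., 3., 4., 5.]])  # left, top, right, bottom

lt, rb = distance.chunk(2, -1)            # same split the updated code performs
x1y1, x2y2 = anchor - lt, anchor + rb
print(torch.cat((x1y1, x2y2), -1))        # tensor([[ 8.,  7., 14., 15.]])
print(torch.cat((anchor - x1y1, x2y2 - anchor), -1))  # recovers the original ltrb distances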