`ultralytics 8.0.140` improved robustness to model path spaces (#3879)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Lukas Hennies <45569834+Gornoka@users.noreply.github.com>
Branch: single_channel
Authored by Glenn Jocher, committed via GitHub
parent ed25db9426
commit 965e405957
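
The headline change: model loading, benchmarking, and export now tolerate spaces in weight-file paths. As a minimal sketch of the user-facing behavior (the `path with spaces/` directory below is only an example and is assumed to contain a local `yolov8n.pt`):

```python
from ultralytics import YOLO

# Weights stored under a directory whose name contains a space
model = YOLO('path with spaces/yolov8n.pt')

# Export paths are now quoted (or copied to a temporary space-free location) internally
model.export(format='onnx')
```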

@@ -112,22 +112,22 @@ jobs:
         shell: python
         run: |
           from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='${{ matrix.model }}.pt', imgsz=160, half=False, hard_fail=0.26)
+          benchmark(model='path with spaces/${{ matrix.model }}.pt', imgsz=160, half=False, hard_fail=0.26)
       - name: Benchmark SegmentationModel
         shell: python
         run: |
           from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='${{ matrix.model }}-seg.pt', imgsz=160, half=False, hard_fail=0.30)
+          benchmark(model='path with spaces/${{ matrix.model }}-seg.pt', imgsz=160, half=False, hard_fail=0.30)
       - name: Benchmark ClassificationModel
         shell: python
         run: |
           from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='${{ matrix.model }}-cls.pt', imgsz=160, half=False, hard_fail=0.36)
+          benchmark(model='path with spaces/${{ matrix.model }}-cls.pt', imgsz=160, half=False, hard_fail=0.36)
       - name: Benchmark PoseModel
         shell: python
         run: |
           from ultralytics.utils.benchmarks import benchmark
-          benchmark(model='${{ matrix.model }}-pose.pt', imgsz=160, half=False, hard_fail=0.17)
+          benchmark(model='path with spaces/${{ matrix.model }}-pose.pt', imgsz=160, half=False, hard_fail=0.17)
       - name: Benchmark Summary
         run: |
           cat benchmarks.log
@@ -141,12 +141,10 @@ jobs:
       matrix:
         os: [ubuntu-latest]
         python-version: ['3.11']
-        model: [yolov8n]
         torch: [latest]
         include:
           - os: ubuntu-latest
             python-version: '3.8'  # torch 1.7.0 requires python >=3.6, <=3.8
-            model: yolov8n
             torch: '1.8.0'  # min torch version CI https://pypi.org/project/torchvision/
     steps:
       - uses: actions/checkout@v3
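
The CI matrix drops its hard-coded `model` key, and each benchmark step above now loads weights from a `path with spaces/` directory. A rough local reproduction of the DetectionModel step, substituting `yolov8n` for the matrix variable and assuming the checkpoint has been copied into such a directory:

```python
from ultralytics.utils.benchmarks import benchmark

# Mirrors the CI step above; 'path with spaces/yolov8n.pt' is assumed to exist locally
benchmark(model='path with spaces/yolov8n.pt', imgsz=160, half=False, hard_fail=0.26)
```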

.gitignore

@@ -136,7 +136,6 @@ dmypy.json
 datasets/
 runs/
 wandb/
 .DS_Store

 # Neural Network weights -----------------------------------------------------------------------------------------------
@@ -154,3 +153,6 @@ weights/
 *_web_model/
 *_openvino_model/
 *_paddle_model/
+
+# Autogenerated files for tests
+/ultralytics/assets/

@@ -65,7 +65,7 @@ Here is an example:
 ```python
 """
-What the function does.
+Performs NMS on given detection predictions.

 Args:
     arg1: The description of the 1st argument

@@ -12,7 +12,7 @@ from ultralytics import RTDETR, YOLO
 from ultralytics.data.build import load_inference_source
 from ultralytics.utils import LINUX, ONLINE, ROOT, SETTINGS

-MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n.pt'
+MODEL = Path(SETTINGS['weights_dir']) / 'path with spaces' / 'yolov8n.pt'  # test spaces in path
 CFG = 'yolov8n.yaml'
 SOURCE = ROOT / 'assets/bus.jpg'
 SOURCE_GREYSCALE = Path(f'{SOURCE.parent / SOURCE.stem}_greyscale.jpg')
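
Because `MODEL` now nests the weights under `path with spaces`, every test that loads it exercises a spaced path. A representative check might look like this sketch (not the literal test body):

```python
from ultralytics import YOLO

model = YOLO(MODEL)              # MODEL includes 'path with spaces' in its parents
model.predict(SOURCE, imgsz=32)  # inference should succeed despite the space
```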

@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = '8.0.139'
+__version__ = '8.0.140'

 from ultralytics.engine.model import YOLO
 from ultralytics.hub import start

@@ -68,7 +68,7 @@ from ultralytics.utils import (ARM64, DEFAULT_CFG, LINUX, LOGGER, MACOS, ROOT, W
                                colorstr, get_default_args, yaml_save)
 from ultralytics.utils.checks import check_imgsz, check_requirements, check_version
 from ultralytics.utils.downloads import attempt_download_asset, get_github_assets
-from ultralytics.utils.files import file_size
+from ultralytics.utils.files import file_size, spaces_in_path
 from ultralytics.utils.ops import Profile
 from ultralytics.utils.torch_utils import get_latest_opset, select_device, smart_inference_mode
@@ -112,7 +112,7 @@ def try_export(inner_func):
         try:
             with Profile() as dt:
                 f, model = inner_func(*args, **kwargs)
-            LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
+            LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as '{f}' ({file_size(f):.1f} MB)")
             return f, model
         except Exception as e:
             LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
@@ -230,7 +230,7 @@ class Exporter:
         if model.task == 'pose':
             self.metadata['kpt_shape'] = model.model[-1].kpt_shape

-        LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with input shape {tuple(im.shape)} BCHW and "
+        LOGGER.info(f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and "
                     f'output shape(s) {self.output_shape} ({file_size(file):.1f} MB)')

         # Exports
@@ -340,7 +340,7 @@ class Exporter:
                 import onnxsim

                 LOGGER.info(f'{prefix} simplifying with onnxsim {onnxsim.__version__}...')
-                # subprocess.run(f'onnxsim {f} {f}', shell=True)
+                # subprocess.run(f'onnxsim "{f}" "{f}"', shell=True)
                 model_onnx, check = onnxsim.simplify(model_onnx)
                 assert check, 'Simplified ONNX model could not be validated'
             except Exception as e:
@@ -410,7 +410,7 @@ class Exporter:
         LOGGER.info(f'\n{prefix} starting export with ncnn {ncnn.__version__}...')
         f = Path(str(self.file).replace(self.file.suffix, f'_ncnn_model{os.sep}'))
-        f_ts = str(self.file.with_suffix('.torchscript'))
+        f_ts = self.file.with_suffix('.torchscript')

         pnnx_filename = 'pnnx.exe' if WINDOWS else 'pnnx'
         if Path(pnnx_filename).is_file():
@@ -434,7 +434,7 @@ class Exporter:
         cmd = [
             str(pnnx),
-            f_ts,
+            str(f_ts),
             f'pnnxparam={f / "model.pnnx.param"}',
             f'pnnxbin={f / "model.pnnx.bin"}',
             f'pnnxpy={f / "model_pnnx.py"}',
@@ -586,8 +586,8 @@ class Exporter:
         # Export to TF
         int8 = '-oiqt -qt per-tensor' if self.args.int8 else ''
-        cmd = f'onnx2tf -i {f_onnx} -o {f} -nuo --non_verbose {int8}'
-        LOGGER.info(f"\n{prefix} running '{cmd.strip()}'")
+        cmd = f'onnx2tf -i "{f_onnx}" -o "{f}" -nuo --non_verbose {int8}'
+        LOGGER.info(f"\n{prefix} running '{cmd}'")
         subprocess.run(cmd, shell=True)
         yaml_save(f / 'metadata.yaml', self.metadata)  # add metadata.yaml
@@ -659,9 +659,9 @@ class Exporter:
         LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
         f = str(tflite_model).replace('.tflite', '_edgetpu.tflite')  # Edge TPU model

-        cmd = f'edgetpu_compiler -s -d -k 10 --out_dir {Path(f).parent} {tflite_model}'
+        cmd = f'edgetpu_compiler -s -d -k 10 --out_dir "{Path(f).parent}" "{tflite_model}"'
         LOGGER.info(f"{prefix} running '{cmd}'")
-        subprocess.run(cmd.split(), check=True)
+        subprocess.run(cmd, shell=True)
         self._add_tflite_metadata(f)
         return f, None
@@ -674,7 +674,7 @@ class Exporter:
         LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
         f = str(self.file).replace(self.file.suffix, '_web_model')  # js dir
-        f_pb = self.file.with_suffix('.pb')  # *.pb path
+        f_pb = str(self.file.with_suffix('.pb'))  # *.pb path

         gd = tf.Graph().as_graph_def()  # TF GraphDef
         with open(f_pb, 'rb') as file:
@@ -682,8 +682,13 @@ class Exporter:
         outputs = ','.join(gd_outputs(gd))
         LOGGER.info(f'\n{prefix} output node names: {outputs}')

-        cmd = f'tensorflowjs_converter --input_format=tf_frozen_model --output_node_names={outputs} {f_pb} {f}'
-        subprocess.run(cmd.split(), check=True)
+        with spaces_in_path(f_pb) as fpb_, spaces_in_path(f) as f_:  # exporter can not handle spaces in path
+            cmd = f'tensorflowjs_converter --input_format=tf_frozen_model --output_node_names={outputs} "{fpb_}" "{f_}"'
+            LOGGER.info(f"{prefix} running '{cmd}'")
+            subprocess.run(cmd, shell=True)
+
+        if ' ' in str(f):
+            LOGGER.warning(f"{prefix} WARNING ⚠️ your model may not work correctly with spaces in path '{f}'.")

         # f_json = Path(f) / 'model.json'  # *.json path
         # with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
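
A note on the pattern above: the previous calls built a command string and ran `subprocess.run(cmd.split(), check=True)`, and `str.split()` breaks any path containing a space into several arguments. Quoting the paths and passing the whole string with `shell=True` keeps each path intact. A small illustration with a hypothetical output directory:

```python
out_dir = 'path with spaces/yolov8n_saved_model'  # hypothetical directory containing a space

# Old pattern: naive whitespace split shreds the path into separate arguments
cmd = f'edgetpu_compiler -s -d -k 10 --out_dir {out_dir} model.tflite'
print(cmd.split())  # [..., '--out_dir', 'path', 'with', 'spaces/yolov8n_saved_model', 'model.tflite']

# New pattern: quote the paths and let the shell parse the command
cmd = f'edgetpu_compiler -s -d -k 10 --out_dir "{out_dir}" "model.tflite"'
# subprocess.run(cmd, shell=True)  # each quoted path now arrives as a single argument
```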

@@ -6,7 +6,9 @@ try:
     from torch.utils.tensorboard import SummaryWriter

     assert not TESTS_RUNNING  # do not log pytest
-except (ImportError, AssertionError):
+# TypeError for handling 'Descriptors cannot not be created directly.' protobuf errors in Windows
+except (ImportError, AssertionError, TypeError):
     SummaryWriter = None

 writer = None  # TensorBoard SummaryWriter instance

@@ -149,7 +149,7 @@ def safe_download(url,
     elif not f.is_file():  # URL and file do not exist
         assert dir or file, 'dir or file required for download'
         f = dir / url2file(url) if dir else Path(file)
-        desc = f'Downloading {clean_url(url)} to {f}'
+        desc = f"Downloading {clean_url(url)} to '{f}'"
         LOGGER.info(f'{desc}...')
         f.parent.mkdir(parents=True, exist_ok=True)  # make directory if missing
         check_disk_space(url)

@@ -4,6 +4,8 @@ import contextlib
 import glob
 import os
 import shutil
+import tempfile
+from contextlib import contextmanager
 from datetime import datetime
 from pathlib import Path
@@ -25,6 +27,57 @@ class WorkingDirectory(contextlib.ContextDecorator):
         os.chdir(self.cwd)


+@contextmanager
+def spaces_in_path(path):
+    """
+    Context manager to handle paths with spaces in their names.
+    If a path contains spaces, it replaces them with underscores, copies the file/directory to the new path,
+    executes the context code block, then copies the file/directory back to its original location.
+
+    Args:
+        path (str | Path): The original path.
+
+    Yields:
+        Path: Temporary path with spaces replaced by underscores if spaces were present, otherwise the original path.
+
+    Examples:
+        with spaces_in_path('/path/with spaces') as new_path:
+            # your code here
+    """
+
+    # If path has spaces, replace them with underscores
+    if ' ' in str(path):
+        string = isinstance(path, str)  # input type
+        path = Path(path)
+
+        # Create a temporary directory and construct the new path
+        with tempfile.TemporaryDirectory() as tmp_dir:
+            tmp_path = Path(tmp_dir) / path.name.replace(' ', '_')
+
+            # Copy file/directory
+            if path.is_dir():
+                # tmp_path.mkdir(parents=True, exist_ok=True)
+                shutil.copytree(path, tmp_path)
+            elif path.is_file():
+                tmp_path.parent.mkdir(parents=True, exist_ok=True)
+                shutil.copy2(path, tmp_path)
+
+            try:
+                # Yield the temporary path
+                yield str(tmp_path) if string else tmp_path
+
+            finally:
+                # Copy file/directory back
+                if tmp_path.is_dir():
+                    shutil.copytree(tmp_path, path, dirs_exist_ok=True)
+                elif tmp_path.is_file():
+                    shutil.copy2(tmp_path, path)  # Copy back the file
+
+    else:
+        # If there are no spaces, just yield the original path
+        yield path
+
+
 def increment_path(path, exist_ok=False, sep='', mkdir=False):
     """
     Increments a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
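
As a rough usage sketch of the new helper (the directory and file names are hypothetical), the context manager yields a space-free temporary copy and syncs changes to that copy back to the original location on exit:

```python
from pathlib import Path

from ultralytics.utils.files import spaces_in_path

src = Path('path with spaces')             # hypothetical directory whose name contains spaces
src.mkdir(exist_ok=True)
(src / 'notes.txt').write_text('hello\n')

with spaces_in_path(src) as safe_dir:
    # safe_dir is a temporary copy such as /tmp/.../path_with_spaces
    (safe_dir / 'result.txt').write_text('done\n')  # work against the space-free copy

print((src / 'result.txt').read_text())    # 'done' - the copy was synced back on exit
```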
