ultralytics 8.0.21 Windows, segments, YAML fixes (#655)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: corey-nm <109536191+corey-nm@users.noreply.github.com>
Glenn Jocher
2023-01-26 23:07:27 +01:00
committed by GitHub
parent dc9502c700
commit 6c44ce21d9
16 changed files with 147 additions and 146 deletions


@@ -129,7 +129,7 @@ class Exporter:
             overrides (dict, optional): Configuration overrides. Defaults to None.
         """
         self.args = get_cfg(cfg, overrides)
-        self.callbacks = defaultdict(list, {k: v for k, v in callbacks.default_callbacks.items()})  # add callbacks
+        self.callbacks = defaultdict(list, callbacks.default_callbacks)  # add callbacks
         callbacks.add_integration_callbacks(self)

     @smart_inference_mode()
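The dict comprehension removed above was a no-op copy: defaultdict(list, mapping) already copies the mapping's items on construction. A minimal standalone sketch of the equivalence (the default_callbacks dict here is a stand-in, not the real ultralytics table):

    from collections import defaultdict

    default_callbacks = {'on_predict_start': [print]}  # stand-in for callbacks.default_callbacks

    a = defaultdict(list, {k: v for k, v in default_callbacks.items()})  # old form
    b = defaultdict(list, default_callbacks)  # new form

    assert a == b            # identical contents; both are shallow copies
    assert b['missing'] == []  # unknown events still default to an empty list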


@@ -61,8 +61,8 @@ class YOLO:
         else:
             raise NotImplementedError(f"'{suffix}' model loading not implemented")

-    def __call__(self, source=None, stream=False, verbose=False, **kwargs):
-        return self.predict(source, stream, verbose, **kwargs)
+    def __call__(self, source=None, stream=False, **kwargs):
+        return self.predict(source, stream, **kwargs)

     def _new(self, cfg: str, verbose=True):
         """
@@ -118,7 +118,7 @@ class YOLO:
         self.model.fuse()

     @smart_inference_mode()
-    def predict(self, source=None, stream=False, verbose=False, **kwargs):
+    def predict(self, source=None, stream=False, **kwargs):
         """
         Perform prediction using the YOLO model.
@@ -126,7 +126,6 @@ class YOLO:
             source (str | int | PIL | np.ndarray): The source of the image to make predictions on.
                 Accepts all source types accepted by the YOLO model.
             stream (bool): Whether to stream the predictions or not. Defaults to False.
-            verbose (bool): Whether to print verbose information or not. Defaults to False.
             **kwargs: Additional keyword arguments passed to the predictor.
                 Check the 'configuration' section in the documentation for all available options.
@@ -143,7 +142,7 @@ class YOLO:
             self.predictor.setup_model(model=self.model)
         else:  # only update args if predictor is already setup
             self.predictor.args = get_cfg(self.predictor.args, overrides)
-        return self.predictor(source=source, stream=stream, verbose=verbose)
+        return self.predictor(source=source, stream=stream)

     @smart_inference_mode()
     def val(self, data=None, **kwargs):
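With the explicit verbose parameter gone, verbosity now rides through **kwargs into the predictor overrides and is read back as self.args.verbose downstream. A hedged usage sketch (the weights file and image name are illustrative):

    from ultralytics import YOLO

    model = YOLO('yolov8n.pt')  # illustrative weights file
    results = model.predict('bus.jpg', verbose=True)  # verbose travels via **kwargs into predictor.args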
@@ -234,7 +233,8 @@ class YOLO:
         """
         return self.model.names

-    def add_callback(self, event: str, func):
+    @staticmethod
+    def add_callback(event: str, func):
         """
         Add callback
         """
@@ -242,16 +242,8 @@ class YOLO:
     @staticmethod
     def _reset_ckpt_args(args):
-        args.pop("project", None)
-        args.pop("name", None)
-        args.pop("exist_ok", None)
-        args.pop("resume", None)
-        args.pop("batch", None)
-        args.pop("epochs", None)
-        args.pop("cache", None)
-        args.pop("save_json", None)
-        args.pop("half", None)
-        args.pop("v5loader", None)
+        for arg in 'verbose', 'project', 'name', 'exist_ok', 'resume', 'batch', 'epochs', 'cache', 'save_json', \
+                'half', 'v5loader':
+            args.pop(arg, None)

-        # set device to '' to prevent from auto DDP usage
-        args["device"] = ''
+        args["device"] = ''  # set device to '' to prevent auto-DDP usage


@@ -88,7 +88,7 @@ class BasePredictor:
         self.vid_path, self.vid_writer = None, None
         self.annotator = None
         self.data_path = None
-        self.callbacks = defaultdict(list, {k: v for k, v in callbacks.default_callbacks.items()})  # add callbacks
+        self.callbacks = defaultdict(list, callbacks.default_callbacks)  # add callbacks
         callbacks.add_integration_callbacks(self)

     def preprocess(self, img):
@@ -151,19 +151,19 @@ class BasePredictor:
         self.bs = bs

     @smart_inference_mode()
-    def __call__(self, source=None, model=None, verbose=False, stream=False):
+    def __call__(self, source=None, model=None, stream=False):
         if stream:
-            return self.stream_inference(source, model, verbose)
+            return self.stream_inference(source, model)
         else:
-            return list(self.stream_inference(source, model, verbose))  # merge list of Result into one
+            return list(self.stream_inference(source, model))  # merge list of Result into one

     def predict_cli(self):
         # Method used for CLI prediction. It always uses a generator for outputs, as CLI mode does not require them to be accumulated
-        gen = self.stream_inference(verbose=True)
+        gen = self.stream_inference()
         for _ in gen:  # running CLI inference without accumulating any outputs (do not modify)
             pass

-    def stream_inference(self, source=None, model=None, verbose=False):
+    def stream_inference(self, source=None, model=None):
         self.run_callbacks("on_predict_start")

         # setup model
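stream_inference remains a generator in both paths: stream=True hands it back lazily, the default path drains it into a list, and predict_cli exhausts it without keeping results so memory does not accumulate. A minimal sketch of that exhaust-without-accumulating pattern with a stand-in generator:

    def stream_inference():  # stand-in generator yielding per-image results
        for i in range(3):
            yield f'result {i}'

    # stream=True path: consume lazily, one result at a time
    for r in stream_inference():
        print(r)

    # CLI path: run to completion without holding results (do not accumulate)
    for _ in stream_inference():
        pass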
@@ -201,7 +201,7 @@ class BasePredictor:
             p, im0 = (path[i], im0s[i]) if self.webcam or self.from_img else (path, im0s)
             p = Path(p)

-            if verbose or self.args.save or self.args.save_txt or self.args.show:
+            if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:
                 s += self.write_results(i, self.results, (p, im, im0))

             if self.args.show:
@@ -214,11 +214,11 @@ class BasePredictor:
             yield from self.results

             # Print time (inference-only)
-            if verbose:
+            if self.args.verbose:
                 LOGGER.info(f"{s}{'' if len(preds) else '(no detections), '}{self.dt[1].dt * 1E3:.1f}ms")

         # Print results
-        if verbose and self.seen:
+        if self.args.verbose and self.seen:
             t = tuple(x.t / self.seen * 1E3 for x in self.dt)  # speeds per image
             LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms postprocess per image at shape '
                         f'{(1, 3, *self.imgsz)}' % t)
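The speed line mixes an f-string with %-placeholders: the f-string resolves {(1, 3, *self.imgsz)} first, leaving the three %.1f slots for the % t tuple. A standalone sketch of that two-stage formatting with illustrative timings:

    t = (1.2, 5.6, 0.8)  # illustrative pre-process, inference, postprocess times in ms
    imgsz = (640, 640)
    msg = (f'Speed: %.1fms pre-process, %.1fms inference, %.1fms postprocess per image at shape '
           f'{(1, 3, *imgsz)}' % t)
    print(msg)  # Speed: 1.2ms pre-process, 5.6ms inference, 0.8ms postprocess per image at shape (1, 3, 640, 640)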
@@ -243,7 +243,7 @@ class BasePredictor:
         if isinstance(source, (str, int, Path)):  # int for local usb camera
             source = str(source)
         is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
-        is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+        is_url = source.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://'))
         webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
         screenshot = source.lower().startswith('screen')
         if is_url and is_file:
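Reordering the URL prefixes is behavior-neutral, since str.startswith accepts a tuple and matches any element; the surrounding flags then classify the source. A standalone sketch of the classification logic with stand-in format lists:

    from pathlib import Path

    IMG_FORMATS, VID_FORMATS = ('jpg', 'png'), ('mp4', 'avi')  # stand-ins for the real format tuples

    def classify(source):
        source = str(source)
        is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
        is_url = source.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://'))
        webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
        return is_file, is_url, webcam

    print(classify('0'))                          # (False, False, True)  local camera index
    print(classify('rtsp://host/stream'))         # (False, True, True)   streaming URL
    print(classify('https://example.com/a.jpg'))  # (True, True, False)   downloadable image file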


@@ -85,7 +85,6 @@ class BaseTrainer:
         self.console = LOGGER
         self.validator = None
         self.model = None
-        self.callbacks = defaultdict(list)
         init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic)

         # Dirs
@@ -141,7 +140,7 @@ class BaseTrainer:
         self.plot_idx = [0, 1, 2]

         # Callbacks
-        self.callbacks = defaultdict(list, {k: v for k, v in callbacks.default_callbacks.items()})  # add callbacks
+        self.callbacks = defaultdict(list, callbacks.default_callbacks)  # add callbacks
         if RANK in {0, -1}:
             callbacks.add_integration_callbacks(self)


@@ -70,7 +70,7 @@ class BaseValidator:
         if self.args.conf is None:
             self.args.conf = 0.001  # default conf=0.001
-        self.callbacks = defaultdict(list, {k: v for k, v in callbacks.default_callbacks.items()})  # add callbacks
+        self.callbacks = defaultdict(list, callbacks.default_callbacks)  # add callbacks

     @smart_inference_mode()
     def __call__(self, trainer=None, model=None):