diff --git a/docs/config.md b/docs/config.md
index 1cf42b5..9c54961 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -60,7 +60,7 @@ task.
 | device | '' | cuda device, i.e. 0 or 0,1,2,3 or cpu. `''` selects available cuda 0 device |
 | epochs | 100 | Number of epochs to train |
 | workers | 8 | Number of cpu workers used per process. Scales automatically with DDP |
-| batch_size | 16 | Batch size of the dataloader |
+| batch | 16 | Batch size of the dataloader |
 | imgsz | 640 | Image size of data in dataloader |
 | optimizer | SGD | Optimizer used. Supported optimizer are: `Adam`, `SGD`, `RMSProp` |
 | single_cls | False | Train on multi-class data as single-class |
@@ -129,8 +129,8 @@ validation dataset and to detect and prevent overfitting.
 | noval | `False` | ??? |
 | save_json | `False` | |
 | save_hybrid | `False` | |
-| conf_thres | `0.001` | Confidence threshold |
-| iou_thres | `0.6` | IoU threshold |
+| conf | `0.001` | Confidence threshold |
+| iou | `0.6` | IoU threshold |
 | max_det | `300` | Maximum number of detections |
 | half | `True` | Use .half() mode. |
 | dnn | `False` | Use OpenCV DNN for ONNX inference |
diff --git a/ultralytics/yolo/configs/default.yaml b/ultralytics/yolo/configs/default.yaml
index 56dd116..d75ad97 100644
--- a/ultralytics/yolo/configs/default.yaml
+++ b/ultralytics/yolo/configs/default.yaml
@@ -9,7 +9,7 @@ model: null # i.e. yolov8n.pt, yolov8n.yaml. Path to model file
 data: null # i.e. coco128.yaml. Path to data file
 epochs: 100 # number of epochs to train for
 patience: 50 # TODO: epochs to wait for no observable improvement for early stopping of training
-batch_size: 16 # number of images per batch
+batch: 16 # number of images per batch
 imgsz: 640 # size of input images
 save: True # save checkpoints
 cache: False # True/ram, disk or False. Use cache for data loading
@@ -23,7 +23,6 @@ optimizer: 'SGD' # optimizer to use, choices=['SGD', 'Adam', 'AdamW', 'RMSProp']
 verbose: False # whether to print verbose output
 seed: 0 # random seed for reproducibility
 deterministic: True # whether to enable deterministic mode
-local_rank: -1 # local rank for distributed training
 single_cls: False # train multi-class data as single-class
 image_weights: False # use weighted image selection for training
 rect: False # support rectangular training
@@ -40,8 +39,8 @@ dropout: False # use dropout regularization
 val: True # validate/test during training
 save_json: False # save results to JSON file
 save_hybrid: False # save hybrid version of labels (labels + additional predictions)
-conf_thres: 0.001 # object confidence threshold for detection
-iou_thres: 0.7 # intersection over union threshold for NMS
+conf: 0.001 # object confidence threshold for detection
+iou: 0.7 # intersection over union (IoU) threshold for NMS
 max_det: 300 # maximum number of detections per image
 half: False # use half precision (FP16)
 dnn: False # use OpenCV DNN for ONNX inference
@@ -57,7 +56,6 @@ hide_labels: False # hide labels
 hide_conf: False # hide confidence scores
 vid_stride: 1 # video frame-rate stride
 line_thickness: 3 # bounding box thickness (pixels)
-update: False # Update all models
 visualize: False # visualize results
 augment: False # apply data augmentation to images
 agnostic_nms: False # class-agnostic NMS
diff --git a/ultralytics/yolo/engine/exporter.py b/ultralytics/yolo/engine/exporter.py
index 5765a9a..4dd4bd3 100644
--- a/ultralytics/yolo/engine/exporter.py
+++ b/ultralytics/yolo/engine/exporter.py
@@ -164,14 +164,14 @@ class Exporter:
             assert not self.args.dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic'
 
         # Checks
-        # if self.args.batch_size == model.args['batch_size']:  # user has not modified training batch_size
-        self.args.batch_size = 1
+        # if self.args.batch == model.args['batch_size']:  # user has not modified training batch_size
+        self.args.batch = 1
         self.imgsz = check_imgsz(self.args.imgsz, stride=model.stride, min_dim=2)  # check image size
         if self.args.optimize:
             assert self.device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'
 
         # Input
-        im = torch.zeros(self.args.batch_size, 3, *self.imgsz).to(self.device)
+        im = torch.zeros(self.args.batch, 3, *self.imgsz).to(self.device)
         file = Path(getattr(model, 'pt_path', None) or model.yaml['yaml_file'])
         if file.suffix == '.yaml':
             file = Path(file.name)
diff --git a/ultralytics/yolo/engine/trainer.py b/ultralytics/yolo/engine/trainer.py
index 7b8248b..0d02c1a 100644
--- a/ultralytics/yolo/engine/trainer.py
+++ b/ultralytics/yolo/engine/trainer.py
@@ -102,7 +102,7 @@ class BaseTrainer:
             yaml_save(self.save_dir / 'args.yaml', OmegaConf.to_container(self.args, resolve=True))  # save run args
         self.last, self.best = self.wdir / 'last.pt', self.wdir / 'best.pt'  # checkpoint paths
 
-        self.batch_size = self.args.batch_size
+        self.batch_size = self.args.batch
         self.epochs = self.args.epochs
         self.start_epoch = 0
         if RANK == -1:
diff --git a/ultralytics/yolo/engine/validator.py b/ultralytics/yolo/engine/validator.py
index 638e828..6715cb4 100644
--- a/ultralytics/yolo/engine/validator.py
+++ b/ultralytics/yolo/engine/validator.py
@@ -87,18 +87,18 @@ class BaseValidator:
             callbacks.add_integration_callbacks(self)
             self.run_callbacks('on_val_start')
             assert model is not None, "Either trainer or model is needed for validation"
-            self.device = select_device(self.args.device, self.args.batch_size)
+            self.device = select_device(self.args.device, self.args.batch)
             self.args.half &= self.device.type != 'cpu'
             model = AutoBackend(model, device=self.device, dnn=self.args.dnn, fp16=self.args.half)
             self.model = model
             stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
             imgsz = check_imgsz(self.args.imgsz, stride=stride)
             if engine:
-                self.args.batch_size = model.batch_size
+                self.args.batch = model.batch_size
             else:
                 self.device = model.device
                 if not pt and not jit:
-                    self.args.batch_size = 1  # export.py models default to batch-size 1
+                    self.args.batch = 1  # export.py models default to batch-size 1
                     self.logger.info(
                         f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
 
@@ -110,7 +110,7 @@ class BaseValidator:
             if self.device.type == 'cpu':
                 self.args.workers = 0  # faster CPU val as time dominated by inference, not dataloading
             self.dataloader = self.dataloader or \
-                self.get_dataloader(data.get("val") or data.set("test"), self.args.batch_size)
+                self.get_dataloader(data.get("val") or data.set("test"), self.args.batch)
             self.data = data
 
             model.eval()
diff --git a/ultralytics/yolo/v8/detect/predict.py b/ultralytics/yolo/v8/detect/predict.py
index dff7ced..4d1282f 100644
--- a/ultralytics/yolo/v8/detect/predict.py
+++ b/ultralytics/yolo/v8/detect/predict.py
@@ -20,8 +20,8 @@ class DetectionPredictor(BasePredictor):
 
     def postprocess(self, preds, img, orig_img):
         preds = ops.non_max_suppression(preds,
-                                        self.args.conf_thres,
-                                        self.args.iou_thres,
+                                        self.args.conf,
+                                        self.args.iou,
                                         agnostic=self.args.agnostic_nms,
                                         max_det=self.args.max_det)
 
diff --git a/ultralytics/yolo/v8/detect/val.py b/ultralytics/yolo/v8/detect/val.py
index 9f49298..fa69306 100644
--- a/ultralytics/yolo/v8/detect/val.py
+++ b/ultralytics/yolo/v8/detect/val.py
@@ -58,8 +58,8 @@ class DetectionValidator(BaseValidator):
 
     def postprocess(self, preds):
         preds = ops.non_max_suppression(preds,
-                                        self.args.conf_thres,
-                                        self.args.iou_thres,
+                                        self.args.conf,
+                                        self.args.iou,
                                         labels=self.lb,
                                         multi_label=True,
                                         agnostic=self.args.single_cls,
diff --git a/ultralytics/yolo/v8/segment/predict.py b/ultralytics/yolo/v8/segment/predict.py
index 942bb8d..1705dba 100644
--- a/ultralytics/yolo/v8/segment/predict.py
+++ b/ultralytics/yolo/v8/segment/predict.py
@@ -14,8 +14,8 @@ class SegmentationPredictor(DetectionPredictor):
         masks = []
         # TODO: filter by classes
         p = ops.non_max_suppression(preds[0],
-                                    self.args.conf_thres,
-                                    self.args.iou_thres,
+                                    self.args.conf,
+                                    self.args.iou,
                                     agnostic=self.args.agnostic_nms,
                                     max_det=self.args.max_det,
                                     nm=32)
diff --git a/ultralytics/yolo/v8/segment/val.py b/ultralytics/yolo/v8/segment/val.py
index cc90497..6dc0bc1 100644
--- a/ultralytics/yolo/v8/segment/val.py
+++ b/ultralytics/yolo/v8/segment/val.py
@@ -60,8 +60,8 @@ class SegmentationValidator(DetectionValidator):
 
     def postprocess(self, preds):
         p = ops.non_max_suppression(preds[0],
-                                    self.args.conf_thres,
-                                    self.args.iou_thres,
+                                    self.args.conf,
+                                    self.args.iou,
                                     labels=self.lb,
                                     multi_label=True,
                                     agnostic=self.args.single_cls,
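For reference, the argument renames applied throughout this patch are batch_size -> batch, conf_thres -> conf and iou_thres -> iou, with the local_rank and update keys dropped from default.yaml. The sketch below is a hypothetical, illustrative helper (it is not part of this patch, and the function and dictionary names are invented here) showing how an override dictionary written against the old key names could be mapped onto the new ones before being passed along.

# Hypothetical migration helper -- illustrative only, not included in this patch.
# Maps the deprecated argument names to the keys renamed in this change.
DEPRECATED_KEYS = {
    'batch_size': 'batch',  # dataloader/export/val batch size
    'conf_thres': 'conf',   # object confidence threshold
    'iou_thres': 'iou',     # IoU threshold for NMS
}

REMOVED_KEYS = {'local_rank', 'update'}  # keys deleted from default.yaml


def migrate_overrides(overrides: dict) -> dict:
    """Return a copy of an overrides dict with old key names mapped to the new ones."""
    migrated = {}
    for key, value in overrides.items():
        if key in REMOVED_KEYS:
            continue  # these options no longer exist
        migrated[DEPRECATED_KEYS.get(key, key)] = value
    return migrated


# Example: old-style overrides become {'batch': 32, 'conf': 0.25, 'iou': 0.7}
print(migrate_overrides({'batch_size': 32, 'conf_thres': 0.25, 'iou_thres': 0.7, 'update': False}))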