Docstrings arguments cleanup (#3229)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
@@ -80,8 +80,8 @@ def cfg2dict(cfg):
     """
     Convert a configuration object to a dictionary, whether it is a file path, a string, or a SimpleNamespace object.
 
-    Inputs:
-        cfg (str) or (Path) or (SimpleNamespace): Configuration object to be converted to a dictionary.
+    Args:
+        cfg (str | Path | SimpleNamespace): Configuration object to be converted to a dictionary.
 
     Returns:
         cfg (dict): Configuration object in dictionary format.
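For orientation, here is a minimal sketch of the behavior this docstring describes, using PyYAML as a stand-in for the repo's internal YAML loader; the _sketch name and loader choice are assumptions, not the library's API.

from pathlib import Path
from types import SimpleNamespace

import yaml  # stand-in for the repo's own YAML reader

def cfg2dict_sketch(cfg):
    """Return cfg as a plain dict, whether given a YAML file path or a namespace."""
    if isinstance(cfg, (str, Path)):
        with open(cfg, errors='ignore') as f:
            cfg = yaml.safe_load(f)  # YAML file -> dict
    elif isinstance(cfg, SimpleNamespace):
        cfg = vars(cfg)  # namespace attributes -> dict
    return cfg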
@@ -98,8 +98,8 @@ def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, ove
     Load and merge configuration data from a file or dictionary.
 
     Args:
-        cfg (str) or (Path) or (Dict) or (SimpleNamespace): Configuration data.
-        overrides (str) or (Dict), optional: Overrides in the form of a file name or a dictionary. Default is None.
+        cfg (str | Path | Dict | SimpleNamespace): Configuration data.
+        overrides (str | Dict | optional): Overrides in the form of a file name or a dictionary. Default is None.
 
     Returns:
         (SimpleNamespace): Training arguments namespace.
@@ -168,9 +168,9 @@ def check_cfg_mismatch(base: Dict, custom: Dict, e=None):
     This function checks for any mismatched keys between a custom configuration list and a base configuration list.
     If any mismatched keys are found, the function prints out similar keys from the base list and exits the program.
 
-    Inputs:
-        - custom (Dict): a dictionary of custom configuration options
-        - base (Dict): a dictionary of base configuration options
+    Args:
+        custom (Dict): a dictionary of custom configuration options
+        base (Dict): a dictionary of base configuration options
     """
     custom = _handle_deprecation(custom)
     base, custom = (set(x.keys()) for x in (base, custom))
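A hedged sketch of the key-matching step shown above: after reducing both configs to key sets, unknown custom keys can be matched against base keys with difflib. The exact messages and exit behavior of the real function may differ.

import difflib

def report_mismatched_keys(base: dict, custom: dict):
    base_keys, custom_keys = (set(x.keys()) for x in (base, custom))
    for k in custom_keys - base_keys:
        matches = difflib.get_close_matches(k, base_keys)  # similar valid keys
        print(f"'{k}' is not a valid key. Similar keys: {matches}")

report_mismatched_keys({'epochs': 100, 'imgsz': 640}, {'epoch': 3})
# 'epoch' is not a valid key. Similar keys: ['epochs']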
@@ -13,7 +13,7 @@ def auto_annotate(data, det_model='yolov8x.pt', sam_model='sam_b.pt', device='',
         det_model (str, optional): Pre-trained YOLO detection model. Defaults to 'yolov8x.pt'.
         sam_model (str, optional): Pre-trained SAM segmentation model. Defaults to 'sam_b.pt'.
         device (str, optional): Device to run the models on. Defaults to an empty string (CPU or GPU, if available).
-        output_dir (str, None, optional): Directory to save the annotated results.
+        output_dir (str | None | optional): Directory to save the annotated results.
             Defaults to a 'labels' folder in the same directory as 'data'.
     """
     device = select_device(device)
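A hedged usage sketch of the signature documented in this hunk; the import path is an assumption about the package layout at this commit and may need adjusting.

from ultralytics.yolo.data.annotator import auto_annotate  # path is assumed

# Detect with YOLO, segment with SAM, write labels next to the images by default.
auto_annotate(data='path/to/images', det_model='yolov8x.pt', sam_model='sam_b.pt', device='')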
@@ -223,7 +223,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
             root (str): Dataset path.
             args (Namespace): Argument parser containing dataset related settings.
             augment (bool, optional): True if dataset should be augmented, False otherwise. Defaults to False.
-            cache (Union[bool, str], optional): Cache setting, can be True, False, 'ram' or 'disk'. Defaults to False.
+            cache (bool | str | optional): Cache setting, can be True, False, 'ram' or 'disk'. Defaults to False.
         """
         super().__init__(root=root)
         if augment and args.fraction < 1.0:  # reduce training fraction
@@ -129,7 +129,7 @@ class YOLO:
 
         Args:
             cfg (str): model configuration file
-            task (str) or (None): model task
+            task (str | None): model task
             verbose (bool): display model info on load
         """
        cfg_dict = yaml_model_load(cfg)
@@ -149,7 +149,7 @@ class YOLO:
 
         Args:
             weights (str): model checkpoint to be loaded
-            task (str) or (None): model task
+            task (str | None): model task
         """
         suffix = Path(weights).suffix
         if suffix == '.pt':
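The two hunks above document the constructor's two entry points; a short usage sketch:

from ultralytics import YOLO

model_from_cfg = YOLO('yolov8n.yaml')  # _new: build from a model configuration file
model_from_pt = YOLO('yolov8n.pt')     # _load: restore a checkpoint, task inferred from it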
@@ -355,23 +355,23 @@ class Boxes(BaseTensor):
     A class for storing and manipulating detection boxes.
 
     Args:
-        boxes (torch.Tensor) or (numpy.ndarray): A tensor or numpy array containing the detection boxes,
+        boxes (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the detection boxes,
             with shape (num_boxes, 6). The last two columns should contain confidence and class values.
         orig_shape (tuple): Original image size, in the format (height, width).
 
     Attributes:
-        boxes (torch.Tensor) or (numpy.ndarray): The detection boxes with shape (num_boxes, 6).
-        orig_shape (torch.Tensor) or (numpy.ndarray): Original image size, in the format (height, width).
+        boxes (torch.Tensor | numpy.ndarray): The detection boxes with shape (num_boxes, 6).
+        orig_shape (torch.Tensor | numpy.ndarray): Original image size, in the format (height, width).
         is_track (bool): True if the boxes also include track IDs, False otherwise.
 
     Properties:
-        xyxy (torch.Tensor) or (numpy.ndarray): The boxes in xyxy format.
-        conf (torch.Tensor) or (numpy.ndarray): The confidence values of the boxes.
-        cls (torch.Tensor) or (numpy.ndarray): The class values of the boxes.
-        id (torch.Tensor) or (numpy.ndarray): The track IDs of the boxes (if available).
-        xywh (torch.Tensor) or (numpy.ndarray): The boxes in xywh format.
-        xyxyn (torch.Tensor) or (numpy.ndarray): The boxes in xyxy format normalized by original image size.
-        xywhn (torch.Tensor) or (numpy.ndarray): The boxes in xywh format normalized by original image size.
+        xyxy (torch.Tensor | numpy.ndarray): The boxes in xyxy format.
+        conf (torch.Tensor | numpy.ndarray): The confidence values of the boxes.
+        cls (torch.Tensor | numpy.ndarray): The class values of the boxes.
+        id (torch.Tensor | numpy.ndarray): The track IDs of the boxes (if available).
+        xywh (torch.Tensor | numpy.ndarray): The boxes in xywh format.
+        xyxyn (torch.Tensor | numpy.ndarray): The boxes in xyxy format normalized by original image size.
+        xywhn (torch.Tensor | numpy.ndarray): The boxes in xywh format normalized by original image size.
         data (torch.Tensor): The raw bboxes tensor
 
     Methods:
@@ -422,7 +422,7 @@ def is_dir_writeable(dir_path: Union[str, Path]) -> bool:
     Check if a directory is writeable.
 
     Args:
-        dir_path (str) or (Path): The path to the directory.
+        dir_path (str | Path): The path to the directory.
 
     Returns:
         (bool): True if the directory is writeable, False otherwise.
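One common way to implement the check documented above is to attempt a throwaway write; a minimal sketch, not necessarily the repo's exact approach:

import tempfile
from pathlib import Path
from typing import Union

def is_dir_writeable_sketch(dir_path: Union[str, Path]) -> bool:
    """Return True if a temporary file can be created in dir_path."""
    try:
        with tempfile.TemporaryFile(dir=dir_path):
            pass
        return True
    except OSError:
        return False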
@@ -467,7 +467,7 @@ def get_git_dir():
     If the current file is not part of a git repository, returns None.
 
     Returns:
-        (Path) or (None): Git root directory if found or None if not found.
+        (Path | None): Git root directory if found or None if not found.
     """
     for d in Path(__file__).parents:
         if (d / '.git').is_dir():
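Completing the loop whose first two lines appear in the hunk, the walk returns the first parent containing a .git directory (sketch):

from pathlib import Path

def get_git_dir_sketch():
    for d in Path(__file__).parents:
        if (d / '.git').is_dir():
            return d
    return None  # not inside a git repository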
@@ -480,7 +480,7 @@ def get_git_origin_url():
     Retrieves the origin URL of a git repository.
 
     Returns:
-        (str) or (None): The origin URL of the git repository.
+        (str | None): The origin URL of the git repository.
     """
     if is_git_dir():
         with contextlib.suppress(subprocess.CalledProcessError):
@@ -494,7 +494,7 @@ def get_git_branch():
     Returns the current git branch name. If not in a git repository, returns None.
 
     Returns:
-        (str) or (None): The current git branch name.
+        (str | None): The current git branch name.
     """
     if is_git_dir():
         with contextlib.suppress(subprocess.CalledProcessError):
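Both git helpers above follow the same pattern: shell out to git and suppress CalledProcessError. A hedged sketch for the branch lookup; the repo's exact git invocation may differ.

import contextlib
import subprocess

def get_git_branch_sketch():
    """Return the current branch name, or None outside a repo / on error."""
    with contextlib.suppress(subprocess.CalledProcessError, FileNotFoundError):
        out = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
        return out.decode().strip()
    return None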
@@ -51,13 +51,13 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
     Benchmark a YOLO model across different formats for speed and accuracy.
 
     Args:
-        model (Union[str, Path], optional): Path to the model file or directory. Default is
+        model (str | Path | optional): Path to the model file or directory. Default is
             Path(SETTINGS['weights_dir']) / 'yolov8n.pt'.
         imgsz (int, optional): Image size for the benchmark. Default is 160.
         half (bool, optional): Use half-precision for the model if True. Default is False.
         int8 (bool, optional): Use int8-precision for the model if True. Default is False.
         device (str, optional): Device to run the benchmark on, either 'cpu' or 'cuda'. Default is 'cpu'.
-        hard_fail (Union[bool, float], optional): If True or a float, assert benchmarks pass with given metric.
+        hard_fail (bool | float | optional): If True or a float, assert benchmarks pass with given metric.
             Default is False.
 
     Returns:
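A hedged usage sketch of the benchmark() signature documented above; the import path reflects an assumption about the package layout around this commit and may differ.

from ultralytics.yolo.utils.benchmarks import benchmark  # path is assumed

# Export and evaluate yolov8n across formats on CPU at a small image size.
benchmark(model='yolov8n.pt', imgsz=160, half=False, int8=False, device='cpu')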
@@ -47,7 +47,7 @@ def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0):
     stride, update it to the nearest multiple of the stride that is greater than or equal to the given floor value.
 
     Args:
-        imgsz (int) or (cList[int]): Image size.
+        imgsz (int | cList[int]): Image size.
         stride (int): Stride value.
         min_dim (int): Minimum number of dimensions.
         floor (int): Minimum allowed value for image size.
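The sentence at the top of this hunk describes a rounding rule; a one-line sketch of it (round each size up to the nearest stride multiple, never below floor), with an illustrative name:

import math

def round_to_stride(size: int, stride: int = 32, floor: int = 0) -> int:
    return max(math.ceil(size / stride) * stride, floor)

assert round_to_stride(630) == 640  # nearest multiple of 32 at or above 630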
@@ -102,7 +102,7 @@ class Bboxes:
     def mul(self, scale):
         """
         Args:
-            scale (tuple) or (list) or (int): the scale for four coords.
+            scale (tuple | list | int): the scale for four coords.
         """
         if isinstance(scale, Number):
             scale = to_4tuple(scale)
@@ -116,7 +116,7 @@ class Bboxes:
     def add(self, offset):
         """
         Args:
-            offset (tuple) or (list) or (int): the offset for four coords.
+            offset (tuple | list | int): the offset for four coords.
         """
         if isinstance(offset, Number):
             offset = to_4tuple(offset)
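mul() and add() share one convention: a scalar scale/offset is expanded to one value per coordinate before being applied. A sketch of that expansion; to_4tuple is assumed to behave like this.

from numbers import Number

def to_4tuple_sketch(v):
    """Expand a scalar to a 4-tuple; pass sequences through as tuples."""
    return (v,) * 4 if isinstance(v, Number) else tuple(v)

print(to_4tuple_sketch(2))             # (2, 2, 2, 2)
print(to_4tuple_sketch([1, 2, 3, 4]))  # (1, 2, 3, 4)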
@@ -123,7 +123,7 @@ def make_divisible(x, divisor):
 
     Args:
         x (int): The number to make divisible.
-        divisor (int) or (torch.Tensor): The divisor.
+        divisor (int | torch.Tensor): The divisor.
 
     Returns:
         (int): The nearest number divisible by the divisor.
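A minimal sketch consistent with this docstring; handling a tensor divisor by first reducing it to an int (e.g. its max) is an assumption here.

import math

def make_divisible_sketch(x: int, divisor: int) -> int:
    return math.ceil(x / divisor) * divisor  # round up to a multiple of divisor

print(make_divisible_sketch(100, 8))  # 104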
@@ -166,7 +166,7 @@ def non_max_suppression(
             list contains the apriori labels for a given image. The list should be in the format
             output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2).
         max_det (int): The maximum number of boxes to keep after NMS.
-        nc (int): (optional) The number of classes output by the model. Any indices after this will be considered masks.
+        nc (int, optional): The number of classes output by the model. Any indices after this will be considered masks.
         max_time_img (float): The maximum time (seconds) for processing one image.
         max_nms (int): The maximum number of boxes into torchvision.ops.nms().
         max_wh (int): The maximum box width and height in pixels
@@ -290,7 +290,7 @@ def clip_coords(coords, shape):
     Clip line coordinates to the image boundaries.
 
     Args:
-        coords (torch.Tensor) or (numpy.ndarray): A list of line coordinates.
+        coords (torch.Tensor | numpy.ndarray): A list of line coordinates.
         shape (tuple): A tuple of integers representing the size of the image in the format (height, width).
 
     Returns:
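A sketch of the clipping contract above for numpy inputs; the real helper also handles torch tensors (via clamp), which is omitted here.

import numpy as np

def clip_coords_sketch(coords: np.ndarray, shape: tuple) -> np.ndarray:
    h, w = shape
    coords[..., 0] = coords[..., 0].clip(0, w)  # x within [0, width]
    coords[..., 1] = coords[..., 1].clip(0, h)  # y within [0, height]
    return coords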
@@ -347,9 +347,9 @@ def xyxy2xywh(x):
     Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height) format.
 
     Args:
-        x (np.ndarray) or (torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.
+        x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.
     Returns:
-        y (np.ndarray) or (torch.Tensor): The bounding box coordinates in (x, y, width, height) format.
+        y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height) format.
     """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[..., 0] = (x[..., 0] + x[..., 2]) / 2  # x center
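Completing the formula whose first line appears in the hunk, a numpy-only sketch of the full conversion:

import numpy as np

def xyxy2xywh_sketch(x: np.ndarray) -> np.ndarray:
    y = np.copy(x)
    y[..., 0] = (x[..., 0] + x[..., 2]) / 2  # x center
    y[..., 1] = (x[..., 1] + x[..., 3]) / 2  # y center
    y[..., 2] = x[..., 2] - x[..., 0]        # width
    y[..., 3] = x[..., 3] - x[..., 1]        # height
    return y

print(xyxy2xywh_sketch(np.array([[10., 20., 50., 80.]])))  # [[30. 50. 40. 60.]]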
@@ -365,9 +365,9 @@ def xywh2xyxy(x):
     top-left corner and (x2, y2) is the bottom-right corner.
 
     Args:
-        x (np.ndarray) or (torch.Tensor): The input bounding box coordinates in (x, y, width, height) format.
+        x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x, y, width, height) format.
     Returns:
-        y (np.ndarray) or (torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format.
+        y (np.ndarray | torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format.
     """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[..., 0] = x[..., 0] - x[..., 2] / 2  # top left x
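And the inverse, again sketched with numpy from the single line shown in the hunk:

import numpy as np

def xywh2xyxy_sketch(x: np.ndarray) -> np.ndarray:
    y = np.copy(x)
    y[..., 0] = x[..., 0] - x[..., 2] / 2  # top-left x
    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top-left y
    y[..., 2] = x[..., 0] + x[..., 2] / 2  # bottom-right x
    y[..., 3] = x[..., 1] + x[..., 3] / 2  # bottom-right y
    return y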
@@ -382,13 +382,13 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
     Convert normalized bounding box coordinates to pixel coordinates.
 
     Args:
-        x (np.ndarray) or (torch.Tensor): The bounding box coordinates.
+        x (np.ndarray | torch.Tensor): The bounding box coordinates.
         w (int): Width of the image. Defaults to 640
         h (int): Height of the image. Defaults to 640
         padw (int): Padding width. Defaults to 0
         padh (int): Padding height. Defaults to 0
     Returns:
-        y (np.ndarray) or (torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where
+        y (np.ndarray | torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where
             x1,y1 is the top-left corner, x2,y2 is the bottom-right corner of the bounding box.
     """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
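The normalized variant composes the same corner math with a scale by image size plus padding; a numpy sketch under those stated defaults:

import numpy as np

def xywhn2xyxy_sketch(x: np.ndarray, w=640, h=640, padw=0, padh=0) -> np.ndarray:
    y = np.copy(x)
    y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top-left x
    y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top-left y
    y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw  # bottom-right x
    y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh  # bottom-right y
    return y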
@@ -405,13 +405,13 @@ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
     x, y, width and height are normalized to image dimensions
 
     Args:
-        x (np.ndarray) or (torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.
+        x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.
         w (int): The width of the image. Defaults to 640
         h (int): The height of the image. Defaults to 640
         clip (bool): If True, the boxes will be clipped to the image boundaries. Defaults to False
         eps (float): The minimum value of the box's width and height. Defaults to 0.0
     Returns:
-        y (np.ndarray) or (torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format
+        y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format
     """
     if clip:
         clip_boxes(x, (h - eps, w - eps))  # warning: inplace clip
@@ -428,13 +428,13 @@ def xyn2xy(x, w=640, h=640, padw=0, padh=0):
     Convert normalized coordinates to pixel coordinates of shape (n,2)
 
     Args:
-        x (np.ndarray) or (torch.Tensor): The input tensor of normalized bounding box coordinates
+        x (np.ndarray | torch.Tensor): The input tensor of normalized bounding box coordinates
         w (int): The width of the image. Defaults to 640
         h (int): The height of the image. Defaults to 640
         padw (int): The width of the padding. Defaults to 0
         padh (int): The height of the padding. Defaults to 0
     Returns:
-        y (np.ndarray) or (torch.Tensor): The x and y coordinates of the top left corner of the bounding box
+        y (np.ndarray | torch.Tensor): The x and y coordinates of the top left corner of the bounding box
     """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[..., 0] = w * x[..., 0] + padw  # top left x
@@ -447,9 +447,9 @@ def xywh2ltwh(x):
     Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates.
 
     Args:
-        x (np.ndarray) or (torch.Tensor): The input tensor with the bounding box coordinates in the xywh format
+        x (np.ndarray | torch.Tensor): The input tensor with the bounding box coordinates in the xywh format
     Returns:
-        y (np.ndarray) or (torch.Tensor): The bounding box coordinates in the xyltwh format
+        y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format
     """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
@@ -462,9 +462,9 @@ def xyxy2ltwh(x):
     Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right
 
     Args:
-        x (np.ndarray) or (torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format
+        x (np.ndarray | torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format
     Returns:
-        y (np.ndarray) or (torch.Tensor): The bounding box coordinates in the xyltwh format.
+        y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format.
     """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[:, 2] = x[:, 2] - x[:, 0]  # width
@@ -490,10 +490,10 @@ def ltwh2xyxy(x):
     It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
 
     Args:
-        x (np.ndarray) or (torch.Tensor): the input image
+        x (np.ndarray | torch.Tensor): the input image
 
     Returns:
-        y (np.ndarray) or (torch.Tensor): the xyxy coordinates of the bounding boxes.
+        y (np.ndarray | torch.Tensor): the xyxy coordinates of the bounding boxes.
     """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[:, 2] = x[:, 2] + x[:, 0]  # width
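The three ltwh hunks above all reduce to x2 = x1 + w and y2 = y1 + h (or the reverse); a numpy sketch of ltwh2xyxy:

import numpy as np

def ltwh2xyxy_sketch(x: np.ndarray) -> np.ndarray:
    y = np.copy(x)
    y[:, 2] = x[:, 2] + x[:, 0]  # x2 = x1 + w
    y[:, 3] = x[:, 3] + x[:, 1]  # y2 = y1 + h
    return y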