ultralytics 8.0.81
single-line docstring updates (#2061)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
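The change pattern throughout this diff is the PEP 257 one-line docstring style: each touched function or method gains a single-line summary as its first statement (each hunk below adds exactly one such line). A minimal before/after sketch of the convention, illustrative only and not taken verbatim from any one hunk:

# Before: behaviour described only by an inline comment
def __len__(self):
    return self.nf  # number of files

# After: a one-line docstring summarizes the method
def __len__(self):
    """Returns the number of files in the object."""
    return self.nf  # number of files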
@@ -32,6 +32,7 @@ class SourceTypes:
class LoadStreams:
    # YOLOv8 streamloader, i.e. `yolo predict source='rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`
    def __init__(self, sources='file.streams', imgsz=640, stride=32, auto=True, transforms=None, vid_stride=1):
        """Initialize instance variables and check for consistent input stream shapes."""
        torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
        self.mode = 'stream'
        self.imgsz = imgsz
@@ -97,10 +98,12 @@ class LoadStreams:
            time.sleep(0.0)  # wait time

    def __iter__(self):
        """Iterates through YOLO image feed and re-opens unresponsive streams."""
        self.count = -1
        return self

    def __next__(self):
        """Returns source paths, transformed and original images for processing YOLOv5."""
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
@@ -117,6 +120,7 @@ class LoadStreams:
        return self.sources, im, im0, None, ''

    def __len__(self):
        """Return the length of the sources object."""
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years

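Every loader touched in this commit follows the same iterator contract shown above: `__iter__` resets a counter, `__next__` yields a 5-tuple of (sources/paths, transformed image(s), original image(s), capture handle or None, progress string), and `__len__` reports the source or batch count. A hedged usage sketch, not part of the diff; the module path and the RTSP URL are assumptions based on the 8.0.x source layout:

# Hedged usage sketch (illustrative only, not part of this commit).
from ultralytics.yolo.data.dataloaders.stream_loaders import LoadStreams  # module path assumed for 8.0.x

dataset = LoadStreams(sources='rtsp://example.com/media.mp4', imgsz=640, vid_stride=1)
print(len(dataset))                       # number of sources, per __len__ above
for sources, im, im0, cap, s in dataset:  # matches `return self.sources, im, im0, None, ''`
    print(sources, len(im0))              # im0 holds the raw frames, one per open stream
    break                                 # stop manually; iteration only ends when a stream
                                          # thread dies or 'q' is pressed (see __next__ above)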
@@ -153,6 +157,7 @@ class LoadScreenshots:
        self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}

    def __iter__(self):
        """Returns an iterator of the object."""
        return self

    def __next__(self):
@@ -173,6 +178,7 @@ class LoadScreenshots:
class LoadImages:
    # YOLOv8 image/video dataloader, i.e. `yolo predict source=image.jpg/vid.mp4`
    def __init__(self, path, imgsz=640, stride=32, auto=True, transforms=None, vid_stride=1):
        """Initialize the Dataloader and raise FileNotFoundError if file not found."""
        if isinstance(path, str) and Path(path).suffix == '.txt':  # *.txt file with img/vid/dir on each line
            path = Path(path).read_text().rsplit()
        files = []
@@ -211,10 +217,12 @@ class LoadImages:
                                    f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}')

    def __iter__(self):
        """Returns an iterator object for VideoStream or ImageFolder."""
        self.count = 0
        return self

    def __next__(self):
        """Return next image, path and metadata from dataset."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
@@ -276,12 +284,14 @@ class LoadImages:
        return im

    def __len__(self):
        """Returns the number of files in the object."""
        return self.nf  # number of files


class LoadPilAndNumpy:

    def __init__(self, im0, imgsz=640, stride=32, auto=True, transforms=None):
        """Initialize PIL and Numpy Dataloader."""
        if not isinstance(im0, list):
            im0 = [im0]
        self.paths = [getattr(im, 'filename', f'image{i}.jpg') for i, im in enumerate(im0)]
@@ -296,6 +306,7 @@ class LoadPilAndNumpy:

    @staticmethod
    def _single_check(im):
        """Validate and format an image to numpy array."""
        assert isinstance(im, (Image.Image, np.ndarray)), f'Expected PIL/np.ndarray image type, but got {type(im)}'
        if isinstance(im, Image.Image):
            if im.mode != 'RGB':
@@ -305,6 +316,7 @@ class LoadPilAndNumpy:
        return im

    def _single_preprocess(self, im, auto):
        """Preprocesses a single image for inference."""
        if self.transforms:
            im = self.transforms(im)  # transforms
        else:
@@ -314,9 +326,11 @@ class LoadPilAndNumpy:
        return im

    def __len__(self):
        """Returns the length of the 'im0' attribute."""
        return len(self.im0)

    def __next__(self):
        """Returns batch paths, images, processed images, None, ''."""
        if self.count == 1:  # loop only once as it's batch inference
            raise StopIteration
        auto = all(x.shape == self.im0[0].shape for x in self.im0) and self.auto
@@ -326,6 +340,7 @@ class LoadPilAndNumpy:
        return self.paths, im, self.im0, None, ''

    def __iter__(self):
        """Enables iteration for class LoadPilAndNumpy."""
        self.count = 0
        return self

@@ -338,16 +353,19 @@ class LoadTensor:
        self.mode = 'image'

    def __iter__(self):
        """Returns an iterator object."""
        self.count = 0
        return self

    def __next__(self):
        """Return next item in the iterator."""
        if self.count == 1:
            raise StopIteration
        self.count += 1
        return None, self.im0, self.im0, None, ''  # self.paths, im, self.im0, None, ''

    def __len__(self):
        """Returns the batch size."""
        return self.bs

@@ -24,6 +24,7 @@ IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation
class Albumentations:
    # YOLOv5 Albumentations class (optional, only used if package is installed)
    def __init__(self, size=640):
        """Instantiate object with image augmentations for YOLOv5."""
        self.transform = None
        prefix = colorstr('albumentations: ')
        try:
@@ -48,6 +49,7 @@ class Albumentations:
            LOGGER.info(f'{prefix}{e}')

    def __call__(self, im, labels, p=1.0):
        """Transforms input image and labels with probability 'p'."""
        if self.transform and random.random() < p:
            new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0])  # transformed
            im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
@@ -111,7 +113,7 @@ def replicate(im, labels):


def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    """Resize and pad image while meeting stride-multiple constraints."""
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
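As the docstring above states, `letterbox` scales the image to fit `new_shape` while preserving aspect ratio, then pads the remainder (grey 114 by default) so that, with `auto=True`, the padded shape is a multiple of `stride`. A small worked sketch of that arithmetic, not part of the diff; `letterbox_shape` is an illustrative helper written against the defaults shown in the signature:

def letterbox_shape(shape_hw, new_shape=(640, 640), stride=32, auto=True):
    """Illustrative re-derivation of the letterbox geometry: resized (w, h) plus per-side (w, h) padding."""
    r = min(new_shape[0] / shape_hw[0], new_shape[1] / shape_hw[1])       # scale ratio, aspect preserved
    new_unpad = int(round(shape_hw[1] * r)), int(round(shape_hw[0] * r))  # (w, h) after resize
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]     # total padding in w and h
    if auto:                                                              # pad only up to the nearest stride multiple
        dw, dh = dw % stride, dh % stride
    return new_unpad, (dw / 2, dh / 2)

print(letterbox_shape((720, 1280)))  # ((640, 360), (0.0, 12.0)): a 1280x720 frame resizes to 640x360, padded to 640x384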
@@ -359,6 +361,7 @@ def classify_transforms(size=224):
class LetterBox:
    # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
    def __init__(self, size=(640, 640), auto=False, stride=32):
        """Resizes and crops an image to a specified size for YOLOv5 preprocessing."""
        super().__init__()
        self.h, self.w = (size, size) if isinstance(size, int) else size
        self.auto = auto  # pass max size integer, automatically solve for short side using stride
@@ -378,6 +381,7 @@ class LetterBox:
class CenterCrop:
    # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
    def __init__(self, size=640):
        """Converts input image into tensor for YOLOv5 processing."""
        super().__init__()
        self.h, self.w = (size, size) if isinstance(size, int) else size

@@ -391,6 +395,7 @@ class CenterCrop:
class ToTensor:
    # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
    def __init__(self, half=False):
        """Initialize ToTensor class for YOLOv5 image preprocessing."""
        super().__init__()
        self.half = half

@@ -162,14 +162,17 @@ class InfiniteDataLoader(dataloader.DataLoader):
    """

    def __init__(self, *args, **kwargs):
        """Dataloader that reuses workers for same syntax as vanilla DataLoader."""
        super().__init__(*args, **kwargs)
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        """Returns the length of batch_sampler's sampler."""
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        """Creates a sampler that infinitely repeats."""
        for _ in range(len(self)):
            yield next(self.iterator)

@@ -182,9 +185,11 @@ class _RepeatSampler:
    """

    def __init__(self, sampler):
        """Sampler that repeats dataset samples infinitely."""
        self.sampler = sampler

    def __iter__(self):
        """Infinite loop iterating over a given sampler."""
        while True:
            yield from iter(self.sampler)

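Taken together, the two classes above keep DataLoader workers alive across epochs: `_RepeatSampler` re-yields its wrapped sampler forever, `InfiniteDataLoader` swaps it in as the `batch_sampler` and builds a single iterator in `__init__`, while `__len__` and `__iter__` still expose exactly one epoch's worth of batches. A self-contained sketch of the same mechanism, using a toy dataset and a re-implemented sampler rather than the repository classes:

import torch
from torch.utils.data import BatchSampler, DataLoader, Dataset, RandomSampler

class ToyDataset(Dataset):  # hypothetical stand-in, for illustration only
    def __len__(self):
        return 8

    def __getitem__(self, i):
        return torch.tensor(i)

class RepeatSampler:
    """Same idea as _RepeatSampler above: re-yield the wrapped sampler forever."""

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)

ds = ToyDataset()
batch_sampler = RepeatSampler(BatchSampler(RandomSampler(ds), batch_size=4, drop_last=False))
loader = DataLoader(ds, batch_sampler=batch_sampler, num_workers=0)
it = iter(loader)  # one iterator, reused for every "epoch" (worker processes would be reused too)
for epoch in range(3):
    batches = [next(it) for _ in range(len(ds) // 4)]  # take exactly one epoch's worth of batches
    print(epoch, [b.tolist() for b in batches])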
@@ -221,6 +226,7 @@ class LoadScreenshots:
        self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}

    def __iter__(self):
        """Iterates over objects with the same structure as the monitor attribute."""
        return self

    def __next__(self):
@@ -241,6 +247,7 @@ class LoadScreenshots:
class LoadImages:
    # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
    def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
        """Initialize instance variables and check for valid input."""
        if isinstance(path, str) and Path(path).suffix == '.txt':  # *.txt file with img/vid/dir on each line
            path = Path(path).read_text().rsplit()
        files = []
@@ -276,10 +283,12 @@ class LoadImages:
                            f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'

    def __iter__(self):
        """Returns an iterator object for iterating over images or videos found in a directory."""
        self.count = 0
        return self

    def __next__(self):
        """Iterator's next item, performs transformation on image and returns path, transformed image, original image, capture and size."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
@@ -338,12 +347,14 @@ class LoadImages:
        return im

    def __len__(self):
        """Returns the number of files in the class instance."""
        return self.nf  # number of files


class LoadStreams:
    # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`
    def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
        """Initialize YOLO detector with optional transforms and check input shapes."""
        torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
        self.mode = 'stream'
        self.img_size = img_size
@@ -404,10 +415,12 @@ class LoadStreams:
            time.sleep(0.0)  # wait time

    def __iter__(self):
        """Iterator that returns the class instance."""
        self.count = -1
        return self

    def __next__(self):
        """Return a tuple containing transformed and resized image data."""
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
@@ -424,6 +437,7 @@ class LoadStreams:
        return self.sources, im, im0, None, ''

    def __len__(self):
        """Returns the number of sources as the length of the object."""
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years

@@ -607,6 +621,7 @@ class LoadImagesAndLabels(Dataset):
        return cache

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        """Cache labels and save as numpy file for next time."""
        # Cache dataset labels, check images and read shapes
        if path.exists():
            path.unlink()  # remove *.cache file if exists
@@ -646,9 +661,11 @@ class LoadImagesAndLabels(Dataset):
        return x

    def __len__(self):
        """Returns the length of 'im_files' attribute."""
        return len(self.im_files)

    def __getitem__(self, index):
        """Get a sample and its corresponding label, filename and shape from the dataset."""
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
@@ -1039,6 +1056,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
    """

    def __init__(self, root, augment, imgsz, cache=False):
        """Initialize YOLO dataset with root, augmentation, image size, and cache parameters."""
        super().__init__(root=root)
        self.torch_transforms = classify_transforms(imgsz)
        self.album_transforms = classify_albumentations(augment, imgsz) if augment else None
@@ -1047,6 +1065,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
        self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples]  # file, index, npy, im

    def __getitem__(self, i):
        """Retrieves data items of 'dataset' via indices & creates InfiniteDataLoader."""
        f, j, fn, im = self.samples[i]  # filename, index, filename.with_suffix('.npy'), image
        if self.cache_ram and im is None:
            im = self.samples[i][3] = cv2.imread(f)