Update README.md (#272)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, GPL-3.0 license

-__version__ = "8.0.3"
+__version__ = "8.0.4"

 from ultralytics.hub import checks
 from ultralytics.yolo.engine.model import YOLO
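The two imports above are the package's public surface: YOLO for loading and running models, checks for environment validation. A minimal usage sketch, assuming the 8.0.x call signatures and a local yolov8n.pt checkpoint:

    from ultralytics import YOLO, checks

    checks()  # verify Python/torch versions and installed requirements
    model = YOLO("yolov8n.pt")  # load a pretrained detection model
    results = model.predict(source="bus.jpg")  # run inference on a single image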
@@ -197,7 +197,7 @@ class AutoBackend(nn.Module):
             input_details = interpreter.get_input_details()  # inputs
             output_details = interpreter.get_output_details()  # outputs
         elif tfjs:  # TF.js
-            raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
+            raise NotImplementedError('ERROR: YOLOv8 TF.js inference is not supported')
         elif paddle:  # PaddlePaddle
             LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
             check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
@@ -366,7 +366,7 @@ class Concat(nn.Module):


 class AutoShape(nn.Module):
-    # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
+    # YOLOv8 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
     conf = 0.25  # NMS confidence threshold
     iou = 0.45  # NMS IoU threshold
     agnostic = False  # NMS class-agnostic
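The class attributes shown here are the NMS knobs AutoShape applies at inference time; setting them on an instance overrides the class defaults. A sketch, with `model` standing in for any hypothetical wrapped detector:

    wrapped = AutoShape(model)  # model: hypothetical detection nn.Module
    wrapped.conf = 0.50         # keep only boxes above 50% confidence
    wrapped.iou = 0.60          # merge boxes overlapping above 60% IoU
    wrapped.agnostic = True     # suppress overlaps across classes, not per class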
@@ -465,7 +465,7 @@ class AutoShape(nn.Module):


 class Detections:
-    # YOLOv5 detections class for inference results
+    # YOLOv8 detections class for inference results
     def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
         super().__init__()
         d = pred[0].device  # device
@@ -572,7 +572,7 @@ class Detections:
         return self._run(pprint=True)  # print results

     def __repr__(self):
-        return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
+        return f'YOLOv8 {self.__class__} instance\n' + self.__str__()


 class Proto(nn.Module):
@@ -603,7 +603,7 @@ class Ensemble(nn.ModuleList):

 # heads
 class Detect(nn.Module):
-    # YOLOv5 Detect head for detection models
+    # YOLOv8 Detect head for detection models
     dynamic = False  # force grid reconstruction
     export = False  # export mode
     shape = None
@@ -650,7 +650,7 @@ class Detect(nn.Module):


 class Segment(Detect):
-    # YOLOv5 Segment head for segmentation models
+    # YOLOv8 Segment head for segmentation models
     def __init__(self, nc=80, nm=32, npr=256, ch=()):
         super().__init__(nc, ch)
         self.nm = nm  # number of masks
@@ -673,7 +673,7 @@ class Segment(Detect):


 class Classify(nn.Module):
-    # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
+    # YOLOv8 classification head, i.e. x(b,c1,20,20) to x(b,c2)
     def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
         super().__init__()
         c_ = 1280  # efficientnet_b0 size
@@ -142,7 +142,7 @@ class BaseModel(nn.Module):


 class DetectionModel(BaseModel):
-    # YOLOv5 detection model
+    # YOLOv8 detection model
     def __init__(self, cfg='yolov8n.yaml', ch=3, nc=None, verbose=True):  # model, input channels, number of classes
         super().__init__()
         self.yaml = cfg if isinstance(cfg, dict) else yaml_load(check_yaml(cfg), append_filename=True)  # cfg dict
@@ -222,13 +222,13 @@ class DetectionModel(BaseModel):


 class SegmentationModel(DetectionModel):
-    # YOLOv5 segmentation model
+    # YOLOv8 segmentation model
     def __init__(self, cfg='yolov8n-seg.yaml', ch=3, nc=None, verbose=True):
         super().__init__(cfg, ch, nc, verbose)


 class ClassificationModel(BaseModel):
-    # YOLOv5 classification model
+    # YOLOv8 classification model
     def __init__(self,
                  cfg=None,
                  model=None,
@@ -531,7 +531,7 @@ class CopyPaste:


 class Albumentations:
-    # YOLOv5 Albumentations class (optional, only used if package is installed)
+    # YOLOv8 Albumentations class (optional, only used if package is installed)
     def __init__(self, p=1.0):
         self.p = p
         self.transform = None
@@ -699,7 +699,7 @@ def classify_albumentations(
         std=IMAGENET_STD,
         auto_aug=False,
 ):
-    # YOLOv5 classification Albumentations (optional, only used if package is installed)
+    # YOLOv8 classification Albumentations (optional, only used if package is installed)
     prefix = colorstr("albumentations: ")
     try:
         import albumentations as A
@@ -732,7 +732,7 @@ def classify_albumentations(


 class ClassifyLetterBox:
-    # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+    # YOLOv8 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
     def __init__(self, size=(640, 640), auto=False, stride=32):
         super().__init__()
         self.h, self.w = (size, size) if isinstance(size, int) else size
@@ -751,7 +751,7 @@ class ClassifyLetterBox:


 class CenterCrop:
-    # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
+    # YOLOv8 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
     def __init__(self, size=640):
         super().__init__()
         self.h, self.w = (size, size) if isinstance(size, int) else size
@@ -764,7 +764,7 @@ class CenterCrop:


 class ToTensor:
-    # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+    # YOLOv8 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
     def __init__(self, half=False):
         super().__init__()
         self.half = half
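The renamed comments on ClassifyLetterBox, CenterCrop and ToTensor all point at the same composition pattern. A sketch of that pipeline, assuming the cv2/numpy HWC-BGR input convention used elsewhere in the repo:

    import torchvision.transforms as T

    transform = T.Compose([
        ClassifyLetterBox(size=640),  # resize and pad to a letterboxed square
        CenterCrop(size=640),         # crop the central 640x640 region
        ToTensor(half=False),         # HWC BGR numpy -> CHW RGB float tensor
    ])
    tensor = transform(image_bgr)  # image_bgr: hypothetical cv2 image array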
@@ -19,7 +19,7 @@ from ultralytics.yolo.utils.checks import check_requirements


 class LoadStreams:
-    # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`
+    # YOLOv8 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`
     def __init__(self, sources='file.streams', imgsz=640, stride=32, auto=True, transforms=None, vid_stride=1):
         torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
         self.mode = 'stream'
@@ -105,7 +105,7 @@ class LoadStreams:


 class LoadScreenshots:
-    # YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
+    # YOLOv8 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
     def __init__(self, source, imgsz=640, stride=32, auto=True, transforms=None):
         # source = [screen_number left top width height] (pixels)
         check_requirements('mss')
@@ -154,7 +154,7 @@ class LoadScreenshots:


 class LoadImages:
-    # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
+    # YOLOv8 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
     def __init__(self, path, imgsz=640, stride=32, auto=True, transforms=None, vid_stride=1):
         if isinstance(path, str) and Path(path).suffix == ".txt":  # *.txt file with img/vid/dir on each line
             path = Path(path).read_text().rsplit()
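A sketch of driving LoadImages directly, assuming it yields the same 5-tuple as the YOLOv5 loader it derives from (path, letterboxed image, original frame, video capture, log string):

    dataset = LoadImages("bus.jpg", imgsz=640, stride=32)
    for path, im, im0, cap, s in dataset:  # assumed 5-tuple, as in YOLOv5
        print(path, im.shape, im0.shape)   # CHW network input vs. original HWC frame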
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, GPL-3.0 license
 """
-Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
+Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit

 Format | `format=argument` | Model
 --- | --- | ---
@@ -81,7 +81,7 @@ MACOS = platform.system() == 'Darwin'  # macOS environment


 def export_formats():
-    # YOLOv5 export formats
+    # YOLOv8 export formats
     x = [
         ['PyTorch', '-', '.pt', True, True],
         ['TorchScript', 'torchscript', '.torchscript', True, True],
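A sketch of querying the table, assuming the function keeps the YOLOv5 behavior of wrapping the rows in a pandas DataFrame with Format/Argument/Suffix/CPU/GPU columns:

    fmts = export_formats()
    print(fmts)  # one row per target format
    onnx_arg = fmts.loc[fmts['Format'] == 'ONNX', 'Argument'].item()  # assumed row: 'onnx'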
@@ -99,7 +99,7 @@ def export_formats():


 def try_export(inner_func):
-    # YOLOv5 export decorator, i..e @try_export
+    # YOLOv8 export decorator, i..e @try_export
     inner_args = get_default_args(inner_func)

     def outer_func(*args, **kwargs):
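The decorator wraps each per-format export function so one failing backend logs an error and returns cleanly instead of aborting the whole export run. A minimal sketch of the pattern (not the exact body):

    import time

    def try_export_sketch(inner_func):
        def outer_func(*args, **kwargs):
            t0 = time.time()
            try:
                result = inner_func(*args, **kwargs)
                print(f'export success ✅ {time.time() - t0:.1f}s')
                return result
            except Exception as e:
                print(f'export failure ❌ {time.time() - t0:.1f}s: {e}')
                return None
        return outer_func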
@@ -24,11 +24,11 @@ ROOT = FILE.parents[2]  # YOLO
 DEFAULT_CONFIG = ROOT / "yolo/configs/default.yaml"
 RANK = int(os.getenv('RANK', -1))
 NUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLOv5 multiprocessing threads
-AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true'  # global auto-install mode
+AUTOINSTALL = str(os.getenv('YOLO_AUTOINSTALL', True)).lower() == 'true'  # global auto-install mode
 FONT = 'Arial.ttf'  # https://ultralytics.com/assets/Arial.ttf
-VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true'  # global verbose mode
+VERBOSE = str(os.getenv('YOLO_VERBOSE', True)).lower() == 'true'  # global verbose mode
 TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}'  # tqdm bar format
-LOGGING_NAME = 'yolov5'
+LOGGING_NAME = 'ultralytics'
 HELP_MSG = \
     """
     Usage examples for running YOLOv8:
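The renamed variables are read once, at module import, so the environment must be set before ultralytics is imported:

    import os

    os.environ['YOLO_AUTOINSTALL'] = 'false'  # disable pip AutoUpdate of missing requirements
    os.environ['YOLO_VERBOSE'] = 'false'      # quiet the 'ultralytics' logger
    import ultralytics  # the constants above are evaluated during this import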
@@ -288,7 +288,7 @@ def set_logging(name=LOGGING_NAME, verbose=True):


 class TryExcept(contextlib.ContextDecorator):
-    # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
+    # YOLOv8 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
     def __init__(self, msg=''):
         self.msg = msg

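Usage follows the comment's own recipe; the context manager swallows the exception and prints it with the optional message, so execution continues:

    @TryExcept(msg='flaky step')
    def flaky():
        raise ValueError('boom')

    flaky()  # prints 'flaky step: boom' instead of raising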
@@ -92,7 +92,7 @@ def check_version(current: str = "0.0.0",
     from pkg_resources import parse_version
     current, minimum = (parse_version(x) for x in (current, minimum))
     result = (current == minimum) if pinned else (current >= minimum)  # bool
-    warning_message = f"WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed"
+    warning_message = f"WARNING ⚠️ {name}{minimum} is required by YOLOv8, but {name}{current} is currently installed"
     if hard:
         assert result, emojis(warning_message)  # assert min requirements met
     if verbose and not result:
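A usage sketch, assuming the function returns the boolean result as the YOLOv5 original does:

    import torch

    check_version(torch.__version__, minimum='1.7.0', name='torch ', hard=True)  # raises if too old
    ok = check_version('8.0.4', minimum='8.0.0')  # True; a soft check never raises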
@@ -176,7 +176,7 @@ def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=()
             n += 1

     if s and install and AUTOINSTALL:  # check environment variable
-        LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...")
+        LOGGER.info(f"{prefix} YOLOv8 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...")
         try:
             assert check_online(), "AutoUpdate skipped (offline)"
             LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode())
@@ -15,7 +15,7 @@ from .metrics import box_iou


 class Profile(contextlib.ContextDecorator):
-    # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager
+    # YOLOv8 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager
     def __init__(self, t=0.0):
         self.t = t
         self.cuda = torch.cuda.is_available()
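Usage as the comment describes; elapsed time accumulates in .t across uses of the same instance:

    import time

    with Profile() as dt:
        time.sleep(0.1)  # stand-in for a timed forward pass
    print(f'{dt.t:.3f}s elapsed')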
@@ -139,7 +139,7 @@ def non_max_suppression(
     # Checks
     assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
     assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'
-    if isinstance(prediction, (list, tuple)):  # YOLOv5 model in validation model, output = (inference_out, loss_out)
+    if isinstance(prediction, (list, tuple)):  # YOLOv8 model in validation model, output = (inference_out, loss_out)
         prediction = prediction[0]  # select only inference output

     device = prediction.device
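A sketch of the call, with `prediction` standing in for raw model output; the function returns one tensor per batch image with x1, y1, x2, y2, confidence and class columns:

    detections = non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45)
    for det in detections:  # one tensor per image
        print(det.shape)    # (n, 6): xyxy box, conf, cls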
@@ -41,7 +41,7 @@ colors = Colors()  # create instance for 'from utils.plots import colors'


 class Annotator:
-    # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
+    # YOLOv8 Annotator for train/val mosaics and jpgs and detect/hub inference annotations
     def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
         assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
         non_ascii = not is_ascii(example)  # non-latin labels, i.e. asian, arabic, cyrillic
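A sketch following the YOLOv5-derived Annotator API (box_label and result are assumed to carry over unchanged):

    import numpy as np

    im = np.zeros((640, 640, 3), dtype=np.uint8)  # contiguous blank canvas
    annotator = Annotator(im, line_width=2)
    annotator.box_label((50, 50, 200, 200), label='person 0.91', color=(255, 0, 0))
    out = annotator.result()  # annotated image as a numpy array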
@@ -319,7 +319,7 @@ def guess_task_from_head(head):


 def profile(input, ops, n=10, device=None):
-    """ YOLOv5 speed/memory/FLOPs profiler
+    """ YOLOv8 speed/memory/FLOPs profiler
     Usage:
         input = torch.randn(16, 3, 640, 640)
         m1 = lambda x: x * torch.sigmoid(x)