# Ultralytics YOLO 🚀, GPL-3.0 license

import math
import os
import platform
import random
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path

import numpy as np
import thop
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP

from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, __version__
from ultralytics.yolo.utils.checks import check_version

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))

TORCH_1_9 = check_version(torch.__version__, '1.9.0')
TORCH_1_11 = check_version(torch.__version__, '1.11.0')
TORCH_1_12 = check_version(torch.__version__, '1.12.0')


@contextmanager
def torch_distributed_zero_first(local_rank: int):
    # Context manager to make all processes in distributed training wait for the local master to do something
    initialized = torch.distributed.is_available() and torch.distributed.is_initialized()
    if initialized and local_rank not in {-1, 0}:
        dist.barrier(device_ids=[local_rank])
    yield
    if initialized and local_rank == 0:
        dist.barrier(device_ids=[0])


def smart_inference_mode():
    # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator
    def decorate(fn):
        return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn)

    return decorate


def DDP_model(model):
    # Model DDP creation with checks
    assert not check_version(torch.__version__, '1.12.0', pinned=True), \
        'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \
        'Please upgrade or downgrade torch to use DDP. See https://github.com/ultralytics/yolov5/issues/8395'
    if TORCH_1_11:
        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True)
    else:
        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
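

# Minimal usage sketch (illustrative only, not called by this module): smart_inference_mode() is a
# decorator factory, so the wrapped call runs under torch.inference_mode() on torch>=1.9.0 and under
# torch.no_grad() otherwise. The toy model and input below are example placeholders.
def _smart_inference_mode_example():
    @smart_inference_mode()
    def forward(model, im):
        return model(im)

    model = nn.Linear(3, 1)
    im = torch.ones(2, 3, requires_grad=True)
    out = forward(model, im)
    return out.requires_grad  # False: gradient tracking is disabled inside the decorated call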


def select_device(device='', batch=0, newline=False, verbose=True):
    # device = None or 'cpu' or 0 or '0' or '0,1,2,3'
    s = f'Ultralytics YOLOv{__version__} 🚀 Python-{platform.python_version()} torch-{torch.__version__} '
    device = str(device).lower()
    for remove in 'cuda:', 'none', '(', ')', '[', ']', "'", ' ':
        device = device.replace(remove, '')  # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1'
    cpu = device == 'cpu'
    mps = device == 'mps'  # Apple Metal Performance Shaders (MPS)
    if cpu or mps:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        visible = os.environ.get('CUDA_VISIBLE_DEVICES', None)
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable - must be before assert is_available()
        if not (torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', ''))):
            LOGGER.info(s)
            install = 'See https://pytorch.org/get-started/locally/ for up-to-date torch install instructions if no ' \
                      'CUDA devices are seen by torch.\n' if torch.cuda.device_count() == 0 else ''
            raise ValueError(f"Invalid CUDA 'device={device}' requested."
                             f" Use 'device=cpu' or pass valid CUDA device(s) if available,"
                             f" i.e. 'device=0' or 'device=0,1,2,3' for Multi-GPU.\n"
                             f'\ntorch.cuda.is_available(): {torch.cuda.is_available()}'
                             f'\ntorch.cuda.device_count(): {torch.cuda.device_count()}'
                             f"\nos.environ['CUDA_VISIBLE_DEVICES']: {visible}\n"
                             f'{install}')

    if not cpu and not mps and torch.cuda.is_available():  # prefer GPU if available
        devices = device.split(',') if device else '0'  # range(torch.cuda.device_count())  # i.e. 0,1,6,7
        n = len(devices)  # device count
        if n > 1 and batch > 0 and batch % n != 0:  # check batch_size is divisible by device_count
            raise ValueError(f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
                             f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}.")
        space = ' ' * (len(s) + 1)
        for i, d in enumerate(devices):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n"  # bytes to MB
        arg = 'cuda:0'
    elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available():  # prefer MPS if available
        s += 'MPS\n'
        arg = 'mps'
    else:  # revert to CPU
        s += 'CPU\n'
        arg = 'cpu'

    if verbose and RANK == -1:
        LOGGER.info(s if newline else s.rstrip())
    return torch.device(arg)


def time_sync():
    # PyTorch-accurate time
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()


def fuse_conv_and_bn(conv, bn):
    # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    fusedconv = nn.Conv2d(conv.in_channels,
                          conv.out_channels,
                          kernel_size=conv.kernel_size,
                          stride=conv.stride,
                          padding=conv.padding,
                          dilation=conv.dilation,
                          groups=conv.groups,
                          bias=True).requires_grad_(False).to(conv.weight.device)

    # Prepare filters
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))

    # Prepare spatial bias
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fusedconv


def fuse_deconv_and_bn(deconv, bn):
    # Fuse ConvTranspose2d() and BatchNorm2d() layers
    fuseddconv = nn.ConvTranspose2d(deconv.in_channels,
                                    deconv.out_channels,
                                    kernel_size=deconv.kernel_size,
                                    stride=deconv.stride,
                                    padding=deconv.padding,
                                    output_padding=deconv.output_padding,
                                    dilation=deconv.dilation,
                                    groups=deconv.groups,
                                    bias=True).requires_grad_(False).to(deconv.weight.device)

    # Prepare filters
    w_deconv = deconv.weight.clone().view(deconv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fuseddconv.weight.copy_(torch.mm(w_bn, w_deconv).view(fuseddconv.weight.shape))

    # Prepare spatial bias
    b_conv = torch.zeros(deconv.weight.size(1), device=deconv.weight.device) if deconv.bias is None else deconv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fuseddconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fuseddconv
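

# Minimal usage sketch (illustrative only): fuses a standalone Conv2d+BatchNorm2d pair with
# fuse_conv_and_bn() and checks that, in eval mode, the fused layer reproduces the output of the
# unfused pair. The layer sizes below are arbitrary example values.
def _fuse_conv_and_bn_example():
    conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
    bn = nn.BatchNorm2d(8)
    conv.eval()
    bn.eval()  # BN must use its running statistics for the fusion to be exact
    fused = fuse_conv_and_bn(conv, bn)
    x = torch.randn(1, 3, 32, 32)
    return torch.allclose(bn(conv(x)), fused(x), atol=1e-5)  # expected: True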


def model_info(model, detailed=False, verbose=True, imgsz=640):
    # Model information. imgsz may be an int or list, i.e. imgsz=640 or imgsz=[640, 320]
    if not verbose:
        return
    n_p = get_num_params(model)  # number of parameters
    n_g = get_num_gradients(model)  # number of gradients
    if detailed:
        LOGGER.info(
            f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}")
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            LOGGER.info('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                        (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

    flops = get_flops(model, imgsz)
    fused = ' (fused)' if model.is_fused() else ''
    fs = f', {flops:.1f} GFLOPs' if flops else ''
    m = Path(getattr(model, 'yaml_file', '') or model.yaml.get('yaml_file', '')).stem.replace('yolo', 'YOLO') or 'Model'
    LOGGER.info(f'{m} summary{fused}: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}')


def get_num_params(model):
    # Return the total number of parameters in a model
    return sum(x.numel() for x in model.parameters())


def get_num_gradients(model):
    # Return the total number of parameters that require gradients
    return sum(x.numel() for x in model.parameters() if x.requires_grad)


def get_flops(model, imgsz=640):
    # Return model GFLOPs profiled at stride resolution and rescaled to imgsz
    try:
        model = de_parallel(model)
        p = next(model.parameters())
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32  # max stride
        im = torch.empty((1, p.shape[1], stride, stride), device=p.device)  # input image in BCHW format
        flops = thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1E9 * 2  # stride GFLOPs
        imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz]  # expand if int/float
        flops = flops * imgsz[0] / stride * imgsz[1] / stride  # imgsz GFLOPs
        return flops
    except Exception:
        return 0


def initialize_weights(model):
    # Set BatchNorm2d eps/momentum defaults and enable in-place activations (Conv2d keeps its default init)
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif t is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True


def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    # Scales img(bs,3,y,x) by ratio constrained to gs-multiple
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    s = (int(h * ratio), int(w * ratio))  # new size
    img = F.interpolate(img, size=s, mode='bilinear', align_corners=False)  # resize
    if not same_shape:  # pad/crop img
        h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))
    return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447)  # value = imagenet mean


def make_divisible(x, divisor):
    # Returns the smallest multiple of divisor that is greater than or equal to x
    if isinstance(divisor, torch.Tensor):
        divisor = int(divisor.max())  # to int
    return math.ceil(x / divisor) * divisor
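

# Minimal usage sketch (illustrative only): make_divisible() rounds a value up to the next multiple
# of a stride/divisor (used for channel counts and image sizes), and scale_img() rescales a batch
# and then pads it back to a gs-multiple shape. The sizes below are arbitrary example values.
def _make_divisible_and_scale_img_example():
    c = make_divisible(100, 32)  # 128: smallest multiple of 32 that is >= 100
    im = torch.zeros(16, 3, 256, 416)
    im2 = scale_img(im, ratio=0.6)  # resized to 153x249, then padded up to 160x256
    return c, im2.shape  # (128, torch.Size([16, 3, 160, 256]))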


def copy_attr(a, b, include=(), exclude=()):
    # Copy attributes from b to a, options to only include [...] and to exclude [...]
    for k, v in b.__dict__.items():
        if (len(include) and k not in include) or k.startswith('_') or k in exclude:
            continue
        else:
            setattr(a, k, v)


def get_latest_opset():
    # Return max supported ONNX opset by this version of torch
    return max(int(k[14:]) for k in vars(torch.onnx) if 'symbolic_opset' in k)  # opset


def intersect_dicts(da, db, exclude=()):
    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
    return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}


def is_parallel(model):
    # Returns True if model is of type DP or DDP
    return isinstance(model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel))


def de_parallel(model):
    # De-parallelize a model: returns single-GPU model if model is of type DP or DDP
    return model.module if is_parallel(model) else model


def one_cycle(y1=0.0, y2=1.0, steps=100):
    # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1


def init_seeds(seed=0, deterministic=False):
    # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # for Multi-GPU, exception safe
    # torch.backends.cudnn.benchmark = True  # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287
    if deterministic and TORCH_1_12:  # https://github.com/ultralytics/yolov5/pull/8213
        torch.use_deterministic_algorithms(True)
        torch.backends.cudnn.deterministic = True
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
        os.environ['PYTHONHASHSEED'] = str(seed)


class ModelEMA:
    """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models
    Keeps a moving average of everything in the model state_dict (parameters and buffers)
    For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    To disable EMA set the `enabled` attribute to `False`.
    """

    def __init__(self, model, decay=0.9999, tau=2000, updates=0):
        # Create EMA
        self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA
        self.updates = updates  # number of EMA updates
        self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)
        self.enabled = True

    def update(self, model):
        # Update EMA parameters
        if self.enabled:
            self.updates += 1
            d = self.decay(self.updates)

            msd = de_parallel(model).state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:  # true for FP16 and FP32
                    v *= d
                    v += (1 - d) * msd[k].detach()
                    # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype}, model {msd[k].dtype}'

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        # Update EMA attributes
        if self.enabled:
            copy_attr(self.ema, model, include, exclude)
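

# Minimal usage sketch (illustrative only): a toy training loop showing how one_cycle() provides a
# LambdaLR schedule and how ModelEMA is updated after every optimizer step. The model, optimizer
# settings and step count are placeholders; real trainers also call ema.update_attr() and
# validate/checkpoint with ema.ema.
def _one_cycle_and_ema_example(steps=5):
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.SiLU())
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=one_cycle(1.0, 0.01, steps))
    ema = ModelEMA(model, decay=0.9999, tau=2000)
    for _ in range(steps):
        loss = model(torch.randn(2, 3, 32, 32)).mean()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        ema.update(model)  # EMA weights follow the live model with a ramped decay
        scheduler.step()
    return ema.ema  # the averaged model, used for validation and checkpointing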


def strip_optimizer(f='best.pt', s=''):
    """
    Strip optimizer from 'f' to finalize training, optionally save as 's'.

    Usage:
        from ultralytics.yolo.utils.torch_utils import strip_optimizer
        from pathlib import Path
        for f in Path('/Users/glennjocher/Downloads/weights').glob('*.pt'):
            strip_optimizer(f)

    Args:
        f (str): file path to model to strip the optimizer from. Default is 'best.pt'.
        s (str): file path to save the model with stripped optimizer to. If not provided, 'f' will be overwritten.

    Returns:
        None
    """
    x = torch.load(f, map_location=torch.device('cpu'))
    args = {**DEFAULT_CFG_DICT, **x['train_args']}  # combine model args with default args, preferring model args
    if x.get('ema'):
        x['model'] = x['ema']  # replace model with ema
    for k in 'optimizer', 'best_fitness', 'ema', 'updates':  # keys
        x[k] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    x['train_args'] = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS}  # strip non-default keys
    # x['model'].args = x['train_args']
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # file size
    LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")


def profile(input, ops, n=10, device=None):
    """
    YOLOv8 speed/memory/FLOPs profiler

    Usage:
        input = torch.randn(16, 3, 640, 640)
        m1 = lambda x: x * torch.sigmoid(x)
        m2 = nn.SiLU()
        profile(input, [m1, m2], n=100)  # profile over 100 iterations
    """
    results = []
    if not isinstance(device, torch.device):
        device = select_device(device)
    LOGGER.info(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
                f"{'input':>24s}{'output':>24s}")

    for x in input if isinstance(input, list) else [input]:
        x = x.to(device)
        x.requires_grad = True
        for m in ops if isinstance(ops, list) else [ops]:
            m = m.to(device) if hasattr(m, 'to') else m  # device
            m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
            tf, tb, t = 0, 0, [0, 0, 0]  # dt forward, backward
            try:
                flops = thop.profile(m, inputs=[x], verbose=False)[0] / 1E9 * 2  # GFLOPs
            except Exception:
                flops = 0

            try:
                for _ in range(n):
                    t[0] = time_sync()
                    y = m(x)
                    t[1] = time_sync()
                    try:
                        _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()
                        t[2] = time_sync()
                    except Exception:  # no backward method
                        # print(e)  # for debug
                        t[2] = float('nan')
                    tf += (t[1] - t[0]) * 1000 / n  # ms per op forward
                    tb += (t[2] - t[1]) * 1000 / n  # ms per op backward
                mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0  # (GB)
                s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y))  # shapes
                p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0  # parameters
                LOGGER.info(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')
                results.append([p, flops, mem, tf, tb, s_in, s_out])
            except Exception as e:
                LOGGER.info(e)
                results.append(None)
            torch.cuda.empty_cache()
    return results


class EarlyStopping:
    """
    Early stopping class that stops training when a specified number of epochs have passed without improvement.
    """

    def __init__(self, patience=50):
        """
        Initialize early stopping object

        Args:
            patience (int, optional): Number of epochs to wait after fitness stops improving before stopping.
        """
        self.best_fitness = 0.0  # i.e. mAP
        self.best_epoch = 0
        self.patience = patience or float('inf')  # epochs to wait after fitness stops improving to stop
        self.possible_stop = False  # possible stop may occur next epoch

    def __call__(self, epoch, fitness):
        """
        Check whether to stop training

        Args:
            epoch (int): Current epoch of training
            fitness (float): Fitness value of current epoch

        Returns:
            bool: True if training should stop, False otherwise
        """
        if fitness is None:  # check if fitness=None (happens when val=False)
            return False

        if fitness >= self.best_fitness:  # >= 0 to allow for early zero-fitness stage of training
            self.best_epoch = epoch
            self.best_fitness = fitness
        delta = epoch - self.best_epoch  # epochs without improvement
        self.possible_stop = delta >= (self.patience - 1)  # possible stop may occur next epoch
        stop = delta >= self.patience  # stop training if patience exceeded
        if stop:
            LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. '
                        f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n'
                        f'To update EarlyStopping(patience={self.patience}) pass a new patience value, '
                        f'i.e. `patience=300` or use `patience=0` to disable EarlyStopping.')
        return stop
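

# Minimal usage sketch (illustrative only): EarlyStopping is called once per epoch with the current
# fitness (e.g. mAP) and returns True once `patience` epochs have passed without improving on the
# best fitness. The fitness values below are toy numbers.
def _early_stopping_example():
    stopper = EarlyStopping(patience=3)
    for epoch, fitness in enumerate([0.10, 0.20, 0.19, 0.18, 0.17, 0.16]):
        if stopper(epoch, fitness):
            return epoch  # 4: best fitness was at epoch 1 and patience=3 epochs passed without improvement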