ultralytics 8.0.87 improved Pose models (#2202)

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>
Co-authored-by: Kirolos Atef <keroatef295@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Glenn Jocher authored on 2023-04-24 16:04:40 +02:00, committed by GitHub
parent 3d60347755
commit efc941aa81
14 changed files with 150 additions and 56 deletions

ultralytics/yolo/data/build.py

@@ -7,7 +7,7 @@ from pathlib import Path
import numpy as np
import torch
from PIL import Image
-from torch.utils.data import DataLoader, dataloader, distributed
+from torch.utils.data import dataloader, distributed
from ultralytics.yolo.data.dataloaders.stream_loaders import (LOADERS, LoadImages, LoadPilAndNumpy, LoadScreenshots,
                                                              LoadStreams, LoadTensor, SourceTypes, autocast_list)
@@ -38,6 +38,12 @@ class InfiniteDataLoader(dataloader.DataLoader):
        for _ in range(len(self)):
            yield next(self.iterator)
+
+    def reset(self):
+        """Reset iterator.
+        This is useful when we want to modify settings of the dataset while training.
+        """
+        self.iterator = self._get_iterator()


class _RepeatSampler:
    """
@@ -94,20 +100,17 @@ def build_dataloader(cfg, batch, img_path, data_info, stride=32, rect=False, ran
    workers = cfg.workers if mode == 'train' else cfg.workers * 2
    nw = min([os.cpu_count() // max(nd, 1), batch if batch > 1 else 0, workers])  # number of workers
    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
-    loader = DataLoader if cfg.image_weights or cfg.close_mosaic else InfiniteDataLoader  # allow attribute updates
    generator = torch.Generator()
    generator.manual_seed(6148914691236517205 + RANK)
-    return loader(
-        dataset=dataset,
-        batch_size=batch,
-        shuffle=shuffle and sampler is None,
-        num_workers=nw,
-        sampler=sampler,
-        pin_memory=PIN_MEMORY,
-        collate_fn=getattr(dataset, 'collate_fn', None),
-        worker_init_fn=seed_worker,
-        persistent_workers=(nw > 0) and (loader == DataLoader),  # persist workers if using default PyTorch DataLoader
-        generator=generator), dataset
+    return InfiniteDataLoader(dataset=dataset,
+                              batch_size=batch,
+                              shuffle=shuffle and sampler is None,
+                              num_workers=nw,
+                              sampler=sampler,
+                              pin_memory=PIN_MEMORY,
+                              collate_fn=getattr(dataset, 'collate_fn', None),
+                              worker_init_fn=seed_worker,
+                              generator=generator), dataset
# Build classification
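With the image_weights / close_mosaic special case removed, build_dataloader now always returns an InfiniteDataLoader, so dataset settings such as mosaic can be changed mid-training via reset() rather than by switching loader classes, and the seeded torch.Generator is kept: a fixed constant plus RANK keeps the shuffle order and worker seeding reproducible per process while still differing between DDP ranks. A standalone sketch of that seeding pattern follows; RANK is hard-coded to 0 purely for illustration and the toy data is not from the library.

import torch
from torch.utils.data import DataLoader, TensorDataset

RANK = 0  # stand-in for the global DDP rank used by ultralytics
dataset = TensorDataset(torch.arange(8))

def first_batch():
    # same construction as in build_dataloader: a per-process generator seeded with a fixed constant offset by rank
    generator = torch.Generator()
    generator.manual_seed(6148914691236517205 + RANK)
    loader = DataLoader(dataset, batch_size=8, shuffle=True, generator=generator)
    return next(iter(loader))[0].tolist()

print(first_batch())  # identical order on every call, because the seed is fixed
print(first_batch())

Changing RANK changes the permutation, which is how each process gets a different but repeatable shuffle stream.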