import contextlib
import hashlib
import os

import cv2
import numpy as np
from PIL import ExifTags, Image, ImageOps

from ..utils.general import segments2boxes

HELP_URL = "See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data"
IMG_FORMATS = "bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm"  # include image suffixes
VID_FORMATS = "asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv"  # include video suffixes
BAR_FORMAT = "{l_bar}{bar:10}{r_bar}{bar:-10b}"  # tqdm bar format
LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1))  # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv("RANK", -1))
PIN_MEMORY = str(os.getenv("PIN_MEMORY", True)).lower() == "true"  # global pin_memory for dataloaders
IMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean
IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation

# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == "Orientation":
        break


def img2label_paths(img_paths):
    # Define label paths as a function of image paths
    sa, sb = f"{os.sep}images{os.sep}", f"{os.sep}labels{os.sep}"  # /images/, /labels/ substrings
    return [sb.join(x.rsplit(sa, 1)).rsplit(".", 1)[0] + ".txt" for x in img_paths]


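# Illustrative call for img2label_paths (a sketch; the dataset path below is an assumption,
# not taken from this repository). The last /images/ directory is swapped for /labels/ and
# the image suffix for .txt:
#   img2label_paths(["datasets/coco/images/train2017/000000000009.jpg"])
#   -> ["datasets/coco/labels/train2017/000000000009.txt"]

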
def get_hash(paths):
    # Returns a single hash value of a list of paths (files or dirs)
    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))  # sizes
    h = hashlib.md5(str(size).encode())  # hash sizes
    h.update("".join(paths).encode())  # hash paths
    return h.hexdigest()  # return hash


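# Illustrative call for get_hash (a sketch; the label file names are assumptions). The digest
# changes whenever any listed file changes size or the path list itself changes, which is what
# makes it usable as a dataset-cache key:
#   get_hash(["labels/train/000001.txt", "labels/train/000002.txt"])  # -> 32-character md5 hex digest

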
def exif_size(img):
    # Returns exif-corrected PIL size
    s = img.size  # (width, height)
    with contextlib.suppress(Exception):
        rotation = dict(img._getexif().items())[orientation]
        if rotation in [6, 8]:  # rotation 270 or 90
            s = (s[1], s[0])
    return s


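# Illustrative behaviour of exif_size (a sketch; "photo.jpg" is a hypothetical file): for a
# 1280x960 JPEG whose EXIF Orientation tag is 6 or 8 (rotated 90/270 degrees), width and
# height are swapped:
#   exif_size(Image.open("photo.jpg"))  # -> (960, 1280)

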
def verify_image_label(args):
    # Verify one image-label pair
    im_file, lb_file, prefix, keypoint = args
    nm, nf, ne, nc, msg, segments, keypoints = 0, 0, 0, 0, "", None, None  # number (missing, found, empty, corrupt), message, segments, keypoints
    try:
        # verify images
        im = Image.open(im_file)
        im.verify()  # PIL verify
        shape = exif_size(im)  # image size
        shape = (shape[1], shape[0])  # hw
        assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels"
        assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}"
        if im.format.lower() in ("jpg", "jpeg"):
            with open(im_file, "rb") as f:
                f.seek(-2, 2)
                if f.read() != b"\xff\xd9":  # corrupt JPEG
                    ImageOps.exif_transpose(Image.open(im_file)).save(im_file, "JPEG", subsampling=0, quality=100)
                    msg = f"{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved"

        # verify labels
        if os.path.isfile(lb_file):
            nf = 1  # label found
            with open(lb_file) as f:
                lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
                if any(len(x) > 6 for x in lb) and (not keypoint):  # is segment
                    classes = np.array([x[0] for x in lb], dtype=np.float32)
                    segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb]  # (cls, xy1...)
                    lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                lb = np.array(lb, dtype=np.float32)
            nl = len(lb)
            if nl:
                if keypoint:
                    assert lb.shape[1] == 56, "labels require 56 columns each"
                    assert (lb[:, 5::3] <= 1).all(), "non-normalized or out of bounds coordinate labels"
                    assert (lb[:, 6::3] <= 1).all(), "non-normalized or out of bounds coordinate labels"
                    kpts = np.zeros((lb.shape[0], 39))
                    for i in range(len(lb)):
                        kpt = np.delete(lb[i, 5:], np.arange(2, lb.shape[1] - 5, 3))  # remove the occlusion parameter from the GT
                        kpts[i] = np.hstack((lb[i, :5], kpt))
                    lb = kpts
                    assert lb.shape[1] == 39, "labels require 39 columns each after removing occlusion parameter"
                else:
                    assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected"
                    assert (lb >= 0).all(), f"negative label values {lb[lb < 0]}"
                    assert (lb[:, 1:] <= 1).all(), f"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}"
                _, i = np.unique(lb, axis=0, return_index=True)
                if len(i) < nl:  # duplicate row check
                    lb = lb[i]  # remove duplicates
                    if segments:
                        segments = [segments[x] for x in i]
                    msg = f"{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed"
            else:
                ne = 1  # label empty
                lb = np.zeros((0, 39), dtype=np.float32) if keypoint else np.zeros((0, 5), dtype=np.float32)
        else:
            nm = 1  # label missing
            lb = np.zeros((0, 39), dtype=np.float32) if keypoint else np.zeros((0, 5), dtype=np.float32)
        if keypoint:
            keypoints = lb[:, 5:].reshape(-1, 17, 2)
            lb = lb[:, :5]
        return im_file, lb, shape, segments, keypoints, nm, nf, ne, nc, msg
    except Exception as e:
        nc = 1
        msg = f"{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}"
        return [None, None, None, None, None, nm, nf, ne, nc, msg]


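# Illustrative driver for verify_image_label (a sketch; the im_files/lb_files lists, pool size
# and "train: " prefix are assumptions, not part of this module). The function is written to be
# mapped over (im_file, lb_file, prefix, keypoint) tuples:
#   from multiprocessing.pool import Pool
#   args = zip(im_files, lb_files, ["train: "] * len(im_files), [False] * len(im_files))
#   with Pool(8) as pool:
#       for im_file, lb, shape, segs, kpts, nm_f, nf_f, ne_f, nc_f, msg in pool.imap(verify_image_label, args):
#           if msg:
#               print(msg)

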
def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
    """
    Args:
        img_size (tuple): The image size.
        polygons (np.ndarray): [N, M], N is the number of polygons,
            M is the number of points; M must be even (flattened x, y coordinates).
    """
    mask = np.zeros(img_size, dtype=np.uint8)
    polygons = np.asarray(polygons)
    polygons = polygons.astype(np.int32)
    shape = polygons.shape
    polygons = polygons.reshape(shape[0], -1, 2)
    cv2.fillPoly(mask, polygons, color=color)
    nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio)
    # NOTE: fill the polygons first and then resize, to keep loss calculation
    # consistent with the mask_ratio=1 case.
    mask = cv2.resize(mask, (nw, nh))
    return mask


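# Illustrative call for polygon2mask (a sketch; the rectangle coordinates are made up):
#   poly = np.array([[10, 10, 100, 10, 100, 80, 10, 80]], dtype=np.float32)  # one polygon, flattened xy
#   m = polygon2mask((160, 160), poly, color=1, downsample_ratio=4)  # uint8 mask of shape (40, 40)

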
def polygons2masks(img_size, polygons, color, downsample_ratio=1):
    """
    Args:
        img_size (tuple): The image size.
        polygons (list[np.ndarray]): each polygon is [N, M],
            N is the number of polygons,
            M is the number of points; M must be even (flattened x, y coordinates).
    """
    masks = []
    for si in range(len(polygons)):
        mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio)
        masks.append(mask)
    return np.array(masks)


def polygons2masks_overlap(img_size, segments, downsample_ratio=1):
    """Return an overlap mask of shape (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio),
    where each pixel holds the 1-based index of its segment in area-sorted order (0 = background)."""
    masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio),
                     dtype=np.int32 if len(segments) > 255 else np.uint8)
    areas = []
    ms = []
    for si in range(len(segments)):
        mask = polygon2mask(
            img_size,
            [segments[si].reshape(-1)],
            downsample_ratio=downsample_ratio,
            color=1,
        )
        ms.append(mask)
        areas.append(mask.sum())
    areas = np.asarray(areas)
    index = np.argsort(-areas)
    ms = np.array(ms)[index]
    for i in range(len(segments)):
        mask = ms[i] * (i + 1)
        masks = masks + mask
        masks = np.clip(masks, a_min=0, a_max=i + 1)
    return masks, index
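
# Illustrative call for polygons2masks_overlap (a sketch; the two square segments are made up):
#   segs = [np.array([0, 0, 64, 0, 64, 64, 0, 64], dtype=np.float32),
#           np.array([16, 16, 48, 16, 48, 48, 16, 48], dtype=np.float32)]
#   overlap, sorted_idx = polygons2masks_overlap((64, 64), segs, downsample_ratio=1)
#   # overlap[y, x] holds the 1-based index of the covering segment in area-sorted order
#   # (0 = background, overlapping pixels resolved in favour of the smaller segment);
#   # sorted_idx maps that order back to positions in `segs`.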