@@ -47,6 +47,17 @@ def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)


 def segment2box(segment, width=640, height=640):
+    """
+    Convert 1 segment label to 1 box label, applying the inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy).
+
+    Args:
+      segment: the segment label
+      width: the width of the image. Defaults to 640
+      height: the height of the image. Defaults to 640
+
+    Returns:
+      the minimum and maximum x and y values of the segment.
+    """
     # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
     x, y = segment.T  # segment xy
     inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
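
To make the inside-image constraint concrete, a minimal sketch (assuming numpy is imported as np and segment2box is in scope): a polygon vertex that falls outside the image is dropped before the min/max box is taken.

    import numpy as np
    segment = np.array([[10., 20.], [100., 200.], [-5., 50.]])  # last point lies outside the image
    segment2box(segment)  # -> array([ 10.,  20., 100., 200.]); the (-5, 50) vertex is ignored
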
@@ -55,7 +66,18 @@ def segment2box(segment, width=640, height=640):


 def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
-    # Rescale boxes (xyxy) from img1_shape to img0_shape
+    """
+    Rescale boxes (xyxy) from img1_shape to img0_shape.
+
+    Args:
+      img1_shape: the shape of the image that the bounding boxes are for
+      boxes: the bounding boxes of the objects in the image
+      img0_shape: the shape of the original image
+      ratio_pad: a tuple of (ratio, pad)
+
+    Returns:
+      the boxes, rescaled to img0_shape.
+    """
     if ratio_pad is None:  # calculate from img0_shape
         gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
         pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
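
A worked example of the letterbox arithmetic above (torch assumed; note the function modifies boxes in place): for a 480x640 image letterboxed into a 640x640 model input, gain is 1.0 and the 80-pixel vertical padding is subtracted from y.

    import torch
    boxes = torch.tensor([[100., 180., 200., 280.]])  # xyxy in 640x640 model space
    scale_boxes((640, 640), boxes, (480, 640))        # -> tensor([[100., 100., 200., 200.]])
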
@@ -70,18 +92,6 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
     return boxes


-def clip_boxes(boxes, shape):
-    # Clip boxes (xyxy) to image shape (height, width)
-    if isinstance(boxes, torch.Tensor):  # faster individually
-        boxes[..., 0].clamp_(0, shape[1])  # x1
-        boxes[..., 1].clamp_(0, shape[0])  # y1
-        boxes[..., 2].clamp_(0, shape[1])  # x2
-        boxes[..., 3].clamp_(0, shape[0])  # y2
-    else:  # np.array (faster grouped)
-        boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
-        boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
-
-
 def make_divisible(x, divisor):
     # Returns nearest x divisible by divisor
     if isinstance(divisor, torch.Tensor):
@@ -101,7 +111,7 @@ def non_max_suppression(
         nm=0,  # number of masks
 ):
     """
     Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box.

     Arguments:
         prediction (torch.Tensor): A tensor of shape (batch_size, num_boxes, num_classes + 4 + num_masks)
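
For orientation, a hedged sketch of the calling convention, with shapes taken from the docstring above (the threshold names and defaults are assumptions, not shown in this hunk):

    # prediction: raw model output, shape (batch_size, num_boxes, num_classes + 4 + num_masks)
    # out = non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, nm=32)
    # out: a list with one tensor per image; each row holds an xyxy box followed by
    #      confidence, class index, and any mask coefficients
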
@@ -217,6 +227,25 @@ def non_max_suppression(
     return output


+def clip_boxes(boxes, shape):
+    """
+    Clip bounding boxes (xyxy) to an image shape (height, width), in place.
+
+    Args:
+      boxes: the bounding boxes to clip
+      shape: the shape of the image
+    """
+    if isinstance(boxes, torch.Tensor):  # faster individually
+        boxes[..., 0].clamp_(0, shape[1])  # x1
+        boxes[..., 1].clamp_(0, shape[0])  # y1
+        boxes[..., 2].clamp_(0, shape[1])  # x2
+        boxes[..., 3].clamp_(0, shape[0])  # y2
+    else:  # np.array (faster grouped)
+        boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
+        boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
+
+
 def clip_coords(boxes, shape):
     # Clip xyxy bounding boxes to image shape (height, width)
     if isinstance(boxes, torch.Tensor):  # faster individually
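
A small sketch of the in-place behavior (torch assumed); shape is (height, width), so x is clamped to the width and y to the height:

    import torch
    boxes = torch.tensor([[-10., 5., 700., 300.]])  # xyxy, partly outside a 480x640 image
    clip_boxes(boxes, (480, 640))
    # boxes is now tensor([[  0.,   5., 640., 300.]])
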
@@ -231,9 +260,16 @@ def clip_coords(boxes, shape):


 def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
     """
-    img1_shape: model input shape, [h, w]
-    img0_shape: origin pic shape, [h, w, 3]
+    Take a mask and resize it to the original image size.
+
+    Args:
+      im1_shape: model input shape, [h, w]
       masks: [h, w, num]
+      im0_shape: the original image shape, [h, w, 3]
+      ratio_pad: the (ratio, pad) tuple used when the image was letterboxed
+
+    Returns:
+      the masks, resized to im0_shape.
     """
     # Rescale coordinates (xyxy) from im1_shape to im0_shape
     if ratio_pad is None:  # calculate from im0_shape
@@ -258,7 +294,16 @@ def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):


 def xyxy2xywh(x):
-    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
+    """
+    Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h], where xy1=top-left and xy2=bottom-right.
+
+    Args:
+      x: the input tensor
+
+    Returns:
+      the center coordinates, width and height of each box.
+    """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[..., 0] = (x[..., 0] + x[..., 2]) / 2  # x center
     y[..., 1] = (x[..., 1] + x[..., 3]) / 2  # y center
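
A worked example of the conversion (numpy assumed):

    import numpy as np
    xyxy2xywh(np.array([[10., 20., 110., 220.]]))  # -> array([[ 60., 120., 100., 200.]]), i.e. cx, cy, w, h
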
@@ -268,7 +313,15 @@ def xyxy2xywh(x):


 def xywh2xyxy(x):
-    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+    """
+    Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2], where xy1=top-left and xy2=bottom-right.
+
+    Args:
+      x: the input tensor
+
+    Returns:
+      the top-left and bottom-right coordinates of each bounding box.
+    """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[..., 0] = x[..., 0] - x[..., 2] / 2  # top left x
     y[..., 1] = x[..., 1] - x[..., 3] / 2  # top left y
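
The two conversions are inverses of each other, which gives a quick sanity check (numpy assumed):

    import numpy as np
    b = np.array([[60., 120., 100., 200.]])  # cx, cy, w, h
    np.allclose(xyxy2xywh(xywh2xyxy(b)), b)  # -> True
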
@@ -278,7 +331,19 @@ def xywh2xyxy(x):


 def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
-    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+    """
+    Convert normalized [x, y, w, h] boxes to pixel [x1, y1, x2, y2] boxes, where xy1=top-left and xy2=bottom-right.
+
+    Args:
+      x: the bounding box coordinates
+      w: width of the image. Defaults to 640
+      h: height of the image. Defaults to 640
+      padw: padding width. Defaults to 0
+      padh: padding height. Defaults to 0
+
+    Returns:
+      the xyxy coordinates of the bounding boxes.
+    """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top left x
     y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top left y
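
For instance, a normalized YOLO-style label mapped into a 640x480 image (numpy assumed):

    import numpy as np
    label = np.array([[0.5, 0.5, 0.25, 0.5]])  # normalized cx, cy, w, h
    xywhn2xyxy(label, w=640, h=480)            # -> array([[240., 120., 400., 360.]])
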
@@ -288,7 +353,20 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):


 def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
-    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
+    """
+    Convert pixel [x1, y1, x2, y2] boxes to normalized [x, y, w, h] boxes, with coordinates
+    normalized by the image width and height.
+
+    Args:
+      x: the bounding box coordinates
+      w: width of the image. Defaults to 640
+      h: height of the image. Defaults to 640
+      clip: if True, the boxes are first clipped to the image boundaries. Defaults to False
+      eps: the minimum value of the box's width and height.
+
+    Returns:
+      the bounding boxes in normalized xywh format.
+    """
     if clip:
         clip_boxes(x, (h - eps, w - eps))  # warning: inplace clip
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
@@ -300,7 +378,19 @@ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):


 def xyn2xy(x, w=640, h=640, padw=0, padh=0):
-    # Convert normalized segments into pixel segments, shape (n,2)
+    """
+    Convert normalized segments into pixel segments, shape (n,2).
+
+    Args:
+      x: the normalized segment coordinates
+      w: width of the image. Defaults to 640
+      h: height of the image. Defaults to 640
+      padw: padding width. Defaults to 0
+      padh: padding height. Defaults to 0
+
+    Returns:
+      the segment points in pixel coordinates.
+    """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[..., 0] = w * x[..., 0] + padw  # x
     y[..., 1] = h * x[..., 1] + padh  # y
@@ -308,7 +398,15 @@ def xyn2xy(x, w=640, h=640, padw=0, padh=0):


 def xywh2ltwh(x):
-    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, w, h] where xy1=top-left
+    """
+    Convert nx4 boxes from [x, y, w, h] to [x1, y1, w, h], where xy1=top-left.
+
+    Args:
+      x: the input boxes, with xy the center of each box
+
+    Returns:
+      the boxes in [x1, y1, w, h] (top-left width-height) format.
+    """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
     y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
@@ -316,7 +414,15 @@ def xywh2ltwh(x):


 def xyxy2ltwh(x):
-    # Convert nx4 boxes from [x1, y1, x2, y2] to [x1, y1, w, h] where xy1=top-left, xy2=bottom-right
+    """
+    Convert nx4 boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left and xy2=bottom-right.
+
+    Args:
+      x: the input tensor
+
+    Returns:
+      the boxes in [x1, y1, w, h] format.
+    """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[:, 2] = x[:, 2] - x[:, 0]  # width
     y[:, 3] = x[:, 3] - x[:, 1]  # height
@@ -324,7 +430,12 @@ def xyxy2ltwh(x):


 def ltwh2xywh(x):
-    # Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center
+    """
+    Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h], where xy1=top-left and xy=center.
+
+    Args:
+      x: the input tensor
+    """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[:, 0] = x[:, 0] + x[:, 2] / 2  # center x
     y[:, 1] = x[:, 1] + x[:, 3] / 2  # center y
@@ -332,7 +443,16 @@ def ltwh2xywh(x):


 def ltwh2xyxy(x):
-    # Convert nx4 boxes from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+    """
+    Convert nx4 boxes from [x1, y1, w, h] to [x1, y1, x2, y2], where xy1=top-left and xy2=bottom-right.
+
+    Args:
+      x: the input boxes
+
+    Returns:
+      the xyxy coordinates of the bounding boxes.
+    """
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[:, 2] = x[:, 2] + x[:, 0]  # x2 = left + width
     y[:, 3] = x[:, 3] + x[:, 1]  # y2 = top + height
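
The ltwh helpers mirror the xyxy ones, so a round trip should be lossless (numpy assumed):

    import numpy as np
    b = np.array([[10., 20., 110., 220.]])   # x1, y1, x2, y2
    np.allclose(ltwh2xyxy(xyxy2ltwh(b)), b)  # -> True
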
@@ -340,7 +460,16 @@ def ltwh2xyxy(x):


 def segments2boxes(segments):
-    # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
+    """
+    Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh).
+
+    Args:
+      segments: a list of segments; each segment is a list of points, and each point is a list of x, y coordinates
+
+    Returns:
+      the xywh coordinates of the bounding boxes.
+    """
     boxes = []
     for s in segments:
         x, y = s.T  # segment xy
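
A small sketch (numpy assumed): the min/max of each polygon's points become an xyxy box, which is then converted to xywh.

    import numpy as np
    segments2boxes([np.array([[10., 20.], [110., 20.], [60., 220.]])])
    # -> array([[ 60., 120., 100., 200.]])  (one triangle -> one cx, cy, w, h box)
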
@@ -349,7 +478,17 @@ def segments2boxes(segments):


 def resample_segments(segments, n=1000):
-    # Up-sample an (n,2) segment
+    """
+    Up-sample each segment to n points by linear interpolation along the closed polygon.
+
+    Args:
+      segments: a list of (n,2) arrays, where n is the number of points in the segment
+      n: number of points to resample each segment to. Defaults to 1000
+
+    Returns:
+      the resampled segments.
+    """
     for i, s in enumerate(segments):
         s = np.concatenate((s, s[0:1, :]), axis=0)
         x = np.linspace(0, len(s) - 1, n)
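
The np.concatenate call above closes the polygon before interpolating, so the resampled contour wraps back to its starting point. A sketch (numpy assumed):

    import numpy as np
    segs = [np.array([[0., 0.], [4., 0.], [4., 4.]])]
    out = resample_segments(segs, n=8)
    out[0].shape  # -> (8, 2); the first and last resampled points both sit at (0, 0)
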
@@ -360,13 +499,15 @@ def resample_segments(segments, n=1000):


 def crop_mask(masks, boxes):
     """
-    "Crop" predicted masks by zeroing out everything not in the predicted bbox.
-    Vectorized by Chong (thanks Chong).
+    Crop predicted masks by zeroing out everything outside the predicted bounding box.

     Args:
-    - masks should be a size [h, w, n] tensor of masks
-    - boxes should be a size [n, 4] tensor of bbox coords in relative point form
+      masks: [n, h, w] tensor of masks
+      boxes: [n, 4] tensor of bbox coords in relative point form
+
+    Returns:
+      the masks, cropped to the bounding boxes.
     """
     n, h, w = masks.shape
     x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(n,1,1)
     r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # rows shape(1,1,w)
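
To see the broadcast at work: the r ramp above (and its column counterpart just past this hunk) is compared against each box, producing an (n, h, w) boolean crop. A sketch (torch assumed):

    import torch
    masks = torch.ones(1, 4, 4)               # one 4x4 mask of ones
    boxes = torch.tensor([[1., 1., 3., 3.]])  # xyxy in mask pixels
    crop_mask(masks, boxes)
    # only the 2x2 region with 1 <= x < 3 and 1 <= y < 3 keeps its ones; the rest is zeroed
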
@@ -377,14 +518,18 @@ def crop_mask(masks, boxes):


 def process_mask_upsample(protos, masks_in, bboxes, shape):
     """
-    Crop after upsample.
-    proto_out: [mask_dim, mask_h, mask_w]
-    out_masks: [n, mask_dim], n is number of masks after nms
+    Apply the output of the mask head to the bounding boxes, cropping after upsampling. This produces
+    masks of higher quality, but is slower.
+
+    Args:
+      protos: [mask_dim, mask_h, mask_w]
+      masks_in: [n, mask_dim], n is number of masks after nms
       bboxes: [n, 4], n is number of masks after nms
-    shape: input_image_size, (h, w)
-    return: h, w, n
+      shape: the size of the input image, (h, w)
+
+    Returns:
+      the cropped, upsampled masks.
     """
     c, mh, mw = protos.shape  # CHW
     masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
     masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
@@ -394,12 +539,17 @@ def process_mask_upsample(protos, masks_in, bboxes, shape):


 def process_mask(protos, masks_in, bboxes, shape, upsample=False):
     """
-    Crop before upsample.
-    proto_out: [mask_dim, mask_h, mask_w]
-    out_masks: [n, mask_dim], n is number of masks after nms
+    Apply the output of the mask head to the bounding boxes, cropping before upsampling. This is faster,
+    but produces lower-quality (downsampled) masks.
+
+    Args:
+      protos: [mask_dim, mask_h, mask_w]
+      masks_in: [n, mask_dim], n is number of masks after nms
       bboxes: [n, 4], n is number of masks after nms
-    shape: input_image_size, (h, w)
-    return: h, w, n
+      shape: the size of the input image, (h, w)
+
+    Returns:
+      the cropped masks.
     """

     c, mh, mw = protos.shape  # CHW
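
A hedged end-to-end sketch of the decoding step (shapes here are illustrative; torch assumed): prototype masks are combined with per-box coefficients, cropped to the boxes, and optionally upsampled to the input size.

    import torch
    protos = torch.randn(32, 160, 160)  # [mask_dim, mask_h, mask_w] prototypes
    masks_in = torch.randn(5, 32)       # coefficients for 5 boxes kept by NMS
    bboxes = torch.tensor([[10., 10., 100., 100.]]).repeat(5, 1)
    process_mask(protos, masks_in, bboxes, shape=(640, 640), upsample=True).shape
    # -> torch.Size([5, 640, 640])
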
@@ -420,12 +570,16 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False):


 def process_mask_native(protos, masks_in, bboxes, shape):
     """
-    Crop after upsample.
+    Take the output of the mask head and crop it, after upsampling, to the bounding boxes.
+
+    Args:
       protos: [mask_dim, mask_h, mask_w]
       masks_in: [n, mask_dim], n is number of masks after nms
       bboxes: [n, 4], n is number of masks after nms
       shape: input_image_size, (h, w)
-    return: h, w, n
+
+    Returns:
+      masks: [h, w, n]
     """
     c, mh, mw = protos.shape  # CHW
     masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
@@ -441,7 +595,19 @@ def process_mask_native(protos, masks_in, bboxes, shape):


 def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):
-    # Rescale coords (xyxy) from img1_shape to img0_shape
+    """
+    Rescale segment coordinates (xy) from img1_shape to img0_shape.
+
+    Args:
+      img1_shape: the shape of the image that the segments are from
+      segments: the segments to be scaled
+      img0_shape: the shape of the image that the segmentation is being applied to
+      ratio_pad: the (ratio, pad) used to letterbox the image
+      normalize: if True, the coordinates are normalized to the range [0, 1]. Defaults to False
+
+    Returns:
+      the scaled segments.
+    """
     if ratio_pad is None:  # calculate from img0_shape
         gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
         pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
@@ -460,7 +626,16 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):


 def masks2segments(masks, strategy='largest'):
-    # Convert masks(n,160,160) into segments(n,xy)
+    """
+    Convert a tensor of masks (n, h, w) into a list of segments (n, xy).
+
+    Args:
+      masks: the output of the model, a tensor of shape (batch_size, 160, 160)
+      strategy: 'concat' or 'largest'. Defaults to 'largest'
+
+    Returns:
+      segments (List): list of segment masks
+    """
     segments = []
     for x in masks.int().cpu().numpy().astype('uint8'):
         c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
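
A sketch of the strategy switch (torch and cv2 assumed): 'largest' keeps only the biggest contour found in each mask, while 'concat' joins all contours into a single point list.

    import torch
    masks = torch.zeros(1, 160, 160)
    masks[0, 10:50, 10:50] = 1                           # one square blob
    masks2segments(masks, strategy='largest')[0].shape   # -> (4, 2) float32 contour points
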
@@ -476,7 +651,14 @@ def masks2segments(masks, strategy='largest'):


 def clip_segments(segments, shape):
-    # Clip segments (xy1,xy2,...) to image shape (height, width)
+    """
+    Clip segments (xy1, xy2, ...) to an image shape (height, width), in place.
+
+    Args:
+      segments: a list of segments; each segment is a list of points, and each point is a list of x, y coordinates
+      shape: the shape of the image
+    """
     if isinstance(segments, torch.Tensor):  # faster individually
         segments[:, 0].clamp_(0, shape[1])  # x
         segments[:, 1].clamp_(0, shape[0])  # y