Update .pre-commit-config.yaml (#1026)

Glenn Jocher · 2023-02-17 22:26:40 +01:00 · committed by GitHub
commit edd3ff1669 · parent 9047d737f4
76 changed files with 928 additions and 935 deletions
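
The bulk of the diff is mechanical: double-quoted Python string literals are rewritten to single quotes across the repository, matching the updated pre-commit configuration. As a hedged illustration only (the actual hooks changed in #1026 are not shown here), a .pre-commit-config.yaml entry that produces exactly this kind of rewrite is the double-quote-string-fixer hook from pre-commit/pre-commit-hooks:

# Hypothetical excerpt of .pre-commit-config.yaml — the rev and hook set are assumptions
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: double-quote-string-fixer  # rewrites "..." literals to '...'

Running `pre-commit run --all-files` after such a config change re-applies every hook to the whole tree, which is what yields a sweeping 76-file, quote-only diff like this one. Representative hunks from the segment task module follow.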

ultralytics/yolo/v8/segment/__init__.py

@@ -4,4 +4,4 @@ from .predict import SegmentationPredictor, predict
 from .train import SegmentationTrainer, train
 from .val import SegmentationValidator, val
 
-__all__ = ["SegmentationPredictor", "predict", "SegmentationTrainer", "train", "SegmentationValidator", "val"]
+__all__ = ['SegmentationPredictor', 'predict', 'SegmentationTrainer', 'train', 'SegmentationValidator', 'val']

ultralytics/yolo/v8/segment/predict.py

@@ -39,7 +39,7 @@ class SegmentationPredictor(DetectionPredictor):
     def write_results(self, idx, results, batch):
         p, im, im0 = batch
-        log_string = ""
+        log_string = ''
         if len(im.shape) == 3:
             im = im[None]  # expand for batch dim
         self.seen += 1
@@ -84,7 +84,7 @@ class SegmentationPredictor(DetectionPredictor):
             if self.args.save or self.args.save_crop or self.args.show:  # Add bbox to image
                 c = int(cls)  # integer class
-                name = f"id:{int(d.id.item())} {self.model.names[c]}" if d.id is not None else self.model.names[c]
+                name = f'id:{int(d.id.item())} {self.model.names[c]}' if d.id is not None else self.model.names[c]
                 label = None if self.args.hide_labels else (name if self.args.hide_conf else f'{name} {conf:.2f}')
                 self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True)) if self.args.boxes else None
                 if self.args.save_crop:
@@ -97,9 +97,9 @@ class SegmentationPredictor(DetectionPredictor):
 def predict(cfg=DEFAULT_CFG, use_python=False):
-    model = cfg.model or "yolov8n-seg.pt"
-    source = cfg.source if cfg.source is not None else ROOT / "assets" if (ROOT / "assets").exists() \
-        else "https://ultralytics.com/images/bus.jpg"
+    model = cfg.model or 'yolov8n-seg.pt'
+    source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \
+        else 'https://ultralytics.com/images/bus.jpg'
 
     args = dict(model=model, source=source)
     if use_python:
@@ -110,5 +110,5 @@ def predict(cfg=DEFAULT_CFG, use_python=False):
         predictor.predict_cli()
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     predict()
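
For orientation, a minimal usage sketch of the predictor entry point touched above, assuming the high-level YOLO Python API that ships with this release (the sketch itself is not part of the diff):

from ultralytics import YOLO

# Load the default segmentation checkpoint named in predict()
model = YOLO('yolov8n-seg.pt')

# Run on the same fallback source predict() uses when none is given
results = model.predict(source='https://ultralytics.com/images/bus.jpg')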

ultralytics/yolo/v8/segment/train.py

@@ -20,11 +20,11 @@ class SegmentationTrainer(v8.detect.DetectionTrainer):
     def __init__(self, cfg=DEFAULT_CFG, overrides=None):
         if overrides is None:
             overrides = {}
-        overrides["task"] = "segment"
+        overrides['task'] = 'segment'
         super().__init__(cfg, overrides)
 
     def get_model(self, cfg=None, weights=None, verbose=True):
-        model = SegmentationModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose and RANK == -1)
+        model = SegmentationModel(cfg, ch=3, nc=self.data['nc'], verbose=verbose and RANK == -1)
         if weights:
             model.load(weights)
@@ -43,13 +43,13 @@ class SegmentationTrainer(v8.detect.DetectionTrainer):
         return self.compute_loss(preds, batch)
 
     def plot_training_samples(self, batch, ni):
-        images = batch["img"]
-        masks = batch["masks"]
-        cls = batch["cls"].squeeze(-1)
-        bboxes = batch["bboxes"]
-        paths = batch["im_file"]
-        batch_idx = batch["batch_idx"]
-        plot_images(images, batch_idx, cls, bboxes, masks, paths=paths, fname=self.save_dir / f"train_batch{ni}.jpg")
+        images = batch['img']
+        masks = batch['masks']
+        cls = batch['cls'].squeeze(-1)
+        bboxes = batch['bboxes']
+        paths = batch['im_file']
+        batch_idx = batch['batch_idx']
+        plot_images(images, batch_idx, cls, bboxes, masks, paths=paths, fname=self.save_dir / f'train_batch{ni}.jpg')
 
     def plot_metrics(self):
         plot_results(file=self.csv, segment=True)  # save results.png
@@ -80,15 +80,15 @@ class SegLoss(Loss):
         anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)
 
         # targets
-        batch_idx = batch["batch_idx"].view(-1, 1)
-        targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1)
+        batch_idx = batch['batch_idx'].view(-1, 1)
+        targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1)
         targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
         gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
         mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
-        masks = batch["masks"].to(self.device).float()
+        masks = batch['masks'].to(self.device).float()
         if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample
-            masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0]
+            masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0]
 
         # pboxes
         pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)
@@ -135,13 +135,13 @@ class SegLoss(Loss):
     def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
         # Mask loss for one image
         pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:])  # (n, 32) @ (32,80,80) -> (n,80,80)
-        loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none")
+        loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none')
         return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean()
 
 
 def train(cfg=DEFAULT_CFG, use_python=False):
-    model = cfg.model or "yolov8n-seg.pt"
-    data = cfg.data or "coco128-seg.yaml"  # or yolo.ClassificationDataset("mnist")
+    model = cfg.model or 'yolov8n-seg.pt'
+    data = cfg.data or 'coco128-seg.yaml'  # or yolo.ClassificationDataset("mnist")
     device = cfg.device if cfg.device is not None else ''
 
     args = dict(model=model, data=data, device=device)
@@ -153,5 +153,5 @@ def train(cfg=DEFAULT_CFG, use_python=False):
         trainer.train()
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     train()
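
Likewise, a minimal training sketch against the defaults named in train(), again assuming the high-level YOLO API rather than the low-level SegmentationTrainer:

from ultralytics import YOLO

# Fine-tune the default segmentation checkpoint on the default dataset
model = YOLO('yolov8n-seg.pt')
model.train(data='coco128-seg.yaml', epochs=3)  # epochs value is illustrative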

ultralytics/yolo/v8/segment/val.py

@@ -24,7 +24,7 @@ class SegmentationValidator(DetectionValidator):
     def preprocess(self, batch):
         batch = super().preprocess(batch)
-        batch["masks"] = batch["masks"].to(self.device).float()
+        batch['masks'] = batch['masks'].to(self.device).float()
         return batch
 
     def init_metrics(self, model):
@@ -37,8 +37,8 @@ class SegmentationValidator(DetectionValidator):
             self.process = ops.process_mask  # faster
 
     def get_desc(self):
-        return ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P",
-                                         "R", "mAP50", "mAP50-95)")
+        return ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P',
+                                         'R', 'mAP50', 'mAP50-95)')
 
     def postprocess(self, preds):
         p = ops.non_max_suppression(preds[0],
@@ -55,11 +55,11 @@ class SegmentationValidator(DetectionValidator):
     def update_metrics(self, preds, batch):
         # Metrics
         for si, (pred, proto) in enumerate(zip(preds[0], preds[1])):
-            idx = batch["batch_idx"] == si
-            cls = batch["cls"][idx]
-            bbox = batch["bboxes"][idx]
+            idx = batch['batch_idx'] == si
+            cls = batch['cls'][idx]
+            bbox = batch['bboxes'][idx]
             nl, npr = cls.shape[0], pred.shape[0]  # number of labels, predictions
-            shape = batch["ori_shape"][si]
+            shape = batch['ori_shape'][si]
             correct_masks = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device)  # init
             correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device)  # init
             self.seen += 1
@@ -74,23 +74,23 @@ class SegmentationValidator(DetectionValidator):
             # Masks
             midx = [si] if self.args.overlap_mask else idx
-            gt_masks = batch["masks"][midx]
-            pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=batch["img"][si].shape[1:])
+            gt_masks = batch['masks'][midx]
+            pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=batch['img'][si].shape[1:])
 
             # Predictions
             if self.args.single_cls:
                 pred[:, 5] = 0
             predn = pred.clone()
-            ops.scale_boxes(batch["img"][si].shape[1:], predn[:, :4], shape,
-                            ratio_pad=batch["ratio_pad"][si])  # native-space pred
+            ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape,
+                            ratio_pad=batch['ratio_pad'][si])  # native-space pred
 
             # Evaluate
             if nl:
-                height, width = batch["img"].shape[2:]
+                height, width = batch['img'].shape[2:]
                 tbox = ops.xywh2xyxy(bbox) * torch.tensor(
                     (width, height, width, height), device=self.device)  # target boxes
-                ops.scale_boxes(batch["img"][si].shape[1:], tbox, shape,
-                                ratio_pad=batch["ratio_pad"][si])  # native-space labels
+                ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape,
+                                ratio_pad=batch['ratio_pad'][si])  # native-space labels
                 labelsn = torch.cat((cls, tbox), 1)  # native-space labels
                 correct_bboxes = self._process_batch(predn, labelsn)
                 # TODO: maybe remove these `self.` arguments as they already are member variable
@@ -112,11 +112,11 @@ class SegmentationValidator(DetectionValidator):
             # Save
             if self.args.save_json:
-                pred_masks = ops.scale_image(batch["img"][si].shape[1:],
+                pred_masks = ops.scale_image(batch['img'][si].shape[1:],
                                              pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
                                              shape,
-                                             ratio_pad=batch["ratio_pad"][si])
-                self.pred_to_json(predn, batch["im_file"][si], pred_masks)
+                                             ratio_pad=batch['ratio_pad'][si])
+                self.pred_to_json(predn, batch['im_file'][si], pred_masks)
             # if self.args.save_txt:
             #    save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
@@ -136,7 +136,7 @@ class SegmentationValidator(DetectionValidator):
                 gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
                 gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
             if gt_masks.shape[1:] != pred_masks.shape[1:]:
-                gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
+                gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
                 gt_masks = gt_masks.gt_(0.5)
             iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
         else:  # boxes
@@ -158,20 +158,20 @@ class SegmentationValidator(DetectionValidator):
         return torch.tensor(correct, dtype=torch.bool, device=detections.device)
 
     def plot_val_samples(self, batch, ni):
-        plot_images(batch["img"],
-                    batch["batch_idx"],
-                    batch["cls"].squeeze(-1),
-                    batch["bboxes"],
-                    batch["masks"],
-                    paths=batch["im_file"],
-                    fname=self.save_dir / f"val_batch{ni}_labels.jpg",
+        plot_images(batch['img'],
+                    batch['batch_idx'],
+                    batch['cls'].squeeze(-1),
+                    batch['bboxes'],
+                    batch['masks'],
+                    paths=batch['im_file'],
+                    fname=self.save_dir / f'val_batch{ni}_labels.jpg',
                     names=self.names)
 
     def plot_predictions(self, batch, preds, ni):
-        plot_images(batch["img"],
+        plot_images(batch['img'],
                     *output_to_target(preds[0], max_det=15),
                     torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks,
-                    paths=batch["im_file"],
+                    paths=batch['im_file'],
                     fname=self.save_dir / f'val_batch{ni}_pred.jpg',
                     names=self.names)  # pred
         self.plot_masks.clear()
@@ -182,8 +182,8 @@ class SegmentationValidator(DetectionValidator):
         from pycocotools.mask import encode  # noqa
 
         def single_encode(x):
-            rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
-            rle["counts"] = rle["counts"].decode("utf-8")
+            rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
+            rle['counts'] = rle['counts'].decode('utf-8')
             return rle
 
         stem = Path(filename).stem
@@ -203,8 +203,8 @@ class SegmentationValidator(DetectionValidator):
     def eval_json(self, stats):
         if self.args.save_json and self.is_coco and len(self.jdict):
-            anno_json = self.data['path'] / "annotations/instances_val2017.json"  # annotations
-            pred_json = self.save_dir / "predictions.json"  # predictions
+            anno_json = self.data['path'] / 'annotations/instances_val2017.json'  # annotations
+            pred_json = self.save_dir / 'predictions.json'  # predictions
             self.logger.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...')
             try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
                 check_requirements('pycocotools>=2.0.6')
@@ -212,7 +212,7 @@ class SegmentationValidator(DetectionValidator):
                 from pycocotools.cocoeval import COCOeval  # noqa
 
                 for x in anno_json, pred_json:
-                    assert x.is_file(), f"{x} file not found"
+                    assert x.is_file(), f'{x} file not found'
                 anno = COCO(str(anno_json))  # init annotations api
                 pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
                 for i, eval in enumerate([COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm')]):
@@ -231,8 +231,8 @@ class SegmentationValidator(DetectionValidator):
 def val(cfg=DEFAULT_CFG, use_python=False):
-    model = cfg.model or "yolov8n-seg.pt"
-    data = cfg.data or "coco128-seg.yaml"
+    model = cfg.model or 'yolov8n-seg.pt'
+    data = cfg.data or 'coco128-seg.yaml'
 
     args = dict(model=model, data=data)
     if use_python:
@@ -243,5 +243,5 @@ def val(cfg=DEFAULT_CFG, use_python=False):
         validator(model=args['model'])
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     val()
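
And a matching validation sketch for the val() entry point, under the same assumption that the high-level YOLO API is used:

from ultralytics import YOLO

# Evaluate the default segmentation checkpoint on the default dataset
model = YOLO('yolov8n-seg.pt')
metrics = model.val(data='coco128-seg.yaml')  # dispatches to the segment validator for -seg models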