New YOLOv8 Results() class for prediction outputs (#314)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: Laughing-q <1185102784@qq.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>
Co-authored-by: Viet Nhat Thai <60825385+vietnhatthai@users.noreply.github.com>
Co-authored-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com>
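Summary: each task predictor's postprocess() now wraps its raw prediction tensors in a single Results object (boxes= for detect, masks= for segment, probs= for classify) instead of returning bare tensors, and write_results() consumes that object. A minimal sketch of the container as the constructor calls in the hunks below imply it; the tensor values here are dummies:

    import torch

    from ultralytics.yolo.engine.results import Results

    # one fake detection row in (x1, y1, x2, y2, conf, cls) layout, as produced by NMS
    pred = torch.tensor([[10., 20., 110., 220., 0.9, 0.]])
    res = Results(boxes=pred, orig_shape=(480, 640))  # orig_shape = (h, w) of the source image
    print(len(res))                                   # the predictors below guard on len(result) == 0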
ultralytics/yolo/v8/classify/predict.py
@@ -4,8 +4,8 @@ import hydra
 import torch
 
 from ultralytics.yolo.engine.predictor import BasePredictor
+from ultralytics.yolo.engine.results import Results
 from ultralytics.yolo.utils import DEFAULT_CONFIG, ROOT
 from ultralytics.yolo.utils.checks import check_imgsz
 from ultralytics.yolo.utils.plotting import Annotator
 
@@ -15,20 +15,27 @@ class ClassificationPredictor(BasePredictor):
         return Annotator(img, example=str(self.model.names), pil=True)
 
     def preprocess(self, img):
-        img = torch.Tensor(img).to(self.model.device)
+        img = (img if isinstance(img, torch.Tensor) else torch.Tensor(img)).to(self.model.device)
         img = img.half() if self.model.fp16 else img.float()  # uint8 to fp16/32
         return img
 
-    def write_results(self, idx, preds, batch):
+    def postprocess(self, preds, img, orig_img):
+        results = []
+        for i, pred in enumerate(preds):
+            shape = orig_img[i].shape if isinstance(orig_img, list) else orig_img.shape
+            results.append(Results(probs=pred.softmax(0), orig_shape=shape[:2]))
+        return results
+
+    def write_results(self, idx, results, batch):
         p, im, im0 = batch
         log_string = ""
         if len(im.shape) == 3:
             im = im[None]  # expand for batch dim
         self.seen += 1
         im0 = im0.copy()
-        if self.webcam:  # batch_size >= 1
+        if self.webcam or self.from_img:  # batch_size >= 1
             log_string += f'{idx}: '
-            frame = self.dataset.cound
+            frame = self.dataset.count
         else:
             frame = getattr(self.dataset, 'frame', 0)
@@ -38,9 +45,10 @@ class ClassificationPredictor(BasePredictor):
         log_string += '%gx%g ' % im.shape[2:]  # print string
         self.annotator = self.get_annotator(im0)
 
-        prob = preds[idx].softmax(0)
-        if self.return_outputs:
-            self.output["prob"] = prob.cpu().numpy()
+        result = results[idx]
+        if len(result) == 0:
+            return log_string
+        prob = result.probs
         # Print results
         top5i = prob.argsort(0, descending=True)[:5].tolist()  # top 5 indices
         log_string += f"{', '.join(f'{self.model.names[j]} {prob[j]:.2f}' for j in top5i)}, "
@@ -59,7 +67,6 @@ class ClassificationPredictor(BasePredictor):
 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
 def predict(cfg):
     cfg.model = cfg.model or "yolov8n-cls.pt"  # or "resnet18"
     cfg.imgsz = check_imgsz(cfg.imgsz, min_dim=2)  # check image size
     cfg.source = cfg.source if cfg.source is not None else ROOT / "assets"
-
     predictor = ClassificationPredictor(cfg)
     predictor.predict_cli()
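For reference, the top-5 printout in write_results() above can be reproduced in isolation. This sketch mirrors the diff's own logic, with a stand-in names dict and a fake probability vector in place of result.probs:

    import torch

    names = {0: 'cat', 1: 'dog', 2: 'bird'}        # stand-in for self.model.names
    prob = torch.tensor([0.10, 0.70, 0.20])        # stand-in for result.probs
    top5i = prob.argsort(0, descending=True)[:5].tolist()  # top 5 indices
    print(', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i))  # dog 0.70, bird 0.20, cat 0.10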
ultralytics/yolo/v8/classify/train.py
@@ -56,6 +56,8 @@ class ClassificationTrainer(BaseTrainer):
         # Load a YOLO model locally, from torchvision, or from Ultralytics assets
         if model.endswith(".pt"):
             self.model, _ = attempt_load_one_weight(model, device='cpu')
+            for p in self.model.parameters():
+                p.requires_grad = True  # for training
         elif model.endswith(".yaml"):
             self.model = self.get_model(cfg=model)
         elif model in torchvision.models.__dict__:
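The two added lines matter because checkpoint weights may arrive with gradients disabled (frozen for inference), so they must be re-enabled before fine-tuning. A self-contained illustration with a plain torch module standing in for the loaded model:

    import torch.nn as nn

    model = nn.Linear(4, 2)
    for p in model.parameters():
        p.requires_grad = False            # simulate a frozen, inference-only checkpoint

    for p in model.parameters():           # same fix as the hunk above
        p.requires_grad = True             # for training
    print(all(p.requires_grad for p in model.parameters()))  # True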
ultralytics/yolo/v8/detect/predict.py
@@ -4,8 +4,8 @@ import hydra
 import torch
 
 from ultralytics.yolo.engine.predictor import BasePredictor
+from ultralytics.yolo.engine.results import Results
 from ultralytics.yolo.utils import DEFAULT_CONFIG, ROOT, ops
 from ultralytics.yolo.utils.checks import check_imgsz
 from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
 
@@ -27,58 +27,53 @@ class DetectionPredictor(BasePredictor):
                                         agnostic=self.args.agnostic_nms,
                                         max_det=self.args.max_det)
 
+        results = []
         for i, pred in enumerate(preds):
-            shape = orig_img[i].shape if self.webcam else orig_img.shape
+            shape = orig_img[i].shape if isinstance(orig_img, list) else orig_img.shape
             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
+            results.append(Results(boxes=pred, orig_shape=shape[:2]))
+        return results
 
-        return preds
-
-    def write_results(self, idx, preds, batch):
+    def write_results(self, idx, results, batch):
         p, im, im0 = batch
         log_string = ""
         if len(im.shape) == 3:
             im = im[None]  # expand for batch dim
         self.seen += 1
         im0 = im0.copy()
-        if self.webcam:  # batch_size >= 1
+        if self.webcam or self.from_img:  # batch_size >= 1
             log_string += f'{idx}: '
             frame = self.dataset.count
         else:
             frame = getattr(self.dataset, 'frame', 0)
 
         self.data_path = p
         # save_path = str(self.save_dir / p.name)  # im.jpg
         self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
         log_string += '%gx%g ' % im.shape[2:]  # print string
         self.annotator = self.get_annotator(im0)
 
-        det = preds[idx]
+        det = results[idx].boxes  # TODO: make boxes inherit from tensors
         if len(det) == 0:
             return log_string
-        for c in det[:, 5].unique():
-            n = (det[:, 5] == c).sum()  # detections per class
+        for c in det.cls.unique():
+            n = (det.cls == c).sum()  # detections per class
             log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "
 
-        if self.return_outputs:
-            self.output["det"] = det.cpu().numpy()
-
         # write
-        gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
-        for *xyxy, conf, cls in reversed(det):
+        for d in reversed(det):
+            cls, conf = d.cls.squeeze(), d.conf.squeeze()
             if self.args.save_txt:  # Write to file
-                xywh = (ops.xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
-                line = (cls, *xywh, conf) if self.args.save_conf else (cls, *xywh)  # label format
+                line = (cls, *(d.xywhn.view(-1).tolist()), conf) \
+                    if self.args.save_conf else (cls, *(d.xywhn.view(-1).tolist()))  # label format
                 with open(f'{self.txt_path}.txt', 'a') as f:
                     f.write(('%g ' * len(line)).rstrip() % line + '\n')
 
             if self.args.save or self.args.save_crop or self.args.show:  # Add bbox to image
                 c = int(cls)  # integer class
                 label = None if self.args.hide_labels else (
                     self.model.names[c] if self.args.hide_conf else f'{self.model.names[c]} {conf:.2f}')
-                self.annotator.box_label(xyxy, label, color=colors(c, True))
+                self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
                 if self.args.save_crop:
                     imc = im0.copy()
-                    save_one_box(xyxy,
+                    save_one_box(d.xyxy,
                                  imc,
                                  file=self.save_dir / 'crops' / self.model.model.names[c] / f'{self.data_path.stem}.jpg',
                                  BGR=True)
@@ -89,7 +84,6 @@ class DetectionPredictor(BasePredictor):
 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
 def predict(cfg):
     cfg.model = cfg.model or "yolov8n.pt"
     cfg.imgsz = check_imgsz(cfg.imgsz, min_dim=2)  # check image size
     cfg.source = cfg.source if cfg.source is not None else ROOT / "assets"
-
     predictor = DetectionPredictor(cfg)
     predictor.predict_cli()
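Taken together, the detect changes mean downstream code reads boxes through accessors rather than tensor slicing. A hedged usage sketch, assuming only the attributes visible in this diff (.boxes, .cls, .conf, .xyxy, .xywhn) and dummy data:

    import torch

    from ultralytics.yolo.engine.results import Results

    pred = torch.tensor([[10., 20., 110., 220., 0.9, 0.]])  # (n, 6): xyxy, conf, cls
    det = Results(boxes=pred, orig_shape=(480, 640)).boxes
    for d in reversed(det):                                  # iterated exactly as write_results() does
        cls, conf = d.cls.squeeze(), d.conf.squeeze()
        print(int(cls), float(conf))
        print(d.xyxy.squeeze().tolist())                     # absolute corner coordinates
        print(d.xywhn.view(-1).tolist())                     # xywh normalized by orig_shape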
ultralytics/yolo/v8/segment/predict.py
@@ -3,8 +3,8 @@
 import hydra
 import torch
 
+from ultralytics.yolo.engine.results import Results
 from ultralytics.yolo.utils import DEFAULT_CONFIG, ROOT, ops
 from ultralytics.yolo.utils.checks import check_imgsz
 from ultralytics.yolo.utils.plotting import colors, save_one_box
 from ultralytics.yolo.v8.detect.predict import DetectionPredictor
@@ -12,7 +12,6 @@ from ultralytics.yolo.v8.detect.predict import DetectionPredictor
 class SegmentationPredictor(DetectionPredictor):
 
     def postprocess(self, preds, img, orig_img):
-        masks = []
         # TODO: filter by classes
         p = ops.non_max_suppression(preds[0],
                                     self.args.conf,
@@ -20,27 +19,29 @@ class SegmentationPredictor(DetectionPredictor):
                                     agnostic=self.args.agnostic_nms,
                                     max_det=self.args.max_det,
                                     nm=32)
+        results = []
         proto = preds[1][-1]
         for i, pred in enumerate(p):
-            shape = orig_img[i].shape if self.webcam else orig_img.shape
+            shape = orig_img[i].shape if isinstance(orig_img, list) else orig_img.shape
+            if not len(pred):
+                results.append(Results(boxes=pred[:, :6], orig_shape=shape[:2]))  # save empty boxes
+                continue
             if self.args.retina_masks:
                 pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
-                masks.append(ops.process_mask_native(proto[i], pred[:, 6:], pred[:, :4], shape[:2]))  # HWC
+                masks = ops.process_mask_native(proto[i], pred[:, 6:], pred[:, :4], shape[:2])  # HWC
             else:
-                masks.append(ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True))  # HWC
+                masks = ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True)  # HWC
                 pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
+            results.append(Results(boxes=pred[:, :6], masks=masks, orig_shape=shape[:2]))
+        return results
 
-        return (p, masks)
-
-    def write_results(self, idx, preds, batch):
+    def write_results(self, idx, results, batch):
         p, im, im0 = batch
         log_string = ""
         if len(im.shape) == 3:
             im = im[None]  # expand for batch dim
         self.seen += 1
-        if self.webcam:  # batch_size >= 1
+        if self.webcam or self.from_img:  # batch_size >= 1
             log_string += f'{idx}: '
             frame = self.dataset.count
         else:
@@ -51,54 +52,48 @@ class SegmentationPredictor(DetectionPredictor):
         log_string += '%gx%g ' % im.shape[2:]  # print string
         self.annotator = self.get_annotator(im0)
 
-        preds, masks = preds
-        det = preds[idx]
-        if len(det) == 0:
+        result = results[idx]
+        if len(result) == 0:
             return log_string
-        # Segments
-        mask = masks[idx]
-        if self.args.save_txt or self.return_outputs:
-            shape = im0.shape if self.args.retina_masks else im.shape[2:]
-            segments = [
-                ops.scale_segments(shape, x, im0.shape, normalize=False) for x in reversed(ops.masks2segments(mask))]
+        det, mask = result.boxes, result.masks  # getting tensors TODO: mask mask,box inherit for tensor
 
         # Print results
-        for c in det[:, 5].unique():
-            n = (det[:, 5] == c).sum()  # detections per class
-            log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "  # add to string
+        for c in det.cls.unique():
+            n = (det.cls == c).sum()  # detections per class
+            log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "
 
         # Mask plotting
         self.annotator.masks(
-            mask,
-            colors=[colors(x, True) for x in det[:, 5]],
+            mask.masks,
+            colors=[colors(x, True) for x in det.cls],
             im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(self.device).permute(2, 0, 1).flip(0).contiguous() /
             255 if self.args.retina_masks else im[idx])
 
-        det = reversed(det[:, :6])
-        if self.return_outputs:
-            self.output["det"] = det.cpu().numpy()
-            self.output["segment"] = segments
+        # Segments
+        if self.args.save_txt:
+            segments = mask.segments
 
         # Write results
-        for j, (*xyxy, conf, cls) in enumerate(det):
+        for j, d in enumerate(reversed(det)):
+            cls, conf = d.cls.squeeze(), d.conf.squeeze()
             if self.args.save_txt:  # Write to file
                 seg = segments[j].copy()
                 seg[:, 0] /= shape[1]  # width
                 seg[:, 1] /= shape[0]  # height
                 seg = seg.reshape(-1)  # (n,2) to (n*2)
                 line = (cls, *seg, conf) if self.args.save_conf else (cls, *seg)  # label format
                 with open(f'{self.txt_path}.txt', 'a') as f:
                     f.write(('%g ' * len(line)).rstrip() % line + '\n')
 
-            if self.args.save or self.args.save_crop or self.args.show:
+            if self.args.save or self.args.save_crop or self.args.show:  # Add bbox to image
                 c = int(cls)  # integer class
                 label = None if self.args.hide_labels else (
                     self.model.names[c] if self.args.hide_conf else f'{self.model.names[c]} {conf:.2f}')
-                self.annotator.box_label(xyxy, label, color=colors(c, True))
-                # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3)
+                self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
                 if self.args.save_crop:
                     imc = im0.copy()
-                    save_one_box(xyxy, imc, file=self.save_dir / 'crops' / self.model.names[c] / f'{p.stem}.jpg', BGR=True)
+                    save_one_box(d.xyxy,
+                                 imc,
+                                 file=self.save_dir / 'crops' / self.model.model.names[c] / f'{self.data_path.stem}.jpg',
+                                 BGR=True)
 
         return log_string
@@ -106,7 +101,6 @@ class SegmentationPredictor(DetectionPredictor):
 @hydra.main(version_base=None, config_path=str(DEFAULT_CONFIG.parent), config_name=DEFAULT_CONFIG.name)
 def predict(cfg):
     cfg.model = cfg.model or "yolov8n-seg.pt"
     cfg.imgsz = check_imgsz(cfg.imgsz, min_dim=2)  # check image size
    cfg.source = cfg.source if cfg.source is not None else ROOT / "assets"
-
     predictor = SegmentationPredictor(cfg)
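And the segmentation analogue: one Results now carries boxes and masks together, with mask.masks feeding annotator.masks() and mask.segments feeding save_txt. A sketch with dummy shapes; the mask dimensions and the internals of the Masks wrapper are assumptions here, not taken from the diff:

    import torch

    from ultralytics.yolo.engine.results import Results

    pred = torch.tensor([[10., 20., 110., 220., 0.9, 0.]])  # (n, 6) boxes kept as pred[:, :6]
    masks = torch.zeros(1, 160, 160)                         # (n, h, w), as ops.process_mask() returns
    result = Results(boxes=pred, masks=masks, orig_shape=(480, 640))
    det, mask = result.boxes, result.masks                   # same unpacking as write_results()
    print(len(result), mask.masks.shape)                     # 1 torch.Size([1, 160, 160]) (assumed)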