ultralytics 8.0.70
minor fixes and improvements (#1892)
Co-authored-by: feicccccccc <49809204+feicccccccc@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Laughing-q <1185102784@qq.com>
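The hunks below drop the per-call Annotator drawing in the task predictors and instead store the output of Results.plot() in self.plotted_img. A minimal usage sketch of that plotting path (assumes ultralytics is installed; the checkpoint and image file names are illustrative):

import cv2
from ultralytics import YOLO

model = YOLO('yolov8n.pt')           # any detection checkpoint
results = model('bus.jpg')           # returns a list of Results, one per image
plotted = results[0].plot()          # annotated BGR numpy array, the same object stored in self.plotted_img
cv2.imwrite('bus_annotated.jpg', plotted)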
@@ -44,7 +44,6 @@ class ClassificationPredictor(BasePredictor):
         # save_path = str(self.save_dir / p.name)  # im.jpg
         self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
         log_string += '%gx%g ' % im.shape[2:]  # print string
-        self.annotator = self.get_annotator(im0)
 
         result = results[idx]
         if len(result) == 0:
@@ -56,10 +55,10 @@ class ClassificationPredictor(BasePredictor):
         log_string += f"{', '.join(f'{self.model.names[j]} {prob[j]:.2f}' for j in top5i)}, "
 
         # write
-        text = '\n'.join(f'{prob[j]:.2f} {self.model.names[j]}' for j in top5i)
         if self.args.save or self.args.show:  # Add bbox to image
-            self.annotator.text((32, 32), text, txt_color=(255, 255, 255))
+            self.plotted_img = result.plot()
         if self.args.save_txt:  # Write to file
+            text = '\n'.join(f'{prob[j]:.2f} {self.model.names[j]}' for j in top5i)
             with open(f'{self.txt_path}.txt', 'a') as f:
                 f.write(text + '\n')
 
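The top-5 text is now built only when save_txt is requested. A toy sketch of the label-file format written above (the probabilities and the names dict are made-up stand-ins for prob and self.model.names):

import torch

prob = torch.softmax(torch.randn(10), dim=0)             # stand-in class probabilities
names = {i: f'class{i}' for i in range(10)}              # stand-in for self.model.names
top5i = prob.argsort(0, descending=True)[:5].tolist()    # indices of the 5 highest scores
text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i)
print(text)                                              # one "probability name" pair per line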
@@ -5,14 +5,11 @@ import torch
 from ultralytics.yolo.engine.predictor import BasePredictor
 from ultralytics.yolo.engine.results import Results
 from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
-from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
+from ultralytics.yolo.utils.plotting import save_one_box
 
 
 class DetectionPredictor(BasePredictor):
 
-    def get_annotator(self, img):
-        return Annotator(img, line_width=self.args.line_thickness, example=str(self.model.names))
-
     def preprocess(self, img):
         img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device)
         img = img.half() if self.model.fp16 else img.float()  # uint8 to fp16/32
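A toy sketch of the dtype handling in preprocess above (the fp16 flag and the final /255 scaling are illustrative stand-ins for the predictor's own attributes):

import torch

img = torch.randint(0, 255, (1, 3, 640, 640), dtype=torch.uint8)  # fake batched image
fp16 = False                                   # stand-in for self.model.fp16
img = img.half() if fp16 else img.float()      # uint8 to fp16/32
img /= 255                                     # 0-255 -> 0.0-1.0
print(img.dtype, float(img.max()) <= 1.0)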
@@ -52,15 +49,18 @@ class DetectionPredictor(BasePredictor):
         self.data_path = p
         self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
         log_string += '%gx%g ' % im.shape[2:]  # print string
-        self.annotator = self.get_annotator(im0)
 
-        det = results[idx].boxes  # TODO: make boxes inherit from tensors
-        if len(det) == 0:
+        result = results[idx]  # TODO: make boxes inherit from tensors
+        if len(result) == 0:
             return f'{log_string}(no detections), '
+        det = result.boxes
         for c in det.cls.unique():
             n = (det.cls == c).sum()  # detections per class
             log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "
 
+        if self.args.save or self.args.show:  # Add bbox to image
+            self.plotted_img = result.plot(line_width=self.args.line_thickness)
+
         # write
         for d in reversed(det):
             c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item())
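A toy sketch of the per-class summary string assembled in the loop above (the class tensor and names dict are made up; in the predictor they come from det.cls and self.model.names):

import torch

cls = torch.tensor([0, 0, 2, 2, 2, 5])         # stand-in for det.cls
names = {0: 'person', 2: 'car', 5: 'bus'}      # stand-in for self.model.names
log_string = ''
for c in cls.unique():
    n = int((cls == c).sum())                  # detections per class
    log_string += f"{n} {names[int(c)]}{'s' * (n > 1)}, "
print(log_string)                              # 2 persons, 3 cars, 1 bus,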
@@ -68,10 +68,6 @@ class DetectionPredictor(BasePredictor):
                 line = (c, *d.xywhn.view(-1)) + (conf, ) * self.args.save_conf + (() if id is None else (id, ))
                 with open(f'{self.txt_path}.txt', 'a') as f:
                     f.write(('%g ' * len(line)).rstrip() % line + '\n')
-            if self.args.save or self.args.show:  # Add bbox to image
-                name = ('' if id is None else f'id:{id} ') + self.model.names[c]
-                label = (f'{name} {conf:.2f}' if self.args.show_conf else name) if self.args.show_labels else None
-                self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
             if self.args.save_crop:
                 save_one_box(d.xyxy,
                              imc,
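A small worked example of the '%g ' formatting used for the label file above (the tuple values are made up; in the predictor they come from the class index, normalized box, and optional confidence or track id):

line = (0, 0.5, 0.5, 0.25, 0.40, 0.87)          # class, xywhn, confidence
print(('%g ' * len(line)).rstrip() % line)      # -> 0 0.5 0.5 0.25 0.4 0.87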
@@ -2,7 +2,7 @@
 
 from ultralytics.yolo.engine.results import Results
 from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
-from ultralytics.yolo.utils.plotting import colors, save_one_box
+from ultralytics.yolo.utils.plotting import save_one_box
 from ultralytics.yolo.v8.detect.predict import DetectionPredictor
 
 
@@ -49,33 +49,27 @@ class PosePredictor(DetectionPredictor):
         self.data_path = p
         self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
         log_string += '%gx%g ' % im.shape[2:]  # print string
-        self.annotator = self.get_annotator(im0)
 
-        det = results[idx].boxes  # TODO: make boxes inherit from tensors
-        if len(det) == 0:
+        result = results[idx]  # TODO: make boxes inherit from tensors
+        if len(result) == 0:
             return f'{log_string}(no detections), '
+        det = result.boxes
         for c in det.cls.unique():
             n = (det.cls == c).sum()  # detections per class
             log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "
 
-        kpts = reversed(results[idx].keypoints)
-        for k in kpts:
-            self.annotator.kpts(k, shape=results[idx].orig_shape)
+        if self.args.save or self.args.show:  # Add bbox to image
+            self.plotted_img = result.plot(line_width=self.args.line_thickness, boxes=self.args.boxes)
 
         # write
         for j, d in enumerate(reversed(det)):
             c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item())
             if self.args.save_txt:  # Write to file
-                kpt = (kpts[j][:, :2] / d.orig_shape[[1, 0]]).reshape(-1).tolist()
+                kpt = (result[j].keypoints[:, :2] / d.orig_shape[[1, 0]]).reshape(-1).tolist()
                 box = d.xywhn.view(-1).tolist()
                 line = (c, *box, *kpt) + (conf, ) * self.args.save_conf + (() if id is None else (id, ))
                 with open(f'{self.txt_path}.txt', 'a') as f:
                     f.write(('%g ' * len(line)).rstrip() % line + '\n')
-            if self.args.save or self.args.show:  # Add bbox to image
-                name = ('' if id is None else f'id:{id} ') + self.model.names[c]
-                label = (f'{name} {conf:.2f}' if self.args.show_conf else name) if self.args.show_labels else None
-                if self.args.boxes:
-                    self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
             if self.args.save_crop:
                 save_one_box(d.xyxy,
                              imc,
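A toy sketch of the keypoint normalization used for save_txt above (the tensors are made up; in the predictor they come from result[j].keypoints and d.orig_shape):

import torch

orig_shape = torch.tensor([480, 640])            # (h, w) of the original image
kpts = torch.tensor([[320., 240., 0.9],          # x, y, visibility per keypoint
                     [160., 120., 0.8]])
kpt = (kpts[:, :2] / orig_shape[[1, 0]]).reshape(-1).tolist()
print(kpt)                                       # [0.5, 0.5, 0.25, 0.25] -> flattened x/w, y/h pairs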
@@ -198,7 +198,7 @@ class PoseValidator(DetectionValidator):
 
 def val(cfg=DEFAULT_CFG, use_python=False):
     model = cfg.model or 'yolov8n-pose.pt'
-    data = cfg.data or 'coco128-pose.yaml'
+    data = cfg.data or 'coco8-pose.yaml'
 
     args = dict(model=model, data=data)
     if use_python:
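A hedged usage sketch of pose validation with the new default dataset (assumes ultralytics is installed; coco8-pose is the small example pose dataset this hunk makes the default):

from ultralytics import YOLO

model = YOLO('yolov8n-pose.pt')
metrics = model.val(data='coco8-pose.yaml')   # runs PoseValidator under the hood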
@@ -4,7 +4,7 @@ import torch
 
 from ultralytics.yolo.engine.results import Results
 from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
-from ultralytics.yolo.utils.plotting import colors, save_one_box
+from ultralytics.yolo.utils.plotting import save_one_box
 from ultralytics.yolo.v8.detect.predict import DetectionPredictor
 
 
@@ -56,7 +56,6 @@ class SegmentationPredictor(DetectionPredictor):
         self.data_path = p
         self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
         log_string += '%gx%g ' % im.shape[2:]  # print string
-        self.annotator = self.get_annotator(im0)
 
         result = results[idx]
         if len(result) == 0:
@@ -72,7 +71,7 @@ class SegmentationPredictor(DetectionPredictor):
         if self.args.save or self.args.show:
             im_gpu = torch.as_tensor(im0, dtype=torch.float16, device=mask.masks.device).permute(
                 2, 0, 1).flip(0).contiguous() / 255 if self.args.retina_masks else im[idx]
-            self.annotator.masks(masks=mask.masks, colors=[colors(x, True) for x in det.cls], im_gpu=im_gpu)
+            self.plotted_img = result.plot(line_width=self.args.line_thickness, im_gpu=im_gpu, boxes=self.args.boxes)
 
         # Write results
         for j, d in enumerate(reversed(det)):
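A toy sketch of the im_gpu preparation above, converting an HWC BGR uint8 frame into the CHW RGB float tensor in [0, 1] that the mask plotting path uses here (the frame is random data):

import numpy as np
import torch

im0 = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # stand-in original frame
im_gpu = torch.as_tensor(im0, dtype=torch.float16).permute(2, 0, 1).flip(0).contiguous() / 255
print(im_gpu.shape, im_gpu.dtype)            # torch.Size([3, 480, 640]) torch.float16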
@@ -82,11 +81,6 @@ class SegmentationPredictor(DetectionPredictor):
                 line = (c, *seg) + (conf, ) * self.args.save_conf + (() if id is None else (id, ))
                 with open(f'{self.txt_path}.txt', 'a') as f:
                     f.write(('%g ' * len(line)).rstrip() % line + '\n')
-            if self.args.save or self.args.show:  # Add bbox to image
-                name = ('' if id is None else f'id:{id} ') + self.model.names[c]
-                label = (f'{name} {conf:.2f}' if self.args.show_conf else name) if self.args.show_labels else None
-                if self.args.boxes:
-                    self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
             if self.args.save_crop:
                 save_one_box(d.xyxy,
                              imc,
@@ -111,8 +111,7 @@ class SegmentationValidator(DetectionValidator):
 
             # Save
             if self.args.save_json:
-                pred_masks = ops.scale_image(batch['img'][si].shape[1:],
-                                             pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
+                pred_masks = ops.scale_image(pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
                                              shape,
                                              ratio_pad=batch['ratio_pad'][si])
                 self.pred_to_json(predn, batch['im_file'][si], pred_masks)
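A hedged sketch of the updated ops.scale_image call above: the letterboxed input shape is now inferred from the mask array itself rather than passed as a separate first argument (the shapes are illustrative, and the return shape is assumed from the call site above):

import numpy as np
from ultralytics.yolo.utils import ops

masks = np.zeros((640, 640, 1), dtype=np.float32)  # HWC masks at network input size
scaled = ops.scale_image(masks, (480, 720))        # rescale to the original image shape
print(scaled.shape)                                # expected (480, 720, 1)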