Cleanup (#168)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Kalen Michael <kalenmike@gmail.com>

@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# Ultralytics YOLO 🚀, GPL-3.0 license
 """
 Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
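For context on what this export entry point does, here is a minimal, hedged sketch of exporting a PyTorch model to ONNX with the stock torch.onnx.export API. It is an illustration only, not the repo's export.py; the stand-in model, input shape, and file name are assumptions.

import torch

# Illustrative only: a stand-in module in place of a YOLO detection model.
model = torch.nn.Sequential(torch.nn.Conv2d(3, 16, 3), torch.nn.SiLU())
model.eval()
dummy = torch.zeros(1, 3, 640, 640)  # (batch, channels, height, width)

# Export with a dynamic batch dimension; opset and tensor names are assumptions.
torch.onnx.export(
    model, dummy, "model.onnx",
    opset_version=12,
    input_names=["images"],
    output_names=["output"],
    dynamic_axes={"images": {0: "batch"}, "output": {0: "batch"}},
)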
@@ -1,3 +1,5 @@
 # Ultralytics YOLO 🚀, GPL-3.0 license

+from pathlib import Path
+
 from ultralytics import yolo  # noqa
@@ -1,4 +1,4 @@
-# predictor engine by Ultralytics
+# Ultralytics YOLO 🚀, GPL-3.0 license
 """
 Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc.

 Usage - sources:
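The source list in this docstring is cut off in the diff. Purely as an illustration of the kind of loop such a predictor runs over image sources (this is not the repo's predictor engine; the directory, glob pattern, and stand-in model are assumptions):

from pathlib import Path

import torch
from PIL import Image
from torchvision import transforms

model = torch.nn.Identity()       # stand-in for a detection model
to_tensor = transforms.ToTensor()

# Iterate over an assumed directory of JPEG images and run the model on each.
for img_path in sorted(Path("data/images").glob("*.jpg")):
    im = to_tensor(Image.open(img_path)).unsqueeze(0)  # PIL image -> 1x3xHxW float tensor
    with torch.no_grad():
        preds = model(im)
    print(img_path.name, tuple(preds.shape))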
@@ -1,3 +1,4 @@
 # Ultralytics YOLO 🚀, GPL-3.0 license
 """
 Simple training loop; Boilerplate that could apply to any arbitrary neural network,
 """
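Since this docstring describes generic training-loop boilerplate, here is a hedged, minimal sketch of what such a loop looks like for an arbitrary network. It is not the repo's BaseTrainer; the stand-in model, optimizer, loss, and toy data are assumptions.

import torch

model = torch.nn.Linear(10, 1)                             # stand-in network
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loss_fn = torch.nn.MSELoss()
x, y = torch.randn(64, 10), torch.randn(64, 1)             # assumed toy data

for epoch in range(3):
    optimizer.zero_grad()            # reset gradients
    loss = loss_fn(model(x), y)      # forward pass
    loss.backward()                  # backward pass
    optimizer.step()                 # parameter update
    print(f"epoch {epoch}: loss {loss.item():.4f}")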
@@ -1,3 +1,5 @@
 # Ultralytics YOLO 🚀, GPL-3.0 license

 import json
 from collections import defaultdict
 from pathlib import Path
@@ -86,6 +88,7 @@ class BaseValidator:
             self.model = model
             self.loss = torch.zeros_like(trainer.loss_items, device=trainer.device)
+            self.args.plots = trainer.epoch == trainer.epochs - 1  # always plot final epoch
             model.eval()
         else:
             callbacks.add_integration_callbacks(self)
             self.run_callbacks('on_val_start')
@@ -106,17 +109,17 @@ class BaseValidator:
                     f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

             if isinstance(self.args.data, str) and self.args.data.endswith(".yaml"):
-                data = check_dataset_yaml(self.args.data)
+                self.data = check_dataset_yaml(self.args.data)
             else:
-                data = check_dataset(self.args.data)
+                self.data = check_dataset(self.args.data)

             if self.device.type == 'cpu':
                 self.args.workers = 0  # faster CPU val as time dominated by inference, not dataloading
             self.dataloader = self.dataloader or \
-                self.get_dataloader(data.get("val") or data.set("test"), self.args.batch)
-            self.data = data
+                self.get_dataloader(self.data.get("val") or self.data.set("test"), self.args.batch)

             model.eval()
             model.warmup(imgsz=(1 if pt else self.args.batch, 3, imgsz, imgsz))  # warmup

         dt = Profile(), Profile(), Profile(), Profile()
         n_batches = len(self.dataloader)
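dt = Profile(), Profile(), Profile(), Profile() allocates one accumulating timer per stage of the validation loop. As a hedged sketch of what such a context-manager timer can look like (the class name matches, but this is not necessarily the repo's implementation, and the stage split is an assumption):

import time
from contextlib import ContextDecorator

class Profile(ContextDecorator):
    # Accumulates elapsed wall-clock seconds across repeated uses.
    def __init__(self):
        self.t = 0.0

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc):
        self.t += time.time() - self.start
        return False

# Usage: one timer per assumed stage (e.g. preprocess, inference, loss, postprocess),
# reused on every batch so the totals accumulate.
dt = Profile(), Profile(), Profile(), Profile()
with dt[0]:
    time.sleep(0.01)  # stand-in for per-batch preprocessing work
print(f"stage 0 total: {dt[0].t:.3f}s")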