Create Exporter() Class (#117)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Glenn Jocher authored 2022-12-30 01:28:41 +01:00, committed by GitHub
parent a9dc1637c2
commit 076d73cfaa
10 changed files with 531 additions and 540 deletions

View File

@@ -31,7 +31,7 @@ def cli(cfg):
elif task == "classify":
module = yolo.v8.classify
elif task == "export":
func = yolo.trainer.exporter.export_model
func = yolo.engine.exporter.export
else:
raise SyntaxError("task not recognized. Choices are `'detect', 'segment', 'classify'`")
@@ -42,7 +42,7 @@ def cli(cfg):
elif mode == "predict":
func = module.predict
elif mode == "export":
func = yolo.trainer.exporter.export_model
func = yolo.engine.exporter.export
else:
raise SyntaxError("mode not recognized. Choices are `'train', 'val', 'predict', 'export'`")
func(cfg)
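With this change both the export task and the export mode are routed to the new yolo.engine.exporter.export entrypoint instead of yolo.trainer.exporter.export_model. A minimal sketch of calling the new entrypoint directly from Python, assuming the get_config/DEFAULT_CONFIG helpers that appear later in this diff and an illustrative set of override keys:

    from ultralytics import yolo
    from ultralytics.yolo.configs import get_config
    from ultralytics.yolo.utils import DEFAULT_CONFIG

    # Build a config namespace the same way YOLO.export() does, then hand it to
    # the relocated export function (previously yolo.trainer.exporter.export_model).
    cfg = get_config(config=DEFAULT_CONFIG, overrides={'model': 'yolov8n.pt', 'format': 'onnx'})
    yolo.engine.exporter.export(cfg)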

View File

@@ -29,12 +29,12 @@ image_weights: False # use weighted image selection for training
rect: False # support rectangular training
cos_lr: False # use cosine LR scheduler
close_mosaic: 10 # disable mosaic for final 10 epochs
resume: False
# Segmentation
overlap_mask: True # masks overlap
mask_ratio: 4 # mask downsample ratio
# Classification
dropout: False # use dropout
resume: False
# Val/Test settings ----------------------------------------------------------------------------------------------------
@@ -65,6 +65,7 @@ agnostic_nms: False # class-agnostic NMS
retina_masks: False
# Export settings ------------------------------------------------------------------------------------------------------
format: torchscript
keras: False # use Keras
optimize: False # TorchScript: optimize for mobile
int8: False # CoreML/TF INT8 quantization
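The export settings above (format, keras, optimize, int8) are the defaults the new Exporter reads; per the overrides.update(kwargs) logic shown later in this diff, individual keys can be overridden per call. A hedged sketch using the Python API from the HELP_MSG further down, with illustrative values:

    from ultralytics import YOLO

    model = YOLO.new('yolov8n.yaml')                    # create a model, as in the HELP_MSG example
    model.export(format='onnx')                         # falls back to the Export settings above for everything else
    model.export(format='torchscript', optimize=True)   # overriding a single default.yaml key (assumed to be accepted as a kwarg)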

File diff suppressed because it is too large

View File

@@ -5,7 +5,7 @@ import torch
from ultralytics import yolo # noqa required for python usage
from ultralytics.nn.tasks import ClassificationModel, DetectionModel, SegmentationModel, attempt_load_weights
from ultralytics.yolo.configs import get_config
from ultralytics.yolo.engine.exporter import export_model
from ultralytics.yolo.engine.exporter import Exporter
from ultralytics.yolo.utils import DEFAULT_CONFIG, HELP_MSG, LOGGER
from ultralytics.yolo.utils.checks import check_yaml
from ultralytics.yolo.utils.files import yaml_load
@@ -164,7 +164,7 @@ class YOLO:
validator(model=self.model)
@smart_inference_mode()
def export(self, format='', save_dir='', **kwargs):
def export(self, **kwargs):
"""
Export model.
@@ -177,36 +177,9 @@
overrides.update(kwargs)
args = get_config(config=DEFAULT_CONFIG, overrides=overrides)
args.task = self.task
args.format = format
file = self.ckpt or Path(Path(self.cfg).name)
if save_dir:
file = Path(save_dir) / file.name
file.parent.mkdir(parents=True, exist_ok=True)
export_model(
model=self.model,
file=file,
data=args.data, # 'dataset.yaml path'
imgsz=args.imgsz or (640, 640), # image (height, width)
batch_size=1, # batch size
device=args.device, # cuda device, i.e. 0 or 0,1,2,3 or cpu
format=args.format, # include formats
half=args.half or False, # FP16 half-precision export
keras=args.keras or False, # use Keras
optimize=args.optimize or False, # TorchScript: optimize for mobile
int8=args.int8 or False, # CoreML/TF INT8 quantization
dynamic=args.dynamic or False, # ONNX/TF/TensorRT: dynamic axes
opset=args.opset or 17, # ONNX: opset version
verbose=False, # TensorRT: verbose log
workspace=args.workspace or 4, # TensorRT: workspace size (GB)
nms=False, # TF: add NMS to model
agnostic_nms=False, # TF: add agnostic NMS to model
topk_per_class=100, # TF.js NMS: topk per class to keep
topk_all=100, # TF.js NMS: topk for all classes to keep
iou_thres=0.45, # TF.js NMS: IoU threshold
conf_thres=0.25, # TF.js NMS: confidence threshold
)
exporter = Exporter(overrides=overrides)
exporter(model=self.model)
def train(self, **kwargs):
"""

View File

@@ -16,14 +16,14 @@ Usage - formats:
$ yolo task=... mode=predict --weights yolov8n.pt # PyTorch
yolov8n.torchscript # TorchScript
yolov8n.onnx # ONNX Runtime or OpenCV DNN with --dnn
yolov5s_openvino_model # OpenVINO
yolov8n_openvino_model # OpenVINO
yolov8n.engine # TensorRT
yolov8n.mlmodel # CoreML (macOS-only)
yolov5s_saved_model # TensorFlow SavedModel
yolov8n_saved_model # TensorFlow SavedModel
yolov8n.pb # TensorFlow GraphDef
yolov8n.tflite # TensorFlow Lite
yolov5s_edgetpu.tflite # TensorFlow Edge TPU
yolov5s_paddle_model # PaddlePaddle
yolov8n_edgetpu.tflite # TensorFlow Edge TPU
yolov8n_paddle_model # PaddlePaddle
"""
import platform
from pathlib import Path

View File

@@ -25,14 +25,12 @@ TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format
LOGGING_NAME = 'yolov5'
HELP_MSG = \
"""
Please refer to below Usage examples for help running YOLOv8
For help visit Ultralytics Community at https://community.ultralytics.com/
Submit bug reports to https//github.com/ultralytics/ultralytics
Please refer to below Usage examples for help running YOLOv8:
Install:
pip install ultralytics
Python usage:
Python SDK:
from ultralytics import YOLO
model = YOLO.new('yolov8n.yaml') # create a new model from scratch
@@ -42,12 +40,15 @@ HELP_MSG = \
results = model.predict(source='bus.jpg')
success = model.export(format='onnx')
CLI usage:
yolo task=detect mode=train model=yolov8n.yaml ...
classify predict yolov8n-cls.yaml
segment val yolov8n-seg.yaml
CLI:
yolo task=detect mode=train model=yolov8n.yaml args...
classify predict yolov8n-cls.yaml args...
segment val yolov8n-seg.yaml args...
export yolov8n.pt format=onnx args...
For all arguments see https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/utils/configs/default.yaml
Docs: https://docs.ultralytics.com
Community: https://community.ultralytics.com
GitHub: https://github.com/ultralytics/ultralytics
"""
# Settings
@@ -56,7 +57,6 @@ HELP_MSG = \
pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads
os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy)
def is_colab():

View File

@@ -36,8 +36,8 @@ def on_val_end(trainer):
if trainer.epoch == 0:
model_info = {
"Parameters": get_num_params(trainer.model),
"GFLOPs": round(get_flops(trainer.model), 1),
"Inference speed (ms/img)": round(trainer.validator.speed[1], 1)}
"GFLOPs": round(get_flops(trainer.model), 3),
"Inference speed (ms/img)": round(trainer.validator.speed[1], 3)}
Task.current_task().connect(model_info, name='Model')

View File

@@ -19,8 +19,8 @@ def on_val_end(trainer):
if trainer.epoch == 0:
model_info = {
"model/parameters": get_num_params(trainer.model),
"model/GFLOPs": round(get_flops(trainer.model), 1),
"model/speed(ms)": round(trainer.validator.speed[1], 1)}
"model/GFLOPs": round(get_flops(trainer.model), 3),
"model/speed(ms)": round(trainer.validator.speed[1], 3)}
wandb.run.log(model_info, step=trainer.epoch + 1)
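Both the ClearML and Weights & Biases callbacks now round GFLOPs and inference speed to three decimals instead of one, presumably so that small values are not flattened to 0.0 in the dashboards. A quick illustration with made-up numbers:

    flops, speed_ms = 0.0437, 0.27           # illustrative values only
    round(flops, 1), round(speed_ms, 1)      # -> (0.0, 0.3): detail lost at one decimal
    round(flops, 3), round(speed_ms, 3)      # -> (0.044, 0.27): what the callbacks log now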