standalone val (#56)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Author: Ayush Chaurasia
Date: 2022-11-30 15:04:44 +05:30 (committed via GitHub)
parent 3a241e4cea
commit 5a52e7663a
16 changed files with 161 additions and 31 deletions

View File

@@ -28,17 +28,22 @@ single_cls: False # train multi-class data as single-class
 image_weights: False # use weighted image selection for training
 rect: False # support rectangular training
 cos_lr: False # Use cosine LR scheduler
-overlap_mask: True # Segmentation masks overlap
-mask_ratio: 4 # Segmentation mask downsample ratio
-noval: False
+# Segmentation
+overlap_mask: True # masks overlap
+mask_ratio: 4 # mask downsample ratio
+# Classification
+dropout: False # use dropout
 
 # Val/Test settings ----------------------------------------------------------------------------------------------------
+noval: False
 save_json: False
 save_hybrid: False
 conf_thres: 0.001
 iou_thres: 0.6
 max_det: 300
 half: True
 dnn: False # use OpenCV DNN for ONNX inference
 plots: False
 save_txt: False
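The regrouped keys make the Val/Test block self-contained, which is what lets validation run standalone. A minimal sketch of reading just these thresholds with PyYAML (the config filename is illustrative and this is not the Ultralytics loader):

import yaml

# Illustrative only: pull the val/test thresholds from the config above
with open('default.yaml') as f:
    cfg = yaml.safe_load(f)

conf_thres = cfg['conf_thres']  # 0.001, minimum confidence to keep a prediction
iou_thres = cfg['iou_thres']    # 0.6, IoU threshold used by NMS
max_det = cfg['max_det']        # 300, cap on detections per image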

View File

@@ -113,8 +113,8 @@ def get_model(model='s.pt', pretrained=True):
     model = model.split(".")[0]
 
     if Path(f"{model}.pt").is_file(): # local file
-        return torch.load(f"{model}.pt", map_location='cpu')
+        return attempt_load_weights(f"{model}.pt", device='cpu')
     elif model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0
         return torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None)
     else: # Ultralytics assets
-        return torch.load(attempt_download(f"{model}.pt"), map_location='cpu')
+        return attempt_load_weights(f"{model}.pt", device='cpu')
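Both branches now go through attempt_load_weights, which handles missing assets and unwraps the checkpoint instead of returning the raw torch.load dict. A rough sketch of what such a loader does (the checkpoint keys follow the common Ultralytics layout; this is not the actual implementation):

import torch

def load_weights_sketch(weights_path, device='cpu'):
    # torch.load on a YOLO checkpoint yields a dict, not a ready model
    ckpt = torch.load(weights_path, map_location=device)
    model = ckpt.get('ema') or ckpt['model']  # prefer EMA weights when present
    return model.float().eval()  # FP32, eval mode, ready for inference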

View File

@@ -304,7 +304,7 @@ class AutoBackend(nn.Module):
     def _model_type(p='path/to/model.pt'):
         # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
         # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
-        from export import export_formats
+        from ultralytics.yolo.engine.exporter import export_formats
         sf = list(export_formats().Suffix) # export suffixes
         if not is_url(p, check=False):
             check_suffix(p, sf) # checks
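Format detection here is plain suffix matching against the export table. A self-contained sketch of the idea (the suffix list is an illustrative subset and the helper name is hypothetical; the real list comes from export_formats()):

from pathlib import Path

# Illustrative subset, ordered like the types comment above
SUFFIXES = ['.pt', '.torchscript', '.onnx', '.xml', '.engine', '.mlmodel']

def model_type_sketch(p='path/to/model.pt'):
    # One boolean per format; True where the path suffix matches
    return [Path(p).name.endswith(s) for s in SUFFIXES]

print(model_type_sketch('yolov8n.onnx'))  # True only in the .onnx slot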

View File

@@ -172,7 +172,7 @@ class DetectionModel(BaseModel):
         csd = weights['model'].float().state_dict() # checkpoint state_dict as FP32
         csd = intersect_state_dicts(csd, self.state_dict()) # intersect
         self.load_state_dict(csd, strict=False) # load
-        LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from {weights}')
+        LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights')
 
 
 class SegmentationModel(DetectionModel):
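intersect_state_dicts itself is not shown in this hunk; a plausible minimal version keeps only entries whose key and tensor shape match in both state dicts, so strict=False can skip the rest (a sketch, not the source):

def intersect_state_dicts(da, db, exclude=()):
    # Keep da entries that exist in db with identical shapes,
    # dropping any key that contains an excluded substring
    return {k: v for k, v in da.items()
            if k in db and v.shape == db[k].shape and not any(x in k for x in exclude)}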

View File

@@ -164,6 +164,25 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
     return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
 
 
+def check_img_size(imgsz, s=32, floor=0):
+    # Verify image size is a multiple of stride s in each dimension
+    if isinstance(imgsz, int): # integer i.e. img_size=640
+        new_size = max(make_divisible(imgsz, int(s)), floor)
+    else: # list i.e. img_size=[640, 480]
+        imgsz = list(imgsz) # convert to list if tuple
+        new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
+    if new_size != imgsz:
+        LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
+    return new_size
+
+
+def make_divisible(x, divisor):
+    # Returns nearest x divisible by divisor
+    if isinstance(divisor, torch.Tensor):
+        divisor = int(divisor.max()) # to int
+    return math.ceil(x / divisor) * divisor
+
+
 def copy_attr(a, b, include=(), exclude=()):
     # Copy attributes from b to a, options to only include [...] and to exclude [...]
     for k, v in b.__dict__.items():
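A quick worked example of the rounding these two new helpers perform (values chosen for illustration):

import math

def make_divisible(x, divisor):
    return math.ceil(x / divisor) * divisor

print(make_divisible(640, 32))  # 640: already a multiple of 32
print(make_divisible(641, 32))  # 672: rounded up to the next multiple
# check_img_size(641, s=32) would therefore warn and return 672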