@@ -142,7 +142,7 @@ class BaseTrainer:
         # Optimizer
         self.accumulate = max(round(self.args.nbs / self.batch_size), 1)  # accumulate loss before optimizing
         self.args.weight_decay *= self.batch_size * self.accumulate / self.args.nbs  # scale weight_decay
-        self.optimizer = build_optimizer(model=self.model,
+        self.optimizer = self.build_optimizer(model=self.model,
                                          name=self.args.optimizer,
                                          lr=self.args.lr0,
                                          momentum=self.args.momentum,
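
For context on the unchanged lines in this hunk: the trainer steps the optimizer only every `accumulate` mini-batches so the effective batch size approaches the nominal batch size `self.args.nbs`, and weight decay is rescaled by that same effective/nominal ratio. A minimal sketch of the arithmetic with illustrative values (the `nbs=64` nominal size and `0.0005` base decay are assumptions for the example, not taken from this diff):

```python
# Sketch of the accumulation / weight-decay scaling above (illustrative values only).
nbs, batch_size, weight_decay = 64, 16, 0.0005  # assumed nominal batch size, actual batch size, base decay

accumulate = max(round(nbs / batch_size), 1)    # 4 -> optimizer.step() every 4 batches
weight_decay *= batch_size * accumulate / nbs   # 16 * 4 / 64 = 1.0 -> decay unchanged in this case

print(accumulate, weight_decay)  # 4 0.0005
```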
@@ -459,10 +459,8 @@ class BaseTrainer:
         self.best_fitness = best_fitness
         self.start_epoch = start_epoch

     @staticmethod
     def build_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5):
         # TODO: 1. docstring with example? 2. Move this inside Trainer? or utils?
         # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay
         g = [], [], []  # optimizer parameter groups
         bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
         for v in model.modules():
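
The `for` loop above is cut off at the hunk boundary. As a rough, self-contained sketch of the three-way split the comment describes (conv/linear weights with decay, normalization weights without decay, biases without decay), written independently of the actual function body and with illustrative hyperparameters (the helper name `split_param_groups`, the toy model, and the SGD settings are all assumptions for the example):

```python
import torch
import torch.nn as nn


def split_param_groups(model: nn.Module):
    """Return (weights with decay, norm weights without decay, biases without decay)."""
    g0, g1, g2 = [], [], []  # mirrors the three groups named in the comment above
    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # BatchNorm2d, LayerNorm, ...
    for m in model.modules():
        if hasattr(m, 'bias') and isinstance(m.bias, nn.Parameter):
            g2.append(m.bias)       # biases: no decay
        if isinstance(m, bn):
            g1.append(m.weight)     # norm-layer weights: no decay
        elif hasattr(m, 'weight') and isinstance(m.weight, nn.Parameter):
            g0.append(m.weight)     # conv/linear weights: decay
    return g0, g1, g2


# Illustrative usage with plain SGD (hyperparameters are placeholders, not the trainer's defaults):
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
g0, g1, g2 = split_param_groups(model)
optimizer = torch.optim.SGD(g2, lr=0.01, momentum=0.9, nesterov=True)
optimizer.add_param_group({'params': g0, 'weight_decay': 1e-5})  # decay applied only to this group
optimizer.add_param_group({'params': g1, 'weight_decay': 0.0})
```

Splitting the groups this way keeps L2 regularization off normalization scales and biases, which is the usual rationale for the three-group layout noted in the comment.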