DNN/Human Pose

Human Pose estimation: optimizer settings

Small Octopus 2020. 1. 10. 11:25

 

Convolutional Pose Machine

CPM (https://github.com/namedBen/Convolutional-Pose-Machines-Pytorch/blob/master/config/config.yml)

def adjust_learning_rate(optimizer, iters, base_lr, policy_parameter, policy='step', multiple=[1]):
    """Set the learning rate of every param group for the current iteration.

    Implements the Caffe-style lr decay policies used in CPM training.

    Args:
        optimizer: optimizer whose ``param_groups`` receive the new lr.
        iters: current global iteration counter.
        base_lr: initial learning rate before any decay.
        policy_parameter: dict of policy-specific keys, e.g. ``gamma``,
            ``step_size``, ``power``, ``stepvalue``, ``max_iter``, ``stepsize``.
        policy: one of 'fixed', 'step', 'exp', 'inv', 'multistep', 'poly',
            'sigmoid', 'multistep-poly'.
        multiple: per-group lr multipliers, parallel to
            ``optimizer.param_groups``. (Read-only here, so the mutable
            default is harmless.)

    Returns:
        The decayed base lr (before the per-group multiplier is applied).

    Raises:
        ValueError: if ``policy`` is not a recognized policy name.
            (Previously an unknown policy left ``lr`` unbound and the
            function died with a NameError at the assignment loop below.)
    """
    if policy == 'fixed':
        lr = base_lr
    elif policy == 'step':
        # Discrete decay: multiply by gamma once per step_size iterations.
        lr = base_lr * (policy_parameter['gamma'] ** (iters // policy_parameter['step_size']))
    elif policy == 'exp':
        lr = base_lr * (policy_parameter['gamma'] ** iters)
    elif policy == 'inv':
        lr = base_lr * ((1 + policy_parameter['gamma'] * iters) ** (-policy_parameter['power']))
    elif policy == 'multistep':
        # Multiply by gamma once for every milestone already passed
        # (stepvalue is assumed sorted ascending — TODO confirm at call site).
        lr = base_lr
        for stepvalue in policy_parameter['stepvalue']:
            if iters >= stepvalue:
                lr *= policy_parameter['gamma']
            else:
                break
    elif policy == 'poly':
        lr = base_lr * ((1 - iters * 1.0 / policy_parameter['max_iter']) ** policy_parameter['power'])
    elif policy == 'sigmoid':
        lr = base_lr * (1.0 / (1 + math.exp(-policy_parameter['gamma'] * (iters - policy_parameter['stepsize']))))
    elif policy == 'multistep-poly':
        # Step decay combined with a polynomial ramp between milestones;
        # the max() keeps the lr from falling below the next step level.
        lr = base_lr
        stepstart = 0
        stepend = policy_parameter['max_iter']
        for stepvalue in policy_parameter['stepvalue']:
            if iters >= stepvalue:
                lr *= policy_parameter['gamma']
                stepstart = stepvalue
            else:
                stepend = stepvalue
                break
        lr = max(lr * policy_parameter['gamma'], lr * (1 - (iters - stepstart) * 1.0 / (stepend - stepstart)) ** policy_parameter['power'])
    else:
        raise ValueError("unknown lr policy: %r" % (policy,))

    # Apply the decayed lr scaled by each group's fixed multiplier.
    for i, param_group in enumerate(optimizer.param_groups):
        param_group['lr'] = lr * multiple[i]
    return lr



def get_parameters(model, config, isdefault=True):
    """Build optimizer parameter groups with per-layer lr multipliers.

    With isdefault=True, return all parameters sharing config.base_lr and a
    single multiplier of 1. Otherwise bucket parameters of model.module by
    name: names containing 'model1_' or 'model0.' get x1 (weights) / x2
    (biases); all other names get x4 (weights) / x8 (biases).

    Returns:
        (params, multipliers) where params is a list of param-group dicts
        ready for torch.optim.SGD and multipliers is [1., 2., 4., 8.].
    """
    if isdefault:
        return model.parameters(), [1.]

    group_x1, group_x2, group_x4, group_x8 = [], [], [], []
    for name, param in dict(model.module.named_parameters()).items():
        is_bias = name[-4:] == 'bias'
        in_base = ('model1_' in name) or ('model0.' in name)
        if not in_base:
            (group_x8 if is_bias else group_x4).append(param)
        elif is_bias:
            group_x2.append(param)
        else:
            group_x1.append(param)

    params = [
        {'params': group_x1, 'lr': config.base_lr},
        {'params': group_x2, 'lr': config.base_lr * 2.},
        {'params': group_x4, 'lr': config.base_lr * 4.},
        {'params': group_x8, 'lr': config.base_lr * 8.},
    ]
    return params, [1., 2., 4., 8.]

# Build CPM per-layer parameter groups (lr x1/x2/x4/x8) and their multipliers.
params, multiple = get_parameters(model, config, False)

# SGD over the grouped parameters; each group's lr is base_lr times its multiplier.
optimizer = torch.optim.SGD(params, config.base_lr, momentum=config.momentum,
                                weight_decay=config.weight_decay)

# Training-loop excerpt: re-apply the lr schedule at every iteration.
# NOTE(review): snippet is truncated — `iters` must be initialized/incremented
# elsewhere, and the forward/backward body of the loop is not shown here.
while iters < config.max_iter:

        for i, (input, heatmap, centermap) in enumerate(train_loader):

            learning_rate = adjust_learning_rate(optimizer, iters, config.base_lr, policy=config.lr_policy,
                                                 policy_parameter=config.policy_parameter, multiple=multiple)
optimizer: SGD
lr_policy: 'step'
policy_parameter: gamma: 0.333, step_size: 13275
weight_decay: 0.0005
momentum: 0.9
max_iter: 62500
batch_size: 16
base_lr: 0.000004, 0.4e-5
weight init: He (Kaiming) initialization

 

Stacked Hourglass

SH (https://github.com/bearpaw/pytorch-pose/blob/master/example/main.py)

# Stacked Hourglass optimizer (bearpaw/pytorch-pose): plain RMSprop over all
# parameters; `args` supplies lr, momentum and weight_decay (defined elsewhere).
optimizer = torch.optim.RMSprop(model.parameters(),
                                        lr=args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)

optimizer: RMSprop
base_lr: 2.5e-4
momentum: 0
weight_decay: 0
batchsize: 6

Pose ResNet50

optimizer: Adam
base_lr: 1e-3
momentum: 0
weight_decay: 0
batchsize: 32
epoch: 140
schedule: 90, 120

(https://github.com/princeton-vl/pytorch_stacked_hourglass/blob/master/task/pose.py)

optimizer: Adam
base_lr: 1e-3
decay_lr: 2e-4
decay_iters: 1000
batchsize: 6

 

Pytorch-Human-Pose-Estimation

 

# Look up the optimizer class by name (e.g. 'RMSprop') on torch.optim and
# instantiate it; alpha/eps are RMSprop-specific keyword arguments.
# NOTE(review): fragment from a class method — `self.opts` and
# `TrainableParams` are defined elsewhere in the original project.
Optimizer = getattr(torch.optim, self.opts.optimizer_type)(TrainableParams, lr = self.opts.LR, alpha = 0.99, eps = 1e-8)

# Step-decay the lr by dropMag once every dropLR epochs.
for epoch in range(startepoch, endepoch+1):
	adjust_learning_rate(self.optimizer, epoch, self.opts.dropLR, self.opts.dropMag)

def adjust_learning_rate(optimizer, epoch, dropLR, dropMag):
    """Multiply every param group's lr by dropMag when epoch hits a dropLR multiple.

    On non-multiple epochs the factor is 1, so the lrs are left unchanged.
    Prints a notice for each group whose lr is actually reduced.
    """
    factor = dropMag if epoch % dropLR == 0 else 1
    for idx, group in enumerate(optimizer.param_groups):
        if factor != 1:
            print("Reducing learning rate of group %d from %f to %f"%(idx,group['lr'],group['lr']*factor))
        group['lr'] *= factor
model = StackedHourGlass
TargetType: heatmap
outputRes: 64
nChannels: 256
nStack: 2
nModules: 2
nReductions: 4
data_loader_size = 6
mini_batch_count: 1
optimizer_type: RMSprop
LR: 2.5e-4
dropLR: 50
dropMag: 0.1
valInterval: 4
saveInterval: 8
model = PyraNet
TargetType: heatmap
outputRes: 64
nChannels: 256
nStack: 2
nModules: 2
nReductions: 4
baseWidth: 6
cardinality:30
data_loader_size = 6
mini_batch_count: 1
optimizer_type: RMSprop
LR: 2.5e-4
dropLR: 50
dropMag: 0.1
valInterval: 4
saveInterval: 8
model = StackedHourGlass
TargetType: heatmap
outputRes: 64
nChannels: 256
nStack: 2
nModules: 2
nReductions: 4
LRNSize : 1
IterSize : 3
data_loader_size = 6
mini_batch_count: 1
optimizer_type: RMSprop
LR: 2.5e-4
dropLR: 50
dropMag: 0.1
valInterval: 4
saveInterval: 8
model = DeepPose
TargetType = direct
### DeepPose
baseName: resnet34
data_loader_size = 16
mini_batch_count: 1
optimizer_type: RMSprop
LR: 2.5e-4
dropLR: 5
dropMag: 0.7
valInterval: 3
saveInterval: 5
outputRes = 32
model = ChainedPredictions
TargetType = heatmap
outputRes = 32
modelName: resnet34
ohKernel: 1
hhKernel: 1
data_loader_size = 16
mini_batch_count: 1
optimizer_type: RMSprop
LR: 2.5e-4
dropLR: 50
dropMag: 0.1
valInterval: 4
saveInterval: 8
outputRes = 32