commit 9781e7defd

@@ -54,7 +54,7 @@ def load_model(model, model_path, optimizer=None, resume=False,
       if state_dict[k].shape != model_state_dict[k].shape:
         print('Skip loading parameter {}, required shape{}, '\
               'loaded shape{}. {}'.format(
-          k, model_state_dict[k].shape, state_dict[k].shape) + msg)
+          k, model_state_dict[k].shape, state_dict[k].shape, msg))
         state_dict[k] = model_state_dict[k]
     else:
       print('Drop parameter {}.'.format(k) + msg)
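
The first hunk fixes a format-string crash: the message has four '{}' placeholders, but the old call supplied only three positional arguments and appended msg by string concatenation, so str.format raised an IndexError before the concatenation ever ran. A minimal standalone sketch of the failure and the fix; the parameter name, shapes, and msg text below are made-up stand-ins, not values from the repository:

# Sketch (not part of the commit) reproducing the bug the hunk above fixes.
# The format string has four '{}' placeholders, so all four values must be
# passed to .format(); the old call passed only three and concatenated msg
# afterwards, leaving the fourth placeholder unfilled.

k = 'hm.weight'                      # hypothetical parameter name
required = (80, 256, 1, 1)           # hypothetical shape in the model
loaded = (20, 256, 1, 1)             # hypothetical shape in the checkpoint
msg = 'You may have set the wrong --num_classes.'  # stand-in hint text

# Old call: IndexError, because the fourth '{}' has no matching argument.
try:
    print('Skip loading parameter {}, required shape{}, '
          'loaded shape{}. {}'.format(k, required, loaded) + msg)
except IndexError as err:
    print('old call fails:', err)

# Fixed call: msg fills the fourth placeholder.
print('Skip loading parameter {}, required shape{}, '
      'loaded shape{}. {}'.format(k, required, loaded, msg))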

@@ -639,7 +639,7 @@ def dla169up(classes, pretrained_base=None, **kwargs):
   return model
 '''
 
-def get_pose_net(num_layers, heads, add_conv=256, down_ratio=4):
+def get_pose_net(num_layers, heads, head_conv=256, down_ratio=4):
   model = DLASeg('dla{}'.format(num_layers), heads,
                  pretrained=True,
                  down_ratio=down_ratio,
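
The second hunk renames the stray add_conv parameter back to head_conv in the get_pose_net factory. This matters because callers construct the network with head_conv passed as a keyword argument (CenterNet's model factory does so, assuming this is CenterNet's pose_dla_dcn.py), and the old signature rejects that keyword with a TypeError. A minimal sketch under that assumption; the stub bodies and the example heads dict are illustrative only, and DLASeg is not reproduced:

# Sketch (not part of the commit) of why the rename matters when callers
# pass head_conv by keyword. Function bodies are stubs.

def get_pose_net_old(num_layers, heads, add_conv=256, down_ratio=4):
    return (num_layers, heads, add_conv, down_ratio)

def get_pose_net_fixed(num_layers, heads, head_conv=256, down_ratio=4):
    return (num_layers, heads, head_conv, down_ratio)

heads = {'hm': 80, 'wh': 2, 'reg': 2}  # example head spec

# Old signature: TypeError, unexpected keyword argument 'head_conv'.
try:
    get_pose_net_old(num_layers=34, heads=heads, head_conv=256)
except TypeError as err:
    print('old signature fails:', err)

# Fixed signature accepts the keyword that callers actually use.
print(get_pose_net_fixed(num_layers=34, heads=heads, head_conv=256))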