optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
optimizer = optim.Adam([var1, var2], lr=0.0001)
Therefore, you can pass only the parameters that are not frozen to the optimizer.
Concrete steps
Set the requires_grad attribute of the parameters you want to freeze to False.
from torchvision import models

model_ft = models.resnet50(pretrained=True)  # load the pretrained ResNet-50 model
ct = 0
for child in model_ft.children():
    ct += 1
    if ct < 7:
        # freeze the first six child modules
        for param in child.parameters():
            param.requires_grad = False
for param in self.unet.module.backbone.parameters():
    param.requires_grad = False
self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.unet.module.parameters()),
                            self.lr, [self.beta1, self.beta2])
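To check that the freeze worked and that the optimizer only received the trainable parameters, a quick count like the following can help. This is a minimal self-contained sketch with a toy model; the names (model, backbone) are illustrative and not taken from the code above.

import torch.nn as nn
import torch.optim as optim

# Hypothetical toy model: the first layer plays the role of a frozen "backbone".
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))
backbone = model[0]

# Step 1: freeze the backbone.
for param in backbone.parameters():
    param.requires_grad = False

# Step 2: pass only the still-trainable parameters to the optimizer.
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.01)

# Sanity check: the optimizer should hold exactly the trainable parameters.
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
in_optimizer = sum(p.numel() for g in optimizer.param_groups for p in g['params'])
assert trainable == in_optimizer
print('trainable:', trainable, 'frozen:',
      sum(p.numel() for p in model.parameters() if not p.requires_grad))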
After training for a certain number of epochs, unfreeze the parameters and add them to the optimizer.
for param in self.unet.module.backbone.parameters():
    param.requires_grad = True
self.optimizer.add_param_group({'params': self.unet.module.backbone.parameters()})
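Where exactly the unfreeze happens depends on the training loop. A minimal sketch, again with an illustrative toy model and a hypothetical unfreeze_epoch hyperparameter, of unfreezing after a given epoch and registering the parameters with the existing optimizer:

import torch.nn as nn
import torch.optim as optim

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))  # toy model
backbone = model[0]                                                    # part to freeze
for param in backbone.parameters():
    param.requires_grad = False
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.01)

unfreeze_epoch = 5  # hypothetical: epoch at which the backbone is unfrozen
for epoch in range(10):
    if epoch == unfreeze_epoch:
        for param in backbone.parameters():
            param.requires_grad = True
        # Register the newly trainable parameters with the existing optimizer,
        # here with a smaller learning rate for the unfrozen part.
        optimizer.add_param_group({'params': backbone.parameters(), 'lr': 0.001})
    # ... the usual forward/backward/optimizer.step() for this epoch ...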
Inside the checkpoint-loading routine, success is logged once the weights are read; otherwise a FileNotFoundError is raised:

    # ... (weights loaded successfully above) ...
    print('%s is Successfully Loaded from %s' % (self.model_type, weight_path))
    write_txt(self.save_path, '%s is Successfully Loaded from %s' % (self.model_type, weight_path))
else:
    raise FileNotFoundError("Can not find weight file in {}".format(weight_path))
for param in self.unet.module.backbone.parameters():
    param.requires_grad = False
self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.unet.module.parameters()),
                            self.lr, [self.beta1, self.beta2])
if self.resume:
    for param in self.unet.module.backbone.parameters():
        param.requires_grad = True
    self.optimizer.add_param_group({'params': self.unet.module.backbone.parameters()})
    self.load_checkpoint()
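One plausible reason for calling add_param_group before load_checkpoint when resuming is that optimizer.load_state_dict requires the optimizer to have the same parameter-group structure as the optimizer that produced the saved state. A minimal self-contained sketch of that ordering, with an illustrative toy model rather than the original classes:

import torch
import torch.nn as nn
import torch.optim as optim

# Hypothetical toy setup: a frozen "backbone" plus a trainable head.
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))
backbone = model[0]

def build_optimizer():
    # Optimizer over trainable parameters only (backbone frozen).
    for p in backbone.parameters():
        p.requires_grad = False
    return optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.01)

def unfreeze(optimizer):
    # Unfreeze the backbone and register it as an extra parameter group.
    for p in backbone.parameters():
        p.requires_grad = True
    optimizer.add_param_group({'params': backbone.parameters(), 'lr': 0.001})

# First run: the backbone was unfrozen at some point, then a checkpoint was saved.
optimizer = build_optimizer()
unfreeze(optimizer)
torch.save({'model': model.state_dict(), 'optimizer': optimizer.state_dict()}, 'ckpt.pth')

# Resume: the fresh optimizer must be given the same parameter groups BEFORE
# loading the saved state, otherwise load_state_dict raises a ValueError
# because the group structure does not match.
optimizer = build_optimizer()
unfreeze(optimizer)
state = torch.load('ckpt.pth')
model.load_state_dict(state['model'])
optimizer.load_state_dict(state['optimizer'])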