"""
|
|
Layer-wise adaptive rate scaling for SGD in PyTorch!
|
|
Based on https://github.com/noahgolmant/pytorch-lars
|
|
"""
|
|
import torch
|
|
from torch.optim.optimizer import Optimizer
|
|
|
|
|
|
class LARS(Optimizer):
    r"""Implements layer-wise adaptive rate scaling for SGD.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): base learning rate (\gamma_0)
        momentum (float, optional): momentum factor (default: 0.9) ("m")
        weight_decay (float, optional): weight decay (L2 penalty)
            (default: 0.0005) ("\beta")
        eta (float, optional): LARS coefficient (default: 0.001)
        max_epoch (int, optional): maximum training epoch, intended for a
            polynomial LR decay schedule; stored in the parameter group but not
            used by ``step`` (default: 200)
        warmup_epochs (int, optional): number of epochs over which the global
            learning rate is linearly warmed up (default: 1)

    Based on Algorithm 1 of the paper by You, Gitman, and Ginsburg,
    "Large Batch Training of Convolutional Networks":
    https://arxiv.org/abs/1708.03888

    Example:
        >>> optimizer = LARS(model.parameters(), lr=0.1, eta=1e-3)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
    """

    def __init__(self, params, lr=1.0, momentum=0.9, weight_decay=0.0005,
                 eta=0.001, max_epoch=200, warmup_epochs=1):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if eta < 0.0:
            raise ValueError("Invalid LARS coefficient value: {}".format(eta))

        self.epoch = 0
        defaults = dict(
            lr=lr,
            momentum=momentum,
            weight_decay=weight_decay,
            eta=eta,
            max_epoch=max_epoch,
            warmup_epochs=warmup_epochs,
            use_lars=True,
        )
        super().__init__(params, defaults)

    def step(self, epoch=None, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
            epoch (int, optional): current epoch, used for the learning-rate
                warmup schedule. If None, uses ``self.epoch`` and increments it.
        """
        loss = None
        if closure is not None:
            loss = closure()

        if epoch is None:
            epoch = self.epoch
            self.epoch += 1

        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            eta = group["eta"]
            lr = group["lr"]
            warmup_epochs = group["warmup_epochs"]
            use_lars = group["use_lars"]
            group["lars_lrs"] = []

            for p in group["params"]:
                if p.grad is None:
                    continue

                param_state = self.state[p]
                d_p = p.grad.data

                weight_norm = torch.norm(p.data)
                grad_norm = torch.norm(d_p)

                # Global LR follows a linear warmup over the first
                # warmup_epochs epochs, then stays at the base lr
                warmup = min((1 + float(epoch)) / warmup_epochs, 1)
                global_lr = lr * warmup

                if use_lars:
                    # Compute the local learning rate for this layer:
                    # local_lr = eta * ||w|| / (||grad|| + weight_decay * ||w||)
                    local_lr = eta * weight_norm / (grad_norm + weight_decay * weight_norm)
                    actual_lr = (local_lr * global_lr).item()
                    group["lars_lrs"].append(actual_lr)
                else:
                    actual_lr = global_lr
                    group["lars_lrs"].append(global_lr)

                # Update the momentum buffer and apply the update
                if "momentum_buffer" not in param_state:
                    buf = param_state["momentum_buffer"] = torch.zeros_like(p.data)
                else:
                    buf = param_state["momentum_buffer"]

                buf.mul_(momentum).add_(d_p + weight_decay * p.data, alpha=actual_lr)
                p.data.add_(-buf)

        return loss
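

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the model, data, and hyperparameter
# values below are hypothetical placeholders, not part of the original module).
# It shows the intended driving pattern: pass the current epoch to step() so
# the linear warmup stays in sync with the training loop, and read back the
# per-layer adaptive rates recorded in group["lars_lrs"].
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    torch.manual_seed(0)
    model = torch.nn.Sequential(
        torch.nn.Linear(10, 32), torch.nn.ReLU(), torch.nn.Linear(32, 2)
    )
    optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9,
                     weight_decay=5e-4, eta=1e-3, warmup_epochs=2)
    loss_fn = torch.nn.CrossEntropyLoss()

    inputs = torch.randn(16, 10)
    targets = torch.randint(0, 2, (16,))

    for epoch in range(4):
        optimizer.zero_grad()
        loss = loss_fn(model(inputs), targets)
        loss.backward()
        # Passing the epoch explicitly; omitting it would fall back to the
        # optimizer's internal self.epoch counter.
        optimizer.step(epoch=epoch)

        # Per-layer effective learning rates computed during this step.
        lars_lrs = optimizer.param_groups[0]["lars_lrs"]
        print(f"epoch {epoch}: loss={loss.item():.4f}, lars_lrs={lars_lrs}")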