import torch

from .optimizer import Optimizer


class Adagrad(Optimizer):
    """Implements Adagrad algorithm.

    It has been proposed in `Adaptive Subgradient Methods for Online Learning
    and Stochastic Optimization`_. A minimal usage sketch is included at the
    bottom of this file.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        initial_accumulator_value (float, optional): value with which the
            squared-gradient accumulator is initialized (default: 0)

    .. _Adaptive Subgradient Methods for Online Learning and Stochastic
        Optimization: http://jmlr.org/papers/v12/duchi11a.html
    """

    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= lr_decay:
            raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= initial_accumulator_value:
            raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))

        defaults = dict(lr=lr, lr_decay=lr_decay, weight_decay=weight_decay,
                        initial_accumulator_value=initial_accumulator_value)
        super(Adagrad, self).__init__(params, defaults)

        # Initialize per-parameter state: a step counter and the running sum of
        # squared gradients, seeded with initial_accumulator_value.
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = 0
                state['sum'] = torch.full_like(p.data, initial_accumulator_value)

    def share_memory(self):
        # Move the accumulators into shared memory so the optimizer state can
        # be shared across processes (e.g. for Hogwild-style training).
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['sum'].share_memory_()

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad.data
                state = self.state[p]

                state['step'] += 1

                if group['weight_decay'] != 0:
                    if p.grad.data.is_sparse:
                        raise RuntimeError("weight_decay option is not compatible with sparse gradients")
                    grad = grad.add(group['weight_decay'], p.data)

                # Effective learning rate decays with the step count:
                # clr = lr / (1 + (step - 1) * lr_decay)
                clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])

                if grad.is_sparse:
                    # The update is non-linear, so indices must be unique.
                    grad = grad.coalesce()
                    grad_indices = grad._indices()
                    grad_values = grad._values()
                    size = grad.size()

                    def make_sparse(values):
                        constructor = grad.new
                        if grad_indices.dim() == 0 or values.dim() == 0:
                            return constructor().resize_as_(grad)
                        return constructor(grad_indices, values, size)

                    # Accumulate squared gradients and update only the touched entries.
                    state['sum'].add_(make_sparse(grad_values.pow(2)))
                    std = state['sum'].sparse_mask(grad)
                    std_values = std._values().sqrt_().add_(1e-10)
                    p.data.add_(-clr, make_sparse(grad_values / std_values))
                else:
                    # Dense update: sum += grad**2;  p -= clr * grad / (sqrt(sum) + eps)
                    state['sum'].addcmul_(1, grad, grad)
                    std = state['sum'].sqrt().add_(1e-10)
                    p.data.addcdiv_(-clr, grad, std)

        return loss
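

# Minimal usage sketch: drive the optimizer on a toy quadratic objective. This
# assumes execution in a context where the relative import above resolves
# (e.g. within the ``torch.optim`` package); the objective and the parameter
# shape are illustrative placeholders, not anything this module prescribes.
if __name__ == "__main__":
    w = torch.randn(3, requires_grad=True)
    optimizer = Adagrad([w], lr=0.1, lr_decay=1e-3)

    # Plain step(): compute the loss and gradients outside the optimizer.
    for _ in range(5):
        optimizer.zero_grad()
        loss = (w ** 2).sum()   # convex objective with minimum at w = 0
        loss.backward()
        optimizer.step()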
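
    # step(closure): let the optimizer re-evaluate the objective itself. The
    # closure should zero the gradients, recompute the loss, call backward(),
    # and return the loss so step() can hand it back to the caller.
    def closure():
        optimizer.zero_grad()
        loss = (w ** 2).sum()
        loss.backward()
        return loss

    for _ in range(5):
        optimizer.step(closure)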