import math
import torch

from .optimizer import Optimizer

7 r"""Implements lazy version of Adam algorithm suitable for sparse tensors. 9 In this variant, only moments that show up in the gradient get updated, and 10 only those portions of the gradient get applied to the parameters. 13 params (iterable): iterable of parameters to optimize or dicts defining 15 lr (float, optional): learning rate (default: 1e-3) 16 betas (Tuple[float, float], optional): coefficients used for computing 17 running averages of gradient and its square (default: (0.9, 0.999)) 18 eps (float, optional): term added to the denominator to improve 19 numerical stability (default: 1e-8) 21 .. _Adam\: A Method for Stochastic Optimization: 22 https://arxiv.org/abs/1412.6980 25 def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8):
        if not 0.0 < lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 < eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps)
        super(SparseAdam, self).__init__(params, defaults)
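
    # Explanatory note (added, not in the original source): for each parameter
    # row i that appears in the sparse gradient g_t, step() applies the usual
    # Adam recurrences
    #     m_t[i] = beta1 * m_{t-1}[i] + (1 - beta1) * g_t[i]
    #     v_t[i] = beta2 * v_{t-1}[i] + (1 - beta2) * g_t[i]**2
    #     p[i]  -= lr * sqrt(1 - beta2**t) / (1 - beta1**t) * m_t[i] / (sqrt(v_t[i]) + eps)
    # where t is the global step count. Rows absent from the gradient keep their
    # old moments and values untouched, which is what makes this variant "lazy".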

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if not grad.is_sparse:
                    raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                state['step'] += 1

                grad = grad.coalesce()  # the update is non-linear so indices must be unique
                grad_indices = grad._indices()
                grad_values = grad._values()
                size = grad.size()
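
                # Explanatory note (added): make_sparse, defined below, rebuilds a
                # sparse tensor that reuses grad's index pattern, so per-row values
                # computed densely for just the active rows can be scattered back
                # into the moment buffers and the parameter via a sparse add_().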
                def make_sparse(values):
                    constructor = grad.new
                    if grad_indices.dim() == 0 or values.dim() == 0:
                        return constructor().resize_as_(grad)
                    return constructor(grad_indices, values, size)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
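
                # Explanatory note (added): exp_avg and exp_avg_sq are ordinary dense
                # tensors (created with torch.zeros_like above). sparse_mask(grad)
                # extracts only the rows indexed by grad, so the decay below touches
                # just those rows; rows missing from this gradient are left as-is.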

                # Decay the first and second moment running average coefficient
                #      old <- b * old + (1 - b) * new
                # <==> old += (1 - b) * (new - old)
                old_exp_avg_values = exp_avg.sparse_mask(grad)._values()
                exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
                exp_avg.add_(make_sparse(exp_avg_update_values))
                old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
                exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
                exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))

                # Dense addition again is intended, avoiding another sparse_mask
                numer = exp_avg_update_values.add_(old_exp_avg_values)
                exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
                denom = exp_avg_sq_update_values.sqrt_().add_(group['eps'])
                del exp_avg_update_values, exp_avg_sq_update_values

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
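
                # Explanatory note (added): folding both bias corrections into a
                # single scalar step size gives the update
                #     p -= lr * sqrt(1 - beta2**t) / (1 - beta1**t) * m_t / (sqrt(v_t) + eps)
                # applied only at the rows present in this gradient.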

                p.data.add_(make_sparse(-step_size * numer.div_(denom)))

        return loss
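

# Usage sketch (illustrative, not part of the original module). SparseAdam is
# meant for parameters that receive sparse gradients, most commonly the weight
# of nn.Embedding(..., sparse=True); the sizes and indices below are arbitrary
# assumptions for demonstration:
#
#     import torch
#     import torch.nn as nn
#     from torch.optim import SparseAdam
#
#     emb = nn.Embedding(1000, 16, sparse=True)
#     opt = SparseAdam(emb.parameters(), lr=1e-3)
#     idx = torch.tensor([[1, 2, 4]])
#     loss = emb(idx).sum()
#     opt.zero_grad()
#     loss.backward()   # emb.weight.grad is a sparse COO tensor
#     opt.step()        # only rows 1, 2 and 4 of the moments and weights change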