Caffe2 - Python API
A deep learning, cross-platform ML framework
sparse_adam.py
import math
import torch
from .optimizer import Optimizer


class SparseAdam(Optimizer):
    r"""Implements lazy version of Adam algorithm suitable for sparse tensors.

    In this variant, only moments that show up in the gradient get updated, and
    only those portions of the gradient get applied to the parameters.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8):
        if not 0.0 < lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 < eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps)
        super(SparseAdam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if not grad.is_sparse:
                    raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                state['step'] += 1

                grad = grad.coalesce()  # the update is non-linear so indices must be unique
                grad_indices = grad._indices()
                grad_values = grad._values()
                size = grad.size()

                def make_sparse(values):
                    constructor = grad.new
                    if grad_indices.dim() == 0 or values.dim() == 0:
                        return constructor().resize_as_(grad)
                    return constructor(grad_indices, values, size)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Decay the first and second moment running average coefficient
                #      old <- b * old + (1 - b) * new
                # <==> old += (1 - b) * (new - old)
                old_exp_avg_values = exp_avg.sparse_mask(grad)._values()
                exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
                exp_avg.add_(make_sparse(exp_avg_update_values))
                old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
                exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
                exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))

                # Dense addition again is intended, avoiding another sparse_mask
                numer = exp_avg_update_values.add_(old_exp_avg_values)
                exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
                denom = exp_avg_sq_update_values.sqrt_().add_(group['eps'])
                del exp_avg_update_values, exp_avg_sq_update_values

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

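                # step_size folds both bias corrections into the learning rate,
                # so the lazy update below is equivalent to the standard Adam step
                #     p -= lr * m_hat / (sqrt(v_hat) + eps')
                # with m_hat = exp_avg / (1 - beta1**t) and
                #      v_hat = exp_avg_sq / (1 - beta2**t),
                # applied only to the entries present in this gradient; the folding
                # only changes where eps enters (eps' = eps / sqrt(1 - beta2**t)).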
                p.data.add_(make_sparse(-step_size * numer.div_(denom)))

        return loss
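A minimal usage sketch (not part of sparse_adam.py; the layer, sizes, and loss below are placeholders): SparseAdam expects every parameter it manages to receive sparse gradients, which is typically arranged with torch.nn.Embedding(..., sparse=True).

import torch
import torch.nn as nn

emb = nn.Embedding(1000, 16, sparse=True)         # sparse=True yields sparse gradients
opt = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)

idx = torch.randint(0, 1000, (32,))               # toy batch of lookup indices
loss = emb(idx).pow(2).sum()                      # placeholder scalar loss
opt.zero_grad()
loss.backward()                                   # emb.weight.grad is a sparse tensor
opt.step()                                        # only the rows indexed by idx are updated

Rows of emb.weight that do not appear in idx keep both their values and their exp_avg / exp_avg_sq state untouched, which is the lazy behaviour described in the class docstring.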