Caffe2 - C++ API
A deep learning, cross-platform ML framework
adagrad_op.cc
#include "adagrad_op.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(Adagrad, AdagradOp<float, CPUContext>);
OPERATOR_SCHEMA(Adagrad)
    .NumInputs(4)
    .NumOutputs(2, 4)
    .AllowInplace({{0, 0}, {1, 1}})
    .SetDoc(R"DOC(

Computes the AdaGrad update for an input gradient and accumulated
history. Concretely, given inputs (param, moment, grad, learning_rate),
computes

    new_moment = moment + square(grad)
    effective_lr = learning_rate / (sqrt(new_moment) + epsilon)
    update = learning_rate * grad / (sqrt(new_moment) + epsilon)
    new_param = param + update

and returns (new_param, new_moment).

Optionally returns effective_lr and update as well.

)DOC")
    .Input(0, "param", "Parameters to be updated")
    .Input(1, "moment", "Moment history")
    .Input(2, "grad", "Gradient computed")
    .Input(3, "lr", "Learning rate")
    .Output(0, "output_param", "Updated parameters")
    .Output(1, "output_moment", "Updated moment")
    .Output(2, "output_effective_lr", "(optional) Effective learning rate")
    .Output(3, "output_update", "(optional) Actual update that is applied.")
    .Arg("epsilon", "Default 1e-5")
    .Arg(
        "decay",
        "Default 1. If it is in (0, 1), the gradient square sum "
        "is decayed by this factor.");

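// Illustrative sketch, not part of the original file: a minimal dense
// AdaGrad step matching the doc above, assuming contiguous float buffers,
// decay == 1, and <cmath> available. The real kernel lives in adagrad_op.h;
// the name adagrad_step_sketch is hypothetical.
static void adagrad_step_sketch(
    int n,
    const float* grad,
    float* param,
    float* moment,
    float lr,
    float eps) {
  for (int i = 0; i < n; ++i) {
    moment[i] += grad[i] * grad[i]; // new_moment = moment + square(grad)
    param[i] += lr * grad[i] / (std::sqrt(moment[i]) + eps); // apply update
  }
}
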
static OpSchema::Cost CostInferenceForSparseAdagrad(
    const OperatorDef& /* unused */,
    const vector<TensorShape>& inputs) {
  CAFFE_ENFORCE_GE(
      inputs.size(), 4, "SparseAdagrad requires at least 4 inputs");

  const TensorShape param = inputs[0];
  const TensorShape moment = inputs[1];
  const TensorShape indices = inputs[2];
  const TensorShape grad = inputs[3];

  uint64_t n = nElemFromDim(indices);
  uint64_t grad_size = nElemFromDim(grad);

  OpSchema::Cost c;
  // See adagrad_op.h (note that decay is 1 for SparseAdagrad).
  // 2 multiplications, 3 additions, 1 division, and 1 sqrt
  // (optimistically count sqrt as one flop).
  c.flops = grad_size * 7;
  // NB: data_type() returns an enum, so sizeof() here is the size of the
  // enum type (4 bytes), which happens to match the float element size.
  c.bytes_written =
      grad_size * (sizeof(param.data_type()) + sizeof(moment.data_type()));
  c.bytes_read = c.bytes_written + grad_size * sizeof(grad.data_type()) +
      n * sizeof(indices.data_type());

  return c;
}
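// Worked example of the cost model above, with hypothetical shapes: for
// indices of length n = 1000 and grad of shape [1000, 64], grad_size =
// 64000, so flops = 7 * 64000 = 448000; with 4-byte element types,
// bytes_written = 64000 * (4 + 4) = 512000 and
// bytes_read = 512000 + 64000 * 4 + 1000 * 4 = 772000.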

REGISTER_CPU_OPERATOR(SparseAdagrad, SparseAdagradOp<float, CPUContext>);
OPERATOR_SCHEMA(SparseAdagrad)
    .NumInputs(5)
    .NumOutputs(2)
    .EnforceOneToOneInplace()
    .SetDoc(R"DOC(

Given inputs (param, moment, indices, grad, lr), runs the dense AdaGrad
update on (param, grad, moment[indices], lr), and returns (new_param,
new_moment) as in the dense case.

)DOC")
    .Input(0, "param", "Parameters to be updated")
    .Input(1, "moment", "Moment history")
    .Input(2, "indices", "Sparse indices")
    .Input(3, "grad", "Gradient computed")
    .Input(4, "lr", "Learning rate")
    .Output(0, "output_param", "Updated parameters")
    .Output(1, "output_moment_1", "Updated moment")
    .Arg("epsilon", "Default 1e-5")
    .CostInferenceFunction(
        OpSchema::CostInferenceFunctionType(CostInferenceForSparseAdagrad));

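// Illustrative sketch (hypothetical, not the registered kernel): the sparse
// op applies the dense step above only to the rows selected by `indices`,
// assuming row-major param/moment with `block` elements per row and grad of
// shape [num_indices, block].
static void sparse_adagrad_sketch(
    int num_indices,
    int block,
    const int64_t* indices,
    const float* grad,
    float* param,
    float* moment,
    float lr,
    float eps) {
  for (int i = 0; i < num_indices; ++i) {
    const int64_t row = indices[i];
    adagrad_step_sketch(
        block,
        grad + i * block,
        param + row * block,
        moment + row * block,
        lr,
        eps);
  }
}
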
REGISTER_CPU_OPERATOR(
    RowWiseSparseAdagrad,
    RowWiseSparseAdagradOp<float, CPUContext>);
OPERATOR_SCHEMA(RowWiseSparseAdagrad)
    .NumInputs(5)
    .NumOutputs(2)
    .EnforceOneToOneInplace()
    .SetDoc(R"DOC(

Given inputs (param, moment, indices, grad, lr), runs a modified sparse
AdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,
new_moment), where moment is a 1D tensor with length equal to the number of
rows in param: shape(moment) == shape(param)[0]. Each element of moment is
applied to an entire row of param, and the new moment is calculated by adding
the average squared gradient across each row. Note that indices must also be
a 1D tensor indexing into the rows of param.

)DOC")
    .Input(0, "param", "Parameters to be updated")
    .Input(1, "moment", "Moment history")
    .Input(2, "indices", "Sparse indices")
    .Input(3, "grad", "Gradient computed")
    .Input(4, "lr", "Learning rate")
    .Output(0, "output_param", "Updated parameters")
    .Output(1, "output_moment_1", "Updated moment")
    .Arg("epsilon", "Default 1e-5");

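// Illustrative sketch (hypothetical, not the registered kernel): one moment
// value is kept per row of param. Each step adds the row's mean squared
// gradient to that value, then scales the whole row's update by a single
// effective learning rate, matching the doc above.
static void rowwise_sparse_adagrad_sketch(
    int num_indices,
    int block,
    const int64_t* indices,
    const float* grad,
    float* param,
    float* moment, // 1D: one entry per row of param
    float lr,
    float eps) {
  for (int i = 0; i < num_indices; ++i) {
    const int64_t row = indices[i];
    float sq_sum = 0.0f;
    for (int j = 0; j < block; ++j) {
      const float g = grad[i * block + j];
      sq_sum += g * g;
    }
    moment[row] += sq_sum / block; // mean squared gradient of the row
    const float step = lr / (std::sqrt(moment[row]) + eps);
    for (int j = 0; j < block; ++j) {
      param[row * block + j] += step * grad[i * block + j];
    }
  }
}
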
SHOULD_NOT_DO_GRADIENT(Adagrad);
SHOULD_NOT_DO_GRADIENT(SparseAdagrad);
SHOULD_NOT_DO_GRADIENT(RowWiseSparseAdagrad);
} // namespace caffe2