Caffe2 - C++ API
A deep learning, cross-platform ML framework
learning_rate_adaption_op.cc
#include "caffe2/sgd/learning_rate_adaption_op.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(
    LearningRateAdaption,
    LearningRateAdaptionOp<float, CPUContext>);

OPERATOR_SCHEMA(LearningRateAdaption)
    .NumInputs(3)
    .NumOutputs(1)
    .AllowInplace({{0, 0}})
13  .SetDoc(R"DOC(
14  Learning Rate Adaption is an operation that perform one iteration of
15  gradient descent based on learning rate:
16  lr(k) = lr(k-1) - lr_alpha * df(k-1)/dlr,
17  where df(k-1)/dlr is the gradient of objective function f on lr, and
18  lr_alpha is a learning rate hyperparameter. It can be prove that
19  df(k-1)/dlr equals INNERPRODUCT(grad(k-1), -grad(k-2)), where grad(k-1) is
20  the grad of f(k-1) on parameters. When the argument
21  "normalized_lr_adaption" is false, we simply perform the
22  following update:
23  lr(k) = lr(k-1) - lr_alpha * INNERPRODUCT(grad(k-1), grad(k-2)).
24  If we set "normalized_lr_adaption" to be true, we do not directly apply
25  INNERPRODUCT(grad(k-1), -grad(k-2)) as the grad. Instead, we perform the
26  following update:
27  lr(k) = lr(k-1) + lr_alpha * cosineSimilarity(grad(k-1), grad(k-2)).
28 )DOC")
    .Arg(
        "lr_alpha",
        "the learning rate for performing gradient descent on learning rate lr")
    .Arg(
        "normalized_lr_adaption",
        "whether to apply normalized lr adaption or not")
    .Input(0, "lr", "Learning rate")
    .Input(1, "grad", "Gradient computed")
    .Input(2, "effgrad", "The effective grad")
    .Output(0, "output_lr", "Updated learning rate");

NO_GRADIENT(LearningRateAdaption);
} // namespace caffe2
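The schema above fully determines the scalar update, so a small standalone sketch can make the two rules concrete. Below is a minimal, self-contained C++ sketch written for this page; AdaptLearningRate is a hypothetical helper name introduced for illustration, not the operator's actual kernel (the real implementation lives in learning_rate_adaption_op.h), and the epsilon guard on the norms is an assumption added to avoid division by zero.

#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical sketch of the update rules from the operator doc.
// grad plays the role of grad(k-1); effgrad plays the role of the
// "effective grad" input, i.e. grad(k-2) in the doc's notation.
float AdaptLearningRate(
    const std::vector<float>& grad,
    const std::vector<float>& effgrad,
    float lr,
    float lr_alpha,
    bool normalized_lr_adaption) {
  float dot = 0.0f, grad_norm_sq = 0.0f, effgrad_norm_sq = 0.0f;
  for (std::size_t i = 0; i < grad.size(); ++i) {
    dot += grad[i] * effgrad[i];
    grad_norm_sq += grad[i] * grad[i];
    effgrad_norm_sq += effgrad[i] * effgrad[i];
  }
  if (normalized_lr_adaption) {
    // lr(k) = lr(k-1) + lr_alpha * cosineSimilarity(grad(k-1), grad(k-2));
    // kEps is an assumed guard against zero-length gradients.
    const float kEps = 1e-12f;
    const float cos_sim =
        dot / (std::fmax(std::sqrt(grad_norm_sq), kEps) *
               std::fmax(std::sqrt(effgrad_norm_sq), kEps));
    return lr + lr_alpha * cos_sim;
  }
  // lr(k) = lr(k-1) - lr_alpha * INNERPRODUCT(grad(k-1), grad(k-2))
  return lr - lr_alpha * dot;
}

For example, with lr = 0.1, lr_alpha = 0.01, grad = {1, 0}, and effgrad = {1, 0}, the plain update gives 0.1 - 0.01 * 1 = 0.09, while the normalized update gives 0.1 + 0.01 * 1 = 0.11, since the cosine similarity of parallel gradients is 1.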