Caffe2 - C++ API
A deep learning, cross-platform ML framework
smooth_l1_loss_op.cc
#include "smooth_l1_loss_op.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(SmoothL1Loss, SmoothL1LossOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(
    SmoothL1LossGradient,
    SmoothL1LossGradientOp<float, CPUContext>);

OPERATOR_SCHEMA(SmoothL1Loss)
    .NumInputs(4)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Smooth L1 Loss is a minor variation of Huber loss in which the point of
transition between L2 loss and L1 loss is adjustable by a hyper-parameter beta:

  SmoothL1(x) = 0.5 * x^2 / beta      if |x| < beta
                |x| - 0.5 * beta      otherwise.

SmoothL1 is used in Fast R-CNN and descendants as the loss function for bounding
box regression.

The loss computed by this op has a flexible form:

  scale / N * sum_i alpha_out[i] * SmoothL1(alpha_in[i] * (y_hat[i] - y[i])).

The weights alpha_in and alpha_out are called the "inside" and "outside"
weights, respectively. The inside weights are typically set to either 0 or 1 to
implement ignoring (when 0) certain samples. The outside weights can be used
to implement a per-sample loss weight. The overall loss is scaled by scale / N,
where N is the number of batch elements in the input predictions.
)DOC")
    .Arg(
        "beta",
        "(float) default 1.0; L2 to L1 transition point.")
    .Arg(
        "scale",
        "(float) default 1.0; multiply the loss by this scale factor.")
    .Input(
        0,
        "Y_hat",
        "Tensor of predictions (at least 1D).")
    .Input(
        1,
        "Y",
        "Tensor of labels with the same shape as Y_hat.")
    .Input(
        2,
        "alpha_in",
        "Tensor of inside weights with the same shape as Y.")
    .Input(
        3,
        "alpha_out",
        "Tensor of outside weights with the same shape as Y.")
    .Output(
        0,
        "loss",
        "Scalar loss.");
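// What follows is a minimal reference sketch of the loss documented above,
// written directly from the schema formula rather than taken from the actual
// SmoothL1LossOp kernel; the function name and flat-array interface are
// illustrative only.
inline float SmoothL1LossReference(
    int size, // total number of elements in Y_hat
    int N, // number of batch elements (first dimension of Y_hat)
    const float* y_hat,
    const float* y,
    const float* alpha_in,
    const float* alpha_out,
    float beta = 1.0f,
    float scale = 1.0f) {
  float loss = 0.0f;
  for (int i = 0; i < size; ++i) {
    // Inside weights scale the residual before the SmoothL1 nonlinearity.
    const float x = alpha_in[i] * (y_hat[i] - y[i]);
    const float ax = x < 0 ? -x : x;
    const float pointwise =
        ax < beta ? 0.5f * x * x / beta : ax - 0.5f * beta;
    // Outside weights scale the pointwise loss.
    loss += alpha_out[i] * pointwise;
  }
  // The reduction is scaled by scale / N, as stated in the doc above.
  return scale / N * loss;
}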

OPERATOR_SCHEMA(SmoothL1LossGradient)
    .NumInputs(5)
    .NumOutputs(1)
    .Input(
        0,
        "Y_hat",
        "See SmoothL1Loss.")
    .Input(
        1,
        "Y",
        "See SmoothL1Loss.")
    .Input(
        2,
        "alpha_in",
        "See SmoothL1Loss.")
    .Input(
        3,
        "alpha_out",
        "See SmoothL1Loss.")
    .Input(
        4,
        "d_loss",
        "Gradient of forward output 0 (loss).")
    .Output(
        0,
        "d_Y_hat",
        "Gradient of forward input 0 (Y_hat).");
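// A matching sketch of the backward pass, again derived from the documented
// formula rather than copied from the SmoothL1LossGradientOp kernel. The
// derivative of SmoothL1 is x / beta below the transition point and sign(x)
// above it; d_loss is the incoming gradient of the scalar loss output.
inline void SmoothL1LossGradientReference(
    int size,
    int N,
    const float* y_hat,
    const float* y,
    const float* alpha_in,
    const float* alpha_out,
    float d_loss,
    float* d_y_hat,
    float beta = 1.0f,
    float scale = 1.0f) {
  for (int i = 0; i < size; ++i) {
    const float x = alpha_in[i] * (y_hat[i] - y[i]);
    const float ax = x < 0 ? -x : x;
    const float d_smooth_l1 = ax < beta ? x / beta : (x > 0 ? 1.0f : -1.0f);
    // Chain rule: both the outside weight and the inner alpha_in factor
    // appear in the gradient with respect to Y_hat.
    d_y_hat[i] = d_loss * scale / N * alpha_out[i] * alpha_in[i] * d_smooth_l1;
  }
}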

class GetSmoothL1LossGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "SmoothL1LossGradient",
        "",
        vector<string>{I(0), I(1), I(2), I(3), GO(0)},
        vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(SmoothL1Loss, GetSmoothL1LossGradient);

} // namespace caffe2
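As a quick sanity check on the transition behavior, the hypothetical standalone program below (not part of the Caffe2 sources; the lambda names are illustrative) compares the analytic SmoothL1 derivative used above against a central finite-difference estimate on both sides of the beta transition point.

#include <cmath>
#include <cstdio>

int main() {
  const float beta = 1.0f;
  // Pointwise SmoothL1 and its derivative, matching the schema formula.
  auto smooth_l1 = [&](float x) {
    return std::fabs(x) < beta ? 0.5f * x * x / beta
                               : std::fabs(x) - 0.5f * beta;
  };
  auto smooth_l1_grad = [&](float x) {
    return std::fabs(x) < beta ? x / beta : (x > 0 ? 1.0f : -1.0f);
  };
  // Central finite differences inside, at, and beyond the transition region.
  const float eps = 1e-3f;
  const float points[] = {-2.5f, -1.0f, -0.3f, 0.0f, 0.3f, 1.0f, 2.5f};
  for (float x : points) {
    const float numeric = (smooth_l1(x + eps) - smooth_l1(x - eps)) / (2 * eps);
    std::printf("x=% .2f analytic=% .4f numeric=% .4f\n",
                x, smooth_l1_grad(x), numeric);
  }
  return 0;
}

Because SmoothL1 is continuously differentiable at |x| = beta (both branches evaluate to 0.5 * beta with slope sign(x) there), the analytic and numeric values agree at every test point.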