Caffe2 - C++ API
A deep learning, cross-platform ML framework
sigmoid_focal_loss_op.cc
#include "sigmoid_focal_loss_op.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(SigmoidFocalLoss, SigmoidFocalLossOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(
    SigmoidFocalLossGradient,
    SigmoidFocalLossGradientOp<float, CPUContext>);

OPERATOR_SCHEMA(SigmoidFocalLoss)
    .NumInputs(3)
    .NumOutputs(1)
    .SetDoc(R"DOC(
The binary form of Focal Loss designed for use in RetinaNet-like models.
The input is assumed to be unnormalized scores (sometimes called 'logits')
arranged in a 4D tensor with shape (N, C, H, W), where N is the number of
elements in the batch, H and W are the height and width, and C = num_anchors *
num_classes defines num_anchors 'groups' of logits, each of length
num_classes. For the binary form of Focal Loss, num_classes does not include
the background category. (So, for COCO, num_classes = 80, not 81.)

The binary form of focal loss is:

  FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t),

where p = sigmoid(x) and p_t = p or 1 - p depending on whether the label is
1 or 0, respectively.

See: https://arxiv.org/abs/1708.02002 for details.
)DOC")
    .Arg(
        "scale",
        "(float) default 1.0; multiply the loss by this scale factor.")
    .Arg(
        "alpha",
        "(float) default 0.25; Focal Loss's alpha hyper-parameter.")
    .Arg(
        "gamma",
        "(float) default 1.0; Focal Loss's gamma hyper-parameter.")
    .Arg(
        "num_classes",
        "(int) default 80; number of classes (excluding background).")
    .Input(
        0,
        "logits",
        "4D tensor of sigmoid inputs (called 'scores' or 'logits') with shape "
        "(N, C, H, W), where C = num_anchors * num_classes.")
    .Input(
        1,
        "labels",
        "4D tensor of labels with shape (N, num_anchors, H, W). Each entry is "
        "a class label in [0, num_classes - 1] (inclusive). The label "
        "identifies the one class that should have a sigmoid target of 1.")
    .Input(
        2,
        "normalizer",
        "Scalar; the loss is normalized by 1 / max(1, normalizer).")
    .Output(
        0,
        "loss",
        "Scalar loss.");
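
// Worked example of the doc-string formula (illustrative only), assuming
// alpha = 0.25 and gamma = 2, the settings reported in the Focal Loss paper
// (this schema's defaults are alpha = 0.25, gamma = 1.0):
//   logit x = 2.0, label = 1
//   p   = sigmoid(2.0)                         ~= 0.8808
//   p_t = p                                    ~= 0.8808
//   FL  = -0.25 * (1 - 0.8808)^2 * ln(0.8808)  ~= 4.5e-4
// versus plain cross-entropy -ln(p_t) ~= 0.127, so this already
// well-classified example contributes almost nothing to the loss.
// Shape example: with num_anchors = 9 and num_classes = 80 (COCO), logits has
// shape (N, 720, H, W) and labels has shape (N, 9, H, W).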

OPERATOR_SCHEMA(SigmoidFocalLossGradient)
    .NumInputs(4)
    .NumOutputs(1)
    .Input(
        0,
        "logits",
        "See SigmoidFocalLoss.")
    .Input(
        1,
        "labels",
        "See SigmoidFocalLoss.")
    .Input(
        2,
        "normalizer",
        "See SigmoidFocalLoss.")
    .Input(
        3,
        "d_loss",
        "Gradient of forward output 0 (loss)")
    .Output(
        0,
        "d_logits",
        "Gradient of forward input 0 (logits)");
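
// The gradient maker below wires the forward inputs (logits, labels,
// normalizer) together with the incoming loss gradient GO(0) into the
// SigmoidFocalLossGradient operator registered above, and exposes that
// operator's single output as GI(0), the gradient with respect to logits.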
class GetSigmoidFocalLossGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;

  vector<OperatorDef> GetGradientDefs() override {
    vector<string> blob_names{
        {I(0), I(1), I(2), GO(0)},
    };

    return SingleGradientDef(
        "SigmoidFocalLossGradient", "", blob_names, vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(SigmoidFocalLoss, GetSigmoidFocalLossGradient);

} // namespace caffe2
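
For reference, a minimal standalone sketch of the per-element math described in the doc string above, following the simplified formula given there. sigmoid_focal_loss_elem is a hypothetical helper, not part of the Caffe2 API; the operator itself additionally applies the scale argument and the 1 / max(1, normalizer) normalization when producing the scalar loss.

#include <algorithm>
#include <cmath>

// Hypothetical helper (not part of Caffe2): per-element binary focal loss
// following FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t).
float sigmoid_focal_loss_elem(
    float logit,
    int label, // 1 if this class is the target class for the anchor, else 0
    float alpha = 0.25f,
    float gamma = 1.0f) {
  const float p = 1.0f / (1.0f + std::exp(-logit)); // p = sigmoid(x)
  const float p_t = (label == 1) ? p : 1.0f - p;    // probability of the true outcome
  // Clamp p_t away from zero to avoid log(0) for extremely confident misses.
  return -alpha * std::pow(1.0f - p_t, gamma) * std::log(std::max(p_t, 1e-12f));
}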