Caffe2 - C++ API
A deep learning, cross-platform ML framework
softmax_focal_loss_op.cc
#include "softmax_focal_loss_op.h"
#include "caffe2/operators/softmax_shared.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(SoftmaxFocalLoss, SoftmaxFocalLossOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(
    SoftmaxFocalLossGradient,
    SoftmaxFocalLossGradientOp<float, CPUContext>);

OPERATOR_SCHEMA(SoftmaxFocalLoss)
    .NumInputs(3)
    .NumOutputs(2)
    .SetDoc(R"DOC(
A multiclass form of Focal Loss designed for use in RetinaNet-like models.
The input is assumed to be unnormalized scores (sometimes called 'logits')
arranged in a 4D tensor with shape (N, C, H, W), where N is the number of
elements in the batch, H and W are the height and width, and C = num_anchors *
num_classes. The softmax is applied num_anchors times along the C axis.

The softmax version of focal loss is:

  FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t),

where p_i = exp(s_i) / sum_j exp(s_j) is the softmax probability of class i,
t is the target (ground truth) class, and s_j is the unnormalized score for
class j. A worked numeric sketch of this formula follows the schema below.

See: https://arxiv.org/abs/1708.02002 for details.
)DOC")
    .Arg(
        "scale",
        "(float) default 1.0; multiply the loss by this scale factor.")
    .Arg(
        "alpha",
        "(float) default 0.25; Focal Loss's alpha hyper-parameter.")
    .Arg(
        "gamma",
        "(float) default 1.0; Focal Loss's gamma hyper-parameter.")
    .Arg(
        "num_classes",
        "(int) default 81; number of classes in each softmax group.")
    .Input(
        0,
        "scores",
        "4D tensor of softmax inputs (called 'scores' or 'logits') with shape "
        "(N, C, H, W), where C = num_anchors * num_classes defines num_anchors "
        "groups of contiguous num_classes softmax inputs.")
    .Input(
        1,
        "labels",
        "4D tensor of labels with shape (N, num_anchors, H, W). Each entry is "
        "a class label in [0, num_classes - 1] (inclusive).")
    .Input(
        2,
        "normalizer",
        "Scalar; the loss is normalized by 1 / max(1, normalizer).")
    .Output(
        0,
        "loss",
        "Scalar loss.")
    .Output(
        1,
        "probabilities",
        "4D tensor of softmax probabilities with shape (N, C, H, W), where "
        "C = num_anchors * num_classes, and softmax was applied to each of the "
        "num_anchors groups; within a group the num_classes values sum to 1.");
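
// ---------------------------------------------------------------------------
// Illustrative, standalone sketch (not part of this file's implementation):
// it evaluates the focal loss formula documented in the schema above for one
// softmax group at one spatial location. The function name, its arguments,
// and the sample values are hypothetical; the real operator additionally
// multiplies the summed loss by `scale` and divides it by max(1, normalizer).

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t), with
// p_i = exp(s_i) / sum_j exp(s_j) computed over one group of num_classes
// unnormalized scores.
float SoftmaxFocalLossOneGroup(
    const std::vector<float>& scores,
    int target,
    float alpha,
    float gamma) {
  // Subtract the max score before exponentiating for numerical stability;
  // this leaves the softmax probabilities unchanged.
  const float max_s = *std::max_element(scores.begin(), scores.end());
  float denom = 0.f;
  for (const float s : scores) {
    denom += std::exp(s - max_s);
  }
  const float p_t = std::exp(scores[target] - max_s) / denom;
  return -alpha * std::pow(1.f - p_t, gamma) * std::log(p_t);
}

int main() {
  // Three classes in the group; the ground-truth class (index 2) has the
  // largest score, so the (1 - p_t)**gamma factor damps the loss.
  const std::vector<float> scores = {0.5f, 1.0f, 2.0f};
  const float loss = SoftmaxFocalLossOneGroup(scores, 2, 0.25f, 1.0f);
  std::printf("focal loss = %f\n", loss);
  return 0;
}
// ---------------------------------------------------------------------------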

OPERATOR_SCHEMA(SoftmaxFocalLossGradient)
    .NumInputs(5)
    .NumOutputs(1)
    .Input(
        0,
        "scores",
        "See SoftmaxFocalLoss.")
    .Input(
        1,
        "labels",
        "See SoftmaxFocalLoss.")
    .Input(
        2,
        "normalizer",
        "See SoftmaxFocalLoss.")
    .Input(
        3,
        "probabilities",
        "Output 1 from SoftmaxFocalLoss; see SoftmaxFocalLoss.")
    .Input(
        4,
        "d_loss",
        "Gradient of forward output 0 (loss).")
    .Output(
        0,
        "d_scores",
        "Gradient of forward input 0 (scores).");
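
// ---------------------------------------------------------------------------
// Illustrative, standalone sketch (not part of this file): the per-class
// gradient of the documented loss with respect to one group's scores, derived
// by hand from FL(p_t) using d p_t / d s_j = p_t * ([j == t] - p_j):
//
//   d FL / d s_j = alpha * (p_j - [j == t]) *
//       ((1 - p_t)**gamma - gamma * p_t * (1 - p_t)**(gamma - 1) * log(p_t)).
//
// Here `probs` plays the role of forward output 1 (probabilities) and
// `d_loss` the incoming gradient of the scalar loss; the names are
// hypothetical, and the shipped kernel may organize the computation
// differently.

#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> SoftmaxFocalLossGradOneGroup(
    const std::vector<float>& probs,
    int target,
    float alpha,
    float gamma,
    float d_loss) {
  const float p_t = probs[target];
  // Scalar factor shared by every class in the group.
  const float f = std::pow(1.f - p_t, gamma) -
      gamma * p_t * std::pow(1.f - p_t, gamma - 1.f) * std::log(p_t);
  std::vector<float> d_scores(probs.size());
  for (std::size_t j = 0; j < probs.size(); ++j) {
    const float indicator = (static_cast<int>(j) == target) ? 1.f : 0.f;
    d_scores[j] = d_loss * alpha * (probs[j] - indicator) * f;
  }
  return d_scores;
}
// ---------------------------------------------------------------------------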

class GetSoftmaxFocalLossGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "SoftmaxFocalLossGradient",
        "",
        // Inputs: scores, labels, normalizer, the forward op's probabilities
        // output, and the gradient of the forward op's loss output.
        vector<string>{I(0), I(1), I(2), O(1), GO(0)},
        // Output: the gradient of the forward op's scores input.
        vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(SoftmaxFocalLoss, GetSoftmaxFocalLossGradient);
} // namespace caffe2
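
// ---------------------------------------------------------------------------
// Illustrative sketch: wiring the operator into a NetDef through the Caffe2
// protobuf API, mirroring the schema above. The blob names ("scores",
// "labels", "normalizer") are placeholders, the include path for the
// generated protobuf header may differ across Caffe2 versions, and creating
// and filling the input blobs in a Workspace is omitted here.

#include "caffe2/proto/caffe2.pb.h"

caffe2::NetDef MakeFocalLossNet() {
  caffe2::NetDef net;
  net.set_name("focal_loss_example");
  auto* op = net.add_op();
  op->set_type("SoftmaxFocalLoss");
  op->add_input("scores");      // (N, num_anchors * num_classes, H, W)
  op->add_input("labels");      // (N, num_anchors, H, W)
  op->add_input("normalizer");  // scalar
  op->add_output("loss");
  op->add_output("probabilities");
  auto* gamma = op->add_arg();
  gamma->set_name("gamma");
  gamma->set_f(2.0f);  // e.g. the setting reported in the RetinaNet paper
  auto* num_classes = op->add_arg();
  num_classes->set_name("num_classes");
  num_classes->set_i(81);
  return net;
}
// ---------------------------------------------------------------------------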