17 #include "softmax_focal_loss_op.h" 18 #include "caffe2/operators/softmax_shared.h" 22 REGISTER_CPU_OPERATOR(SoftmaxFocalLoss, SoftmaxFocalLossOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(
    SoftmaxFocalLossGradient,
    SoftmaxFocalLossGradientOp<float, CPUContext>);
OPERATOR_SCHEMA(SoftmaxFocalLoss)
    .NumInputs(3)
    .NumOutputs(2)
    .SetDoc(R"DOC(
A multiclass form of Focal Loss designed for use in RetinaNet-like models.
The input is assumed to be unnormalized scores (sometimes called 'logits')
arranged in a 4D tensor with shape (N, C, H, W), where N is the number of
elements in the batch, H and W are the height and width, and C = num_anchors *
num_classes. The softmax is applied num_anchors times along the C axis.

The softmax version of focal loss is:

  FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t),

where p_i = exp(s_i) / sum_j exp(s_j), t is the target (ground truth) class, and
s_j is the unnormalized score for class j.

See: https://arxiv.org/abs/1708.02002 for details.
)DOC")
    .Arg(
        "scale",
        "(float) default 1.0; multiply the loss by this scale factor.")
    .Arg(
        "alpha",
        "(float) default 0.25; Focal Loss's alpha hyper-parameter.")
    .Arg(
        "gamma",
        "(float) default 1.0; Focal Loss's gamma hyper-parameter.")
    .Arg(
        "num_classes",
        "(int) default 81; number of classes in each softmax group.")
    .Input(
        0,
        "scores",
        "4D tensor of softmax inputs (called 'scores' or 'logits') with shape "
        "(N, C, H, W), where C = num_anchors * num_classes defines num_anchors "
        "groups of contiguous num_classes softmax inputs.")
    .Input(
        1,
        "labels",
        "4D tensor of labels with shape (N, num_anchors, H, W). Each entry is "
        "a class label in [0, num_classes - 1] (inclusive).")
    .Input(
        2,
        "normalizer",
        "Scalar; the loss is normalized by 1 / max(1, normalizer).")
    .Output(
        0,
        "loss",
        "Scalar loss.")
    .Output(
        1,
        "probabilities",
        "4D tensor of softmax probabilities with shape (N, C, H, W), where "
        "C = num_anchors * num_classes, and softmax was applied to each of the "
        "num_anchors groups; within a group the num_classes values sum to 1.");
OPERATOR_SCHEMA(SoftmaxFocalLossGradient)
    .NumInputs(5)
    .NumOutputs(1)
    .Input(
        0,
        "scores",
        "See SoftmaxFocalLoss.")
    .Input(
        1,
        "labels",
        "See SoftmaxFocalLoss.")
    .Input(
        2,
        "normalizer",
        "See SoftmaxFocalLoss.")
    .Input(
        3,
        "probabilities",
        "Output 1 from SoftmaxFocalLoss; See SoftmaxFocalLoss.")
    .Input(
        4,
        "d_loss",
        "Gradient of forward output 0 (loss)")
    .Output(
        0,
        "d_scores",
        "Gradient of forward input 0 (scores)");
class GetSoftmaxFocalLossGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "SoftmaxFocalLossGradient",
        "",
        vector<string>{I(0), I(1), I(2), O(1), GO(0)},
        vector<string>{GI(0)});
  }
};
REGISTER_GRADIENT(SoftmaxFocalLoss, GetSoftmaxFocalLossGradient);

} // namespace caffe2
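// Usage sketch (editorial illustration, not from this file): building an
// OperatorDef for SoftmaxFocalLoss with protobuf setters. The blob names
// ("scores", "labels", "normalizer", "loss", "probabilities") and the
// argument values are assumptions for the example; the resulting def would
// typically be added to a training NetDef or passed to CreateOperator().
//
//   caffe2::OperatorDef def;
//   def.set_type("SoftmaxFocalLoss");
//   def.add_input("scores");      // (N, num_anchors * num_classes, H, W)
//   def.add_input("labels");      // (N, num_anchors, H, W)
//   def.add_input("normalizer");  // scalar
//   def.add_output("loss");
//   def.add_output("probabilities");
//   auto* gamma = def.add_arg();
//   gamma->set_name("gamma");
//   gamma->set_f(2.0f);
//   auto* num_classes = def.add_arg();
//   num_classes->set_name("num_classes");
//   num_classes->set_i(81);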