1 #include "caffe2/operators/hard_sigmoid_op.h" 7 #include "caffe2/utils/eigen_utils.h" 13 bool HardSigmoidFunctor<CPUContext>::
14 operator()(
const int N,
const T* X,
T* Y, CPUContext* )
const {
  // Y = max(0, min(1, alpha * X + beta)), computed elementwise via Eigen.
  EigenVectorArrayMap<T>(Y, N) =
      (ConstEigenVectorArrayMap<T>(X, N) * T(alpha) + T(beta))
          .cwiseMin(T(1))
          .cwiseMax(T(0));
  return true;
}
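// The gradient below follows from the forward definition: on the linear
// segment of the hard sigmoid (where 0 < Y < 1) the slope is alpha, and on
// the saturated segments it is 0. Hence dX = alpha * dY where 0 < Y < 1,
// and dX = 0 elsewhere.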
template <>
template <typename T>
bool HardSigmoidGradientFunctor<CPUContext>::Forward(
    const std::vector<int>& Y_dims,
    const std::vector<int>& /* dY_dims */,
    const T* Y,
    const T* dY,
    T* dX,
    CPUContext* /* context */) const {
  const int size = std::accumulate(
      Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
  ConstEigenVectorArrayMap<T> Y_arr(Y, size);
  EigenVectorArrayMap<T>(dX, size) =
      (Y_arr > T(0) && Y_arr < T(1))
          .select(ConstEigenVectorArrayMap<T>(dY, size) * alpha, T(0));
  return true;
}

namespace {
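// A rough cost model: presumably PointwiseCostInference<4> charges about
// 4 FLOPs per element (multiply, add, min, max). params_bytes is zeroed
// because alpha and beta are schema arguments, not learned parameters.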
OpSchema::Cost CostInferenceForHardSigmoid(
    const OperatorDef& def,
    const vector<TensorShape>& in) {
  struct OpSchema::Cost cost = PointwiseCostInference<4>(def, in);
  cost.params_bytes = 0;
  return cost;
}

} // namespace
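// The functors above contain only the math; the templates below lift them
// into full operators. TensorTypes<float> restricts these CPU kernels to
// float inputs.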
REGISTER_CPU_OPERATOR(
    HardSigmoid,
    UnaryElementwiseWithArgsOp<
        TensorTypes<float>,
        CPUContext,
        HardSigmoidFunctor<CPUContext>>);
REGISTER_CPU_OPERATOR(
    HardSigmoidGradient,
    BinaryElementwiseWithArgsOp<
        TensorTypes<float>,
        CPUContext,
        HardSigmoidGradientFunctor<CPUContext>>);
// Input: X; output: Y
OPERATOR_SCHEMA(HardSigmoid)
    .NumInputs(1)
    .NumOutputs(1)
    .AllowInplace({{0, 0}})
    .CostInferenceFunction(CostInferenceForHardSigmoid)
    .IdenticalTypeAndShape()
    .SetDoc(R"DOC(
Applies hard sigmoid operation to the input data element-wise.
The HardSigmoid operation takes one input $X$, produces one output $Y$, and is defined as:

$$Y = max(0, min(1, x * alpha + beta))$$

Github Links:

- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/hard_sigmoid_op.h
- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/hard_sigmoid_op.cc

<details>

<summary> <b>Example</b> </summary>

**Code**

```
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()

op = core.CreateOperator(
    "HardSigmoid",
    ["X"],
    ["Y"],
    alpha=0.2,
    beta=0.5,
)

workspace.FeedBlob("X", np.random.randn(5).astype(np.float32))
print("input:", workspace.FetchBlob("X"))
workspace.RunOperatorOnce(op)
print("hard_sigmoid:", workspace.FetchBlob("Y"))
```

**Result**

```
input: [ 1.5744036   0.31632107  1.7842269   1.4450722  -2.1726978 ]
hard_sigmoid: [ 0.81488073,  0.56326419,  0.85684538,  0.78901446,  0.06546044]
```

</details>

)DOC")
    .Arg("alpha", "float: the slope of the function. Defaults to 0.2")
    .Arg("beta", "float: the bias value of the function. Defaults to 0.5")
    .Input(0, "X", "1D input tensor")
    .Output(0, "Y", "1D output tensor with same shape as input")
    .InheritOnnxSchema();
// Input: Y, dY; output: dX
OPERATOR_SCHEMA(HardSigmoidGradient)
    .NumInputs(2)
    .NumOutputs(1)
    .AllowInplace({{1, 0}})
    .SetDoc(R"DOC(
HardSigmoidGradient takes both Y and dY as well as an argument alpha and uses
this to update dX according to the chain rule and derivatives of the hard
sigmoid function: dX = alpha * dY where 0 < Y < 1, and dX = 0 elsewhere.
)DOC");

namespace {
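// Registers the forward-to-gradient mapping: the generated op is named
// def_.type() + "Gradient" (i.e. "HardSigmoidGradient"), consumes the forward
// output O(0) = Y and the incoming gradient GO(0) = dY, and produces
// GI(0) = dX. Because the gradient depends on Y rather than X, the forward op
// can safely run in place.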
class GetHardSigmoidGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  std::vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        def_.type() + "Gradient",
        "",
        std::vector<std::string>{O(0), GO(0)},
        std::vector<std::string>{GI(0)});
  }
};

} // namespace
REGISTER_GRADIENT(HardSigmoid, GetHardSigmoidGradient);

} // namespace caffe2