1 #include "caffe2/operators/leaky_relu_op.h" 3 #include "caffe2/utils/eigen_utils.h" 4 #include "caffe2/utils/math.h" 9 bool LeakyReluOp<float, CPUContext>::RunOnDevice() {
10 const auto& X = Input(0);
12 auto* Y = Output(0, X.sizes(), at::dtype<float>());
13 ConstEigenVectorMap<float> Xvec(X.template data<float>(), X.numel());
14 EigenVectorMap<float> Yvec(Y->template mutable_data<float>(), Y->numel());
15 Yvec = Xvec.cwiseMax(0.f) + Xvec.cwiseMin(0.f) * alpha_;
20 bool LeakyReluGradientOp<float, CPUContext>::RunOnDevice() {
21 const auto& Y = Input(0);
22 const auto& dY = Input(1);
24 auto* dX = Output(0, Y.sizes(), at::dtype<float>());
25 CAFFE_ENFORCE_EQ(Y.numel(), dY.numel());
26 ConstEigenVectorMap<float> Yvec(Y.template data<float>(), Y.numel());
27 ConstEigenVectorMap<float> dYvec(dY.template data<float>(), dY.numel());
28 EigenVectorMap<float> dXvec(dX->template mutable_data<float>(), dX->numel());
29 Eigen::VectorXf gtZero = (Yvec.array() >= 0.0f).cast<float>();
30 dXvec = dYvec.array() * gtZero.array() -
31 dYvec.array() * (gtZero.array() - 1.0f) * alpha_;
35 REGISTER_CPU_OPERATOR(LeakyRelu, LeakyReluOp<float, CPUContext>);
36 REGISTER_CPU_OPERATOR(
38 LeakyReluGradientOp<float, CPUContext>);
40 OPERATOR_SCHEMA(LeakyRelu)
43 .Arg(
"alpha",
"*(type: float; default: 0.01)* Coefficient of leakage.")
44 .AllowInplace({{0, 0}})
45 .CostInferenceFunction(PointwiseCostInference<2>)
46 .IdenticalTypeAndShape()
48 The *LeakyRelu* op takes one input tensor $X$ and an argument $alpha$, and produces one output tensor $Y$ of the same shape as $X.$ The op performs the element wise leaky relu operation, defined as 50 $$y=LeakyRelu(x) =\begin{cases}\alpha x & x < 0\\x & otherwise\end{cases}$$ 52 The default value of *alpha* is 0.01. 56 - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/leaky_relu_op.h 57 - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/leaky_relu_op.cc 62 <summary> <b>Example</b> </summary> 68 workspace.ResetWorkspace() 70 op = core.CreateOperator( 77 workspace.FeedBlob("X", np.random.randn(3, 3).astype(np.float32)) 78 print("X:\n", workspace.FetchBlob("X"), "\n") 80 workspace.RunOperatorOnce(op) 81 print("Y:\n", workspace.FetchBlob("Y")) 90 [[-0.91060215 0.09374836 2.1429708 ] 91 [-0.748983 0.19164062 -1.5130422 ] 92 [-0.29539835 -0.8530696 0.7673204 ]] 95 [[-0.00910602 0.09374836 2.1429708 ] 96 [-0.00748983 0.19164062 -0.01513042] 97 [-0.00295398 -0.0085307 0.7673204 ]] 105 .Input(0, "X",
"Input tensor of data to be operated on.")
106 .Output(0,
"Y",
"Output tensor, calculated as described above.");
108 OPERATOR_SCHEMA(LeakyReluGradient)
111 .AllowInplace({{1, 0}})
112 .Arg(
"alpha",
"Coefficient of leakage")
113 .InheritOnnxSchema();
116 using GradientMakerBase::GradientMakerBase;
117 vector<OperatorDef> GetGradientDefs()
override {
121 vector<string>{O(0), GO(0)},
122 vector<string>{GI(0)});
// NOTE(review): the following text is doxygen-tooltip residue that was
// pasted into the source during extraction; preserved here as comments so
// the file stays compilable.
// - "A global dictionary that holds information about what Caffe2 modules
//   have been loaded in the current ..." (module-registry doc snippet).
// - static vector<OperatorDef> SingleGradientDef(const Args&... args):
//   a helper function to allow one to create one single operator def,
//   which is usually the case for many gradient makers.