1 #include "caffe2/operators/softsign_op.h" 3 #include "caffe2/utils/eigen_utils.h" 12 bool SoftsignFunctor<CPUContext>::
13 operator()(
const int N,
const T* X,
T* Y, CPUContext* )
const {
14 ConstEigenVectorArrayMap<T> X_arr(X, N);
15 EigenVectorMap<T>(Y, N) = (
T(1) + X_arr.abs()).inverse() * X_arr;
21 bool SoftsignGradientFunctor<CPUContext>::Forward(
22 const std::vector<int>& X_dims,
23 const std::vector<int>& ,
28 const int size = std::accumulate(
29 X_dims.cbegin(), X_dims.cend(), 1, std::multiplies<int>());
30 ConstEigenVectorArrayMap<T> dY_arr(dY, size);
31 ConstEigenVectorArrayMap<T> X_arr(X, size);
32 EigenVectorMap<T>(dX, size) =
33 dY_arr * (
T(1) + X_arr.abs()).square().inverse();
37 REGISTER_CPU_OPERATOR(
42 SoftsignFunctor<CPUContext>>);
43 REGISTER_CPU_GRADIENT_OPERATOR(
48 SoftsignGradientFunctor<CPUContext>>);
50 OPERATOR_SCHEMA(Softsign)
53 .AllowInplace({{0, 0}})
54 .IdenticalTypeAndShape()
56 *Softsign* takes one input data tensor $X$ and produces one output data $Y,$ where the softsign function, $y = \frac{x}{1+ |x|}$, is applied to $X$ elementwise. This operation can be done in an in-place fashion too, by providing the same input and output blobs. 60 - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softsign_op.cc 65 <summary> <b>Example</b> </summary> 71 workspace.ResetWorkspace() 73 op = core.CreateOperator( 79 workspace.FeedBlob("X", np.random.randn(3, 3).astype(np.float32)) 80 print("X:\n", workspace.FetchBlob("X"), "\n") 82 workspace.RunOperatorOnce(op) 83 print("Y:\n", workspace.FetchBlob("Y")) 92 [[-1.3060539 0.7242748 -1.9907674 ] 93 [-0.64802396 -0.03244735 0.7455406 ] 94 [-0.298492 -0.5774271 2.8364444 ]] 97 [[-0.5663588 0.420046 -0.6656376 ] 98 [-0.39321268 -0.03142761 0.4271116 ] 99 [-0.2298759 -0.36605626 0.739342 ]] 107 .Input(0, "input",
"Input data blob to be operated on.")
108 .Output(0,
"output",
"Output data blob with same shape as input")
109 .InheritOnnxSchema();
111 GRADIENT_OPERATOR_SCHEMA(SoftsignGradient)
114 .AllowInplace({{1, 0}})
116 Calculates the softsign gradient (sgn(x)/(1+|x|)^2) of the given input tensor 119 .Input(0, "input",
"1-D input tensor")
120 .Input(1,
"input",
"1-D input tensor")
124 "The softsign gradient (sgn(x)/(1+|x|)^2) values of the input tensor " 125 "computed element-wise");
129 class GetSoftsignGradient :
public GradientMakerBase {
130 using GradientMakerBase::GradientMakerBase;
131 std::vector<OperatorDef> GetGradientDefs()
override {
134 "Cannot compute softsign gradient " 135 "if you choose to do an in-place calculation.");
137 return SingleGradientDef(
140 std::vector<std::string>{I(0), GO(0)},
141 std::vector<std::string>{GI(0)});
147 REGISTER_GRADIENT(Softsign, GetSoftsignGradient);
// NOTE(review): the text below appears to be extraction residue (it matches
// the Caffe2 module-registry documentation, not this operator) — confirm it
// does not belong here and remove it.
// "A global dictionary that holds information about what Caffe2 modules have
// been loaded in the current runtime."