1 #include "caffe2/core/operator_gradient.h" 2 #include "caffe2/operators/reduce_front_back_sum_mean_ops.h" 13 void SumReduceDimsOp<CPUContext, true, false>::Compute(
17 const int32_t* lengths_data,
19 for (
int j = 0; j < cols; j++) {
21 int length = lengths_data ==
nullptr ? rows : lengths_data[j];
22 for (
int i = 1; i < length; i++) {
23 sum += in_data[i * cols + j];
32 void SumReduceDimsOp<CPUContext, false, false>::Compute(
36 const int32_t* lengths_data,
38 for (
int i = 0; i < rows; i++) {
39 int offset = i * cols;
40 T sum = in_data[offset];
41 int length = lengths_data ==
nullptr ? cols : lengths_data[i];
42 for (
int j = 1; j < length; j++) {
43 sum += in_data[offset + j];
52 void SumReduceDimsGradientOp<CPUContext, true, false>::Compute(
56 const int* lengths_data,
58 for (
int i = 0; i < rows * cols; i++) {
61 if (lengths_data ==
nullptr || row < lengths_data[col]) {
62 dXdata[i] = dYdata[col];
72 void SumReduceDimsGradientOp<CPUContext, false, false>::Compute(
76 const int* lengths_data,
78 for (
int i = 0; i < rows * cols; i++) {
81 if (lengths_data ==
nullptr || col < lengths_data[row]) {
82 dXdata[i] = dYdata[row];
89 REGISTER_CPU_OPERATOR(ReduceFrontSum, SumReduceDimsOp<CPUContext, true, false>);
90 REGISTER_CPU_OPERATOR(
91 ReduceFrontSumGradient,
92 SumReduceDimsGradientOp<CPUContext, true, false>);
95 using GradientMakerBase::GradientMakerBase;
96 vector<OperatorDef> GetGradientDefs()
override {
97 vector<string> grad_in = {GO(0), I(0)};
98 if (def_.input_size() == 2) {
99 grad_in.push_back(I(1));
102 "ReduceFrontSumGradient",
"", grad_in, vector<string>{GI(0)});
109 REGISTER_CPU_OPERATOR(
110 ReduceBackSumGradient,
114 using GradientMakerBase::GradientMakerBase;
115 vector<OperatorDef> GetGradientDefs()
override {
116 vector<string> grad_in = {GO(0), I(0)};
117 if (def_.input_size() == 2) {
118 grad_in.push_back(I(1));
121 "ReduceBackSumGradient",
"", grad_in, vector<string>{GI(0)});
127 #define REDUCTION_OP_SHAPE_INFERENCE(is_front_reducer) \ 128 CAFFE_ENFORCE_LE(1, in.size()); \ 129 CAFFE_ENFORCE_GE(2, in.size()); \ 130 ArgumentHelper helper(def); \ 131 int num_reduce_dims = helper.GetSingleArgument<int>("num_reduce_dim", 1); \ 132 int start_index = is_front_reducer ? num_reduce_dims : 0; \ 133 int end_index = is_front_reducer ? in[0].dims_size() \ 134 : in[0].dims_size() - num_reduce_dims; \ 135 vector<int> output_shape; \ 136 for (int i = start_index; i < end_index; ++i) { \ 137 output_shape.push_back(in[0].dims(i)); \ 139 return vector<TensorShape>{ \ 140 CreateTensorShape(output_shape, in[0].data_type())}; 142 OPERATOR_SCHEMA(ReduceFrontSum)
147 "(*int*): number of dimensions to reduce (default=1)")
149 Reduces the input tensor along the last dimension of the by applying **sum**. 151 Can reduce more than one of the "first" dimensions by setting `num_reduce_dim`. 153 A second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the sum operation. 154 - If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_1 * d_2 * ... * d_{n})$. 155 - The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{0}$ dimension. 157 For example, if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1,2]$, then $Y = [sum(1,4), sum(5,1,7), sum(2), sum(9,2)] = [2.5, 4.333, 2, 5.5]$ 160 - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_sum_ops.cc 164 <summary> <b>Example</b> </summary> 170 workspace.ResetWorkspace() 172 op = core.CreateOperator( 179 workspace.FeedBlob("X", np.random.randint(10, size=(2,3,3)).astype(np.float32)) 180 print("X:", workspace.FetchBlob("X")) 181 workspace.RunOperatorOnce(op) 182 print("Y:", workspace.FetchBlob("Y")) 205 .Input(0, "X",
"(*Tensor`<float>`*): input tensor")
206 .Input(1,
"lengths",
"(*Tensor`<int>`*): number of elements in each sample")
207 .Output(0,
"Y",
"(*Tensor`<float>`*): reduced tensor")
208 .TensorInferenceFunction([](
const OperatorDef& def,
209 const vector<TensorShape>& in) {
210 REDUCTION_OP_SHAPE_INFERENCE(
true)
212 OPERATOR_SCHEMA(ReduceFrontSumGradient).NumInputs(2, 3).NumOutputs(1);
214 OPERATOR_SCHEMA(ReduceBackSum)
219 "(*int*): number of dimensions to reduce (default=1)")
221 Reduces the input tensor along the last dimension of the by applying **sum**. 223 Can reduce more than one of the "last" dimensions by setting `num_reduce_dim`. 225 A second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the sum operation. 226 - If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_0 * d_1 * d_2 * ... * d_{n-1})$. 227 - The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{n-1}$ dimension. 229 For example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1]$, then $Y = [sum(1,5), sum(4,1,8), sum(2)] = [6, 13, 2]$ 233 - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_sum_ops.cc 237 <summary> <b>Example</b> </summary> 243 workspace.ResetWorkspace() 245 op = core.CreateOperator( 252 workspace.FeedBlob("X", np.random.randint(10, size=(1,2,3,3)).astype(np.float32)) 253 print("X:", workspace.FetchBlob("X")) 254 workspace.RunOperatorOnce(op) 255 print("Y:", workspace.FetchBlob("Y")) 278 .Input(0, "X",
"(*Tensor`<float>`*): input tensor")
279 .Input(1,
"lengths",
"(*Tensor`<int>`*): number of elements in each sample")
280 .Output(0,
"Y",
"(*Tensor`<float>`*): reduced tensor")
281 .TensorInferenceFunction([](
const OperatorDef& def,
282 const vector<TensorShape>& in) {
283 REDUCTION_OP_SHAPE_INFERENCE(
false)
285 OPERATOR_SCHEMA(ReduceBackSumGradient).NumInputs(2, 3).NumOutputs(1);
287 #undef REDUCTION_OP_SHAPE_INFERENCE
// Doxygen tooltip residue from extraction, preserved here as comments so the
// file remains well-formed:
// - "A global dictionary that holds information about what Caffe2 modules
//   have been loaded in the current runtime environment."
// - static vector<OperatorDef> SingleGradientDef(const Args&... args):
//   "a helper function to allow one to create one single operator def,
//   which is usually the case for many simple gradient makers."