Caffe2 - C++ API
A deep learning, cross platform ML framework
reduce_front_back_sum_ops.cc
#include "caffe2/core/operator_gradient.h"
#include "caffe2/operators/reduce_front_back_sum_mean_ops.h"

namespace caffe2 {

/***
  Sum Ops
***/

// ReduceFrontSum: columnwise sum
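// If a `lengths` input is given, only the first lengths_data[j] rows of
// column j contribute to out_data[j]; otherwise all `rows` rows are summed.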
template <>
template <typename T>
void SumReduceDimsOp<CPUContext, true, false>::Compute(
    int rows,
    int cols,
    const T* in_data,
    const int32_t* lengths_data,
    T* out_data) {
  for (int j = 0; j < cols; j++) {
    T sum = in_data[j];
    int length = lengths_data == nullptr ? rows : lengths_data[j];
    for (int i = 1; i < length; i++) {
      sum += in_data[i * cols + j];
    }
    out_data[j] = sum;
  }
}

// ReduceBackSum: rowwise sum
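// If a `lengths` input is given, only the first lengths_data[i] entries of
// row i contribute to out_data[i]; otherwise all `cols` entries are summed.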
template <>
template <typename T>
void SumReduceDimsOp<CPUContext, false, false>::Compute(
    int rows,
    int cols,
    const T* in_data,
    const int32_t* lengths_data,
    T* out_data) {
  for (int i = 0; i < rows; i++) {
    int offset = i * cols;
    T sum = in_data[offset];
    int length = lengths_data == nullptr ? cols : lengths_data[i];
    for (int j = 1; j < length; j++) {
      sum += in_data[offset + j];
    }
    out_data[i] = sum;
  }
}

// ReduceFrontSumGradient
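// Broadcasts dY back across the reduced rows; rows at or beyond
// lengths_data[col] receive a zero gradient.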
template <>
template <typename T>
void SumReduceDimsGradientOp<CPUContext, true, false>::Compute(
    int rows,
    int cols,
    const T* dYdata,
    const int* lengths_data,
    T* dXdata) {
  for (int i = 0; i < rows * cols; i++) {
    int row = i / cols;
    int col = i % cols;
    if (lengths_data == nullptr || row < lengths_data[col]) {
      dXdata[i] = dYdata[col];
    } else {
      dXdata[i] = 0;
    }
  }
}

// ReduceBackSumGradient
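// Broadcasts dY back across the reduced columns; columns at or beyond
// lengths_data[row] receive a zero gradient.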
template <>
template <typename T>
void SumReduceDimsGradientOp<CPUContext, false, false>::Compute(
    int rows,
    int cols,
    const T* dYdata,
    const int* lengths_data,
    T* dXdata) {
  for (int i = 0; i < rows * cols; i++) {
    int row = i / cols;
    int col = i % cols;
    if (lengths_data == nullptr || col < lengths_data[row]) {
      dXdata[i] = dYdata[row];
    } else {
      dXdata[i] = 0;
    }
  }
}

REGISTER_CPU_OPERATOR(ReduceFrontSum, SumReduceDimsOp<CPUContext, true, false>);
REGISTER_CPU_OPERATOR(
    ReduceFrontSumGradient,
    SumReduceDimsGradientOp<CPUContext, true, false>);

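// The gradient op gets dY, the forward input X (for shape information), and,
// when present, the optional lengths input.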
class GetReduceFrontSumGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    vector<string> grad_in = {GO(0), I(0)};
    if (def_.input_size() == 2) {
      grad_in.push_back(I(1));
    }
    return SingleGradientDef(
        "ReduceFrontSumGradient", "", grad_in, vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(ReduceFrontSum, GetReduceFrontSumGradient);

REGISTER_CPU_OPERATOR(ReduceBackSum, SumReduceDimsOp<CPUContext, false, false>);
REGISTER_CPU_OPERATOR(
    ReduceBackSumGradient,
    SumReduceDimsGradientOp<CPUContext, false, false>);

class GetReduceBackSumGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    vector<string> grad_in = {GO(0), I(0)};
    if (def_.input_size() == 2) {
      grad_in.push_back(I(1));
    }
    return SingleGradientDef(
        "ReduceBackSumGradient", "", grad_in, vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(ReduceBackSum, GetReduceBackSumGradient);

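// Shape inference shared by the front and back reducers: the output keeps the
// input dimensions that are not reduced (the trailing dims for a front
// reducer, the leading dims for a back reducer). For example, a front reducer
// with num_reduce_dim=2 on a (2, 3, 3) input infers an output of shape (3).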
#define REDUCTION_OP_SHAPE_INFERENCE(is_front_reducer) \
  CAFFE_ENFORCE_LE(1, in.size()); \
  CAFFE_ENFORCE_GE(2, in.size()); \
  ArgumentHelper helper(def); \
  int num_reduce_dims = helper.GetSingleArgument<int>("num_reduce_dim", 1); \
  int start_index = is_front_reducer ? num_reduce_dims : 0; \
  int end_index = is_front_reducer ? in[0].dims_size() \
                                   : in[0].dims_size() - num_reduce_dims; \
  vector<int> output_shape; \
  for (int i = start_index; i < end_index; ++i) { \
    output_shape.push_back(in[0].dims(i)); \
  } \
  return vector<TensorShape>{ \
      CreateTensorShape(output_shape, in[0].data_type())};

OPERATOR_SCHEMA(ReduceFrontSum)
    .NumInputs(1, 2)
    .NumOutputs(1)
    .Arg(
        "num_reduce_dim",
        "(*int*): number of dimensions to reduce (default=1)")
    .SetDoc(R"DOC(
Reduces the input tensor along the first dimension by applying **sum**.

Can reduce more than one of the "first" dimensions by setting `num_reduce_dim`.

A second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the sum operation.
- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_1 * d_2 * ... * d_{n})$.
- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{0}$ dimension.

For example, if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1,2]$, then $Y = [sum(1,4), sum(5,1,7), sum(2), sum(9,2)] = [5, 13, 2, 11]$.
158 
159 Github Links:
160 - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_sum_ops.cc
161 
162 <details>
163 
164 <summary> <b>Example</b> </summary>
165 
166 **Code**
167 
168 ```
169 
170 workspace.ResetWorkspace()
171 
172 op = core.CreateOperator(
173  "ReduceFrontSum",
174  ["X"],
175  ["Y"],
176  num_reduce_dim=2
177 )
178 
179 workspace.FeedBlob("X", np.random.randint(10, size=(2,3,3)).astype(np.float32))
180 print("X:", workspace.FetchBlob("X"))
181 workspace.RunOperatorOnce(op)
182 print("Y:", workspace.FetchBlob("Y"))
183 
184 ```
185 
186 **Result**
187 
188 ```
189 
190 X:
191 [[[4. 1. 1.]
192  [0. 6. 7.]
193  [7. 8. 6.]]
194 
195  [[5. 7. 7.]
196  [0. 1. 6.]
197  [2. 9. 0.]]]
198 Y: [18. 32. 27.]
199 
200 ```
201 
202 </details>
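
Below is a minimal sketch of supplying the optional `lengths` input (assuming the usual `core`, `workspace`, and `numpy` imports shown above); it mirrors the worked example in this doc, so `Y` should come out as $[5, 13, 2, 11]$.

```

from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()

op = core.CreateOperator(
    "ReduceFrontSum",
    ["X", "lengths"],
    ["Y"]
)

workspace.FeedBlob("X", np.array([[1,5,2,9],[4,1,8,2],[2,7,0,3]], dtype=np.float32))
workspace.FeedBlob("lengths", np.array([2,3,1,2], dtype=np.int32))
workspace.RunOperatorOnce(op)
# Sums down each column, using only the first lengths[j] rows of column j.
print("Y:", workspace.FetchBlob("Y"))  # expected: [ 5. 13.  2. 11.]

```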

)DOC")
    .Input(0, "X", "(*Tensor`<float>`*): input tensor")
    .Input(1, "lengths", "(*Tensor`<int>`*): number of elements in each sample")
    .Output(0, "Y", "(*Tensor`<float>`*): reduced tensor")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(true)
    });
OPERATOR_SCHEMA(ReduceFrontSumGradient).NumInputs(2, 3).NumOutputs(1);

OPERATOR_SCHEMA(ReduceBackSum)
    .NumInputs(1, 2)
    .NumOutputs(1)
    .Arg(
        "num_reduce_dim",
        "(*int*): number of dimensions to reduce (default=1)")
    .SetDoc(R"DOC(
Reduces the input tensor along the last dimension by applying **sum**.

Can reduce more than one of the "last" dimensions by setting `num_reduce_dim`.

A second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the sum operation.
- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_0 * d_1 * d_2 * ... * d_{n-1})$.
- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{n-1}$ dimension.

For example, if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1]$, then $Y = [sum(1,5), sum(4,1,8), sum(2)] = [6, 13, 2]$.

Github Links:
- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_sum_ops.cc

<details>

<summary> <b>Example</b> </summary>

**Code**

```

from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()

op = core.CreateOperator(
    "ReduceBackSum",
    ["X"],
    ["Y"],
    num_reduce_dim=2
)

workspace.FeedBlob("X", np.random.randint(10, size=(1,2,3,3)).astype(np.float32))
print("X:", workspace.FetchBlob("X"))
workspace.RunOperatorOnce(op)
print("Y:", workspace.FetchBlob("Y"))

```

**Result**

```

X:
[[[[2. 7. 7.]
   [1. 1. 0.]
   [9. 7. 2.]]

  [[6. 6. 4.]
   [1. 2. 6.]
   [6. 6. 3.]]]]
Y: [[36. 40.]]

```

</details>
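
As above, a minimal sketch of the optional `lengths` input for this operator (assuming the usual `core`, `workspace`, and `numpy` imports); it mirrors the worked example in this doc, so `Y` should come out as $[6, 13, 2]$.

```

from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()

op = core.CreateOperator(
    "ReduceBackSum",
    ["X", "lengths"],
    ["Y"]
)

workspace.FeedBlob("X", np.array([[1,5,2,9],[4,1,8,2],[2,7,0,3]], dtype=np.float32))
workspace.FeedBlob("lengths", np.array([2,3,1], dtype=np.int32))
workspace.RunOperatorOnce(op)
# Sums across each row, using only the first lengths[i] entries of row i.
print("Y:", workspace.FetchBlob("Y"))  # expected: [ 6. 13.  2.]

```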

)DOC")
    .Input(0, "X", "(*Tensor`<float>`*): input tensor")
    .Input(1, "lengths", "(*Tensor`<int>`*): number of elements in each sample")
    .Output(0, "Y", "(*Tensor`<float>`*): reduced tensor")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(false)
    });
OPERATOR_SCHEMA(ReduceBackSumGradient).NumInputs(2, 3).NumOutputs(1);

#undef REDUCTION_OP_SHAPE_INFERENCE

} // namespace caffe2