// Caffe2 - C++ API: reduction_front_back_ops.cc
// A deep learning, cross platform ML framework.
17 #include "caffe2/operators/reduction_front_back_ops.h"
18 #include "caffe2/core/operator_gradient.h"
19 
20 namespace caffe2 {
21 
22 /***
23  Sum Ops
24 ***/
25 
26 // ReduceFrontSum: columnwise sum
27 template <>
28 template <typename T>
29 void SumReduceDimsOp<CPUContext, true, false>::Compute(
30  int rows,
31  int cols,
32  const T* in_data,
33  T* out_data) {
34  for (int j = 0; j < cols; j++) {
35  T sum = in_data[j];
36  for (int i = 1; i < rows; i++) {
37  sum += in_data[i * cols + j];
38  }
39  out_data[j] = sum;
40  }
41 }
42 
43 // ReduceBackSum: rowwise sum
44 template <>
45 template <typename T>
46 void SumReduceDimsOp<CPUContext, false, false>::Compute(
47  int rows,
48  int cols,
49  const T* in_data,
50  T* out_data) {
51  for (int i = 0; i < rows; i++) {
52  int offset = i * cols;
53  T sum = in_data[offset];
54  for (int j = 1; j < cols; j++) {
55  sum += in_data[offset + j];
56  }
57  out_data[i] = sum;
58  }
59 }
60 
61 // ReduceFrontSumGradient
62 template <>
63 template <typename T>
64 void SumReduceDimsGradientOp<CPUContext, true, false>::Compute(
65  int rows,
66  int cols,
67  const T* dYdata,
68  T* dXdata) {
69  for (int i = 0; i < rows * cols; i++) {
70  dXdata[i] = dYdata[i % cols];
71  }
72 }
73 
74 // ReduceBackSumGradient
75 template <>
76 template <typename T>
77 void SumReduceDimsGradientOp<CPUContext, false, false>::Compute(
78  int rows,
79  int cols,
80  const T* dYdata,
81  T* dXdata) {
82  for (int i = 0; i < rows * cols; i++) {
83  dXdata[i] = dYdata[i / cols];
84  }
85 }
86 
// CPU registrations for ReduceFrontSum and its gradient op.
REGISTER_CPU_OPERATOR(ReduceFrontSum, SumReduceDimsOp<CPUContext, true, false>);
REGISTER_CPU_OPERATOR(
    ReduceFrontSumGradient,
    SumReduceDimsGradientOp<CPUContext, true, false>);
91 
93  using GradientMakerBase::GradientMakerBase;
94  vector<OperatorDef> GetGradientDefs() override {
95  return SingleGradientDef(
96  "ReduceFrontSumGradient",
97  "",
98  vector<string>{GO(0), I(0)},
99  vector<string>{GI(0)});
100  }
101 };
102 
103 REGISTER_GRADIENT(ReduceFrontSum, GetReduceFrontSumGradient);
104 
105 REGISTER_CPU_OPERATOR(ReduceBackSum, SumReduceDimsOp<CPUContext, false, false>);
106 REGISTER_CPU_OPERATOR(
107  ReduceBackSumGradient,
109 
111  using GradientMakerBase::GradientMakerBase;
112  vector<OperatorDef> GetGradientDefs() override {
113  return SingleGradientDef(
114  "ReduceBackSumGradient",
115  "",
116  vector<string>{GO(0), I(0)},
117  vector<string>{GI(0)});
118  }
119 };
120 
121 REGISTER_GRADIENT(ReduceBackSum, GetReduceBackSumGradient);
122 
// Shape inference shared by every Reduce{Front,Back}{Sum,Mean,Max} schema.
// Keeps the input dimensions that are NOT reduced: a front reducer drops
// the first `num_reduce_dim` dims, a back reducer drops the last ones.
// NOTE(review): the argument read here is "num_reduce_dim" (singular) while
// the schemas below document "num_reduce_dims" -- confirm which spelling
// callers actually pass before relying on either.
#define REDUCTION_OP_SHAPE_INFERENCE(is_front_reducer)                      \
  CAFFE_ENFORCE_EQ(1, in.size());                                           \
  ArgumentHelper helper(def);                                               \
  int num_reduce_dims = helper.GetSingleArgument<int>("num_reduce_dim", 1); \
  int start_index = is_front_reducer ? num_reduce_dims : 0;                 \
  int end_index = is_front_reducer ? in[0].dims_size()                      \
                                   : in[0].dims_size() - num_reduce_dims;   \
  vector<int> output_shape;                                                 \
  for (int i = start_index; i < end_index; ++i) {                           \
    output_shape.push_back(in[0].dims(i));                                  \
  }                                                                         \
  return vector<TensorShape>{                                               \
      CreateTensorShape(output_shape, in[0].data_type())};
136 
// Schema: output keeps the trailing (non-reduced) dims of the input.
OPERATOR_SCHEMA(ReduceFrontSum)
    .NumInputs(1)
    .NumOutputs(1)
    .Arg("num_reduce_dims", "Number of dimensions to reduce")
    .SetDoc(R"DOC(
Reduces the input tensor along the first dimension of the input
tensor by applying 'Sum'
)DOC")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(true)
    });
// Gradient takes (dY, X) and produces dX.
OPERATOR_SCHEMA(ReduceFrontSumGradient).NumInputs(2).NumOutputs(1);
150 
// Schema: output keeps the leading (non-reduced) dims of the input.
OPERATOR_SCHEMA(ReduceBackSum)
    .NumInputs(1)
    .NumOutputs(1)
    .Arg("num_reduce_dims", "Number of dimensions to reduce")
    .SetDoc(R"DOC(
Reduces the input tensor along the last dimension of the
input tensor by applying 'Sum'
)DOC")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(false)
    });
// Gradient takes (dY, X) and produces dX.
OPERATOR_SCHEMA(ReduceBackSumGradient).NumInputs(2).NumOutputs(1);
164 
165 /***
166  Mean Ops
167 ***/
168 
169 // ReduceFrontMean: columnwise mean
170 template <>
171 template <typename T>
173  int rows,
174  int cols,
175  const T* in_data,
176  T* out_data) {
177  for (int j = 0; j < cols; j++) {
178  T sum = in_data[j];
179  for (int i = 1; i < rows; i++) {
180  sum += in_data[i * cols + j];
181  }
182  out_data[j] = sum / rows;
183  }
184 }
185 
186 // ReduceBackMean: rowwise mean
187 template <>
188 template <typename T>
190  int rows,
191  int cols,
192  const T* in_data,
193  T* out_data) {
194  for (int i = 0; i < rows; i++) {
195  int offset = i * cols;
196  T sum = in_data[offset];
197  for (int j = 1; j < cols; j++) {
198  sum += in_data[offset + j];
199  }
200  out_data[i] = sum / cols;
201  }
202 }
203 
204 // ReduceFrontMeanGradient
205 template <>
206 template <typename T>
208  int rows,
209  int cols,
210  const T* dYdata,
211  T* dXdata) {
212  for (int i = 0; i < rows * cols; i++) {
213  dXdata[i] = dYdata[i % cols] / rows;
214  }
215 }
216 
217 // ReduceBackMeanGradient
218 template <>
219 template <typename T>
221  int rows,
222  int cols,
223  const T* dYdata,
224  T* dXdata) {
225  for (int i = 0; i < rows * cols; i++) {
226  dXdata[i] = dYdata[i / cols] / cols;
227  }
228 }
230 REGISTER_CPU_OPERATOR(ReduceFrontMean, SumReduceDimsOp<CPUContext, true, true>);
231 REGISTER_CPU_OPERATOR(
232  ReduceFrontMeanGradient,
234 
236  using GradientMakerBase::GradientMakerBase;
237  vector<OperatorDef> GetGradientDefs() override {
238  return SingleGradientDef(
239  "ReduceFrontMeanGradient",
240  "",
241  vector<string>{GO(0), I(0)},
242  vector<string>{GI(0)});
243  }
244 };
245 
246 REGISTER_GRADIENT(ReduceFrontMean, GetReduceFrontMeanGradient);
247 
// Schema: output keeps the trailing (non-reduced) dims of the input.
OPERATOR_SCHEMA(ReduceFrontMean)
    .NumInputs(1)
    .NumOutputs(1)
    .Arg("num_reduce_dims", "Number of dimensions to reduce")
    .SetDoc(R"DOC(
Reduces the input tensor along the first dimension of the input
tensor by applying 'Mean'
)DOC")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(true)
    });
// Gradient takes (dY, X) and produces dX.
OPERATOR_SCHEMA(ReduceFrontMeanGradient).NumInputs(2).NumOutputs(1);
261 
262 REGISTER_CPU_OPERATOR(ReduceBackMean, SumReduceDimsOp<CPUContext, false, true>);
263 REGISTER_CPU_OPERATOR(
264  ReduceBackMeanGradient,
266 
268  using GradientMakerBase::GradientMakerBase;
269  vector<OperatorDef> GetGradientDefs() override {
270  return SingleGradientDef(
271  "ReduceBackMeanGradient",
272  "",
273  vector<string>{GO(0), I(0)},
274  vector<string>{GI(0)});
275  }
276 };
277 
278 REGISTER_GRADIENT(ReduceBackMean, GetReduceBackMeanGradient);
279 
// Schema: output keeps the leading (non-reduced) dims of the input.
OPERATOR_SCHEMA(ReduceBackMean)
    .NumInputs(1)
    .NumOutputs(1)
    .Arg("num_reduce_dims", "Number of dimensions to reduce")
    .SetDoc(R"DOC(
Reduces the input tensor along the last dimension of the
input tensor by applying 'Mean'
)DOC")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(false)
    });
// Gradient takes (dY, X) and produces dX.
OPERATOR_SCHEMA(ReduceBackMeanGradient).NumInputs(2).NumOutputs(1);
293 
294 /***
295  Max Ops
296 ***/
297 
298 // ReduceFrontMax
299 template <>
301  int rows,
302  int cols,
303  const float* data,
304  float* out_data) {
305  for (int i = 0; i < cols; i++) {
306  float mx = data[i];
307  for (int j = 1; j < rows; j++) {
308  mx = std::max(mx, data[j * cols + i]);
309  }
310  out_data[i] = mx;
311  }
312 }
313 
314 // ReduceBackMax
315 template <>
317  int rows,
318  int cols,
319  const float* data,
320  float* out_data) {
321  for (int i = 0; i < rows; i++) {
322  float mx = data[i * cols];
323  for (int j = 1; j < cols; j++) {
324  mx = std::max(mx, data[i * cols + j]);
325  }
326  out_data[i] = mx;
327  }
328 }
329 
330 // ReduceFrontMaxGradient
331 template <>
333  int rows,
334  int cols,
335  const float* dYdata,
336  const float* Xdata,
337  const float* Ydata,
338  float* dXdata) {
339  int len = cols * rows;
340  for (int i = 0; i < len; i++) {
341  int col = i % cols;
342  dXdata[i] = Xdata[i] == Ydata[col] ? dYdata[col] : 0.0f;
343  }
344 }
345 
346 // ReduceBackMaxGradient
347 template <>
349  int rows,
350  int cols,
351  const float* dYdata,
352  const float* Xdata,
353  const float* Ydata,
354  float* dXdata) {
355  int len = cols * rows;
356  for (int i = 0; i < len; i++) {
357  int row = i / cols;
358  dXdata[i] = Xdata[i] == Ydata[row] ? dYdata[row] : 0.0f;
359  }
360 }
361 
362 REGISTER_CPU_OPERATOR(ReduceFrontMax, MaxReduceDimsOp<float, CPUContext, true>);
363 REGISTER_CPU_OPERATOR(
364  ReduceFrontMaxGradient,
366 
367 REGISTER_CPU_OPERATOR(ReduceBackMax, MaxReduceDimsOp<float, CPUContext, false>);
368 REGISTER_CPU_OPERATOR(
369  ReduceBackMaxGradient,
371 
373  using GradientMakerBase::GradientMakerBase;
374  vector<OperatorDef> GetGradientDefs() override {
375  return SingleGradientDef(
376  "ReduceFrontMaxGradient",
377  "",
378  vector<string>{GO(0), I(0), O(0)},
379  vector<string>{GI(0)});
380  }
381 };
382 
383 REGISTER_GRADIENT(ReduceFrontMax, GetReduceFrontMaxGradient);
384 
386  using GradientMakerBase::GradientMakerBase;
387  vector<OperatorDef> GetGradientDefs() override {
388  return SingleGradientDef(
389  "ReduceBackMaxGradient",
390  "",
391  vector<string>{GO(0), I(0), O(0)},
392  vector<string>{GI(0)});
393  }
394 };
395 
396 REGISTER_GRADIENT(ReduceBackMax, GetReduceBackMaxGradient);
397 
// Schema: output keeps the trailing (non-reduced) dims of the input.
OPERATOR_SCHEMA(ReduceFrontMax)
    .NumInputs(1)
    .NumOutputs(1)
    .Arg("num_reduce_dims", "Number of dimensions to reduce")
    .SetDoc(R"DOC(
Reduces the input tensor along the first dimension of the input
tensor by applying 'Max'
)DOC")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(true)
    });
// Gradient takes (dY, X, Y) and produces dX.
OPERATOR_SCHEMA(ReduceFrontMaxGradient).NumInputs(3).NumOutputs(1);
411 
// Schema: output keeps the leading (non-reduced) dims of the input.
OPERATOR_SCHEMA(ReduceBackMax)
    .NumInputs(1)
    .NumOutputs(1)
    .Arg("num_reduce_dims", "Number of dimensions to reduce")
    .SetDoc(R"DOC(
Reduces the input tensor along the last dimension of the
input tensor by applying 'Max'
)DOC")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(false)
    });
// Gradient takes (dY, X, Y) and produces dX.
OPERATOR_SCHEMA(ReduceBackMaxGradient).NumInputs(3).NumOutputs(1);
425 
426 #undef REDUCTION_OP_SHAPE_INFERENCE
427 
428 } // namespace caffe2
// Copyright (c) 2016-present, Facebook, Inc.
// SingleGradientDef(const Args&... args): helper to create one single
// operator def, which is usually the case for many gradient operators.