1 #include "caffe2/operators/reduce_front_back_max_ops.h" 2 #include "caffe2/core/operator_gradient.h" 6 #define REDUCTION_OP_SHAPE_INFERENCE(is_front_reducer) \ 7 CAFFE_ENFORCE_LE(1, in.size()); \ 8 CAFFE_ENFORCE_GE(2, in.size()); \ 9 ArgumentHelper helper(def); \ 10 int num_reduce_dims = helper.GetSingleArgument<int>("num_reduce_dim", 1); \ 11 int start_index = is_front_reducer ? num_reduce_dims : 0; \ 12 int end_index = is_front_reducer ? in[0].dims_size() \ 13 : in[0].dims_size() - num_reduce_dims; \ 14 vector<int> output_shape; \ 15 for (int i = start_index; i < end_index; ++i) { \ 16 output_shape.push_back(in[0].dims(i)); \ 18 return vector<TensorShape>{ \ 19 CreateTensorShape(output_shape, in[0].data_type())}; 27 void MaxReduceDimsOp<float, CPUContext, true>::Compute(
31 const int32_t* lengths_data,
33 for (
int i = 0; i < cols; i++) {
35 int length = lengths_data ==
nullptr ? rows : lengths_data[i];
36 for (
int j = 1; j < length; j++) {
37 mx = std::max(mx, data[j * cols + i]);
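// Worked example (editorial, not in the original source): the front reducer
// views the input as a rows x cols matrix, where `rows` is the product of the
// reduced leading dimensions, and takes the max down each column.
// With rows = 2, cols = 3:
//   data     = {1, 5, 2,
//               4, 1, 8};
//   out_data = {max(1,4), max(5,1), max(2,8)} = {4, 5, 8}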
// ReduceBackMax: take the max over the trailing `cols` entries of each row.
template <>
void MaxReduceDimsOp<float, CPUContext, false>::Compute(
    int rows,
    int cols,
    const float* data,
    const int32_t* lengths_data,
    float* out_data) {
  for (int i = 0; i < rows; i++) {
    float mx = data[i * cols];
    int length = lengths_data == nullptr ? cols : lengths_data[i];
    for (int j = 1; j < length; j++) {
      mx = std::max(mx, data[i * cols + j]);
    }
    out_data[i] = mx;
  }
}
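// Worked example (editorial): the back reducer takes the max along each row,
// and an optional `lengths_data` restricts how many leading elements of each
// row participate. With rows = 2, cols = 3 and lengths_data = {2, 1}:
//   data     = {1, 5, 2,
//               4, 1, 8};
//   out_data = {max(1,5), max(4)} = {5, 4}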
// ReduceFrontMaxGradient
template <>
void MaxReduceDimsGradientOp<float, CPUContext, true>::Compute(
    int rows,
    int cols,
    const float* dYdata,
    const float* Xdata,
    const float* Ydata,
    const int32_t* lengths_data,
    float* dXdata) {
  int len = cols * rows;
  for (int i = 0; i < len; i++) {
    int col = i % cols;
    int row = i / cols;
    if (lengths_data != nullptr && row >= lengths_data[col]) {
      dXdata[i] = 0.0f;
    } else {
      dXdata[i] = Xdata[i] == Ydata[col] ? dYdata[col] : 0.0f;
    }
  }
}
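// Note (editorial): the gradient is routed to *every* element equal to the
// column max, so tied maxima each receive the full dYdata[col] rather than a
// share of it; rows at or beyond lengths_data[col] always get zero gradient.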
// ReduceBackMaxGradient
template <>
void MaxReduceDimsGradientOp<float, CPUContext, false>::Compute(
    int rows,
    int cols,
    const float* dYdata,
    const float* Xdata,
    const float* Ydata,
    const int32_t* lengths_data,
    float* dXdata) {
  int len = cols * rows;
  for (int i = 0; i < len; i++) {
    int row = i / cols;
    int col = i % cols;
    if (lengths_data == nullptr || col < lengths_data[row]) {
      dXdata[i] = Xdata[i] == Ydata[row] ? dYdata[row] : 0.0f;
    } else {
      dXdata[i] = 0.0f;
    }
  }
}
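// Worked example (editorial): with rows = 2, cols = 3, no lengths,
// Xdata = {1, 5, 2, 4, 1, 8}, Ydata = {5, 8}, dYdata = {0.1, 0.2}, only the
// positions holding each row's max receive gradient:
//   dXdata = {0, 0.1, 0,  0, 0, 0.2}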
REGISTER_CPU_OPERATOR(ReduceFrontMax, MaxReduceDimsOp<float, CPUContext, true>);
REGISTER_CPU_OPERATOR(
    ReduceFrontMaxGradient,
    MaxReduceDimsGradientOp<float, CPUContext, true>);

REGISTER_CPU_OPERATOR(ReduceBackMax, MaxReduceDimsOp<float, CPUContext, false>);
REGISTER_CPU_OPERATOR(
    ReduceBackMaxGradient,
    MaxReduceDimsGradientOp<float, CPUContext, false>);
class GetReduceFrontMaxGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    vector<string> grad_in = {GO(0), I(0), O(0)};
    if (def_.input_size() == 2) {
      grad_in.push_back(I(1));
    }
    return SingleGradientDef(
        "ReduceFrontMaxGradient", "", grad_in, vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(ReduceFrontMax, GetReduceFrontMaxGradient);
class GetReduceBackMaxGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    vector<string> grad_in = {GO(0), I(0), O(0)};
    if (def_.input_size() == 2) {
      grad_in.push_back(I(1));
    }
    return SingleGradientDef(
        "ReduceBackMaxGradient", "", grad_in, vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(ReduceBackMax, GetReduceBackMaxGradient);
OPERATOR_SCHEMA(ReduceFrontMax)
    .NumInputs(1, 2)
    .NumOutputs(1)
    .Arg(
        "num_reduce_dim",
        "(*int*): number of dimensions to reduce (default=1)")
    .SetDoc(R"DOC(
Reduces the input tensor along the first dimension by applying **max**.

Can reduce more than one of the "first" dimensions by setting `num_reduce_dim`.

A second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the max operation.
- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_1 * d_2 * ... * d_n)$.
- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_0$ dimension.

For example, if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1,2]$, then $Y = [max(1,4), max(5,1,7), max(2), max(9,2)] = [4, 7, 2, 9]$

Github Links:
- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_max_ops.cc

<details>

<summary> <b>Example</b> </summary>

**Code**

```
workspace.ResetWorkspace()

op = core.CreateOperator(
    "ReduceFrontMax",
    ["X"],
    ["Y"],
    num_reduce_dim=2
)

workspace.FeedBlob("X", np.random.randint(10, size=(2,3,3)).astype(np.float32))
print("X:", workspace.FetchBlob("X"))
workspace.RunOperatorOnce(op)
print("Y:", workspace.FetchBlob("Y"))
```

</details>

)DOC")
    .Input(0, "X", "(*Tensor`<float>`*): input tensor")
    .Input(
        1,
        "lengths",
        "(*Tensor`<int>`*): number of elements in each sample")
    .Output(0, "Y", "(*Tensor`<float>`*): reduced tensor")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(true)
    });

OPERATOR_SCHEMA(ReduceFrontMaxGradient).NumInputs(3, 4).NumOutputs(1);
OPERATOR_SCHEMA(ReduceBackMax)
    .NumInputs(1, 2)
    .NumOutputs(1)
    .Arg(
        "num_reduce_dim",
        "(*int*): number of dimensions to reduce (default=1)")
    .SetDoc(R"DOC(
Reduces the input tensor along the last dimension by applying **max**.

Can reduce more than one of the "last" dimensions by setting `num_reduce_dim`.

A second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the max operation.
- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_0 * d_1 * d_2 * ... * d_{n-1})$.
- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_n$ dimension.

For example, if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1]$, then $Y = [max(1,5), max(4,1,8), max(2)] = [5, 8, 2]$

Github Links:
- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_max_ops.cc

<details>

<summary> <b>Example</b> </summary>

**Code**

```
workspace.ResetWorkspace()

op = core.CreateOperator(
    "ReduceBackMax",
    ["X"],
    ["Y"],
    num_reduce_dim=2
)

workspace.FeedBlob("X", np.random.randint(10, size=(1,2,3,3)).astype(np.float32))
print("X:", workspace.FetchBlob("X"))
workspace.RunOperatorOnce(op)
print("Y:", workspace.FetchBlob("Y"))
```

</details>

)DOC")
    .Input(0, "X", "(*Tensor`<float>`*): input tensor")
    .Input(
        1,
        "lengths",
        "(*Tensor`<int>`*): number of elements in each sample")
    .Output(0, "Y", "(*Tensor`<float>`*): reduced tensor")
    .TensorInferenceFunction([](const OperatorDef& def,
                                const vector<TensorShape>& in) {
      REDUCTION_OP_SHAPE_INFERENCE(false)
    });

OPERATOR_SCHEMA(ReduceBackMaxGradient).NumInputs(3, 4).NumOutputs(1);
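// Usage sketch (editorial; Python, mirroring the lengths example from the
// ReduceBackMax doc above — names and values are illustrative only):
//
//   workspace.FeedBlob("X", np.array([[1,5,2,9],[4,1,8,2],[2,7,0,3]],
//                                    dtype=np.float32))
//   workspace.FeedBlob("lengths", np.array([2,3,1], dtype=np.int32))
//   op = core.CreateOperator("ReduceBackMax", ["X", "lengths"], ["Y"])
//   workspace.RunOperatorOnce(op)
//   print(workspace.FetchBlob("Y"))  # expected: [5. 8. 2.]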
#undef REDUCTION_OP_SHAPE_INFERENCE

} // namespace caffe2