1 #ifndef CAFFE2_OPERATORS_REDUCE_FRONT_BACK_SUM_MEAN_OPS_H_ 2 #define CAFFE2_OPERATORS_REDUCE_FRONT_BACK_SUM_MEAN_OPS_H_ 4 #include "caffe2/core/context.h" 5 #include "caffe2/core/logging.h" 6 #include "caffe2/core/operator.h" 7 #include "caffe2/utils/math.h" 11 template <
class Context,
bool FIRSTDIMS,
bool NORMALIZE>
14 template <
class... Args>
18 this->
template GetSingleArgument<int32_t>(
"num_reduce_dim", 1)) {}
20 USE_OPERATOR_CONTEXT_FUNCTIONS;
22 bool RunOnDevice()
override {
28 bool DoRunWithType() {
32 num_reduce_dims_ >= 0 && num_reduce_dims_ <= X.dim(),
33 "For N-dim input tensor, support num_reduce_dims in range [0, N].");
35 vector<int64_t> output_shape;
36 int start_index = FIRSTDIMS ? num_reduce_dims_ : 0;
38 FIRSTDIMS ? X.dim() : X.dim() - num_reduce_dims_;
39 for (
int i = start_index; i < end_index; ++i) {
40 output_shape.push_back(X.sizes()[i]);
42 auto* Y = Output(0, output_shape, at::dtype<T>());
44 const int rows = FIRSTDIMS ? X.size_to_dim(num_reduce_dims_)
45 : X.size_to_dim(X.dim() - num_reduce_dims_);
46 const int cols = FIRSTDIMS ? X.size_from_dim(num_reduce_dims_)
47 : X.size_from_dim(X.dim() - num_reduce_dims_);
49 const T* in_data = X.template data<T>();
50 T* out_data = Y->template mutable_data<T>();
52 if (cols == 0 || rows == 0) {
53 math::Set(Y->numel(),
static_cast<T>(0), out_data, &context_);
57 const int32_t* lengths_data =
nullptr;
58 if (InputSize() > 1) {
59 const auto& lengths =
Input(1);
60 lengths_data = lengths.template data<int32_t>();
62 num_reduce_dims_ == 1,
63 "Given lengths input, the number of reduce dimensions should be one.");
64 const int batch_size = FIRSTDIMS ? cols : rows;
66 lengths.numel() == batch_size,
67 "The size of lengths vector doesn't match the batch size.");
70 Compute(rows, cols, in_data, lengths_data, out_data);
81 const int32_t* lengths_data,
87 template <
class Context,
bool FIRSTDIMS,
bool NORMALIZE>
90 template <
class... Args>
94 this->
template GetSingleArgument<int32_t>(
"num_reduce_dim", 1)) {}
96 USE_OPERATOR_CONTEXT_FUNCTIONS;
98 bool RunOnDevice()
override {
103 template <
typename T>
104 bool DoRunWithType() {
106 auto& input_1 =
Input(1);
108 vector<int64_t> dX_sizes;
113 if (input_1.dim() == 1 && input_1.template IsType<int64_t>()) {
115 shape_.CopyFrom(input_1);
117 dX_sizes = vector<int64_t>(
118 shape_.template data<int64_t>(),
119 shape_.template data<int64_t>() + shape_.numel());
122 dX_sizes = input_1.sizes().vec();
124 auto* dX = Output(0, dX_sizes, at::dtype<T>());
126 const int rows = FIRSTDIMS ? dX->size_to_dim(num_reduce_dims_)
127 : dX->size_to_dim(dX->dim() - num_reduce_dims_);
128 const int cols = FIRSTDIMS
129 ? dX->size_from_dim(num_reduce_dims_)
130 : dX->size_from_dim(dX->dim() - num_reduce_dims_);
132 const int32_t* lengths_data =
nullptr;
133 if (InputSize() > 2) {
134 const auto& lengths =
Input(2);
135 lengths_data = lengths.template data<int32_t>();
137 num_reduce_dims_ == 1,
138 "Given lengths input, the number of reduce dimensions should be one.");
139 const int batch_size = FIRSTDIMS ? cols : rows;
141 lengths.numel() == batch_size,
142 "The size of lengths vector doesn't match the batch size.");
145 const T* dYdata = dY.template data<T>();
146 T* dXdata = dX->template mutable_data<T>();
147 Compute<T>(rows, cols, dYdata, lengths_data, dXdata);
152 template <
typename T>
157 const int32_t* lengths_data,
159 int num_reduce_dims_;
161 Tensor shape_{Context::GetDeviceType()};
#endif // CAFFE2_OPERATORS_REDUCE_FRONT_BACK_SUM_MEAN_OPS_H_
// NOTE(review): the lines below are scraped IDE tooltip text, not part of the
// original header; preserved here as a comment so they cannot be parsed as code:
//   const Tensor& Input(int idx, DeviceType type = Context::GetDeviceType())
//   Retrieve a non-owning reference to the input at position 'idx' for this operator. ...
//   A global dictionary that holds information about what Caffe2 modules have been loaded in the current ...