Caffe2 - C++ API
A deep learning, cross platform ML framework
sequence_ops.h
1 #ifndef CAFFE2_OPERATORS_SEQUENCE_OPS_H_
2 #define CAFFE2_OPERATORS_SEQUENCE_OPS_H_
3 
4 #include "caffe2/core/operator.h"
5 #include "caffe2/core/tensor.h"
6 #include "caffe2/utils/math.h"
7 
8 namespace caffe2 {
9 
10 template <class Context>
11 class GatherPaddingOp final : public Operator<Context> {
12  public:
13  USE_OPERATOR_CONTEXT_FUNCTIONS;
14  template <class... Args>
15  explicit GatherPaddingOp(Args&&... args)
16  : Operator<Context>(std::forward<Args>(args)...),
17  startPaddingWidth_(
18  this->template GetSingleArgument<int>("padding_width", 1)),
19  endPaddingWidth_(
20  this->template GetSingleArgument<int>("end_padding_width", -1)) {
21  CAFFE_ENFORCE_GE(startPaddingWidth_, 0);
22  if (endPaddingWidth_ < 0) {
23  endPaddingWidth_ = startPaddingWidth_;
24  }
25  }
26 
27  bool RunOnDevice() override {
28  if (startPaddingWidth_ == 0 && endPaddingWidth_ == 0) {
29  Output(0)->Resize(std::vector<int64_t>(0));
30  auto output_0_data = Output(0)->template mutable_data<int64_t>();
31  // TODO(zhengxq): as suggested by salex@, change this to a loop.
32  math::Set<int64_t, Context>(
33  Output(0)->numel(), 0, output_0_data, &context_);
34  if (OutputSize() == 2) {
35  Output(1)->Resize(std::vector<int64_t>(0));
36  auto output_1_data = Output(1)->template mutable_data<int64_t>();
37  math::Set<int64_t, Context>(
38  Output(1)->numel(), 0, output_1_data, &context_);
39  }
40  return true;
41  }
43  this, Input(0));
44  }
45 
46  template <typename T>
47  bool DoRunWithType() {
48  const auto& in = Input(0);
49  CAFFE_ENFORCE_GE(in.dim(), 1);
50  const int32_t outer_size = in.sizes()[0];
51  const auto block_size = in.size_from_dim(1);
52  const auto pad_width = startPaddingWidth_ + endPaddingWidth_;
53 
54  // if no lengths is provided, assume it is a single full-span entry
55  const int32_t* lengths_ptr = &outer_size;
56  int64_t lengths_size = 1;
57  if (InputSize() > 1) {
58  const auto& lengths = Input(1);
59  lengths_ptr = lengths.template data<int32_t>();
60  lengths_size = lengths.numel();
61  }
62  std::vector<int64_t> padShape(in.sizes().begin() + 1, in.sizes().end());
63  // output will contain accumulator over paddings
64  Output(0)->Resize(padShape);
65  T* padding_start_ptr = Output(0)->template mutable_data<T>();
66  math::Set<T, Context>(block_size, 0.0, padding_start_ptr, &context_);
67 
68  // if no end_padding is provided, assume it's the same as start_padding
69  T* padding_end_ptr = padding_start_ptr;
70  if (OutputSize() == 2) {
71  Output(1)->Resize(padShape);
72  padding_end_ptr = Output(1)->template mutable_data<T>();
73  math::Set<T, Context>(block_size, 0.0, padding_end_ptr, &context_);
74  }
75  GatherPadding<T>(
76  outer_size,
77  lengths_size,
78  block_size,
79  pad_width,
80  in.template data<T>(),
81  lengths_ptr,
82  padding_start_ptr,
83  padding_end_ptr);
84  return true;
85  }
86 
87  private:
88  template <typename T>
89  void GatherPadding(
90  const int outer_size,
91  const int lengths_size,
92  const int block_size,
93  const int pad_width,
94  const T* in_ptr,
95  const int* lengths_ptr,
96  T* padding_start_ptr,
97  T* padding_end_ptr);
98 
99  int startPaddingWidth_;
100  int endPaddingWidth_;
101  // Scratch space required by the CUDA version
102  Tensor lengths_prefix_sum_buffer_{Context::GetDeviceType()};
103  Tensor lengths_prefix_sum_{Context::GetDeviceType()};
104 };
105 
106 template <class Context>
107 class RemovePaddingOp final : public Operator<Context> {
108  public:
109  USE_OPERATOR_CONTEXT_FUNCTIONS;
110  template <class... Args>
111  explicit RemovePaddingOp(Args&&... args)
112  : Operator<Context>(std::forward<Args>(args)...),
113  startPaddingWidth_(
114  this->template GetSingleArgument<int>("padding_width", 1)),
115  endPaddingWidth_(
116  this->template GetSingleArgument<int>("end_padding_width", -1)) {
117  CAFFE_ENFORCE_GE(startPaddingWidth_, 0);
118  if (endPaddingWidth_ < 0) {
119  endPaddingWidth_ = startPaddingWidth_;
120  }
121  }
122 
123  bool RunOnDevice() override {
124  if (startPaddingWidth_ == 0 && endPaddingWidth_ == 0) {
125  Output(0)->CopyFrom(Input(0), true /*async*/);
126  if (OutputSize() == 2) {
127  Output(1)->CopyFrom(Input(1), true /*async*/);
128  }
129  return true;
130  }
132  this, Input(0));
133  }
134 
135  template <typename T>
136  bool DoRunWithType();
137 
138  private:
139  int startPaddingWidth_;
140  int endPaddingWidth_;
141 
142  // Scratch space required by the CUDA version
143  Tensor lengths_prefix_sum_buffer_{Context::GetDeviceType()};
144  Tensor lengths_prefix_sum_{Context::GetDeviceType()};
145 };
146 
147 template <class Context>
148 class AddPaddingOp final : public Operator<Context> {
149  public:
150  USE_OPERATOR_CONTEXT_FUNCTIONS;
151  template <class... Args>
152  explicit AddPaddingOp(Args&&... args)
153  : Operator<Context>(std::forward<Args>(args)...),
154  startPaddingWidth_(
155  this->template GetSingleArgument<int>("padding_width", 1)),
156  endPaddingWidth_(
157  this->template GetSingleArgument<int>("end_padding_width", -1)) {
158  CAFFE_ENFORCE_GE(startPaddingWidth_, 0);
159  if (endPaddingWidth_ < 0) {
160  endPaddingWidth_ = startPaddingWidth_;
161  }
162  }
163 
164  bool RunOnDevice() override {
165  if (startPaddingWidth_ == 0 && endPaddingWidth_ == 0) {
166  Output(0)->CopyFrom(Input(0), true /*async*/);
167  if (OutputSize() == 2) {
168  Output(1)->CopyFrom(Input(1), true /*async*/);
169  }
170  return true;
171  }
173  this, Input(0));
174  }
175 
176  template <typename T>
177  bool DoRunWithType() {
178  const auto& in = Input(0);
179  CAFFE_ENFORCE_GE(in.dim(), 1);
180  const int32_t outer_size = in.sizes()[0];
181  const auto block_size = in.size_from_dim(1);
182 
183  // if no lengths is provided, assume it is a single full-span entry
184  const int32_t* lengths_ptr = nullptr;
185  int32_t lengths_size = 1;
186  if (InputSize() > 1) {
187  const auto& lengths = Input(1);
188  lengths_ptr = lengths.template data<int32_t>();
189  lengths_size = lengths.numel();
190  }
191 
192  // fetch paddings
193  // input_size == 2 : pad with zeros
194  // input_size == 3 : start and end paddings are the same
195  // input_size == 4 : different start and end paddings
196  const T* padding_start_ptr = nullptr;
197  const T* padding_end_ptr = nullptr;
198  if (InputSize() >= 3) {
199  auto& padding_start = Input(2);
200  CAFFE_ENFORCE_EQ(block_size, padding_start.numel());
201  padding_start_ptr = padding_start.template data<T>();
202  }
203  if (InputSize() == 4) {
204  auto& padding_end = Input(3);
205  CAFFE_ENFORCE_EQ(block_size, padding_end.numel());
206  padding_end_ptr = padding_end.template data<T>();
207  } else {
208  padding_end_ptr = padding_start_ptr;
209  }
210 
211  auto out_dims = in.sizes().vec();
212  out_dims[0] += (startPaddingWidth_ + endPaddingWidth_) * lengths_size;
213  auto* out = Output(0, std::move(out_dims), at::dtype<T>());
214 
215  const auto* in_ptr = in.template data<T>();
216  auto* out_ptr = out->template mutable_data<T>();
217 
218  return MakePadding<T>(
219  in_ptr,
220  out_ptr,
221  lengths_ptr,
222  lengths_size,
223  outer_size,
224  padding_start_ptr,
225  padding_end_ptr,
226  block_size);
227  }
228 
229  private:
230  template <typename T>
231  bool MakePadding(
232  const T* in_ptr,
233  T* out_ptr,
234  const int32_t* lengths_ptr,
235  int32_t lengths_size,
236  int32_t outer_size,
237  const T* padding_start_ptr,
238  const T* padding_end_ptr,
239  int64_t block_size);
240 
241  int startPaddingWidth_;
242  int endPaddingWidth_;
243 
244  // Scratch space required by the CUDA version
245  Tensor lengths_prefix_sum_buffer_{Context::GetDeviceType()};
246  Tensor lengths_prefix_sum_{Context::GetDeviceType()};
247 };
248 
// NOTE(review): implementation is device-specific and not visible in this
// header — presumably pads zero-length samples so every sample is non-empty
// (name-based inference; confirm against the .cc/.cu definition).
template <class Context>
class PadEmptySamplesOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  // Forwards all operator arguments to the base Operator.
  template <class... Args>
  explicit PadEmptySamplesOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...) {}

  // Defined in the corresponding device-specific source file.
  bool RunOnDevice() override;
};
259 
260 } // namespace caffe2
261 
262 #endif // CAFFE2_OPERATORS_SEQUENCE_OPS_H_
const Tensor& Input(int idx, DeviceType type = Context::GetDeviceType())
Retrieve a non-owning reference to the input at position 'idx' for this operator. ...
Definition: operator.h:702
A global dictionary that holds information about what Caffe2 modules have been loaded in the current ...
Definition: blob.h:13