Caffe2 - C++ API
A deep learning, cross-platform ML framework
conv_transpose_unpool_base_op.h
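This header defines IDEEPConvTransposeUnpoolBase, the shared base class for Caffe2's IDEEP (MKL-DNN) transposed-convolution and unpooling operators. The constructor parses and validates the kernel, stride, pad, and output-adjustment arguments; CalcOutputDims() and ComputeSizeAndPad() derive the output shape for subclasses.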
#pragma once

#include "caffe2/ideep/ideep_utils.h"
#include "caffe2/proto/caffe2_legacy.pb.h"

namespace caffe2 {

class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
 public:
  USE_IDEEP_DEF_ALIASES();
  USE_IDEEP_OPERATOR_FUNCTIONS();

  IDEEPConvTransposeUnpoolBase(const OperatorDef& operator_def, Workspace* ws)
      : IDEEPOperator(operator_def, ws),
        legacy_pad_(
            static_cast<LegacyPadding>(OperatorBase::GetSingleArgument<int>(
                "legacy_pad",
                LegacyPadding::NOTSET))),
        kernel_(OperatorBase::GetRepeatedArgument<int>("kernels")),
        stride_(OperatorBase::GetRepeatedArgument<int>("strides")),
        pads_(OperatorBase::GetRepeatedArgument<int>("pads")),
        adj_(OperatorBase::GetRepeatedArgument<int>("adjs")),
        shared_buffer_(
            OperatorBase::GetSingleArgument<int>("shared_buffer", 0)) {
    // The padding must either use a legacy strategy (VALID or SAME) or
    // explicit, non-negative values.
    if (legacy_pad_ == LegacyPadding::VALID ||
        legacy_pad_ == LegacyPadding::SAME) {
      CAFFE_ENFORCE(
          !OperatorBase::HasArgument("pads"),
          "If you use legacy padding VALID or SAME, you should not specify "
          "any specific padding values.");
    }
    // Get old-style (single-value) argument values.
    if (OperatorBase::HasArgument("kernel")) {
      kernel_.resize(2, OperatorBase::GetSingleArgument<int>("kernel", 0));
    } else if (
        OperatorBase::HasArgument("kernel_h") &&
        OperatorBase::HasArgument("kernel_w")) {
      kernel_.push_back(OperatorBase::GetSingleArgument<int>("kernel_h", 0));
      kernel_.push_back(OperatorBase::GetSingleArgument<int>("kernel_w", 0));
    }

    if (OperatorBase::HasArgument("stride")) {
      stride_.resize(2, OperatorBase::GetSingleArgument<int>("stride", 0));
    } else if (
        OperatorBase::HasArgument("stride_h") &&
        OperatorBase::HasArgument("stride_w")) {
      stride_.push_back(OperatorBase::GetSingleArgument<int>("stride_h", 0));
      stride_.push_back(OperatorBase::GetSingleArgument<int>("stride_w", 0));
    }

    if (OperatorBase::HasArgument("adj")) {
      adj_.resize(2, OperatorBase::GetSingleArgument<int>("adj", 0));
    } else if (
        OperatorBase::HasArgument("adj_h") &&
        OperatorBase::HasArgument("adj_w")) {
      adj_.push_back(OperatorBase::GetSingleArgument<int>("adj_h", 0));
      adj_.push_back(OperatorBase::GetSingleArgument<int>("adj_w", 0));
    }

    if (OperatorBase::HasArgument("pad")) {
      CAFFE_ENFORCE(
          legacy_pad_ != LegacyPadding::VALID &&
              legacy_pad_ != LegacyPadding::SAME,
          "If you use legacy padding VALID or SAME, you should not specify "
          "any specific padding values.");
      pads_.resize(4, OperatorBase::GetSingleArgument<int>("pad", 0));
    } else if (
        OperatorBase::HasArgument("pad_t") &&
        OperatorBase::HasArgument("pad_l") &&
        OperatorBase::HasArgument("pad_b") &&
        OperatorBase::HasArgument("pad_r")) {
      CAFFE_ENFORCE(
          legacy_pad_ != LegacyPadding::VALID &&
              legacy_pad_ != LegacyPadding::SAME,
          "If you use legacy padding VALID or SAME, you should not specify "
          "any specific padding values.");
      pads_.push_back(OperatorBase::GetSingleArgument<int>("pad_t", 0));
      pads_.push_back(OperatorBase::GetSingleArgument<int>("pad_l", 0));
      pads_.push_back(OperatorBase::GetSingleArgument<int>("pad_b", 0));
      pads_.push_back(OperatorBase::GetSingleArgument<int>("pad_r", 0));
    }
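    // Note: "kernel"/"kernel_h"/"kernel_w" and friends above are older
    // single-value spellings that predate the repeated "kernels"/"strides"/
    // "pads"/"adjs" arguments read in the initializer list; they are parsed
    // here for backward compatibility (the two styles are not meant to be
    // mixed in one OperatorDef).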

    // Fill default values.
    if (kernel_.empty()) {
      kernel_.assign({0, 0});
    }

    if (stride_.empty()) {
      stride_.assign(kernel_.size(), 1);
    }

    if (pads_.empty()) {
      pads_.assign(kernel_.size() * 2, 0);
    }

    if (adj_.empty()) {
      adj_.assign(kernel_.size(), 0);
    }

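    // Sanity checks. An unspecified kernel defaults to {0, 0} above, so the
    // CAFFE_ENFORCE_GT(kernel_[dim], 0) below effectively requires a kernel
    // size to be supplied through one of the argument spellings.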
    CAFFE_ENFORCE_EQ(stride_.size(), kernel_.size());
    CAFFE_ENFORCE_EQ(adj_.size(), kernel_.size());

    if (legacy_pad_ != LegacyPadding::VALID &&
        legacy_pad_ != LegacyPadding::SAME) {
      CAFFE_ENFORCE_EQ(pads_.size(), 2 * kernel_.size());
    }

    for (int dim = 0; dim < kernel_.size(); ++dim) {
      CAFFE_ENFORCE_GT(kernel_[dim], 0);
      CAFFE_ENFORCE_GT(stride_[dim], 0);
      CAFFE_ENFORCE_GE(adj_[dim], 0);
      CAFFE_ENFORCE_LE(adj_[dim], stride_[dim]);
    }
  }
  virtual ~IDEEPConvTransposeUnpoolBase() {}

  const ideep::tensor& Input(int index) {
    return OperatorBase::template Input<ideep::tensor>(index);
  }
  ideep::tensor* Output(int index) {
    return OperatorBase::template Output<ideep::tensor>(index);
  }

  ideep::tensor::dims pad_tl() const {
    return {pad_t(), pad_l()};
  }

  ideep::tensor::dims pad_br() const {
    return {pad_b(), pad_r()};
  }

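  // Computes the output shape {N, output_channel, spatial dims...} of a
  // transposed convolution / unpooling, delegating each spatial dimension
  // to ComputeSizeAndPad() below.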
  ideep::tensor::dims CalcOutputDims(
      const ideep::tensor& input,
      int output_channel) {
    CAFFE_ENFORCE_GT(input.get_size(), 0);

    int N = input.get_dim(0);
    ideep::tensor::dims output_dims;
    auto input_dims = input.get_dims();
    itensor::dims dims;
    dims.assign(input_dims.begin() + 2, input_dims.end());
    for (int dim = 0; dim < dims.size(); ++dim) {
      int dim_size = 0;
      ComputeSizeAndPad(
          dims[dim],
          stride_[dim],
          kernel_[dim],
          adj_[dim],
          &pads_[dim],
          &pads_[dim + 2],
          &dim_size);
      output_dims.push_back(dim_size);
    }

    output_dims.insert(output_dims.begin(), {N, output_channel});
    return output_dims;
  }

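  // Subclasses implement RunOnDeviceWithOrderNCHW(); IDEEP errors are
  // logged here and rethrown so the failure still propagates to the caller.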
  bool RunOnDevice() override {
    try {
      return RunOnDeviceWithOrderNCHW();
    } catch (ideep::error& e) {
      LOG(ERROR) << "IDEEP error: " << e.message;
      throw;
    }
  }

  virtual bool RunOnDeviceWithOrderNCHW() {
    CAFFE_THROW("Not implemented");
  }

 private:
  LegacyPadding legacy_pad_;
  int pad_;

 protected:
  vector<int> kernel_;
  vector<int> stride_;
  vector<int> pads_;
  vector<int> adj_;
  bool shared_buffer_;

  // Accessors for 2D conv params.

  inline int pad_t() const {
    return pads_[0];
  }

  inline int pad_l() const {
    return pads_[1];
  }

  inline int pad_b() const {
    return pads_[2];
  }

  inline int pad_r() const {
    return pads_[3];
  }

  inline int kernel_h() const {
    return kernel_[0];
  }

  inline int kernel_w() const {
    return kernel_[1];
  }

  inline int stride_h() const {
    return stride_[0];
  }

  inline int stride_w() const {
    return stride_[1];
  }

  inline int adj_h() const {
    return adj_[0];
  }

  inline int adj_w() const {
    return adj_[1];
  }

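  // Computes one spatial output dimension of the transposed convolution:
  //   out = (in - 1) * stride + kernel + adj - pad_head - pad_tail
  // i.e. the inverse of the forward-convolution size formula. For example,
  // in = 4, stride = 2, kernel = 3, adj = 0, pad_head = pad_tail = 1 gives
  // out = (4 - 1) * 2 + 3 + 0 - 1 - 1 = 7.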
  inline void ComputeSizeAndPad(
      const int in_size,
      const int stride,
      const int kernel,
      const int adj,
      int* pad_head,
      int* pad_tail,
      int* out_size) {
    switch (legacy_pad_) {
      case LegacyPadding::NOTSET:
        CAFFE_ENFORCE_GE(*pad_head, 0);
        CAFFE_ENFORCE_GE(*pad_tail, 0);
        *out_size =
            (in_size - 1) * stride + kernel + adj - *pad_head - *pad_tail;
        break;
      // We handle the LegacyPadding::VALID and LegacyPadding::SAME cases
      // the same way.
      case LegacyPadding::VALID:
      case LegacyPadding::SAME:
        *pad_head = 0;
        *pad_tail = 0;
        *out_size = (in_size - 1) * stride + kernel + adj;
        break;
      case LegacyPadding::CAFFE_LEGACY_POOLING:
        LOG(FATAL) << "CAFFE_LEGACY_POOLING is no longer supported.";
        break;
    }
  }
};

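// Convenience macro for subclasses: pulls in the standard operator helpers
// and the ideep::tensor-typed Input()/Output() overloads defined above.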
#define USE_IDEEP_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS()          \
  USE_OPERATOR_BASE_FUNCTIONS;                                    \
  /* using override */ using IDEEPConvTransposeUnpoolBase::Input; \
  /* using override */ using IDEEPConvTransposeUnpoolBase::Output;

} // namespace caffe2
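
For context, here is a minimal sketch of how a subclass is meant to use this base: it overrides RunOnDeviceWithOrderNCHW() and sizes its output with CalcOutputDims(). The operator IDEEPMyUnpoolOp is hypothetical, the include path is assumed from the Caffe2 source layout, and the tensor resize call assumes an ideep::tensor::resize(dims, data_type) overload, which varies across ideep versions; real operators (conv transpose, unpooling) also handle weights, bias, and the shared_buffer_ path.

#include "caffe2/ideep/operators/conv_transpose_unpool_base_op.h"

namespace caffe2 {

// Hypothetical subclass for illustration only -- not an operator that
// ships with Caffe2.
class IDEEPMyUnpoolOp final : public IDEEPConvTransposeUnpoolBase {
 public:
  USE_IDEEP_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS();

  IDEEPMyUnpoolOp(const OperatorDef& operator_def, Workspace* ws)
      : IDEEPConvTransposeUnpoolBase(operator_def, ws) {}

  bool RunOnDeviceWithOrderNCHW() override {
    const auto& X = Input(0); // ideep::tensor, NCHW layout
    auto* Y = Output(0);

    // Let the base class turn the parsed stride/kernel/pad/adj arguments
    // into an output shape; the channel count is kept unchanged here.
    auto Y_dims = CalcOutputDims(X, X.get_dim(1));

    // Assumption: ideep::tensor exposes resize(dims, data_type); the
    // actual allocation call depends on the ideep version in use.
    Y->resize(Y_dims, X.get_data_type());

    // ... compute *Y with an ideep primitive ...
    return true;
  }
};

// Registration would follow the usual IDEEP pattern, e.g.:
// REGISTER_IDEEP_OPERATOR(MyUnpool, IDEEPMyUnpoolOp);

} // namespace caffe2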