#ifndef CAFFE2_OPERATORS_CONV_POOL_OP_BASE_H_
#define CAFFE2_OPERATORS_CONV_POOL_OP_BASE_H_

#include <algorithm>
#include <cmath>
#include <functional>
#include <numeric>
#include <vector>

#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/proto/caffe2_legacy.pb.h"
#include "caffe2/utils/math.h"

// When the legacy padding strategies (VALID or SAME) need an odd total amount
// of padding along an axis, this flag decides which side receives the extra
// pixel: false pads the tail more (TensorFlow-style), true pads the head more
// (Caffe/CuDNN-style). See ComputeSizeAndPad below.
const bool CAFFE2_PAD_HEAD_MORE = false;
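// Worked example (illustrative, not from the original source): for SAME
// padding with in_size = 5, kernel = 2, stride = 2, the target output size is
// ceil(5 / 2) = 3, so pad_needed = (3 - 1) * 2 + 2 - 5 = 1. With
// CAFFE2_PAD_HEAD_MORE == false the split is pad_head = 0, pad_tail = 1
// (TensorFlow-style); with true it would be pad_head = 1, pad_tail = 0
// (Caffe/CuDNN-style).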
namespace caffe2 {

template <class Context>
class ConvPoolOpBase : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  ConvPoolOpBase(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws),
        legacy_pad_(
            static_cast<LegacyPadding>(this->template GetSingleArgument<int>(
                "legacy_pad",
                LegacyPadding::NOTSET))),
        global_pooling_(
            this->template GetSingleArgument<int>("global_pooling", 0)),
        kernel_(this->template GetRepeatedArgument<int>("kernels")),
        dilation_(this->template GetRepeatedArgument<int>("dilations")),
        stride_(this->template GetRepeatedArgument<int>("strides")),
        pads_(this->template GetRepeatedArgument<int>("pads")),
        float16_compute_(
            this->template GetSingleArgument<bool>("float16_compute", false)),
        group_(this->template GetSingleArgument<int>("group", 1)),
        order_(StringToStorageOrder(
            this->template GetSingleArgument<string>("order", "NCHW"))),
        shared_buffer_(
            this->template GetSingleArgument<int>("shared_buffer", 0)),
        ws_(ws) {
    // Padding is either the legacy strategy (VALID or SAME) or explicit,
    // non-negative values, never both.
    if (legacy_pad_ == LegacyPadding::VALID ||
        legacy_pad_ == LegacyPadding::SAME) {
      CAFFE_ENFORCE(
          !OperatorBase::HasArgument("pads"),
          "If you use legacy padding VALID or SAME, you should not specify "
          "any specific padding values.");
    }
    // Accept the old single-value arguments ("kernel", or "kernel_h" plus
    // "kernel_w") as aliases for the repeated "kernels" argument.
    if (OperatorBase::HasArgument("kernel")) {
      kernel_.resize(2, this->template GetSingleArgument<int>("kernel", 0));
    } else if (
        OperatorBase::HasArgument("kernel_h") &&
        OperatorBase::HasArgument("kernel_w")) {
      kernel_.push_back(this->template GetSingleArgument<int>("kernel_h", 0));
      kernel_.push_back(this->template GetSingleArgument<int>("kernel_w", 0));
    }
    if (OperatorBase::HasArgument("stride")) {
      stride_.resize(2, this->template GetSingleArgument<int>("stride", 0));
    } else if (
        OperatorBase::HasArgument("stride_h") &&
        OperatorBase::HasArgument("stride_w")) {
      stride_.push_back(this->template GetSingleArgument<int>("stride_h", 0));
      stride_.push_back(this->template GetSingleArgument<int>("stride_w", 0));
    }
    if (OperatorBase::HasArgument("dilation")) {
      dilation_.resize(2, this->template GetSingleArgument<int>("dilation", 0));
    } else if (
        OperatorBase::HasArgument("dilation_h") &&
        OperatorBase::HasArgument("dilation_w")) {
      dilation_.push_back(
          this->template GetSingleArgument<int>("dilation_h", 0));
      dilation_.push_back(
          this->template GetSingleArgument<int>("dilation_w", 0));
    }
    if (OperatorBase::HasArgument("pad")) {
      CAFFE_ENFORCE(
          legacy_pad_ != LegacyPadding::VALID &&
              legacy_pad_ != LegacyPadding::SAME,
          "If you use legacy padding VALID or SAME, you should not specify "
          "any specific padding values.");
      pads_.resize(4, this->template GetSingleArgument<int>("pad", 0));
    } else if (
        OperatorBase::HasArgument("pad_t") &&
        OperatorBase::HasArgument("pad_l") &&
        OperatorBase::HasArgument("pad_b") &&
        OperatorBase::HasArgument("pad_r")) {
      CAFFE_ENFORCE(
          legacy_pad_ != LegacyPadding::VALID &&
              legacy_pad_ != LegacyPadding::SAME,
          "If you use legacy padding VALID or SAME, you should not specify "
          "any specific padding values.");
      pads_.push_back(this->template GetSingleArgument<int>("pad_t", 0));
      pads_.push_back(this->template GetSingleArgument<int>("pad_l", 0));
      pads_.push_back(this->template GetSingleArgument<int>("pad_b", 0));
      pads_.push_back(this->template GetSingleArgument<int>("pad_r", 0));
    }
    // Fill in default values for anything not explicitly specified.
    if (kernel_.size() == 0) {
      kernel_.assign({0, 0});
    }
    if (stride_.size() == 0) {
      stride_.resize(kernel_.size(), 1);
    }
    if (pads_.size() == 0) {
      pads_.resize(kernel_.size() * 2, 0);
    }
    if (dilation_.size() == 0) {
      dilation_.resize(kernel_.size(), 1);
    }
    CAFFE_ENFORCE_EQ(stride_.size(), kernel_.size());
    CAFFE_ENFORCE_EQ(dilation_.size(), kernel_.size());
    if (legacy_pad_ != LegacyPadding::VALID &&
        legacy_pad_ != LegacyPadding::SAME) {
      CAFFE_ENFORCE_EQ(pads_.size(), 2 * kernel_.size());
    }
    if (global_pooling_) {
      for (size_t dim = 0; dim < kernel_.size(); ++dim) {
        CAFFE_ENFORCE(
            pads_[2 * dim] == 0 && pads_[2 * dim + 1] == 0 &&
                dilation_[dim] == 1 && stride_[dim] == 1,
            "If global_pooling is set, pad, dilation and stride should not "
            "be set.");
      }
    }
    // Check the kernel only for conv and pooling ops, since other ops (e.g.
    // PadImage) also derive from this base class.
    if (operator_def.name().find("Conv") == 0 ||
        operator_def.name().find("Pool") != std::string::npos) {
      for (size_t dim = 0; dim < kernel_.size(); ++dim) {
        CAFFE_ENFORCE_GE(pads_[dim], 0);
        CAFFE_ENFORCE_GE(pads_[kernel_.size() + dim], 0);
        CAFFE_ENFORCE(
            kernel_[dim],
            "If you are doing convolution or pooling, you will need to set "
            "the kernel size explicitly.");
      }
    }
    for (size_t dim = 0; dim < kernel_.size(); ++dim) {
      CAFFE_ENFORCE_GE(kernel_[dim], 0);
      CAFFE_ENFORCE_GE(dilation_[dim], 0);
      CAFFE_ENFORCE_GE(stride_[dim], 0);
    }
  }
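  // Example of the argument resolution above (illustrative): an OperatorDef
  // carrying only the single argument kernel = 3 resolves to
  // kernel_ = {3, 3}, stride_ = {1, 1}, dilation_ = {1, 1} and
  // pads_ = {0, 0, 0, 0}, i.e. a 3x3 window with unit stride and no padding.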
  // Returns the spatial dimensions of the input, excluding the batch and
  // channel dimensions.
  vector<int> GetDims(const Tensor& input) {
    vector<int> dims;
    switch (order_) {
      case StorageOrder::NCHW:
        dims.assign(input.sizes().begin() + 2, input.sizes().end());
        break;
      case StorageOrder::NHWC:
        dims.assign(input.sizes().begin() + 1, input.sizes().end() - 1);
        break;
      default:
        CAFFE_THROW("Unknown storage order: ", order_);
    }
    return dims;
  }
  // Returns the product of the spatial dimensions of the input.
  int GetDimsSize(const Tensor& input) {
    int size = 0;
    switch (order_) {
      case StorageOrder::NCHW:
        size = std::accumulate(
            input.sizes().begin() + 2,
            input.sizes().end(),
            1,
            std::multiplies<int>());
        break;
      case StorageOrder::NHWC:
        size = std::accumulate(
            input.sizes().begin() + 1,
            input.sizes().end() - 1,
            1,
            std::multiplies<int>());
        break;
      default:
        CAFFE_THROW("Unknown storage order: ", order_);
    }
    return size;
  }
  // Computes the output shape for the given input without resizing any
  // tensor. Note that kernel_ and pads_ may be adjusted as a side effect
  // (e.g. under global pooling or legacy padding).
  std::vector<int64_t> GetOutputSize(const Tensor& input, int output_channel) {
    CAFFE_ENFORCE_GE(input.dim(), 2);
    const int inner_size = input.size_from_dim(1);
    CAFFE_ENFORCE_GT(inner_size, 0);
    std::vector<int64_t> output_dims;
    InferOutputSize64(
        input.sizes(),
        output_channel,
        order_,
        global_pooling_,
        legacy_pad_,
        dilation_,
        stride_,
        &kernel_,
        &pads_,
        &output_dims);
    return output_dims;
  }
  // Like GetOutputSize, but also resizes the output tensor in place.
  void SetOutputSize(const Tensor& input, Tensor* output, int output_channel) {
    const int inner_size = input.size_from_dim(1);
    CAFFE_ENFORCE_GT(inner_size, 0);
    std::vector<int> output_dims;
    InferOutputSize(
        input.sizes(),
        output_channel,
        order_,
        global_pooling_,
        legacy_pad_,
        dilation_,
        stride_,
        &kernel_,
        &pads_,
        &output_dims);
    output->Resize(output_dims);
  }
  // Static helper, shared with the operator schema: given the input shape and
  // the conv/pool arguments, computes the output shape. kernel and pads are
  // in/out parameters because global pooling and legacy padding rewrite them.
  static void InferOutputSize(
      const at::IntList& input_dims,
      const int output_channel,
      const StorageOrder order,
      const bool global_pooling,
      const LegacyPadding legacy_pad,
      const std::vector<int>& dilation,
      const std::vector<int>& stride,
      std::vector<int>* kernel,
      std::vector<int>* pads,
      std::vector<int>* output_dims) {
    CAFFE_ENFORCE_NE(order, StorageOrder::UNKNOWN);
    const int ndim = input_dims.size() - 2;
    output_dims->resize(ndim + 2);
    output_dims->front() = input_dims.front();
    if (order == StorageOrder::NCHW) {
      output_dims->at(1) = output_channel;
    } else {
      output_dims->back() = output_channel;
    }
    const int offset = order == StorageOrder::NCHW ? 2 : 1;
    if (global_pooling) {
      std::copy_n(input_dims.cbegin() + offset, ndim, kernel->begin());
      std::fill_n(output_dims->begin() + offset, ndim, 1LL);
    } else {
      for (int i = 0; i < ndim; ++i) {
        ComputeSizeAndPad(
            input_dims[i + offset],
            stride[i],
            kernel->at(i),
            dilation[i],
            legacy_pad,
            &pads->at(i),
            &pads->at(i + ndim),
            &output_dims->at(i + offset));
      }
    }
  }
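  // Worked example (illustrative): NCHW input {1, 3, 32, 32} with
  // output_channel = 8, kernel = {3, 3}, stride = {2, 2},
  // pads = {1, 1, 1, 1} and dilation = {1, 1} gives dkernel = 3 per axis and
  // out = (32 + 1 + 1 - 3) / 2 + 1 = 16, so output_dims = {1, 8, 16, 16}.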
  // Same as InferOutputSize, but producing 64-bit output dimensions.
  static void InferOutputSize64(
      const at::IntList& input_dims,
      const int output_channel,
      const StorageOrder order,
      const bool global_pooling,
      const LegacyPadding legacy_pad,
      const std::vector<int>& dilation,
      const std::vector<int>& stride,
      std::vector<int>* kernel,
      std::vector<int>* pads,
      std::vector<int64_t>* output_dims) {
    CAFFE_ENFORCE_NE(order, StorageOrder::UNKNOWN);
    const int ndim = input_dims.size() - 2;
    output_dims->resize(ndim + 2);
    output_dims->front() = input_dims.front();
    if (order == StorageOrder::NCHW) {
      output_dims->at(1) = output_channel;
    } else {
      output_dims->back() = output_channel;
    }
    const int offset = order == StorageOrder::NCHW ? 2 : 1;
    if (global_pooling) {
      std::copy_n(input_dims.cbegin() + offset, ndim, kernel->begin());
      std::fill_n(output_dims->begin() + offset, ndim, 1LL);
    } else {
      for (int i = 0; i < ndim; ++i) {
        ComputeSizeAndPad64(
            input_dims[i + offset],
            stride[i],
            kernel->at(i),
            dilation[i],
            legacy_pad,
            &pads->at(i),
            &pads->at(i + ndim),
            &output_dims->at(i + offset));
      }
    }
  }
  // ComputePads can be used by backward functions to recover the padding
  // values for a given input size.
  void ComputePads(const vector<int>& dims) {
    if (global_pooling_) {
      kernel_ = dims;
    } else if (legacy_pad_ != LegacyPadding::NOTSET) {
      int output_unused;
      for (size_t dim = 0; dim < dims.size(); ++dim) {
        ComputeSizeAndPad(
            dims[dim],
            stride_[dim],
            kernel_[dim],
            dilation_[dim],
            legacy_pad_,
            &pads_[dim],
            &pads_[dims.size() + dim],
            &output_unused);
      }
    }
  }
  bool HasPad() const {
    if (kernel_.size() == 2) {
      return pad_t() > 0 || pad_b() > 0 || pad_l() > 0 || pad_r() > 0;
    }
    return std::any_of(
        pads_.cbegin(), pads_.cend(), [](const int x) { return x > 0; });
  }
  bool HasStride() const {
    if (kernel_.size() == 2) {
      return stride_h() > 1 || stride_w() > 1;
    }
    return std::any_of(
        stride_.cbegin(), stride_.cend(), [](const int x) { return x > 1; });
  }
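  // Example (illustrative): a 2D op with pads_ = {1, 1, 0, 0} reports
  // HasPad() == true, while stride_ = {1, 1} reports HasStride() == false.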
  // Copies data into the device tensor, but only if the tensor does not
  // already hold exactly the same values, to avoid redundant copies.
  void SetDeviceTensor(const std::vector<int>& data, Tensor* tensor) {
    bool reset_tensor_device = false;
    if (tensor->numel() != data.size()) {
      tensor->Resize(data.size());
      reset_tensor_device = true;
    } else {
      const int* tensor_data = tensor->template data<int>();
      for (size_t d_i = 0; d_i < data.size(); ++d_i) {
        if (tensor_data[d_i] != data[d_i]) {
          reset_tensor_device = true;
          break;
        }
      }
    }
    if (reset_tensor_device) {
      context_.template Copy<int, CPUContext, Context>(
          data.size(), data.data(), tensor->template mutable_data<int>());
    }
  }
  // Ensures that the bias multiplier tensor holds `size` ones.
  template <typename T>
  void SetBiasMultiplier(const int size, Tensor* bias_multiplier_) {
    if (bias_multiplier_->numel() != size) {
      // If the bias multiplier is not initialized, reshape it and fill it
      // with all ones.
      bias_multiplier_->Resize(std::vector<int64_t>{size});
      math::Set<T, Context>(
          size,
          static_cast<T>(1),
          bias_multiplier_->template mutable_data<T>(),
          &context_);
    }
  }
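  // Typical use (a sketch, not part of this header): conv ops call
  // SetBiasMultiplier<T>(output_image_size, &bias_multiplier_) and then add
  // the bias with a GEMM of the C_out x 1 bias vector against the
  // 1 x output_image_size vector of ones, accumulating into the output.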
  bool RunOnDevice() override {
    if (!global_pooling_) {
      for (size_t dim = 0; dim < kernel_.size(); ++dim) {
        CAFFE_ENFORCE_GT(kernel_[dim], 0);
      }
    }
    switch (order_) {
      case StorageOrder::NHWC:
        return RunOnDeviceWithOrderNHWC();
      case StorageOrder::NCHW:
        return RunOnDeviceWithOrderNCHW();
      default:
        CAFFE_THROW("Unknown storage order: ", order_);
    }
  }
  // The actual computation, specialized by storage order. Subclasses override
  // whichever layouts they support.
  virtual bool RunOnDeviceWithOrderNHWC() {
    CAFFE_NOT_IMPLEMENTED;
  }
  virtual bool RunOnDeviceWithOrderNCHW() {
    CAFFE_NOT_IMPLEMENTED;
  }
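  // A minimal sketch of a derived operator (illustrative; "MyPoolOp" is a
  // hypothetical name, not part of this header):
  //
  //   template <class Context>
  //   class MyPoolOp final : public ConvPoolOpBase<Context> {
  //    public:
  //     USE_CONV_POOL_BASE_FUNCTIONS(Context);
  //     MyPoolOp(const OperatorDef& def, Workspace* ws)
  //         : ConvPoolOpBase<Context>(def, ws) {}
  //     bool RunOnDeviceWithOrderNCHW() override { /* ... */ return true; }
  //     bool RunOnDeviceWithOrderNHWC() override { /* ... */ return true; }
  //   };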
  static struct OpSchema::Cost CostInferenceForConv(
      const OperatorDef& def,
      const vector<TensorShape>& inputs) {
    CAFFE_ENFORCE_GE(inputs.size(), 2, "Conv requires at least 2 inputs");
    struct OpSchema::Cost c;
    const TensorShape X = inputs[0];
    const TensorShape W = inputs[1];
    const TensorShape Y = TensorInferenceForConv(def, inputs)[0];

    ArgumentHelper helper(def);
    const auto order =
        StringToStorageOrder(helper.GetSingleArgument<string>("order", "NCHW"));

    uint64_t N;
    uint64_t Y_t = 1;
    uint64_t Y_h;
    uint64_t Y_w = 1;
    uint64_t kernel_h;
    uint64_t kernel_w = 1;
    uint64_t kernel_t = 1;
    uint64_t in_channels;
    uint64_t out_channels;

    if (X.dims_size() == 0 || W.dims_size() == 0) {
      return c;
    }
    N = X.dims(0);
    if (X.dims_size() == 5) {
      // 3D convolution.
      CAFFE_ENFORCE_EQ(order, StorageOrder::NCHW, "Conv3D only supports NCHW");
      Y_t = Y.dims(2);
      Y_h = Y.dims(3);
      Y_w = Y.dims(4);
      kernel_t = W.dims(2);
      kernel_h = W.dims(3);
      kernel_w = W.dims(4);
      in_channels = W.dims(1);
      out_channels = W.dims(0);
    } else if (X.dims_size() == 4) {
      // 2D convolution.
      CAFFE_ENFORCE_EQ(W.dims_size(), 4, "Conv2D should have 4D filter tensor");
      if (order == StorageOrder::NHWC) {
        Y_h = Y.dims(1);
        Y_w = Y.dims(2);
        kernel_h = W.dims(1);
        kernel_w = W.dims(2);
        in_channels = W.dims(3);
        out_channels = W.dims(0);
      } else {
        Y_h = Y.dims(2);
        Y_w = Y.dims(3);
        kernel_h = W.dims(2);
        kernel_w = W.dims(3);
        in_channels = W.dims(1);
        out_channels = W.dims(0);
      }
    } else {
      // 1D convolution.
      CAFFE_ENFORCE_EQ(W.dims_size(), 3, "Conv1D should have 3D filter tensor");
      if (order == StorageOrder::NHWC) {
        Y_h = Y.dims(1);
        kernel_h = W.dims(1);
        in_channels = W.dims(2);
        out_channels = W.dims(0);
      } else {
        Y_h = Y.dims(2);
        kernel_h = W.dims(2);
        in_channels = W.dims(1);
        out_channels = W.dims(0);
      }
    }

    uint64_t nElemX = nElemFromDim(X);
    uint64_t nElemW = nElemFromDim(W);
    uint64_t nElemBias = inputs.size() > 2 ? nElemFromDim(inputs[2]) : 0;

    // Note: grouped convolution is not accounted for in these numbers.
    c.flops = N * Y_t * Y_h * Y_w * kernel_t * kernel_w * kernel_h *
        in_channels * out_channels * 2;
    c.bytes_read = (nElemX + nElemW + nElemBias) * sizeof(X.data_type());
    c.bytes_written =
        N * out_channels * Y_t * Y_h * Y_w * sizeof(Y.data_type());
    c.params_bytes = out_channels * in_channels * kernel_t * kernel_h *
        kernel_w * sizeof(W.data_type());
    return c;
  }
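  // Worked example (illustrative): a 2D NCHW conv with N = 1, 3 -> 64
  // channels, a 3x3 kernel and 224x224 output gives
  // flops = 1 * 224 * 224 * 3 * 3 * 3 * 64 * 2 = 173,408,256 (~0.17 GFLOPs).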
  static vector<TensorShape> TensorInferenceForSchema(
      const OperatorDef& def,
      const vector<TensorShape>& in,
      int output_channel) {
    ArgumentHelper helper(def);
    CAFFE_ENFORCE_GT(in.size(), 0);
    CAFFE_ENFORCE_GT(in[0].dims_size(), 0);
    vector<int> pads = helper.GetRepeatedArgument<int>("pads");
    vector<int> kernel = helper.GetRepeatedArgument<int>("kernels");
    vector<int> strides = helper.GetRepeatedArgument<int>("strides");
    vector<int> dilations = helper.GetRepeatedArgument<int>("dilations");

    // Resolve the old single-value aliases, mirroring the constructor.
    if (helper.HasArgument("pad")) {
      pads.resize(4, helper.GetSingleArgument<int>("pad", 0));
    } else if (
        helper.HasArgument("pad_t") && helper.HasArgument("pad_l") &&
        helper.HasArgument("pad_b") && helper.HasArgument("pad_r")) {
      pads.push_back(helper.GetSingleArgument<int>("pad_t", 0));
      pads.push_back(helper.GetSingleArgument<int>("pad_l", 0));
      pads.push_back(helper.GetSingleArgument<int>("pad_b", 0));
      pads.push_back(helper.GetSingleArgument<int>("pad_r", 0));
    }

    if (helper.HasArgument("kernel")) {
      kernel.resize(2, helper.GetSingleArgument<int>("kernel", 1));
    } else if (
        helper.HasArgument("kernel_h") && helper.HasArgument("kernel_w")) {
      kernel.push_back(helper.GetSingleArgument<int>("kernel_h", 1));
      kernel.push_back(helper.GetSingleArgument<int>("kernel_w", 1));
    }

    if (helper.HasArgument("stride")) {
      strides.resize(2, helper.GetSingleArgument<int>("stride", 1));
    } else if (
        helper.HasArgument("stride_h") && helper.HasArgument("stride_w")) {
      strides.push_back(helper.GetSingleArgument<int>("stride_h", 1));
      strides.push_back(helper.GetSingleArgument<int>("stride_w", 1));
    }

    // The single-value dilation arguments populate `dilations`, not
    // `strides`.
    if (helper.HasArgument("dilation")) {
      dilations.resize(2, helper.GetSingleArgument<int>("dilation", 1));
    } else if (
        helper.HasArgument("dilation_h") && helper.HasArgument("dilation_w")) {
      dilations.push_back(helper.GetSingleArgument<int>("dilation_h", 1));
      dilations.push_back(helper.GetSingleArgument<int>("dilation_w", 1));
    }

    auto check_and_set_default_value =
        [](vector<int>& vec, int size, int value) {
          if (vec.size() == 0) {
            vec.resize(size, value);
          }
        };

    check_and_set_default_value(kernel, 2, 1);
    check_and_set_default_value(strides, kernel.size(), 1);
    check_and_set_default_value(pads, kernel.size() * 2, 0);
    check_and_set_default_value(dilations, kernel.size(), 1);

    std::vector<int> output_dims;
    InferOutputSize(
        GetDimsVector(in[0]),
        output_channel,
        StringToStorageOrder(helper.GetSingleArgument<string>("order", "NCHW")),
        helper.GetSingleArgument<int>("global_pooling", 0),
        static_cast<LegacyPadding>(
            helper.GetSingleArgument<int>("legacy_pad", LegacyPadding::NOTSET)),
        dilations,
        strides,
        &kernel,
        &pads,
        &output_dims);
    return {CreateTensorShape(output_dims, TensorProto::FLOAT)};
  }
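  // The shape and cost helpers here are intended to be wired into operator
  // schemas. A registration might look like the following sketch
  // (illustrative; the exact input/output counts belong to each op's own
  // definition):
  //
  //   OPERATOR_SCHEMA(Conv)
  //       .NumInputs(2, 3)
  //       .NumOutputs(1)
  //       .TensorInferenceFunction(
  //           ConvPoolOpBase<CPUContext>::TensorInferenceForConv)
  //       .CostInferenceFunction(OpSchema::CostInferenceFunctionType(
  //           ConvPoolOpBase<CPUContext>::CostInferenceForConv));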
  static std::vector<TensorShape> TensorInferenceForConv(
      const OperatorDef& def,
      const std::vector<TensorShape>& in) {
    if (in[0].unknown_shape()) {
      std::vector<TensorShape> out(1);
      out[0].set_unknown_shape(true);
      return out;
    }
    return TensorInferenceForSchema(def, in, in[1].dims(0));
  }
  static std::vector<TensorShape> TensorInferenceForPool(
      const OperatorDef& def,
      const std::vector<TensorShape>& in) {
    if (in[0].unknown_shape()) {
      std::vector<TensorShape> out(1);
      out[0].set_unknown_shape(true);
      return out;
    }
    ArgumentHelper helper(def);
    auto order =
        StringToStorageOrder(helper.GetSingleArgument<string>("order", "NCHW"));
    int num_channels =
        (order == StorageOrder::NCHW ? in[0].dims(1) : in[0].dims(3));
    return TensorInferenceForSchema(def, in, num_channels);
  }
  static std::vector<TensorShape> TensorInferenceForLC(
      const OperatorDef& def,
      const std::vector<TensorShape>& in) {
    if (in[0].unknown_shape()) {
      std::vector<TensorShape> out(1);
      out[0].set_unknown_shape(true);
      return out;
    }
    const int img_ndim = in[0].dims_size() - 2;
    return TensorInferenceForSchema(def, in, in[1].dims(img_ndim));
  }
  virtual ~ConvPoolOpBase() {}

 protected:
  LegacyPadding legacy_pad_;
  bool global_pooling_;
  vector<int> kernel_;
  vector<int> dilation_;
  vector<int> stride_;
  vector<int> pads_;

  bool float16_compute_;

  int group_;
  StorageOrder order_;
  bool shared_buffer_;
  Workspace* ws_;
  static inline void ComputeSizeAndPad(
      const int in_size,
      const int stride,
      const int kernel,
      const int dilation,
      LegacyPadding legacy_pad,
      int* pad_head,
      int* pad_tail,
      int* out_size) {
    const int dkernel = dilation * (kernel - 1) + 1;
    switch (legacy_pad) {
      case LegacyPadding::NOTSET:
        // Use the explicit head/tail padding values after verifying that the
        // padded input still covers the (dilated) kernel.
        CAFFE_ENFORCE_GE(in_size + *pad_head + *pad_tail, dkernel);
        *out_size = static_cast<int>(
            static_cast<float>(in_size + *pad_head + *pad_tail - dkernel) /
                stride +
            1);
        break;
      case LegacyPadding::VALID:
        *pad_head = 0;
        *pad_tail = 0;
        *out_size = (in_size - dkernel) / stride + 1;
        break;
      case LegacyPadding::SAME: {
        CAFFE_ENFORCE(
            1 == dilation, "Dilation not supported for legacy padding.");
        int legacy_target_size = (in_size + stride - 1) / stride;
        int pad_needed = (legacy_target_size - 1) * stride + kernel - in_size;
        if (CAFFE2_PAD_HEAD_MORE) {
          *pad_head = (pad_needed + 1) / 2;
        } else {
          *pad_head = pad_needed / 2;
        }
        *pad_tail = pad_needed - *pad_head;
        *out_size = (in_size + pad_needed - dkernel) / stride + 1;
        break;
      }
      case LegacyPadding::CAFFE_LEGACY_POOLING: {
        // Adapts Caffe's pooling padding: only pad_head is used, and pad_tail
        // is computed afterwards to match Caffe's behavior.
        CAFFE_ENFORCE_GE(*pad_head, 0);
        // Caffe rounds the output size UP, while Caffe2 rounds DOWN.
        *out_size = std::ceil(
            static_cast<float>(in_size + *pad_head * 2 - kernel) / stride + 1);
        // With padding, Caffe also ensures that the last pooling window
        // starts strictly inside the image (instead of in the padding);
        // otherwise it clips the last window.
        if (*pad_head > 0 && (*out_size - 1) * stride >= in_size + *pad_head) {
          --*out_size;
        }
        int standard_out_size = static_cast<int>(
            static_cast<float>(in_size + *pad_head * 2 - kernel) / stride + 1);
        CAFFE_ENFORCE_GE(
            *out_size,
            standard_out_size,
            "This should never happen. If this happens, double check the logic "
            "above.");
        if (*out_size > standard_out_size) {
          LOG(WARNING)
              << "You are hitting a case where Caffe's legacy padding calculation "
                 "is hit. This leads to inefficient and sometimes incorrect "
                 "results. We are keeping this behavior for backward compatibility"
                 ", but you are strongly recommended to move away from it.";
        }
        *pad_tail = *pad_head + stride * (*out_size - standard_out_size);
        break;
      }
    }
  }
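  // Worked example (illustrative): in_size = 10, stride = 3, kernel = 3 under
  // SAME gives legacy_target_size = (10 + 3 - 1) / 3 = 4 and pad_needed =
  // (4 - 1) * 3 + 3 - 10 = 2, so pad_head = pad_tail = 1 and
  // *out_size = (10 + 2 - 3) / 3 + 1 = 4.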
  // Identical to ComputeSizeAndPad, but writing a 64-bit output size.
  static inline void ComputeSizeAndPad64(
      const int in_size,
      const int stride,
      const int kernel,
      const int dilation,
      LegacyPadding legacy_pad,
      int* pad_head,
      int* pad_tail,
      int64_t* out_size) {
    const int dkernel = dilation * (kernel - 1) + 1;
    switch (legacy_pad) {
      case LegacyPadding::NOTSET:
        CAFFE_ENFORCE_GE(in_size + *pad_head + *pad_tail, dkernel);
        *out_size = static_cast<int>(
            static_cast<float>(in_size + *pad_head + *pad_tail - dkernel) /
                stride +
            1);
        break;
      case LegacyPadding::VALID:
        *pad_head = 0;
        *pad_tail = 0;
        *out_size = (in_size - dkernel) / stride + 1;
        break;
      case LegacyPadding::SAME: {
        CAFFE_ENFORCE(
            1 == dilation, "Dilation not supported for legacy padding.");
        int legacy_target_size = (in_size + stride - 1) / stride;
        int pad_needed = (legacy_target_size - 1) * stride + kernel - in_size;
        if (CAFFE2_PAD_HEAD_MORE) {
          *pad_head = (pad_needed + 1) / 2;
        } else {
          *pad_head = pad_needed / 2;
        }
        *pad_tail = pad_needed - *pad_head;
        *out_size = (in_size + pad_needed - dkernel) / stride + 1;
        break;
      }
      case LegacyPadding::CAFFE_LEGACY_POOLING: {
        CAFFE_ENFORCE_GE(*pad_head, 0);
        // Caffe rounds the output size UP, while Caffe2 rounds DOWN.
        *out_size = std::ceil(
            static_cast<float>(in_size + *pad_head * 2 - kernel) / stride + 1);
        if (*pad_head > 0 && (*out_size - 1) * stride >= in_size + *pad_head) {
          --*out_size;
        }
        int standard_out_size = static_cast<int>(
            static_cast<float>(in_size + *pad_head * 2 - kernel) / stride + 1);
        CAFFE_ENFORCE_GE(
            *out_size,
            standard_out_size,
            "This should never happen. If this happens, double check the logic "
            "above.");
        if (*out_size > standard_out_size) {
          LOG(WARNING)
              << "You are hitting a case where Caffe's legacy padding calculation "
                 "is hit. This leads to inefficient and sometimes incorrect "
                 "results. We are keeping this behavior for backward compatibility"
                 ", but you are strongly recommended to move away from it.";
        }
        *pad_tail = *pad_head + stride * (*out_size - standard_out_size);
        break;
      }
    }
  }
  // Accessors for the 2D conv/pool case; pads_ is laid out as
  // {top, left, bottom, right}.
  inline int pad_t() const {
    return pads_[0];
  }
  inline int pad_l() const {
    return pads_[1];
  }
  inline int pad_b() const {
    return pads_[2];
  }
  inline int pad_r() const {
    return pads_[3];
  }
  inline int kernel_h() const {
    return kernel_[0];
  }
  inline int kernel_w() const {
    return kernel_[1];
  }
  inline int stride_h() const {
    return stride_[0];
  }
  inline int stride_w() const {
    return stride_[1];
  }
  inline int dilation_h() const {
    return dilation_[0];
  }
  inline int dilation_w() const {
    return dilation_[1];
  }
 private:
  inline void AllocateAndCopy(const vector<int>& vec, Tensor& tensor) {
    tensor.Resize(vec.size());
    context_.template CopyFromCPU<int>(
        vec.size(), vec.data(), tensor.template mutable_data<int>());
  }
}; // class ConvPoolOpBase

#define USE_CONV_POOL_BASE_FUNCTIONS(Context)     \
  USE_OPERATOR_FUNCTIONS(Context);                \
  using ConvPoolOpBase<Context>::pads_;           \
  using ConvPoolOpBase<Context>::pad_t;           \
  using ConvPoolOpBase<Context>::pad_l;           \
  using ConvPoolOpBase<Context>::pad_b;           \
  using ConvPoolOpBase<Context>::pad_r;           \
  using ConvPoolOpBase<Context>::legacy_pad_;     \
  using ConvPoolOpBase<Context>::global_pooling_; \
  using ConvPoolOpBase<Context>::kernel_;         \
  using ConvPoolOpBase<Context>::kernel_h;        \
  using ConvPoolOpBase<Context>::kernel_w;        \
  using ConvPoolOpBase<Context>::dilation_;       \
  using ConvPoolOpBase<Context>::dilation_h;      \
  using ConvPoolOpBase<Context>::dilation_w;      \
  using ConvPoolOpBase<Context>::stride_;         \
  using ConvPoolOpBase<Context>::stride_h;        \
  using ConvPoolOpBase<Context>::stride_w;        \
  using ConvPoolOpBase<Context>::group_;          \
  using ConvPoolOpBase<Context>::order_;          \
  using ConvPoolOpBase<Context>::shared_buffer_;  \
  using ConvPoolOpBase<Context>::ws_

} // namespace caffe2

#endif // CAFFE2_OPERATORS_CONV_POOL_OP_BASE_H_