1 #include "caffe2/operators/quantized/int8_max_pool_op.h" 5 REGISTER_CPU_OPERATOR(Int8MaxPool, int8::Int8MaxPoolOp<int8::Activation::NONE>);
8 int8::Int8MaxPoolOp<int8::Activation::RELU>);
const char kMaxPoolDoc_int8[] = R"DOC(
consumes an input blob X and applies max pooling across the blob according to
the kernel sizes, stride sizes, and pad lengths defined by the ConvPoolOpBase
operator. Max pooling consists of taking the maximum value of a subset of the
input tensor according to the kernel size and downsampling the data into the
output blob Y for further processing.
)DOC";
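// Illustrative sketch (editor-added, not part of the operator implementation):
// because the affine dequantization map real = scale * (q - zero_point) is
// monotonically increasing in the quantized code q (scale > 0), max pooling
// can be applied directly to the uint8 codes in each window, and the pooled
// code dequantizes to the same value as pooling the dequantized inputs would,
// provided the output reuses the input's quantization parameters. The helper
// below is hypothetical and simplified (single channel, valid padding only).
//
//   #include <algorithm>
//   #include <cstdint>
//   #include <vector>
//
//   std::vector<uint8_t> MaxPool2DUint8(
//       const std::vector<uint8_t>& x, int H, int W,
//       int kernel_h, int kernel_w, int stride_h, int stride_w) {
//     const int out_h = (H - kernel_h) / stride_h + 1;
//     const int out_w = (W - kernel_w) / stride_w + 1;
//     std::vector<uint8_t> y(out_h * out_w, 0);
//     for (int oh = 0; oh < out_h; ++oh) {
//       for (int ow = 0; ow < out_w; ++ow) {
//         uint8_t m = 0;  // valid lower bound for uint8 codes
//         for (int kh = 0; kh < kernel_h; ++kh) {
//           for (int kw = 0; kw < kernel_w; ++kw) {
//             const int ih = oh * stride_h + kh;
//             const int iw = ow * stride_w + kw;
//             m = std::max(m, x[ih * W + iw]);
//           }
//         }
//         y[oh * out_w + ow] = m;
//       }
//     }
//     return y;
//   }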
std::function<void(OpSchema&)> MaxPoolDocGenerator(
    const char* dim,
    bool relu_fused = false) {
  return [=](OpSchema& schema) {
    string doc = "MaxPool{dim} {pool_doc}";
    c10::ReplaceAll(doc, "{dim}", dim);
    c10::ReplaceAll(doc, "{pool_doc}", kMaxPoolDoc_int8);
29 "Input data tensor from the previous operator; dimensions depend on " 30 "whether the NCHW or NHWC operators are being used. For example, in " 31 "the former, the input has size (N x C x H x W), where N is the batch " 32 "size, C is the number of channels, and H and W are the height and the " 33 "width of the data. The corresponding permutation of dimensions is " 34 "used in the latter case.");
    schema.Output(
        0,
        "Y",
        relu_fused
            ? "Output data tensor from max pooling across the input "
              "tensor. Dimensions will vary based on various kernel, stride, "
              "and pad sizes. Output will go through a rectified linear "
              "function, where y = max(0, x)."
            : "Output data tensor from max pooling across the input "
              "tensor. Dimensions will vary based on various kernel, stride, "
              "and pad sizes.");
  };
}
OPERATOR_SCHEMA(Int8MaxPool)
    .NumInputs(1)
    .NumOutputs(1)
    .Arg("Y_scale", "Output tensor quantization scale")
    .Arg("Y_zero_point", "Output tensor quantization offset")
    .TensorInferenceFunction(ConvPoolOpBase<CPUContext>::TensorInferenceForPool)
    .FillUsing(MaxPoolDocGenerator(""));
OPERATOR_SCHEMA(Int8MaxPoolRelu)
    .NumInputs(1)
    .NumOutputs(1)
    .Arg("Y_scale", "Output tensor quantization scale")
    .Arg("Y_zero_point", "Output tensor quantization offset")
    .TensorInferenceFunction(ConvPoolOpBase<CPUContext>::TensorInferenceForPool)
    .FillUsing(MaxPoolDocGenerator("", /*relu_fused=*/true));

} // namespace caffe2