Caffe2 - C++ API
A deep learning, cross-platform ML framework
int8_fc_op.cc
#include "caffe2/operators/quantized/int8_fc_op.h"

#include <functional>

#include "caffe2/operators/fc_inference.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(Int8FC, int8::Int8FCOp);

using namespace std::placeholders;
OPERATOR_SCHEMA(Int8FC)
    .NumInputs(3)
    .NumOutputs(1)
    .TensorInferenceFunction(std::bind(FCShapeInference, _1, _2, false))
    .CostInferenceFunction(std::bind(CostInferenceForFC, _1, _2, false))
    .SetDoc(R"DOC(
Computes the result of passing an input vector X into a fully
connected layer with 2D weight matrix W and 1D bias vector b. That is,
the layer computes Y = X * W^T + b, where X has size (M x K),
W has size (N x K), b has size (N), and Y has size (M x N),
where M is often the batch size.

NOTE: X does not need to explicitly be a 2D matrix; rather, it will be
coerced into one. An arbitrary n-dimensional tensor X with dimensions
(a_0, a_1, ..., a_{n-1}) is coerced into a 2D matrix of size
(a_0 x a_1 * ... * a_{n-1}); only this coercion is supported.
Lastly, even though b is a 1D vector of size N, it is copied/resized to
be size (M x N) implicitly and added to each vector in the batch.
Each of these dimensions must be matched correctly, or else the operator
will throw errors.
)DOC")
33  .Arg("Y_scale", "Output tensor quantization scale")
34  .Arg("Y_zero_point", "Output tensor quantization offset")
35  .Input(
36  0,
37  "X",
38  "input tensor that's coerced into a 2D matrix of size (MxK) "
39  "as described above")
40  .Input(
41  1,
42  "W",
43  "A tensor that is coerced into a 2D blob of size (KxN) "
44  "containing fully connected weight matrix")
45  .Input(2, "b", "1D blob containing bias vector")
46  .Output(0, "Y", "2D output tensor");

} // namespace caffe2
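
For reference, the sketch below illustrates what an affine-quantized fully connected computation of the form described in the schema does: accumulate X * W^T + b in 32-bit integers and requantize the result with the Y_scale and Y_zero_point arguments. This is a minimal illustration only, not Caffe2's Int8FC kernel; the uint8 storage, per-tensor scale/zero-point parameters, the int32 bias convention, and the Int8FCReference name are assumptions made for this example.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Reference (non-optimized) affine-quantized fully connected layer:
// Y = X * W^T + b, requantized with Y_scale and Y_zero_point.
// Shapes follow the schema above: X is (M x K), W is (N x K),
// b has N elements, Y is (M x N).
std::vector<uint8_t> Int8FCReference(
    const std::vector<uint8_t>& X, float X_scale, int32_t X_zero_point,
    const std::vector<uint8_t>& W, float W_scale, int32_t W_zero_point,
    const std::vector<int32_t>& b, // bias assumed quantized with scale X_scale * W_scale
    int M, int K, int N,
    float Y_scale, int32_t Y_zero_point) {
  std::vector<uint8_t> Y(M * N);
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      // Accumulate in int32 with zero points subtracted, following the
      // affine quantization convention: real_value = scale * (q - zero_point).
      int32_t acc = b[n];
      for (int k = 0; k < K; ++k) {
        const int32_t x = static_cast<int32_t>(X[m * K + k]) - X_zero_point;
        const int32_t w = static_cast<int32_t>(W[n * K + k]) - W_zero_point;
        acc += x * w;
      }
      // The accumulator carries scale X_scale * W_scale; rescale into the
      // output's quantized domain and clamp to the uint8 range.
      const float real = acc * X_scale * W_scale;
      const int32_t q =
          static_cast<int32_t>(std::lround(real / Y_scale)) + Y_zero_point;
      Y[m * N + n] =
          static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
    }
  }
  return Y;
}

The final rescale step is where the schema's Y_scale and Y_zero_point arguments come into play: they define the quantized domain of the output tensor Y, independently of the input and weight quantization parameters.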