Caffe2 - C++ API
A deep learning, cross-platform ML framework
int8_dequantize_op.h
#ifndef CAFFE2_OPERATORS_INT8_DEQUANTIZE_OP_H_
#define CAFFE2_OPERATORS_INT8_DEQUANTIZE_OP_H_

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/operators/quantized/int8_utils.h"

namespace caffe2 {

namespace int8 {

namespace {

// Affine dequantization: maps each quantized uint8 value back to a float,
// out[i] = (in[i] - X_offset) * X_scale.
void Int8Dequantize(
    const uint8_t* in,
    float* out,
    const int64_t N,
    const float X_scale,
    const int32_t X_offset) {
  for (int64_t i = 0; i < N; ++i) {
    out[i] = (static_cast<int32_t>(in[i]) - X_offset) * X_scale;
  }
}

} // namespace

class Int8DequantizeOp final : public Operator<CPUContext> {
 public:
  using Operator<CPUContext>::Operator;

  bool RunOnDevice() override {
    const auto& X = Inputs()[0]->template Get<Int8TensorCPU>();

    // Allocate a float output with the same shape as the quantized input.
    auto* Y = Output(0, X.t.sizes(), at::dtype<float>());
    int32_t X_offset = X.zero_point;
    auto X_scale = X.scale;
    Int8Dequantize(
        X.t.data<uint8_t>(),
        Y->mutable_data<float>(),
        X.t.numel(),
        X_scale,
        X_offset);
    return true;
  }
};

} // namespace int8

} // namespace caffe2

#endif // CAFFE2_OPERATORS_INT8_DEQUANTIZE_OP_H_
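
The Int8Dequantize helper applies the usual affine dequantization formula, out[i] = (in[i] - X_offset) * X_scale, where X_offset is the zero point of the quantized tensor. The following is a minimal standalone sketch of the same arithmetic, independent of Caffe2, using made-up scale and zero-point values:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical quantization parameters: scale = 0.05, zero point = 128.
  const float scale = 0.05f;
  const int32_t zero_point = 128;

  const std::vector<uint8_t> quantized = {128, 130, 108, 255, 0};
  std::vector<float> dequantized(quantized.size());

  // Same arithmetic as Int8Dequantize: (q - zero_point) * scale.
  for (size_t i = 0; i < quantized.size(); ++i) {
    dequantized[i] = (static_cast<int32_t>(quantized[i]) - zero_point) * scale;
  }

  // Prints 0, 0.1, -1, 6.35, -6.4.
  for (float v : dequantized) {
    std::printf("%g\n", v);
  }
  return 0;
}

With scale 0.05 and zero point 128, the quantized value 128 maps back to 0.0 and 130 to 0.1, which is exactly what Int8DequantizeOp writes into its float output tensor.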
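
This header only defines the operator class; in Caffe2, registration of a CPU operator under a string name normally lives in a companion .cc file. Below is a hypothetical usage sketch, assuming the operator is registered as "Int8Dequantize"; the blob names and quantization parameters are illustrative only, while the Int8TensorCPU fields being filled (t, scale, zero_point) are the ones RunOnDevice reads above.

#include "caffe2/core/logging.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/core/workspace.h"
#include "caffe2/utils/proto_utils.h"

void RunDequantizeExample() {
  caffe2::Workspace ws;

  // Fill an Int8TensorCPU input blob with quantized data and its
  // quantization parameters (illustrative values).
  auto* xq =
      ws.CreateBlob("X_int8")->GetMutable<caffe2::int8::Int8TensorCPU>();
  xq->scale = 0.05f;
  xq->zero_point = 128;
  xq->t.Resize(3);
  uint8_t* qdata = xq->t.mutable_data<uint8_t>();
  qdata[0] = 128; // -> 0.0
  qdata[1] = 130; // -> 0.1
  qdata[2] = 108; // -> -1.0

  // Build a single operator invocation and run it against the workspace.
  const caffe2::OperatorDef def =
      caffe2::CreateOperatorDef("Int8Dequantize", "", {"X_int8"}, {"Y"});
  CAFFE_ENFORCE(ws.RunOperatorOnce(def));

  // The blob "Y" now holds the dequantized float tensor.
}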