// Caffe2 - C++ API
// A deep learning, cross platform ML framework
// int8_test_utils.h
1 #ifndef CAFFE2_INT8_TEST_UTILS_H_
2 #define CAFFE2_INT8_TEST_UTILS_H_
3 
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/tensor_int8.h"

#include <algorithm>
#include <array>
#include <cmath>
#include <limits>
#include <memory>
#include <random>
#include <vector>

#include "gtest/gtest.h"
15 
16 namespace caffe2 {
17 
18 // for quantized Add, the error shouldn't exceed 2 * scale
// For quantized Add the result may be off by up to two quantization steps,
// so callers should tolerate an absolute error of 2 * scale.
inline float addErrorTolerance(float scale) {
  return scale * 2.0f;
}
22 
23 inline std::unique_ptr<int8::Int8TensorCPU> q(
24  const std::vector<int64_t>& dims) {
25  auto r = caffe2::make_unique<int8::Int8TensorCPU>();
26  r->scale = 0.01;
27  r->zero_point = static_cast<int32_t>(std::numeric_limits<uint8_t>::max()) / 2;
28  ReinitializeTensor(&r->t, dims, at::dtype<uint8_t>().device(CPU));
29  std::random_device rd;
30  std::mt19937 gen(rd());
31  std::uniform_int_distribution<uint8_t> dis;
32  for (auto i = 0; i < r->t.numel(); ++i) {
33  r->t.mutable_data<uint8_t>()[i] = dis(gen);
34  }
35  return r;
36 }
37 
38 inline std::unique_ptr<int8::Int8TensorCPU> biasq(
39  const std::vector<int64_t>& dims,
40  double scale) {
41  auto r = caffe2::make_unique<int8::Int8TensorCPU>();
42  r->scale = scale;
43  r->zero_point = 0;
44  r->t.Resize(dims);
45  std::random_device rd;
46  std::mt19937 gen(rd());
47  std::uniform_real_distribution<float> dis(-1, 1);
48  for (auto i = 0; i < r->t.numel(); ++i) {
49  r->t.mutable_data<int32_t>()[i] =
50  static_cast<int32_t>(dis(gen) / scale + r->zero_point);
51  }
52  return r;
53 }
54 
55 inline std::unique_ptr<TensorCPU> dq(const int8::Int8TensorCPU& XQ) {
56  auto r = caffe2::make_unique<Tensor>(CPU);
57  r->Resize(XQ.t.sizes());
58  for (auto i = 0; i < r->numel(); ++i) {
59  r->mutable_data<float>()[i] =
60  (static_cast<int32_t>(XQ.t.data<uint8_t>()[i]) - XQ.zero_point) *
61  XQ.scale;
62  }
63  return r;
64 }
65 
66 inline std::unique_ptr<TensorCPU> biasdq(const int8::Int8TensorCPU& XQ) {
67  auto r = caffe2::make_unique<Tensor>(CPU);
68  r->Resize(XQ.t.sizes());
69  for (auto i = 0; i < r->numel(); ++i) {
70  r->mutable_data<float>()[i] =
71  (XQ.t.data<int32_t>()[i] - XQ.zero_point) * XQ.scale;
72  }
73  return r;
74 }
75 
// Asserts that two float tensors have equal shapes and exactly equal
// elements. Wrapped in do { } while (0) WITHOUT a trailing semicolon so
// the macro expands to a single statement and stays safe inside
// unbraced if/else chains (the original trailing ';' broke that).
#define EXPECT_TENSOR_EQ(_YA, _YE)                                     \
  do {                                                                 \
    EXPECT_TRUE((_YA).sizes() == (_YE).sizes());                       \
    for (auto i = 0; i < (_YA).numel(); ++i) {                         \
      EXPECT_FLOAT_EQ((_YA).data<float>()[i], (_YE).data<float>()[i]); \
    }                                                                  \
  } while (0)
83 
// Asserts that two float tensors have equal shapes and element-wise agree
// within the absolute tolerance _tol. Wrapped in do { } while (0) WITHOUT
// a trailing semicolon so the macro expands to a single statement and
// stays safe inside unbraced if/else chains (the original trailing ';'
// broke that).
#define EXPECT_TENSOR_APPROX_EQ(_YA, _YE, _tol)                          \
  do {                                                                   \
    EXPECT_TRUE((_YA).sizes() == (_YE).sizes());                         \
    for (auto i = 0; i < (_YA).numel(); ++i) {                           \
      EXPECT_NEAR((_YA).data<float>()[i], (_YE).data<float>()[i], (_tol)); \
    }                                                                    \
  } while (0)
91 
92 inline void int8Copy(int8::Int8TensorCPU* dst, const int8::Int8TensorCPU& src) {
93  dst->zero_point = src.zero_point;
94  dst->scale = src.scale;
95  dst->t.CopyFrom(src.t);
96 }
97 
98 inline void add_input(
99  const vector<int64_t>& shape,
100  const vector<float>& values,
101  const string& name,
102  Workspace* ws) {
103  // auto* t = ws->CreateBlob(name)->GetMutable<TensorCPU>();
104  auto t = caffe2::make_unique<Tensor>(CPU);
105  t->Resize(shape);
106  std::copy(values.begin(), values.end(), t->mutable_data<float>());
107  BlobGetMutableTensor(ws->CreateBlob(name), CPU)->CopyFrom(*t);
108 }
109 
// Returns a uniformly distributed integer in the closed range [a, b].
// The engine is seeded once from std::random_device and shared across
// calls (NOTE(review): not synchronized — confirm single-threaded use).
inline int randomInt(int a, int b) {
  static std::mt19937 engine{std::random_device{}()};
  std::uniform_int_distribution<int> dist(a, b);
  return dist(engine);
}
115 
116 } // namespace caffe2
117 
118 #endif // CAFFE2_INT8_TEST_UTILS_H_
// NOTE(review): documentation-generator residue below, preserved as
// comments so the header stays compilable:
// void ReinitializeTensor(Tensor* tensor, at::IntArrayRef dims,
//                         at::TensorOptions options)
//   Reinitialize a Tensor to the given dims and options if necessary;
//   note that this does nothing when the tensor already matches.
//   Definition: tensor.cc:127
// A global dictionary that holds information about what Caffe2 modules
//   have been loaded in the current runtime. Definition: blob.h:13