Caffe2 - C++ API
A deep learning, cross platform ML framework
kl_minimization_example.cc
#include "caffe2/core/logging.h"
#include "kl_minimization.h"

#include <cassert>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
8 using namespace std;
9 using namespace dnnlowp;
10 
11 int main(int argc, const char* argv[]) {
12  if (argc < 3) {
13  cerr << "Usage: " << argv[0] << " in_file out_file [preserve_sparsity]"
14  << endl;
15  return -1;
16  }
17 
18  ifstream in(argv[1]);
19  ofstream out(argv[2]);
20  bool preserve_sparsity = argc >= 4 ? atoi(argv[3]) : false;
21 
22  string line;
23  while (getline(in, line)) {
24  istringstream ist(line);
25 
26  int op_index, output_index;
27  string op_type, tensor_name;
28  float min, max;
29  int nbins;
30 
31  ist >> op_index >> op_type >> output_index >> tensor_name >> min >> max >>
32  nbins;
33 
34  vector<uint64_t> bins;
35  for (int i = 0; i < nbins; ++i) {
36  uint64_t cnt;
37  ist >> cnt;
38  bins.push_back(cnt);
39  }
40  assert(bins.size() == nbins);
41 
42  Histogram hist = Histogram(min, max, bins);
43  TensorQuantizationParams qparams =
44  KLDivergenceMinimization().ChooseQuantizationParams(
45  hist, preserve_sparsity);
46 
47  out << op_index << " " << op_type << " " << output_index << " "
48  << tensor_name << " " << qparams.Min() << " " << qparams.Max() << endl;
49  }
50 
51  return 0;
52 }
A quantization scheme that minimizes Kullback-Leibler divergence.
Histogram convention: bin_width = (max - min) / nbins. With zero-based indexing, the i-th bin covers the half-open interval [i*bin_width, (i+1)*bin_width), except that the last bin (index nbins - 1) covers the closed interval [(nbins-1)*bin_width, nbins*bin_width] so that max itself is included.