Caffe2 - C++ API
A deep learning, cross-platform ML framework
listwise_l2r_op.cc
#include "caffe2/operators/listwise_l2r_op.h"

namespace caffe2 {

namespace {

// Returns the indices that would sort an array. For example:
// data = [3, 1, 2, 4]
// return = [1, 2, 0, 3] (reverse = false)
// return = [3, 0, 2, 1] (reverse = true)
template <typename TDATA, typename TIDX>
void arg_sort(const TDATA* data, TIDX* idx, const size_t N, bool reverse) {
  std::function<bool(size_t, size_t)> cmp_lambda;
  if (reverse) {
    cmp_lambda = [data](size_t i, size_t j) { return data[i] > data[j]; };
  } else {
    cmp_lambda = [data](size_t i, size_t j) { return data[i] < data[j]; };
  }
  size_t n = 0;
  std::generate(idx, idx + N, [&n] { return n++; });
  std::sort(idx, idx + N, cmp_lambda);
}

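// PAIRWISE_DIFF(v, N) builds an N x N array with entry (i, j) = v_i - v_j
// via two rank-1 products; the CWISE_* helpers below are coefficient-wise
// Eigen expressions.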
#define PAIRWISE_DIFF(vec, N)                               \
  ((vec.matrix() * Eigen::MatrixXf::Ones(1, N) -            \
    Eigen::MatrixXf::Ones(N, 1) * vec.matrix().transpose()) \
       .array())

#define CWISE_SIGM(vec) (1. / (1. + (-(vec)).exp()))

#define CWISE_GT(vec1, vec2) ((vec1) > (vec2))

#define CWISE_LT(vec1, vec2) ((vec1) < (vec2))

#define CWISE_SIGN(vec) (CWISE_GT((vec), 0).cast<float>() * 2. - 1.)

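// Numerically stable log-sigmoid: for x > huge, log(sigm(x)) ~ 0; for
// x < -huge, log(sigm(x)) ~ x; otherwise evaluate it directly.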
#define CWISE_LOG_SIGM(vec, huge) \
  (CWISE_GT((vec), (huge))        \
       .select(                   \
           0, CWISE_LT((vec), -(huge)).select(vec, CWISE_SIGM((vec)).log())))

} // namespace

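// Grows (never shrinks) a cached vector of NDCG position discounts
// inv_log_i_[i] = 1 / log2(i + 2). Doubling the capacity keeps repeated
// calls with increasing sizes from reallocating every time.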
template <>
void LambdaRankNdcgOp<float, CPUContext>::ResizeInvLogITensor(int size) {
  int old_size = inv_log_i_.size();
  int new_size = std::max(old_size, 1);
  while (new_size < size) {
    new_size <<= 1;
  }
  if (new_size != old_size) {
    inv_log_i_.Resize(new_size);
    auto* data = inv_log_i_.template mutable_data<float>();
    EigenVectorArrayMap<float> vec(data, inv_log_i_.size());
    const float log2f_ = std::log(2.f);
    vec = log2f_ *
        (Eigen::ArrayXf::LinSpaced(new_size, 2, 1 + new_size).log().inverse());
  }
  return;
}

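// Scatters the cached discounts into the original item order: after the
// call, discount_[idx[i]] = inv_log_i_[i], i.e. each item receives the
// discount of its sorted position i.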
template <>
void LambdaRankNdcgOp<float, CPUContext>::ComputeDiscounts(int* idx, int N) {
  discount_.Resize(N);
  auto* discount_data = discount_.template mutable_data<float>();
  auto* inv_log_i_data = inv_log_i_.template mutable_data<float>();
  for (int i = 0; i < N; i++) {
    discount_data[idx[i]] = inv_log_i_data[i];
  }
  return;
}

template <>
bool LambdaRankNdcgOp<float, CPUContext>::RunOnDevice() {
  auto& y = Input(PRED);
  auto& r = Input(REL);
  auto* loss = Output(LOSS);
  auto* dy = Output(DPRED);

  const auto* y_data = y.template data<float>();
  const auto* r_data = r.template data<float>();
  ConstEigenVectorArrayMap<float> y_vec(y_data, y.size());
  ConstEigenVectorArrayMap<float> r_vec(r_data, r.size());
  CAFFE_ENFORCE(y.ndim() == 1);
  CAFFE_ENFORCE(y.size() == r.size());

  int N = y.size();
  ideal_idx_.Resize(N);
  rank_idx_.Resize(N);
  auto* rank_idx_data = rank_idx_.template mutable_data<int>();
  auto* ideal_idx_data = ideal_idx_.template mutable_data<int>();
  arg_sort(y_data, rank_idx_data, N, true);
  arg_sort(r_data, ideal_idx_data, N, true);

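  // Exponential NDCG gain: gain_i = exp(r_i * ln 2) = 2^{r_i}.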
  const double log2f_ = std::log(2.f);
  gain_.Resize(N);
  auto* gain_data = gain_.template mutable_data<float>();
  EigenVectorArrayMap<float> gain_vec(gain_data, gain_.size());
  gain_vec = (r_vec * log2f_).exp();

  ResizeInvLogITensor(N);
  ComputeDiscounts(ideal_idx_data, N);
  auto* ideal_discount_data = discount_.template mutable_data<float>();
  EigenVectorArrayMap<float> ideal_discount_vec(
      ideal_discount_data, discount_.size());
  double idcg = (gain_vec * ideal_discount_vec).sum();
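  // idcg is the DCG of the ideal, relevance-sorted ordering; it normalizes
  // both the loss and the gradient below.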

  ComputeDiscounts(rank_idx_data, N);
  auto* discount_data = discount_.template mutable_data<float>();
  EigenVectorArrayMap<float> discount_vec(discount_data, discount_.size());

  lambda_.Resize(N * N);
  auto* lambda_data = lambda_.template mutable_data<float>();
  EigenArrayMap<float> lambda_mat(lambda_data, N, N);
  // computes lambda weight (i, j) = abs(gain_diff * discount_diff)
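  // (|gain_diff * discount_diff| is the |delta DCG| incurred by swapping
  // items i and j, which is exactly the LambdaRank pair weight.)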
  lambda_mat =
      (PAIRWISE_DIFF(discount_vec, N) * PAIRWISE_DIFF(gain_vec, N)).abs();

  loss->Resize(1);
  dy->Resize(N);
  auto* loss_data = loss->template mutable_data<float>();
  auto* dy_data = dy->template mutable_data<float>();
  EigenVectorArrayMap<float> dy_vec(dy_data, dy->size());
  // dy_i = (1 / idcg) * \sum_j lambda_{i, j} * sign(r_i - r_j)
  //            * sigm( -sign(r_i - r_j) * (y_i - y_j) )
  // where the sigmoid factor is the gradient of the pairwise rank loss
  // between items i and j.
  dy_vec = (lambda_mat * CWISE_SIGN(PAIRWISE_DIFF(r_vec, N)) *
            CWISE_SIGM(
                -CWISE_SIGN(PAIRWISE_DIFF(r_vec, N)) * PAIRWISE_DIFF(y_vec, N)))
               .rowwise()
               .sum() /
      idcg;
  // loss = (1 / idcg) * \sum_{i, j} lambda_{i, j} * rank_loss(i, j), where
  // rank_loss is the clipped log-sigmoid of sign(r_i - r_j) * (y_i - y_j).
  *loss_data =
      (lambda_mat *
       CWISE_LOG_SIGM(
           CWISE_SIGN(PAIRWISE_DIFF(r_vec, N)) * PAIRWISE_DIFF(y_vec, N), 100))
          .sum() /
      idcg;
  return true;
}

template <>
bool LambdaRankNdcgGradientOp<float, CPUContext>::RunOnDevice() {
  auto& y = Input(Y);
  auto& dy_cache = Input(DY_CACHE);
  auto& dLoss = Input(DLOSS);
  auto* dy = Output(DY);
  CAFFE_ENFORCE(y.ndim() == 1);
  CAFFE_ENFORCE(dy_cache.ndim() == 1);
  CAFFE_ENFORCE(dy_cache.size() > 0);
  CAFFE_ENFORCE(y.size() == dy_cache.size());
  CAFFE_ENFORCE(dLoss.size() == 1);

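  // The forward op already cached d(loss)/d(y) in DY_CACHE; backprop only
  // scales it by the incoming scalar loss gradient.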
  ConstEigenVectorArrayMap<float> dy_cache_vec(
      dy_cache.template data<float>(), dy_cache.size());
  dy->Resize(dy_cache.size());
  EigenVectorArrayMap<float> dy_vec(
      dy->template mutable_data<float>(), dy->size());
  float multiplier = dLoss.template data<float>()[0];
  dy_vec = multiplier * dy_cache_vec;
  return true;
}

namespace {

REGISTER_CPU_OPERATOR(LambdaRankNdcg, LambdaRankNdcgOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(
    LambdaRankNdcgGradient,
    LambdaRankNdcgGradientOp<float, CPUContext>);

OPERATOR_SCHEMA(LambdaRankNdcg).NumInputs(2).NumOutputs(2).SetDoc(R"DOC(
Implements LambdaRank as described in Wu, Qiang, et al. "Adapting boosting
for information retrieval measures." Information Retrieval 13.3 (2010): 254-270.

This method heuristically optimizes NDCG.
)DOC");
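// With gain g_i = 2^{r_i} and discount d_i = 1 / log2(p_i + 2), where p_i is
// the position of item i when sorted by descending prediction, the op works
// with DCG = \sum_i g_i * d_i, normalized by the ideal DCG computed from the
// relevance-sorted ordering.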
OPERATOR_SCHEMA(LambdaRankNdcgGradient).NumInputs(3).NumOutputs(1);

class GetLambdaRankNdcgGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "LambdaRankNdcgGradient",
        "",
        vector<string>{I(0), O(1), GO(0)},
        vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(LambdaRankNdcg, GetLambdaRankNdcgGradient);

} // namespace

} // namespace caffe2
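
For intuition, here is a minimal, self-contained sketch (independent of Caffe2; the toy inputs are illustrative) that reproduces the DCG/IDCG arithmetic above: gains of 2^{r_i}, discounts of 1 / log2(position + 2), and normalization by the ideal ordering.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  // Toy predictions y and graded relevances r for four items.
  std::vector<float> y = {0.3f, 2.1f, -0.5f, 1.2f};
  std::vector<float> r = {1.f, 3.f, 0.f, 2.f};
  const int N = static_cast<int>(y.size());

  // Indices sorted by descending prediction (cf. arg_sort above).
  std::vector<int> idx(N);
  std::iota(idx.begin(), idx.end(), 0);
  std::sort(idx.begin(), idx.end(), [&](int i, int j) { return y[i] > y[j]; });

  // Ideal ordering: relevances sorted descending.
  std::vector<float> r_ideal = r;
  std::sort(r_ideal.begin(), r_ideal.end(), std::greater<float>());

  double dcg = 0.0, idcg = 0.0;
  for (int pos = 0; pos < N; ++pos) {
    const double d = 1.0 / std::log2(pos + 2.0); // discount at position pos
    dcg += std::exp2(r[idx[pos]]) * d;           // gain 2^r of the ranked item
    idcg += std::exp2(r_ideal[pos]) * d;         // gain under ideal ordering
  }
  std::printf("DCG = %.4f  IDCG = %.4f  NDCG = %.4f\n", dcg, idcg, dcg / idcg);
  return 0;
}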