// Caffe2 - C++ API
// A deep learning, cross-platform ML framework
// batch_sparse_to_dense_op.cc
1 #include "batch_sparse_to_dense_op.h"
2 
3 #include "caffe2/core/context.h"
4 
5 namespace caffe2 {
6 
7 template <typename T, class Context>
8 bool BatchSparseToDenseOp<T, Context>::RunOnDevice() {
9  auto& lengths = Input(LENGTHS);
10  auto& indices = Input(INDICES);
11  auto& values = Input(VALUES);
12  auto* output = Output(0);
13  CAFFE_ENFORCE_EQ(indices.size(), values.size());
14  CAFFE_ENFORCE_EQ(lengths.ndim(), 1);
15  CAFFE_ENFORCE_EQ(indices.ndim(), 1);
16 
17  const TIndex* lengths_data = lengths.template data<TIndex>();
18  const TIndex* indices_data = indices.template data<TIndex>();
19  const T* values_data = values.template data<T>();
20  TIndex batch_size = lengths.size();
21  TIndex lengths_sum = 0;
22  math::Sum<TIndex, Context>(batch_size, lengths_data, &lengths_sum, &context_);
23  CAFFE_ENFORCE_EQ(lengths_sum, indices.size());
24 
25  vector<TIndex> output_shape = {batch_size};
26  if (InputSize() == 4) {
27  auto& shaper = Input(3);
28  CAFFE_ENFORCE_EQ(shaper.ndim(), 2);
29  if (dense_last_dim_ == -1) {
30  dense_last_dim_ = shaper.dim(1);
31  } else {
32  CAFFE_ENFORCE(
33  dense_last_dim_ == shaper.dim(1),
34  "The last dim argument is not aligned with the shape input last dim");
35  }
36  } else {
37  CAFFE_ENFORCE(dense_last_dim_ >= 1, "The last dim of dense must be >= 1");
38  }
39  output_shape.push_back(dense_last_dim_);
40  output->Resize(output_shape);
41  T* output_data = output->template mutable_data<T>();
42  math::Set(
43  output->size(), static_cast<T>(default_value_), output_data, &context_);
44 
45  TIndex k = 0;
46  for (TIndex i = 0; i < batch_size; ++i) {
47  for (TIndex j = 0; j < lengths_data[i]; ++j) {
48  CAFFE_ENFORCE(
49  indices_data[k] < dense_last_dim_,
50  "An indice (",
51  indices_data[k],
52  ") is larger then last dim of dense (",
53  dense_last_dim_,
54  ").");
55  output_data[i * dense_last_dim_ + indices_data[k]] = values_data[k];
56  k += 1;
57  }
58  }
59 
60  return true;
61 }
62 
63 template <typename T, class Context>
64 bool BatchDenseToSparseOp<T, Context>::RunOnDevice() {
65  auto& lengths = Input(LENGTHS);
66  auto& indices = Input(INDICES);
67  auto& dense = Input(DENSE);
68  auto* output = Output(0);
69  CAFFE_ENFORCE_EQ(lengths.ndim(), 1);
70  CAFFE_ENFORCE_EQ(indices.ndim(), 1);
71  CAFFE_ENFORCE_EQ(dense.ndim(), 2);
72  const TIndex* lengths_data = lengths.template data<TIndex>();
73  const TIndex* indices_data = indices.template data<TIndex>();
74  const T* dense_data = dense.template data<T>();
75 
76  TIndex batch_size = lengths.size();
77  TIndex lengths_sum = 0;
78  math::Sum<TIndex, Context>(batch_size, lengths_data, &lengths_sum, &context_);
79  CAFFE_ENFORCE_EQ(lengths_sum, indices.size());
80 
81  CAFFE_ENFORCE_EQ(batch_size, dense.dim(0));
82  dense_last_dim_ = dense.dim(1);
83  vector<TIndex> output_shape = indices.dims();
84  output->Resize(output_shape);
85  T* output_data = output->template mutable_data<T>();
86 
87  TIndex k = 0;
88  for (TIndex i = 0; i < batch_size; ++i) {
89  for (TIndex j = 0; j < lengths_data[i]; ++j) {
90  CAFFE_ENFORCE(
91  indices_data[k] < dense.dim(1),
92  "An indice (",
93  indices_data[k],
94  ") is larger then last dim of dense (",
95  dense.dim(1),
96  ").");
97  output_data[k] = dense_data[i * dense.dim(1) + indices_data[k]];
98  k += 1;
99  }
100  }
101  return true;
102 }
103 
104 REGISTER_CPU_OPERATOR(
105  BatchSparseToDense,
106  BatchSparseToDenseOp<float, CPUContext>);
107 
108 OPERATOR_SCHEMA(BatchSparseToDense)
109  .NumInputs(3, 4)
110  .NumOutputs(1)
111  .SetDoc(R"DOC(
112 Convert sparse matrix representation into dense matrix.
113 
114 A sparse matrix is represented by `lengths` vector, `indices` vector,
115 and `values` vector. Each element in `lengths` vector (lengths[`i`]) represents
116 the number of indices in this batch (batch `i`).
117 With in each batch, `indices` should not have duplicate number.
118 
119 For example, with input:
120 
121  lengths = [2, 3, 1]
122  indices = [0, 1, 2, 3, 4, 5]
123  values = [6, 7, 8, 9, 10, 11]
124  dense_dim = 6
125  default_value = 0
126 
127 The output is:
128 
129  output = [[6, 7, 0, 0, 0, 0],
130  [0, 0, 8, 9, 10, 0],
131  [0, 0, 0, 0, 0, 11]]
132 
133 after running this operator.
134 )DOC")
135  .Input(
136  0,
137  "lengths",
138  "Flatten tensor, used to break down indices and values into per batch indices and values.")
139  .Input(
140  1,
141  "indices",
142  "Flatten tensor of total size = \\sum lengths, containing the indices ")
143  .Input(2, "values", "Data tensor, dimension has to match `indices`")
144  .Input(
145  3,
146  "output_shape_inference",
147  "Optional, a dense tensor whose shape define the output shape")
148  .Output(
149  0,
150  "dense",
151  "2-D dense tensor, with 1st dim = len(lengths), 2nd dim = dense_last_dim"
152  "in the arg list, the tensor is of the same data type as `values`."
153  "Missing values are filled with default_value")
154  .Arg(
155  "dense_last_dim",
156  "Optional, output dense last dimension. "
157  "If both this argument and output_shape_inference are set, "
158  "it should be consistent with output_shape_inference's last dim")
159  .Arg(
160  "default_value",
161  "Optional, missing values are filled with this value."
162  "default_value = 0 when not set");
163 
164 REGISTER_CPU_OPERATOR(
165  BatchDenseToSparse,
166  BatchDenseToSparseOp<float, CPUContext>);
167 
168 OPERATOR_SCHEMA(BatchDenseToSparse)
169  .NumInputs(3)
170  .NumOutputs(1)
171  .SetDoc(R"DOC(
172 This Op is a inverse of BatchSparseToDenseOp.
173 Basically, given a `lengths` vector, a `indices` vector,
174 and a dense matrix `dense`, output `value` vector so that, along with
175 `lengths` vector and `indices` vector, forms a sparse representation
176 of the dense matrix.
177 
178 A sparse matrix is represented by `lengths` vector, `indices` vector,
179 and `values` vector. Each element in `lengths` vector (lengths[`i`]) represents
180 the number of indices in this batch (batch `i`).
181 With in each batch, `indices` should not have duplicate number.
182 
183 For example, with input:
184 
185  lengths = [2, 3, 1]
186  indices = [0, 1, 2, 3, 4, 5]
187  output = [[6, 7, 0, 0, 0, 0],
188  [0, 0, 8, 9, 10, 0],
189  [0, 0, 0, 0, 0, 11]]
190 
191 The output is:
192 
193  values = [6, 7, 8, 9, 10, 11]
194 
195 after running this operator.
196 )DOC")
197  .Input(
198  0,
199  "lengths",
200  "Flatten lengths, Used to break down indices into per batch indices")
201  .Input(
202  1,
203  "indices",
204  "Flatten indices, tensor of total size = \\sum lengths, containing the indices ")
205  .Input(
206  2,
207  "dense",
208  "dense 2-D tensor, first dim = len(lengths), last dim > Any(indices)")
209  .Output(
210  0,
211  "values",
212  "Values, tensor of the same size as `indices` and same data type as dense tensor.");
213 
214 namespace {
215 
216 class GetBatchSparseToDenseGradient : public GradientMakerBase {
217  using GradientMakerBase::GradientMakerBase;
218  vector<OperatorDef> GetGradientDefs() override {
219  return SingleGradientDef(
220  "BatchDenseToSparse",
221  "",
222  vector<string>{I(0), I(1), GO(0)},
223  vector<string>{GI(2)});
224  }
225 };
226 
227 class GetBatchDenseToSparseGradient : public GradientMakerBase {
228  using GradientMakerBase::GradientMakerBase;
229  vector<OperatorDef> GetGradientDefs() override {
230  return SingleGradientDef(
231  "BatchSparseToDense",
232  "",
233  vector<string>{I(0), I(1), GO(0), I(2)},
234  vector<string>{GI(2)});
235  }
236 };
237 
238 REGISTER_GRADIENT(BatchSparseToDense, GetBatchSparseToDenseGradient);
239 REGISTER_GRADIENT(BatchDenseToSparse, GetBatchDenseToSparseGradient);
240 
241 } // namespace
242 } // namespace caffe2
// Copyright (c) 2016-present, Facebook, Inc.