Caffe2 — C++ API
A deep-learning, cross-platform ML framework
Source file: batch_sparse_to_dense_op.cc
1 #include "batch_sparse_to_dense_op.h"
2 
3 #include "caffe2/core/context.h"
4 
5 namespace caffe2 {
6 
7 template <typename T, class Context>
8 bool BatchSparseToDenseOp<T, Context>::RunOnDevice() {
9  auto& lengths = Input(LENGTHS);
10  auto& indices = Input(INDICES);
11  auto& values = Input(VALUES);
12 
13  CAFFE_ENFORCE_EQ(indices.numel(), values.numel());
14  CAFFE_ENFORCE_EQ(lengths.dim(), 1);
15  CAFFE_ENFORCE_EQ(indices.dim(), 1);
16 
17  const int64_t* lengths_data = lengths.template data<int64_t>();
18  const int64_t* indices_data = indices.template data<int64_t>();
19  const T* values_data = values.template data<T>();
20  int64_t batch_size = lengths.numel();
21  int64_t lengths_sum = 0;
22  math::Sum<int64_t, Context>(batch_size, lengths_data, &lengths_sum, &context_);
23  CAFFE_ENFORCE_EQ(lengths_sum, indices.numel());
24 
25  vector<int64_t> output_shape = {batch_size};
26  if (InputSize() == 4) {
27  auto& shaper = Input(3);
28  CAFFE_ENFORCE_EQ(shaper.dim(), 2);
29  if (dense_last_dim_ == -1) {
30  dense_last_dim_ = shaper.size(1);
31  } else {
32  CAFFE_ENFORCE(
33  dense_last_dim_ == shaper.size(1),
34  "The last dim argument is not aligned with the shape input last dim");
35  }
36  } else {
37  CAFFE_ENFORCE(dense_last_dim_ >= 1, "The last dim of dense must be >= 1");
38  }
39  output_shape.push_back(dense_last_dim_);
40  auto* output = Output(0, output_shape, at::dtype<T>());
41  T* output_data = output->template mutable_data<T>();
42  math::Set(
43  output->numel(), static_cast<T>(default_value_), output_data, &context_);
44 
45  int64_t k = 0;
46  for (int64_t i = 0; i < batch_size; ++i) {
47  for (int64_t j = 0; j < lengths_data[i]; ++j) {
48  CAFFE_ENFORCE(
49  indices_data[k] < dense_last_dim_,
50  "An indice (",
51  indices_data[k],
52  ") is larger then last dim of dense (",
53  dense_last_dim_,
54  ").");
55  output_data[i * dense_last_dim_ + indices_data[k]] = values_data[k];
56  k += 1;
57  }
58  }
59 
60  return true;
61 }
62 
63 template <typename T, class Context>
64 bool BatchDenseToSparseOp<T, Context>::RunOnDevice() {
65  auto& lengths = Input(LENGTHS);
66  auto& indices = Input(INDICES);
67  auto& dense = Input(DENSE);
68 
69  CAFFE_ENFORCE_EQ(lengths.dim(), 1);
70  CAFFE_ENFORCE_EQ(indices.dim(), 1);
71  CAFFE_ENFORCE_EQ(dense.dim(), 2);
72  const int64_t* lengths_data = lengths.template data<int64_t>();
73  const int64_t* indices_data = indices.template data<int64_t>();
74  const T* dense_data = dense.template data<T>();
75 
76  int64_t batch_size = lengths.numel();
77  int64_t lengths_sum = 0;
78  math::Sum<int64_t, Context>(batch_size, lengths_data, &lengths_sum, &context_);
79  CAFFE_ENFORCE_EQ(lengths_sum, indices.numel());
80 
81  CAFFE_ENFORCE_EQ(batch_size, dense.size(0));
82  dense_last_dim_ = dense.size(1);
83  vector<int64_t> output_shape = indices.sizes().vec();
84  auto* output = Output(0, output_shape, at::dtype<T>());
85  T* output_data = output->template mutable_data<T>();
86 
87  int64_t k = 0;
88  for (int64_t i = 0; i < batch_size; ++i) {
89  for (int64_t j = 0; j < lengths_data[i]; ++j) {
90  CAFFE_ENFORCE(
91  indices_data[k] < dense.size(1),
92  "An indice (",
93  indices_data[k],
94  ") is larger then last dim of dense (",
95  dense.size(1),
96  ").");
97  output_data[k] = dense_data[i * dense.size(1) + indices_data[k]];
98  k += 1;
99  }
100  }
101  return true;
102 }
103 
// CPU registration: float-valued specialization of the sparse->dense scatter.
REGISTER_CPU_OPERATOR(
    BatchSparseToDense,
    BatchSparseToDenseOp<float, CPUContext>);

// Schema: 3 required inputs (lengths, indices, values) plus an optional 4th
// input whose last dim fixes the output's last dim; one dense output.
OPERATOR_SCHEMA(BatchSparseToDense)
    .NumInputs(3, 4)
    .NumOutputs(1)
    .DisallowInputFillers() // TODO: enable the filler
    .SetDoc(R"DOC(
Convert sparse matrix representation into dense matrix.

A sparse matrix is represented by `lengths` vector, `indices` vector,
and `values` vector. Each element in `lengths` vector (lengths[`i`]) represents
the number of indices in this batch (batch `i`).
With in each batch, `indices` should not have duplicate number.

For example, with input:

  lengths = [2, 3, 1]
  indices = [0, 1, 2, 3, 4, 5]
  values = [6, 7, 8, 9, 10, 11]
  dense_dim = 6
  default_value = 0

The output is:

  output = [[6, 7, 0, 0, 0, 0],
            [0, 0, 8, 9, 10, 0],
            [0, 0, 0, 0, 0, 11]]

after running this operator.
)DOC")
    .Input(
        0,
        "lengths",
        "Flatten tensor, used to break down indices and values into per batch indices and values.")
    .Input(
        1,
        "indices",
        "Flatten tensor of total size = \\sum lengths, containing the indices ")
    .Input(2, "values", "Data tensor, dimension has to match `indices`")
    .Input(
        3,
        "output_shape_inference",
        "Optional, a dense tensor whose shape define the output shape")
    .Output(
        0,
        "dense",
        "2-D dense tensor, with 1st dim = len(lengths), 2nd dim = dense_last_dim"
        "in the arg list, the tensor is of the same data type as `values`."
        "Missing values are filled with default_value")
    .Arg(
        "dense_last_dim",
        "Optional, output dense last dimension. "
        "If both this argument and output_shape_inference are set, "
        "it should be consistent with output_shape_inference's last dim")
    .Arg(
        "default_value",
        "Optional, missing values are filled with this value."
        "default_value = 0 when not set");
164 
// CPU registration: float-valued specialization of the dense->sparse gather.
REGISTER_CPU_OPERATOR(
    BatchDenseToSparse,
    BatchDenseToSparseOp<float, CPUContext>);

// Schema: exactly 3 inputs (lengths, indices, dense) and one flat output
// of values, parallel to `indices`.
OPERATOR_SCHEMA(BatchDenseToSparse)
    .NumInputs(3)
    .NumOutputs(1)
    .SetDoc(R"DOC(
This Op is a inverse of BatchSparseToDenseOp.
Basically, given a `lengths` vector, a `indices` vector,
and a dense matrix `dense`, output `value` vector so that, along with
`lengths` vector and `indices` vector, forms a sparse representation
of the dense matrix.

A sparse matrix is represented by `lengths` vector, `indices` vector,
and `values` vector. Each element in `lengths` vector (lengths[`i`]) represents
the number of indices in this batch (batch `i`).
With in each batch, `indices` should not have duplicate number.

For example, with input:

  lengths = [2, 3, 1]
  indices = [0, 1, 2, 3, 4, 5]
  output = [[6, 7, 0, 0, 0, 0],
            [0, 0, 8, 9, 10, 0],
            [0, 0, 0, 0, 0, 11]]

The output is:

  values = [6, 7, 8, 9, 10, 11]

after running this operator.
)DOC")
    .Input(
        0,
        "lengths",
        "Flatten lengths, Used to break down indices into per batch indices")
    .Input(
        1,
        "indices",
        "Flatten indices, tensor of total size = \\sum lengths, containing the indices ")
    .Input(
        2,
        "dense",
        "dense 2-D tensor, first dim = len(lengths), last dim > Any(indices)")
    .Output(
        0,
        "values",
        "Values, tensor of the same size as `indices` and same data type as dense tensor.");
214 
namespace {

// The two ops are each other's inverses, so each one's gradient is expressed
// as an invocation of the other.

// Gradient of BatchSparseToDense w.r.t. its `values` input (I(2)): gather the
// sparse positions back out of the dense output gradient GO(0) using the
// forward op's lengths I(0) and indices I(1).
class GetBatchSparseToDenseGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "BatchDenseToSparse",
        "",
        vector<string>{I(0), I(1), GO(0)},
        vector<string>{GI(2)});
  }
};

// Gradient of BatchDenseToSparse w.r.t. its `dense` input (I(2)): scatter the
// sparse output gradient GO(0) back into a dense matrix. The original dense
// input I(2) is passed as the optional shape-inference input so the gradient
// tensor gets the right last dim.
class GetBatchDenseToSparseGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "BatchSparseToDense",
        "",
        vector<string>{I(0), I(1), GO(0), I(2)},
        vector<string>{GI(2)});
  }
};

REGISTER_GRADIENT(BatchSparseToDense, GetBatchSparseToDenseGradient);
REGISTER_GRADIENT(BatchDenseToSparse, GetBatchDenseToSparseGradient);

} // namespace
243 } // namespace caffe2
A global dictionary that holds information about what Caffe2 modules have been loaded in the current runtime.
Definition: blob.h:13