#include "batch_bucketize_op.h"

#include "caffe2/core/context.h"
#include "caffe2/core/tensor.h"

namespace caffe2 {

template <>
bool BatchBucketizeOp<CPUContext>::RunOnDevice() {
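  // Inputs: `feature` is a dense [batch_size, feature_dim] float matrix;
  // `indices` selects which columns are bucketized; `boundaries` holds the
  // per-feature boundary lists concatenated into one vector; `lengths` gives
  // the number of boundaries owned by each selected feature.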
  auto& feature = Input(FEATURE);
  auto& indices = Input(INDICES);
  auto& boundaries = Input(BOUNDARIES);
  auto& lengths = Input(LENGTHS);

  CAFFE_ENFORCE_EQ(lengths.dim(), 1);
  CAFFE_ENFORCE_EQ(indices.dim(), 1);
  CAFFE_ENFORCE_EQ(boundaries.dim(), 1);
  CAFFE_ENFORCE_EQ(feature.dim(), 2);
  CAFFE_ENFORCE_EQ(lengths.numel(), indices.numel());

  const auto* lengths_data = lengths.template data<int32_t>();
  const auto* indices_data = indices.template data<int32_t>();
  const auto* boundaries_data = boundaries.template data<float>();
  const auto* feature_data = feature.template data<float>();
  auto batch_size = feature.size(0);
  auto feature_dim = feature.size(1);
  auto output_dim = indices.numel();

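  // Validate that every selected column index is in range and that the
  // per-feature boundary counts in `lengths` add up to the total number of
  // boundaries supplied.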
  int64_t length_sum = 0;
  for (int64_t i = 0; i < lengths.numel(); i++) {
    // Each selected column index must lie strictly inside [0, feature_dim).
    CAFFE_ENFORCE_GT(feature_dim, indices_data[i]);
    length_sum += lengths_data[i];
  }
  CAFFE_ENFORCE_EQ(length_sum, boundaries.numel());

  int64_t lower_bound = 0;
  auto* output = Output(O, {batch_size, output_dim}, at::dtype<int32_t>());
  auto* output_data = output->template mutable_data<int32_t>();

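  // For every row and every selected feature, scan that feature's boundary
  // sub-list (starting at `lower_bound`, which is reset for each row) for the
  // first boundary the value does not exceed; the bucket index is the position
  // of that boundary, or lengths_data[j] when the value exceeds them all.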
  for (int64_t i = 0; i < batch_size; i++) {
    lower_bound = 0;
    for (int64_t j = 0; j < output_dim; j++) {
      for (int64_t k = 0; k <= lengths_data[j]; k++) {
        if (k == lengths_data[j] ||
            feature_data[i * feature_dim + indices_data[j]] <=
                boundaries_data[lower_bound + k]) {
          output_data[i * output_dim + j] = k;
          break;
        }
      }
      lower_bound += lengths_data[j];
    }
  }
  return true;
}

REGISTER_CPU_OPERATOR(BatchBucketize, BatchBucketizeOp<CPUContext>);

OPERATOR_SCHEMA(BatchBucketize)
    .NumInputs(4)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Bucketize the float_features into sparse features.
The float_features is an N * D tensor, where N is the batch_size and D is the feature_dim.
The indices is a 1D tensor containing the indices of the features that need to be bucketized.
The lengths is a 1D tensor that splits the `boundaries` input into per-feature sub border lists.
The boundaries is a 1D tensor containing the border list for each feature.

Within each batch, `indices` should not contain duplicate values,
and the number of elements in `indices` should be less than or equal to `D`.
Each element of the `lengths` vector (lengths[`i`]) is
the number of boundaries in the corresponding sub border list.
The sum of all elements in `lengths` must be equal to the size of `boundaries`.
If lengths[0] = 2, the first sub border list is [0.5, 1.0], which separates the
values into (-inf, 0.5], (0.5, 1.0], (1.0, inf), so the bucketized feature has
three possible values (i.e. 0, 1, 2).

For example, with input:

  float_features = [[1.42, 2.07, 3.19, 0.55, 4.32],
                    [4.57, 2.30, 0.84, 4.48, 3.09],
                    [0.89, 0.26, 2.41, 0.47, 1.05],
                    [0.03, 2.97, 2.43, 4.36, 3.11],
                    [2.74, 5.77, 0.90, 2.63, 0.38]]
  boundaries = [0.5, 1.0, 1.5, 2.5, 3.5, 2.5]

the operator produces one int32 bucket index per row for each feature
selected by `indices`.
)DOC")
    .Input(
        0,
        "float_features",
        "2-D dense tensor; the second dimension must be greater than or equal to the indices dimension.")
    .Input(
        1,
        "indices",
        "Flatten tensor, containing the indices of `float_features` to be bucketized. The datatype must be int32.")
    .Input(
        2,
        "lengths",
        "Flatten tensor, the size must be equal to that of `indices`. The datatype must be int32.")
    .Input(
        3,
        "boundaries",
        "Flatten tensor, dimension has to match the sum of lengths.")
    .Output(
        0,
        "bucketized_feat",
        "2-D dense tensor of int32, with 1st dim = float_features.dim(0) and "
        "2nd dim = size(indices); each element is the bucket index of the "
        "corresponding selected feature.");

NO_GRADIENT(BatchBucketize);

} // namespace caffe2