// Caffe2 - C++ API
// A deep learning, cross platform ML framework
// lpnorm_op.cc
17 #include "caffe2/operators/lpnorm_op.h"
18 
19 namespace caffe2 {
20 
21 template <>
22 bool LpNormOp<float, CPUContext>::RunOnDevice() {
23  auto& X = Input(X_IN);
24  auto* norm = Output(OUT);
25  norm->Resize(1);
26  const float* X_data = X.data<float>();
27  if (p_ == 1) {
28  *(norm->mutable_data<float>()) =
29  (ConstEigenVectorMap<float>(X_data, X.size()).array()).abs().sum();
30  // L1(x) = sum(|x|)
31  } else if (p_ == 2) {
32  *(norm->mutable_data<float>()) =
33  (ConstEigenVectorMap<float>(X_data, X.size()).array()).square().sum();
34  // L2(x) = (sum(|x|^2))
35  }
36  return true;
37 }
38 
39 template <>
40 bool LpNormGradientOp<float, CPUContext>::RunOnDevice() {
41  auto& X = Input(X_IN);
42  auto& dnorm = Input(DER_NORM_IN);
43  auto* dX = Output(DER_X_OUT);
44  CAFFE_ENFORCE_EQ(dnorm.ndim(), 1);
45  CAFFE_ENFORCE_EQ(dnorm.dim32(0), 1);
46  dX->ResizeLike(X);
47  const float kEps = 1e-12f;
48 
49  if (p_ == 1) {
50  // Todo: implement in eigen
51  for (int i = 0; i < X.size(); ++i) {
52  float temp = (X.data<float>())[i];
53  if (temp < -kEps) {
54  dX->mutable_data<float>()[i] = -(dnorm.data<float>())[0];
55  } else if (temp > kEps) {
56  dX->mutable_data<float>()[i] = (dnorm.data<float>())[0];
57  } else {
58  dX->mutable_data<float>()[i] = 0;
59  }
60  }
61  } else if (p_ == 2) {
62  EigenVectorMap<float>(dX->mutable_data<float>(), X.size()).array() =
63  ConstEigenVectorMap<float>(X.data<float>(), X.size()).array() * 2.0f *
64  (dnorm.data<float>())[0];
65  }
66 
67  return true;
68 }
69 
namespace {
// LpNorm

// Register the CPU implementations of the forward and gradient operators.
REGISTER_CPU_OPERATOR(LpNorm, LpNormOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(LpNormGradient, LpNormGradientOp<float, CPUContext>);

OPERATOR_SCHEMA(LpNorm)
    .NumInputs(1)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Given one input float tensor X, and produces one output float tensor
of the Lp norm of tensor X, computed as Lp(x) = sum over |x^p|,
in which p is either 1 or 2(currently only supports l1 and l2 norm),
determined by the argument p.
)DOC")
    .Input(0, "X", "1D input tensor")
    .Output(0, "Z", "1D output tensor")
    .Arg("p", "Order of the norm in p-norm");

OPERATOR_SCHEMA(LpNormGradient)
    .NumInputs(2)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Given one input float tensor X, derivative dout, and produces one output
float tensor dX. dX is the derivative of the Lp norm of tensor X, computed as
dx = d(sum over |x^p|)/dx, in which p is either 1 or 2(currently only
supports l1 and l2 norm) determined by the argument p.
)DOC")
    .Input(0, "X", "1D input tensor")
    .Input(1, "dout", "1D input tensor")
    .Output(0, "dx", "1D output tensor")
    .Arg("p", "Order of the norm in p-norm");

// Gradient maker: wires LpNormGradient with the forward op's input X
// (I(0)) and the gradient of the output (GO(0)), producing the gradient
// with respect to X (GI(0)). The "p" argument is copied automatically
// by SingleGradientDef.
class GetLpNormGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    return SingleGradientDef(
        "LpNormGradient",
        "",
        vector<string>{I(0), GO(0)},
        vector<string>{GI(0)});
  }
};

REGISTER_GRADIENT(LpNorm, GetLpNormGradient);
} // namespace
115 
116 } // namespace caffe2
// Copyright (c) 2016-present, Facebook, Inc.