Caffe2 - C++ API
A deep learning, cross-platform ML framework
ensure_cpu_output_op.h
#ifndef CAFFE2_OPERATORS_ENSURE_CPU_OUTPUT_OP_H_
#define CAFFE2_OPERATORS_ENSURE_CPU_OUTPUT_OP_H_

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

// Copies the single input tensor, which may live on the CPU or on the
// current device (e.g. a GPU), into an output tensor that is always on CPU.
template <class Context>
class EnsureCPUOutputOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  template <class... Args>
  explicit EnsureCPUOutputOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...) {}

  bool RunOnDevice() override {
    if (this->InputIsTensorType(0, CPU)) {
      return CopyWithContext<CPUContext>();
    } else if (this->InputIsTensorType(0, Context::GetDeviceType())) {
      // A CUDA context takes this branch.
      return CopyWithContext<Context>();
    } else {
      CAFFE_THROW(
          "Unexpected Input Blob: ",
          OperatorBase::Inputs().at(0)->meta().name());
    }
    return true;
  }

 private:
  template <class InputContext>
  bool CopyWithContext() {
    // The output is always allocated on CPU.
    auto* output = this->template Output<Tensor>(0, CPU);
    auto& input =
        this->template Input<Tensor>(0, InputContext::GetDeviceType());
    output->ResizeLike(input);
    context_.CopyItemsToCPU(
        input.dtype(),
        input.numel(),
        input.raw_data(),
        output->raw_mutable_data(input.dtype()));
    context_.FinishDeviceComputation();
    return true;
  }
};

} // namespace caffe2

#endif // CAFFE2_OPERATORS_ENSURE_CPU_OUTPUT_OP_H_
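The header only defines the operator template; the concrete bindings for each device live in the corresponding source files, which are not part of this page. A minimal sketch of how such a binding might look, assuming the standard Caffe2 registration macros (REGISTER_CPU_OPERATOR, OPERATOR_SCHEMA) from caffe2/core/operator.h; the exact schema text here is illustrative, not the library's actual documentation string:

// Sketch only: assumed registration for the CPU build. The real
// ensure_cpu_output_op.cc / .cu files (not shown on this page) contain
// the authoritative registrations and schema.
#include "caffe2/operators/ensure_cpu_output_op.h"

namespace caffe2 {

// Bind the operator name "EnsureCPUOutput" to the CPU instantiation.
REGISTER_CPU_OPERATOR(EnsureCPUOutput, EnsureCPUOutputOp<CPUContext>);

// One input tensor (on CPU or on the current device), one output tensor
// that is guaranteed to live on CPU.
OPERATOR_SCHEMA(EnsureCPUOutput)
    .NumInputs(1)
    .NumOutputs(1)
    .SetDoc("Copies the input tensor to an output tensor that always lives on CPU.")
    .Input(0, "input", "Input tensor, on CPU or on the current device")
    .Output(0, "output", "Copy of the input, always on CPU");

} // namespace caffe2

A GPU build would analogously register the CUDA instantiation (for example via REGISTER_CUDA_OPERATOR with EnsureCPUOutputOp<CUDAContext>), which is what makes the Context::GetDeviceType() branch in RunOnDevice() reachable.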