1 #include "conv_relu_op.h" 5 template <
typename T,
class Context>
6 bool ConvReluOp<T, Context>::RunOnDeviceWithOrderNCHW() {
8 for (
int i = 0; i < this->InputSize(); ++i) {
9 local_input_blobs_[i]->ShareExternal(
10 const_cast<void*>(this->Inputs()[i]->GetRaw()),
11 this->Inputs()[i]->meta());
14 if (!local_op_->RunOnDeviceWithOrderNCHW()) {
20 BlobGetMutableTensor(local_output_blobs_[0], Context::GetDeviceType());
21 const T* output_local_data = local_output->template data<T>();
24 Operator<Context>::Output(0, local_output->sizes(), at::dtype<T>());
25 T* output_data = output->template mutable_data<T>();
27 #pragma omp parallel for 29 for (
int i = 0; i < output->numel(); ++i) {
30 output_data[i] = std::max(static_cast<T>(0), output_local_data[i]);
template <typename T, class Context>
bool ConvReluOp<T, Context>::RunOnDeviceWithOrderNHWC() {
  // Share the external inputs with the wrapped Conv operator without copying.
  for (int i = 0; i < this->InputSize(); ++i) {
    local_input_blobs_[i]->ShareExternal(
        const_cast<void*>(this->Inputs()[i]->GetRaw()),
        this->Inputs()[i]->meta());
  }

  // Run the wrapped convolution in NHWC order.
  if (!local_op_->RunOnDeviceWithOrderNHWC()) {
    return false;
  }

  // Apply ReLU to the convolution result while copying it to the output.
  Tensor* local_output =
      BlobGetMutableTensor(local_output_blobs_[0], Context::GetDeviceType());
  const T* output_local_data = local_output->template data<T>();

  Tensor* output =
      Operator<Context>::Output(0, local_output->sizes(), at::dtype<T>());
  T* output_data = output->template mutable_data<T>();
#pragma omp parallel for
  for (int i = 0; i < output->numel(); ++i) {
    output_data[i] = std::max(static_cast<T>(0), output_local_data[i]);
  }
  return true;
}
REGISTER_CPU_OPERATOR(ConvRelu, ConvReluOp<float, CPUContext>);

} // namespace caffe2
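// ----------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this translation unit): how the
// fused ConvRelu operator registered above might be invoked through Caffe2's
// C++ operator API. The blob names ("X", "W", "b", "Y") and the kernel size
// are hypothetical; the sketch assumes the standard caffe2::Workspace and
// caffe2::CreateOperator entry points.
//
//   caffe2::Workspace ws;
//   // Fill blobs "X" (input), "W" (filters), and "b" (bias) beforehand.
//   caffe2::OperatorDef def;
//   def.set_type("ConvRelu");
//   def.add_input("X");
//   def.add_input("W");
//   def.add_input("b");
//   def.add_output("Y");
//   auto* kernel_arg = def.add_arg();
//   kernel_arg->set_name("kernel");
//   kernel_arg->set_i(3);
//   std::unique_ptr<caffe2::OperatorBase> op = caffe2::CreateOperator(def, &ws);
//   if (op) {
//     op->Run();  // Runs the convolution and applies ReLU in one operator.
//   }
// ----------------------------------------------------------------------------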