Caffe2 - C++ API
A deep learning, cross-platform ML framework
common.h
1 #ifndef CAFFE2_CORE_COMMON_H_
2 #define CAFFE2_CORE_COMMON_H_
3 
4 #include <algorithm>
5 #include <cmath>
6 #include <map>
7 #include <memory>
8 #include <numeric>
9 #include <set>
10 #include <sstream>
11 #include <string>
12 #include <type_traits>
13 #include <vector>
14 
15 #ifdef __APPLE__
16 #include <TargetConditionals.h>
17 #endif
18 
19 #if defined(_MSC_VER)
20 #include <io.h>
21 #else
22 #include <unistd.h>
23 #endif
24 
25 // Macros used during the build of this caffe2 instance. This header file
26 // is automatically generated by the cmake script during build.
27 #include "caffe2/core/macros.h"
28 
29 #include <c10/macros/Macros.h>
30 
31 #include "c10/util/string_utils.h"
32 
33 namespace caffe2 {
34 
// Note(Yangqing): NVCC does not play well with unordered_map on some platforms,
// forcing us to use std::map instead of unordered_map. This may affect speed
// in some cases, but in most of the computation code we do not access map very
// often, so it should be fine for us. I am putting a CaffeMap alias so we can
// change it more easily if things work out for unordered_map down the road.
// Because it is std::map, keys iterate in sorted order; do not rely on that
// if this alias ever switches back to unordered_map.
template <typename Key, typename Value>
using CaffeMap = std::map<Key, Value>;
// using CaffeMap = std::unordered_map;
43 
44 // Using statements for common classes that we refer to in caffe2 very often.
45 // Note that we only place it inside caffe2 so the global namespace is not
46 // polluted.
47 /* using override */
48 using std::set;
49 using std::string;
50 using std::unique_ptr;
51 using std::vector;
52 
// Just in order to mark things as not implemented. Do not use in final code.
#define CAFFE_NOT_IMPLEMENTED CAFFE_THROW("Not Implemented.")

// suppress an unused variable.
#ifdef _MSC_VER
// MSVC has no equivalent of GCC's __unused__/__used__ attributes, so the
// macros expand to nothing there; the annotations are simply no-ops.
#define CAFFE2_UNUSED
#define CAFFE2_USED
#else
#define CAFFE2_UNUSED __attribute__((__unused__))
#define CAFFE2_USED __attribute__((__used__))
#endif //_MSC_VER

// Define alignment macro that is cross platform
#if defined(_MSC_VER)
#define CAFFE2_ALIGNED(x) __declspec(align(x))
#else
#define CAFFE2_ALIGNED(x) __attribute__((aligned(x)))
#endif

// Marks a function that never returns (e.g. it always throws or aborts).
#if defined(_MSC_VER)
#define CAFFE2_NORETURN __declspec(noreturn)
#else
#define CAFFE2_NORETURN __attribute__((noreturn))
#endif

// Keep <windows.h> from defining min/max macros that break std::min/std::max.
// NOTE(review): this only takes effect if this header is included before
// <windows.h> — confirm the include order at call sites.
#if (defined _MSC_VER && !defined NOMINMAX)
#define NOMINMAX
#endif
81 
82 // make_unique is a C++14 feature. If we don't have 14, we will emulate
83 // its behavior. This is copied from folly/Memory.h
84 #if __cplusplus >= 201402L || \
85  (defined __cpp_lib_make_unique && __cpp_lib_make_unique >= 201304L) || \
86  (defined(_MSC_VER) && _MSC_VER >= 1900)
87 /* using override */
88 using std::make_unique;
89 #else
90 
// Single-object form: constructs a T from the forwarded arguments and wraps
// it in a unique_ptr. SFINAE-disabled for array types, which are handled by
// the overload below.
template <typename T, typename... CtorArgs>
typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(CtorArgs&&... ctor_args) {
  std::unique_ptr<T> owned(new T(std::forward<CtorArgs>(ctor_args)...));
  return owned;
}
96 
// Allows 'make_unique<T[]>(10)'. (N3690 s20.9.1.4 p3-4)
// Unbounded-array form: allocates n value-initialized elements, matching the
// semantics of std::make_unique<T[]>.
template <typename T>
typename std::enable_if<std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(const size_t n) {
  using Element = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new Element[n]());
}
103 
// Disallows 'make_unique<T[10]>()'. (N3690 s20.9.1.4 p5)
// Deleted so that bounded-array types fail at compile time instead of
// silently selecting one of the overloads above.
template<typename T, typename... Args>
typename std::enable_if<
    std::extent<T>::value != 0, std::unique_ptr<T>>::type
make_unique(Args&&...) = delete;
109 
110 #endif
111 
112 #if defined(__ANDROID__) && !defined(__NDK_MAJOR__)
113 using ::round;
114 #else
115 using std::round;
116 #endif // defined(__ANDROID__) && !defined(__NDK_MAJOR__)
117 
// dynamic cast reroute: if RTTI is disabled, go to static_cast instead.
// (The original comment said reinterpret_cast, but the code below performs a
// static_cast — updated to match the actual behavior.)
// NOTE(review): only GCC/Clang define __GXX_RTTI; other compilers (e.g. MSVC)
// always take the static_cast branch even when RTTI is enabled — confirm that
// is intended.
template <typename Dst, typename Src>
inline Dst dynamic_cast_if_rtti(Src ptr) {
#ifdef __GXX_RTTI
  // Checked downcast: yields nullptr (for pointers) on type mismatch.
  return dynamic_cast<Dst>(ptr);
#else
  // Unchecked: caller must guarantee ptr really points to a Dst.
  return static_cast<Dst>(ptr);
#endif
}
127 
// SkipIndices are used in operator_fallback_gpu.h and operator_fallback_mkl.h
// as utilty functions that marks input / output indices to skip when we use a
// CPU operator as the fallback of GPU/MKL operator option.
template <int... values>
class SkipIndices {
 private:
  // Base case: one remaining candidate value to compare against.
  template <int Only>
  static inline bool ContainsInternal(const int query) {
    return query == Only;
  }
  // Recursive case: test the head of the list, then recurse on the tail. The
  // two leading parameters keep this overload distinct from the base case.
  template <int Head, int Next, int... Tail>
  static inline bool ContainsInternal(const int query) {
    if (query == Head) {
      return true;
    }
    return ContainsInternal<Next, Tail...>(query);
  }

 public:
  // True iff i equals one of the compile-time values in the list.
  static inline bool Contains(const int i) {
    return ContainsInternal<values...>(i);
  }
};
148 
149 template <>
150 class SkipIndices<> {
151  public:
152  static inline bool Contains(const int /*i*/) {
153  return false;
154  }
155 };
156 
// HasCudaRuntime() tells the program whether the binary has Cuda runtime
// linked. This function should not be used in static initialization functions
// as the underlying boolean variable is going to be switched on when one
// loads libcaffe2_gpu.so.
CAFFE2_API bool HasCudaRuntime();
// Same as HasCudaRuntime(), but reports whether the HIP (ROCm) runtime has
// been linked/loaded. The same static-initialization caveat applies.
CAFFE2_API bool HasHipRuntime();
namespace internal {
// Sets the Cuda Runtime flag that is used by HasCudaRuntime(). You should
// never use this function - it is only used by the Caffe2 gpu code to notify
// Caffe2 core that cuda runtime has been loaded.
CAFFE2_API void SetCudaRuntimeFlag();
// HIP counterpart of SetCudaRuntimeFlag(); the same "do not call" caveat
// applies.
CAFFE2_API void SetHipRuntimeFlag();
} // namespace internal
// Returns which setting Caffe2 was configured and built with (exported from
// CMake)
CAFFE2_API const std::map<string, string>& GetBuildOptions();
173 
174 } // namespace caffe2
175 
176 #endif // CAFFE2_CORE_COMMON_H_
A global dictionary that holds information about what Caffe2 modules have been loaded in the current ...
Definition: blob.h:13