forked from pytorch/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 1
/
common.h
151 lines (128 loc) · 4.23 KB
/
common.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
#ifndef CAFFE2_CORE_COMMON_H_
#define CAFFE2_CORE_COMMON_H_
#include <algorithm>
#include <cmath>
#include <map>
#include <memory>
#include <numeric>
#include <set>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
#if defined(_MSC_VER)
#include <io.h>
#else
#include <unistd.h>
#endif
// Macros used during the build of this caffe2 instance. This header file
// is automatically generated by the cmake script during build.
#include "caffe2/core/macros.h"
#include <c10/macros/Macros.h>
#include "c10/util/string_utils.h"
namespace caffe2 {
// Note(Yangqing): NVCC does not play well with unordered_map on some platforms,
// forcing us to use std::map instead of unordered_map. This may affect speed
// in some cases, but in most of the computation code we do not access map very
// often, so it should be fine for us. I am putting a CaffeMap alias so we can
// change it more easily if things work out for unordered_map down the road.
template <typename Key, typename Value>
using CaffeMap = std::map<Key, Value>;
// using CaffeMap = std::unordered_map;
// Using statements for common classes that we refer to in caffe2 very often.
// Note that we only place it inside caffe2 so the global namespace is not
// polluted.
/* using override */
using std::set;
using std::string;
using std::unique_ptr;
using std::vector;
// Just in order to mark things as not implemented. Do not use in final code.
#define CAFFE_NOT_IMPLEMENTED CAFFE_THROW("Not Implemented.")
// Suppress an "unused variable" warning (CAFFE2_UNUSED) or force a symbol to
// be emitted (CAFFE2_USED) in a compiler-portable way. MSVC (when not driven
// by clang-cl) has no __attribute__, so it gets pragma-based suppression and
// a no-op for CAFFE2_USED.
#if defined(_MSC_VER) && !defined(__clang__)
#define CAFFE2_UNUSED __pragma(warning(suppress : 4100 4101))
#define CAFFE2_USED
#else
#define CAFFE2_UNUSED __attribute__((__unused__))
#define CAFFE2_USED __attribute__((__used__))
#endif // defined(_MSC_VER) && !defined(__clang__)
// Define alignment macro that is cross platform
#if defined(_MSC_VER) && !defined(__clang__)
#define CAFFE2_ALIGNED(x) __declspec(align(x))
#else
#define CAFFE2_ALIGNED(x) __attribute__((aligned(x)))
#endif
// On MSVC, keep <windows.h> from defining min/max macros that would clash
// with std::min/std::max.
#if (defined _MSC_VER && !defined NOMINMAX)
#define NOMINMAX
#endif
// CAFFE2_NODISCARD expands to [[nodiscard]] when the compiler reports support
// for the attribute, and to nothing otherwise.
#if defined(__has_cpp_attribute)
#if __has_cpp_attribute(nodiscard)
#define CAFFE2_NODISCARD [[nodiscard]]
#endif
#endif
#if !defined(CAFFE2_NODISCARD)
#define CAFFE2_NODISCARD
#endif
using std::make_unique;
#if defined(__ANDROID__) && !defined(__NDK_MAJOR__)
using ::round;
#else
using std::round;
#endif // defined(__ANDROID__) && !defined(__NDK_MAJOR__)
// dynamic_cast reroute: if RTTI is disabled, fall back to static_cast.
// NOTE: the no-RTTI branch uses static_cast (the old comment here said
// reinterpret_cast, which did not match the code). static_cast is the right
// unchecked cast for related pointer types, but it performs no runtime type
// check - callers must guarantee the object really has the target type when
// RTTI is off.
template <typename Dst, typename Src>
inline Dst dynamic_cast_if_rtti(Src ptr) {
#ifdef __GXX_RTTI
  // RTTI available: checked cast (nullptr / std::bad_cast on mismatch).
  return dynamic_cast<Dst>(ptr);
#else
  // RTTI disabled: unchecked cast; behavior is undefined on a type mismatch.
  return static_cast<Dst>(ptr);
#endif
}
// SkipIndices are used in operator_fallback_gpu.h and operator_fallback_mkl.h
// as utility functions that marks input / output indices to skip when we use a
// CPU operator as the fallback of GPU/MKL operator option.
//
// Contains(i) returns true iff i equals one of the template arguments.
// The previous implementation hand-rolled the variadic recursion and needed a
// separate SkipIndices<> specialization for the empty pack; a C++17 fold
// expression covers every pack size in one line (an empty || fold is false,
// matching the old empty specialization).
template <int... values>
class SkipIndices {
 public:
  // True when i is a member of the compile-time index list.
  static inline bool Contains(const int i) {
    return ((i == values) || ...);
  }
};
// HasCudaRuntime() tells the program whether the binary has the CUDA runtime
// linked. This function should not be used in static initialization functions
// as the underlying boolean variable is going to be switched on when one
// loads libtorch_gpu.so.
TORCH_API bool HasCudaRuntime();
// Same contract as HasCudaRuntime(), but for the HIP (ROCm) runtime.
TORCH_API bool HasHipRuntime();
namespace internal {
// Sets the CUDA runtime flag that is read by HasCudaRuntime(). You should
// never use this function - it is only used by the Caffe2 gpu code to notify
// Caffe2 core that the CUDA runtime has been loaded.
TORCH_API void SetCudaRuntimeFlag();
// Sets the HIP runtime flag that is read by HasHipRuntime(); same caveat as
// SetCudaRuntimeFlag().
TORCH_API void SetHipRuntimeFlag();
} // namespace internal
// Returns which settings Caffe2 was configured and built with (exported from
// CMake).
TORCH_API const std::map<string, string>& GetBuildOptions();
} // namespace caffe2
#endif // CAFFE2_CORE_COMMON_H_