#pragma once

#include <atomic>
#include <sys/types.h>

#include "macros.h"
#include "util.h"

/**
 * XXX: CoreIDs are not recyclable for now, so NMAXCORES is really the number
 * of threads which can ever be spawned in the system
 */
class coreid {
public:
  static const unsigned NMaxCores = NMAXCORES;

  static inline unsigned
  core_id()
  {
    if (unlikely(tl_core_id == -1)) {
      // initialize per-core data structures
      tl_core_id = g_core_count.fetch_add(1, std::memory_order_acq_rel);
      // did we exceed max cores?
      ALWAYS_ASSERT(unsigned(tl_core_id) < NMaxCores);
    }
    return tl_core_id;
  }

  /**
   * Since our current allocation scheme does not allow for holes in the
   * allocation, this function is quite wasteful. Don't abuse.
   *
   * Returns -1 if it is impossible to do this w/o exceeding max allocations
   */
  static int
  allocate_contiguous_aligned_block(unsigned n, unsigned alignment);

  /**
   * WARNING: this function is scary, and exists solely as a hack
   *
   * You are allowed to set your own core id under several conditions
   * (the idea is that somebody else has allocated a block of core ids
   * and is assigning one to you, under the promise of uniqueness):
   *
   * 1) You haven't already called core_id() yet (so you have no assignment)
   * 2) The number you are setting is < the current assignment counter (meaning
   *    it was previously assigned by someone)
   *
   * These are necessary but not sufficient conditions for uniqueness
   */
  static void
  set_core_id(unsigned cid)
  {
    ALWAYS_ASSERT(cid < NMaxCores);
    ALWAYS_ASSERT(cid < g_core_count.load(std::memory_order_acquire));
    ALWAYS_ASSERT(tl_core_id == -1);
    tl_core_id = cid; // sigh
  }

  // actual number of CPUs online for the system
  static unsigned num_cpus_online();

private:
  // the core ID of this core: -1 if not set
  static __thread int tl_core_id;
  // contains a running count of all the cores
  static std::atomic<unsigned> g_core_count CACHE_ALIGNED;
};
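
// Usage sketch (illustrative, not part of the original interface): a thread
// either claims the next free id implicitly by calling coreid::core_id(), or a
// coordinator reserves a block via allocate_contiguous_aligned_block() and each
// worker pins its assigned id with set_core_id() before its first core_id()
// call. The names worker_main/nworkers below are hypothetical, and the
// alignment argument shown is only an example.
//
//   static void
//   worker_main(unsigned assigned_id)
//   {
//     coreid::set_core_id(assigned_id);        // must precede core_id()
//     const unsigned me = coreid::core_id();   // now returns assigned_id
//     // ... use `me` to index per-core structures ...
//   }
//
//   // coordinator side:
//   //   const int base = coreid::allocate_contiguous_aligned_block(nworkers, nworkers);
//   //   ALWAYS_ASSERT(base >= 0);
//   //   spawn worker i with id (base + i)
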
// requires T to have no-arg ctor
template <typename T, bool CallDtor = false, bool Pedantic = true>
class percore {
public:

  percore()
  {
    for (size_t i = 0; i < size(); i++) {
      using namespace util;
      new (&(elems()[i])) aligned_padded_elem<T, Pedantic>();
    }
  }

  ~percore()
  {
    if (!CallDtor)
      return;
    for (size_t i = 0; i < size(); i++) {
      using namespace util;
      elems()[i].~aligned_padded_elem<T, Pedantic>();
    }
  }

  inline T &
  operator[](unsigned i)
  {
    INVARIANT(i < NMAXCORES);
    return elems()[i].elem;
  }

  inline const T &
  operator[](unsigned i) const
  {
    INVARIANT(i < NMAXCORES);
    return elems()[i].elem;
  }

  inline T &
  my()
  {
    return (*this)[coreid::core_id()];
  }

  inline const T &
  my() const
  {
    return (*this)[coreid::core_id()];
  }

  // XXX: make an iterator
  inline size_t
  size() const
  {
    return NMAXCORES;
  }

protected:

  inline util::aligned_padded_elem<T, Pedantic> *
  elems()
  {
    return (util::aligned_padded_elem<T, Pedantic> *) &bytes_[0];
  }

  inline const util::aligned_padded_elem<T, Pedantic> *
  elems() const
  {
    return (const util::aligned_padded_elem<T, Pedantic> *) &bytes_[0];
  }

  char bytes_[sizeof(util::aligned_padded_elem<T, Pedantic>) * NMAXCORES];
};
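
// Usage sketch (illustrative): percore<T> holds one cache-line-padded T per
// possible core, so per-core state can be updated without false sharing.
// `g_commits`, `on_commit`, and `total_commits` below are hypothetical names.
//
//   static percore<uint64_t> g_commits;
//
//   void
//   on_commit()
//   {
//     g_commits.my()++;   // this thread's slot, indexed by coreid::core_id()
//   }
//
//   uint64_t
//   total_commits()
//   {
//     uint64_t s = 0;
//     for (size_t i = 0; i < g_commits.size(); i++)
//       s += g_commits[i];   // unsynchronized read-side aggregation (approximate)
//     return s;
//   }
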
namespace private_ {
  template <typename T>
  struct buf {
    char bytes_[sizeof(T)];
    inline T * cast() { return (T *) &bytes_[0]; }
    inline const T * cast() const { return (const T *) &bytes_[0]; }
  };
}

// like percore<T>, but constructs each core's T lazily on first access
template <typename T>
class percore_lazy : private percore<private_::buf<T>, false> {
  typedef private_::buf<T> buf_t;
public:

  percore_lazy()
  {
    NDB_MEMSET(&flags_[0], 0, sizeof(flags_));
  }

  template <class... Args>
  inline T &
  get(unsigned i, Args &&... args)
  {
    buf_t &b = this->elems()[i].elem;
    if (unlikely(!flags_[i])) {
      flags_[i] = true;
      T *px = new (&b.bytes_[0]) T(std::forward<Args>(args)...);
      return *px;
    }
    return *b.cast();
  }

  template <class... Args>
  inline T &
  my(Args &&... args)
  {
    return get(coreid::core_id(), std::forward<Args>(args)...);
  }

  inline T *
  view(unsigned i)
  {
    buf_t &b = this->elems()[i].elem;
    return flags_[i] ? b.cast() : nullptr;
  }

  inline const T *
  view(unsigned i) const
  {
    const buf_t &b = this->elems()[i].elem;
    return flags_[i] ? b.cast() : nullptr;
  }

  inline const T *
  myview() const
  {
    return view(coreid::core_id());
  }

private:
  bool flags_[NMAXCORES];
  CACHE_PADOUT;
};
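
// Usage sketch (illustrative): percore_lazy<T> defers construction of each
// core's T to the first get()/my() call, which is useful when T lacks a
// default constructor or is expensive to build. `logger`, `open_log`,
// `g_loggers`, and the append/flush methods are hypothetical.
//
//   static percore_lazy<logger> g_loggers;
//
//   void
//   log_event(const char *msg)
//   {
//     // constructs this core's logger on first use, forwarding the ctor args
//     g_loggers.my(open_log(coreid::core_id())).append(msg);
//   }
//
//   void
//   flush_core(unsigned i)
//   {
//     if (logger *l = g_loggers.view(i))   // nullptr if never constructed
//       l->flush();
//   }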