/**
 * XXX: CoreIDs are not recyclable for now, so NMAXCORES is really the number
 * of threads that can ever be spawned in the system.
 */
class coreid {
public:
  static const unsigned NMaxCores = NMAXCORES;

  static inline unsigned
  core_id()
  {
    if (unlikely(tl_core_id == -1)) {
      // first call from this thread: initialize its per-core data structures
      // by claiming the next available core id
      tl_core_id = g_core_count.fetch_add(1, std::memory_order_acq_rel);
      // make sure we did not exceed the maximum number of cores
      ALWAYS_ASSERT(unsigned(tl_core_id) < NMaxCores);
    }
    return tl_core_id;
  }
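
  // Usage sketch (the thread count of 4 below is arbitrary): every thread
  // that calls core_id() is lazily handed the next id, so the ids stay dense
  // and can be used directly as array indices.
  //
  //   std::vector<std::thread> workers;
  //   for (unsigned i = 0; i < 4; i++)
  //     workers.emplace_back([] {
  //       const unsigned id = coreid::core_id(); // stable for this thread
  //       // ... index per-core state with id ...
  //     });
  //   for (auto &w : workers)
  //     w.join();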

  /**
   * Since our current allocation scheme does not allow for holes in the
   * allocation, this function is quite wasteful. Don't abuse.
   *
   * Returns -1 if the block cannot be allocated without exceeding the
   * maximum number of core ids.
   */
  static int
  allocate_contiguous_aligned_block(unsigned n, unsigned alignment);
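
  // Usage sketch (the block size and alignment of 8 are arbitrary): reserve
  // a contiguous, aligned run of core ids and check for failure.
  //
  //   const int base = coreid::allocate_contiguous_aligned_block(8, 8);
  //   ALWAYS_ASSERT(base != -1);
  //   // ids base .. base + 7 now belong to the caller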

  /**
   * WARNING: this function is scary, and exists solely as a hack.
   *
   * You are allowed to set your own core id under several conditions
   * (the idea is that somebody else has allocated a block of core ids
   * and is assigning one to you, under the promise of uniqueness):
   *
   * 1) You haven't already called core_id() (so you have no assignment).
   * 2) The number you are setting is < the current assignment counter
   *    (meaning it was previously assigned by someone).
   *
   * These are necessary but not sufficient conditions for uniqueness.
   */
  static void
  set_core_id(unsigned cid)
  {
    ALWAYS_ASSERT(cid < NMaxCores);
    ALWAYS_ASSERT(cid < g_core_count.load(std::memory_order_acquire));
    ALWAYS_ASSERT(tl_core_id == -1);
    tl_core_id = cid; // sigh
  }
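
  // Usage sketch (assumes a coordinator has already reserved a unique id for
  // this thread, e.g. one id out of an allocate_contiguous_aligned_block()
  // run, and that this thread has never called core_id()):
  //
  //   void worker_main(unsigned assigned_id)
  //   {
  //     coreid::set_core_id(assigned_id); // must happen before any core_id() call
  //     // ... core_id() now returns assigned_id on this thread ...
  //   }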

  // actual number of CPUs online for the system
  static unsigned num_cpus_online();

private:
  // core ID of the calling thread; -1 if not yet assigned
  static __thread int tl_core_id;

  // running count of all core ids handed out so far
  static std::atomic<unsigned> g_core_count CACHE_ALIGNED;
};

// requires T to have a no-arg ctor
template <typename T, bool CallDtor = false, bool Pedantic = true>
class percore {
public:
  percore()
  {
    for (size_t i = 0; i < size(); i++)
      new (&(elems()[i])) util::aligned_padded_elem<T, Pedantic>();
  }

  ~percore()
  {
    if (!CallDtor)
      return;
    for (size_t i = 0; i < size(); i++)
      elems()[i].~aligned_padded_elem<T, Pedantic>();
  }

  inline T &
  operator[](unsigned i)
  {
    INVARIANT(i < NMAXCORES);
    return elems()[i].elem;
  }

  inline const T &
  operator[](unsigned i) const
  {
    INVARIANT(i < NMAXCORES);
    return elems()[i].elem;
  }

  // convenience accessors for the calling thread's own slot
  inline T & my() { return (*this)[coreid::core_id()]; }
  inline const T & my() const { return (*this)[coreid::core_id()]; }

  // XXX: make an iterator

  inline size_t size() const { return NMAXCORES; }

protected:
  inline util::aligned_padded_elem<T, Pedantic> *
  elems() { return (util::aligned_padded_elem<T, Pedantic> *) &bytes_[0]; }

  inline const util::aligned_padded_elem<T, Pedantic> *
  elems() const { return (const util::aligned_padded_elem<T, Pedantic> *) &bytes_[0]; }

  char bytes_[sizeof(util::aligned_padded_elem<T, Pedantic>) * NMAXCORES];
};
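
// Usage sketch (g_commit_counts and the functions below are illustrative):
// one padded slot per core lets each thread bump its own counter without
// false sharing, while a reader sums across all slots.
//
//   static percore<uint64_t> g_commit_counts;
//
//   void on_commit() { g_commit_counts.my()++; }
//
//   uint64_t total_commits()
//   {
//     uint64_t n = 0;
//     for (unsigned i = 0; i < coreid::NMaxCores; i++)
//       n += g_commit_counts[i];
//     return n;
//   }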

namespace private_ {
  // raw storage for a T that is constructed in place later (see percore_lazy)
  template <typename T>
  struct buf {
    char bytes_[sizeof(T)];
    inline T * cast() { return (T *) &bytes_[0]; }
    inline const T * cast() const { return (const T *) &bytes_[0]; }
  };
}

template <typename T>
class percore_lazy : private percore<private_::buf<T>, false> {
  typedef private_::buf<T> buf_t;

public:
  // start with every slot marked as not yet constructed
  percore_lazy() { NDB_MEMSET(&flags_[0], 0, sizeof(flags_)); }

  template <class... Args>
  inline T &
  get(unsigned i, Args &&... args)
  {
    buf_t &b = this->elems()[i].elem;
    if (unlikely(!flags_[i])) {
      // first access to slot i: construct the T in place
      flags_[i] = true;
      T *px = new (&b.bytes_[0]) T(std::forward<Args>(args)...);
      return *px;
    }
    return *b.cast();
  }

  template <class... Args>
  inline T & my(Args &&... args)
  { return get(coreid::core_id(), std::forward<Args>(args)...); }

  // both overloads return nullptr if slot i has not been constructed yet
  inline T *
  view(unsigned i)
  {
    buf_t &b = this->elems()[i].elem;
    return flags_[i] ? b.cast() : nullptr;
  }
  inline const T *
  view(unsigned i) const
  {
    const buf_t &b = this->elems()[i].elem;
    return flags_[i] ? b.cast() : nullptr;
  }

  inline const T *
  myview() const
  { return view(coreid::core_id()); }

private:
  // flags_[i] is true once slot i has been constructed
  bool flags_[NMAXCORES];
};
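
// Usage sketch (Stats and its constructor argument are illustrative): a slot
// is only constructed on the first get()/my() for that core, and view()
// reports nullptr for cores that never touched it.
//
//   struct Stats { explicit Stats(unsigned cid) : owner(cid) {} unsigned owner; };
//   static percore_lazy<Stats> g_stats;
//
//   void touch() { g_stats.my(coreid::core_id()); }           // construct on demand
//   const Stats *peek(unsigned i) { return g_stats.view(i); } // nullptr if untouched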