1 #ifndef _NDB_ALLOCATOR_H_
2 #define _NDB_ALLOCATOR_H_
16 // our allocator doesn't let allocations exceed maxpercore over a single core
// NOTE(review): the members below belong to an enclosing class whose header is
// outside this chunk; return types of some members sit on elided lines.
18 // Initialize can be called many times- but only the first call has effect.
20 // w/o calling Initialize(), behavior for this class is undefined
// sets up the global region for ncpus cores, maxpercore bytes each (see the
// [g_memstart, g_memstart + ncpus * maxpercore) comment near the data members)
21 static void Initialize(size_t ncpus, size_t maxpercore);
// presumably prints/logs allocator statistics -- implementation not visible here
23 static void DumpStats();
25 // returns an arena linked-list
// presumably cpu selects the per-core region and sz the arena size class --
// TODO confirm against the implementation (return type is on an elided line)
27 AllocateArenas(size_t cpu, size_t sz);
29 // allocates nhugepgs * hugepagesize contiguous bytes from CPU's region and
30 // returns the raw, unmanaged pointer.
32 // Note that memory returned from here cannot be released back to the
33 // allocator, so this should only be used for data structures which live
34 // throughout the duration of the system (i.e. log buffers)
36 AllocateUnmanaged(size_t cpu, size_t nhugepgs);
// presumably returns a linked list of arenas (as produced by AllocateArenas)
// back to the allocator -- TODO confirm against the implementation
39 ReleaseArenas(void **arenas);
41 static const size_t LgAllocAlignment = 4; // all allocations aligned to 2^4 = 16
42 static const size_t AllocAlignment = 1 << LgAllocAlignment;
// number of arena size classes; per the size-class arithmetic below
// (arena = allocsz / AllocAlignment - 1), arena i serves (i+1)*16-byte
// allocations, so arenas cover sizes 16..512 bytes
43 static const size_t MAX_ARENAS = 32;
45 static inline std::pair<size_t, size_t>
// (function name is on an elided line) maps a requested size to
// (rounded-up allocation size, arena index): allocsz is sz rounded up to a
// multiple of AllocAlignment (16), and arena i serves exactly (i+1)*16 bytes.
// NOTE(review): sz == 0 yields arena = 0/16 - 1, which wraps to SIZE_MAX --
// presumably callers never pass 0; verify at call sites.
48 const size_t allocsz = util::round_up<size_t, LgAllocAlignment>(sz);
49 const size_t arena = allocsz / AllocAlignment - 1;
50 return std::make_pair(allocsz, arena);
53 // slow, but only needs to be called on initialization
// presumably pre-faults (touches) cpu's memory region so later allocations
// don't take page faults -- inferred from the name; implementation not visible
55 FaultRegion(size_t cpu);
57 // returns true if managed by this allocator, false otherwise
// i.e. whether p lies within [g_memstart, g_memend), the single mmap()-ed
// region described by the comment near the data members below
59 ManagesPointer(const void *p)
61 return p >= g_memstart && p < g_memend;
64 // assumes p is managed by this allocator- returns the CPU from which this pointer
// was allocated: each CPU owns a contiguous g_maxpercore-byte slice of the
// global region, so the owning CPU is the byte offset from g_memstart
// divided by g_maxpercore
67 PointerToCpu(const void *p)
69 ALWAYS_ASSERT(p >= g_memstart);
70 ALWAYS_ASSERT(p < g_memend);
// byte offset within the region / per-core slice size = owning cpu index
72 (reinterpret_cast<const char *>(p) -
73 reinterpret_cast<const char *>(g_memstart)) / g_maxpercore;
74 ALWAYS_ASSERT(ret < g_ncpus);
// member of a metadata struct whose declaration is elided from this view
// (pgmetadata, per the return type below); meaning of "unit" not visible here
80 uint32_t unit_; // 0-indexed
83 // returns nullptr if p is not managed, or has not been allocated yet.
84 // p does not have to be properly aligned
85 static const pgmetadata *
86 PointerToPgMetadata(const void *p);
// the OS page sizes are queried once and cached in function-local statics
// (thread-safe initialization under C++11, which this file already requires
// via std::mutex / = delete); the *Impl functions do the actual query
92 static const size_t sz = GetPageSizeImpl();
99 static const size_t sz = GetHugepageSizeImpl();
104 static size_t GetPageSizeImpl();
105 static size_t GetHugepageSizeImpl();
// presumably gates use of madvise(MADV_WILLNEED) -- inferred from the name;
// confirm against the implementation
106 static bool UseMAdvWillNeed();
// constructor initializer list (partially elided in this view): the region
// starts unmapped/unfaulted and all arena slots are zeroed below
110 : region_begin(nullptr),
112 region_faulted(false)
114 NDB_MEMSET(arenas, 0, sizeof(arenas));
// non-copyable and non-movable -- regionctx holds a std::mutex (itself
// neither copyable nor movable) and raw region pointers
116 regionctx(const regionctx &) = delete;
117 regionctx(regionctx &&) = delete;
118 regionctx &operator=(const regionctx &) = delete;
120 // set by Initialize()
127 std::mutex fault_lock; // XXX: hacky
// one slot per size class (see MAX_ARENAS); presumably the heads of the
// per-size-class arena linked lists returned by AllocateArenas -- confirm
128 void *arenas[MAX_ARENAS];
131 // assumes caller has the regionctx lock held, and
132 // will release the lock.
// internal helper for AllocateUnmanaged(); note the asymmetric locking
// contract (caller acquires pc's lock, this function releases it)
134 AllocateUnmanagedWithLock(regionctx &pc, size_t nhugepgs);
136 // [g_memstart, g_memstart + ncpus * maxpercore) is the region of memory mmap()-ed
137 static void *g_memstart;
138 static void *g_memend; // g_memstart + ncpus * maxpercore
139 static size_t g_ncpus;
140 static size_t g_maxpercore;
// per-core region state; CACHE_ALIGNED presumably pads each entry to a cache
// line to avoid false sharing between cores -- macro definition not visible
142 static percore<regionctx> g_regions CACHE_ALIGNED;
145 #endif /* _NDB_ALLOCATOR_H_ */