/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <mutex>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include <folly/Hash.h>
#include <folly/Indestructible.h>
#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/Portability.h>
#include <folly/ThreadId.h>
#include <folly/portability/BitsFunctexcept.h>
#include <folly/portability/Memory.h>

namespace folly {
namespace detail {

// This file contains several classes that might be useful if you are
// trying to dynamically optimize cache locality: CacheLocality reads
// cache sharing information from sysfs to determine how CPUs should be
// grouped to minimize contention, Getcpu provides fast access to the
// current CPU via __vdso_getcpu, and AccessSpreader uses these two to
// optimally spread accesses among a predetermined number of stripes.
//
// AccessSpreader<>::current(n) microbenchmarks at 22 nanos, which is
// substantially less than the cost of a cache miss. This means that we
// can effectively use it to reduce cache line ping-pong on striped data
// structures such as IndexedMemPool or statistics counters.
//
// Because CacheLocality looks at all of the cache levels, it can be
// used for different levels of optimization. AccessSpreader(2) does
// per-chip spreading on a dual socket system. AccessSpreader(numCpus)
// does perfect per-cpu spreading. AccessSpreader(numCpus / 2) does
// perfect L1 spreading in a system with hyperthreading enabled.
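//
// A minimal usage sketch (illustrative only; `kStripes`, `Counter`, and
// `increment` are hypothetical names, not part of this header): spreading
// increments of a striped counter to reduce cache line ping-pong.
//
//   constexpr size_t kStripes = 8;
//   struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING Counter {
//     std::atomic<uint64_t> value{0};
//   };
//   Counter counters[kStripes];
//
//   void increment() {
//     counters[AccessSpreader<>::current(kStripes)].value.fetch_add(
//         1, std::memory_order_relaxed);
//   }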

struct CacheLocality {
  /// 1 more than the maximum value that can be returned from sched_getcpu
  /// or getcpu. This is the number of hardware thread contexts provided
  /// by the processors.
  size_t numCpus;

  /// Holds the number of caches present at each cache level (0 is
  /// the closest to the cpu). This is the number of AccessSpreader
  /// stripes needed to avoid cross-cache communication at the specified
  /// layer. numCachesByLevel.front() is the number of L1 caches and
  /// numCachesByLevel.back() is the number of last-level caches.
  std::vector<size_t> numCachesByLevel;

  /// A map from cpu (from sched_getcpu or getcpu) to an index in the
  /// range 0..numCpus-1, where neighboring locality indices are more
  /// likely to share caches than indices far away. All of the members
  /// of a particular cache level will be contiguous in their locality
  /// index. For example, if numCpus is 32 and numCachesByLevel.back()
  /// is 2, then cpus with a locality index < 16 will share one last-level
  /// cache and cpus with a locality index >= 16 will share the other.
  std::vector<size_t> localityIndexByCpu;

  /// Returns the best CacheLocality information available for the current
  /// system, cached for fast access. This will be loaded from sysfs if
  /// possible, otherwise it will be correct in the number of CPUs but
  /// not in their sharing structure.
  ///
  /// If you are into yo dawgs, this is a shared cache of the local
  /// locality of the shared caches.
  ///
  /// The template parameter here is used to allow injection of a
  /// repeatable CacheLocality structure during testing. Rather than
  /// inject the type of the CacheLocality provider into every data type
  /// that transitively uses it, all components select between the default
  /// sysfs implementation and a deterministic implementation by keying
  /// off the type of the underlying atomic. See DeterministicScheduler.
  template <template <typename> class Atom = std::atomic>
  static const CacheLocality& system();
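
  /// An illustrative query of the cached topology (the variable names
  /// here are examples only):
  ///
  ///   auto& cl = CacheLocality::system<>();
  ///   size_t numL1 = cl.numCachesByLevel.front();  // e.g. one per core
  ///   size_t numLLC = cl.numCachesByLevel.back();  // e.g. one per socket
  ///   size_t idx = cl.localityIndexByCpu[0];       // neighbors share caches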

  /// Reads CacheLocality information from a tree structured like
  /// the sysfs filesystem. The provided function will be evaluated
  /// for each sysfs file that needs to be queried. The function
  /// should return a string containing the first line of the file
  /// (not including the newline), or an empty string if the file does
  /// not exist. The function will be called with paths of the form
  /// /sys/devices/system/cpu/cpu*/cache/index*/{type,shared_cpu_list} .
  /// Throws an exception if no caches can be parsed at all.
  static CacheLocality readFromSysfsTree(
      const std::function<std::string(std::string)>& mapping);
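
  /// For example (a hypothetical two-cpu, shared-L1 tree; the map and
  /// lambda are illustrative, not part of folly):
  ///
  ///   std::unordered_map<std::string, std::string> fakeSysfs = {
  ///       {"/sys/devices/system/cpu/cpu0/cache/index0/type", "Data"},
  ///       {"/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list",
  ///        "0-1"},
  ///       {"/sys/devices/system/cpu/cpu1/cache/index0/type", "Data"},
  ///       {"/sys/devices/system/cpu/cpu1/cache/index0/shared_cpu_list",
  ///        "0-1"},
  ///   };
  ///   auto cl = CacheLocality::readFromSysfsTree([&](std::string path) {
  ///     auto it = fakeSysfs.find(path);
  ///     return it == fakeSysfs.end() ? std::string() : it->second;
  ///   });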

  /// Reads CacheLocality information from the real sysfs filesystem.
  /// Throws an exception if no cache information can be loaded.
  static CacheLocality readFromSysfs();

  /// Returns a usable (but probably not reflective of reality)
  /// CacheLocality structure with the specified number of cpus and a
  /// single cache level that associates one cpu per cache.
  static CacheLocality uniform(size_t numCpus);

  enum {
    /// Memory locations on the same cache line are subject to false
    /// sharing, which is very bad for performance. Microbenchmarks
    /// indicate that pairs of cache lines also see interference under
    /// heavy use of atomic operations (observed for atomic increment on
    /// Sandy Bridge). See FOLLY_ALIGN_TO_AVOID_FALSE_SHARING.
    kFalseSharingRange = 128
  };

  static_assert(
      kFalseSharingRange == 128,
      "FOLLY_ALIGN_TO_AVOID_FALSE_SHARING should track kFalseSharingRange");
};

// TODO replace __attribute__ with alignas and 128 with kFalseSharingRange

/// An attribute that will cause a variable or field to be aligned so that
/// it doesn't have false sharing with anything at a smaller memory address.
#define FOLLY_ALIGN_TO_AVOID_FALSE_SHARING FOLLY_ALIGNED(128)
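
// For instance (illustrative), to keep two heavily written fields from
// interfering with each other:
//
//   struct TwoHotFields {
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> a;
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> b;
//   };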

/// Knows how to derive a function pointer to the VDSO implementation of
/// getcpu(2), if available
struct Getcpu {
  /// Function pointer to a function with the same signature as getcpu(2).
  typedef int (*Func)(unsigned* cpu, unsigned* node, void* unused);

  /// Returns a pointer to the VDSO implementation of getcpu(2), if
  /// available, or nullptr otherwise. This function may be quite
  /// expensive, be sure to cache the result.
  static Func resolveVdsoFunc();
};
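
// Illustrative use, caching the resolved pointer as the doc comment above
// advises (the null-check-and-fallback mirrors what
// AccessSpreader::pickGetcpuFunc does further down):
//
//   static Getcpu::Func func = Getcpu::resolveVdsoFunc();
//   unsigned cpu;
//   if (func != nullptr) {
//     func(&cpu, nullptr, nullptr);
//   }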

#ifdef FOLLY_TLS
template <template <typename> class Atom>
struct SequentialThreadId {
  /// Returns the thread id assigned to the current thread
  static unsigned get() {
    auto rv = currentId;
    if (UNLIKELY(rv == 0)) {
      rv = currentId = ++prevId;
    }
    return rv;
  }

 private:
  static Atom<unsigned> prevId;

  static FOLLY_TLS unsigned currentId;
};

template <template <typename> class Atom>
Atom<unsigned> SequentialThreadId<Atom>::prevId(0);

template <template <typename> class Atom>
FOLLY_TLS unsigned SequentialThreadId<Atom>::currentId(0);

// Suppress this instantiation in other translation units. It is
// instantiated in CacheLocality.cpp
extern template struct SequentialThreadId<std::atomic>;
#endif

struct HashingThreadId {
  static unsigned get() {
    return hash::twang_32from64(getCurrentThreadID());
  }
};

/// A class that lazily binds a unique (for each implementation of Atom)
/// identifier to a thread. This is a fallback mechanism for the access
/// spreader if __vdso_getcpu can't be loaded
template <typename ThreadId>
struct FallbackGetcpu {
  /// Fills the thread id into the cpu and node out params (if they
  /// are non-null). This method is intended to act like getcpu when a
  /// fast-enough form of getcpu isn't available or isn't desired
  static int getcpu(unsigned* cpu, unsigned* node, void* /* unused */) {
    auto id = ThreadId::get();
    if (cpu) {
      *cpu = id;
    }
    if (node) {
      *node = id;
    }
    return 0;
  }
};

#ifdef FOLLY_TLS
typedef FallbackGetcpu<SequentialThreadId<std::atomic>> FallbackGetcpuType;
#else
typedef FallbackGetcpu<HashingThreadId> FallbackGetcpuType;
#endif
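
// Note that with the fallback in place the "cpu" handed back to callers is
// really just a per-thread integer, e.g. (illustrative):
//
//   unsigned cpu;
//   FallbackGetcpuType::getcpu(&cpu, nullptr, nullptr);
//   // cpu is now a small sequential (or hashed) thread id, which still
//   // spreads threads across stripes, just without topology information.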

/// AccessSpreader arranges access to a striped data structure in such a
/// way that concurrently executing threads are likely to be accessing
/// different stripes. It does NOT guarantee uncontended access.
/// Your underlying algorithm must be thread-safe without spreading; this
/// is merely an optimization. AccessSpreader::current(n) is typically
/// much faster than a cache miss (12 nanos on my dev box, tested in
/// both 2.6 and 3.2 kernels).
///
/// If available (and not using the deterministic testing implementation)
/// AccessSpreader uses the getcpu system call via VDSO and the
/// precise locality information retrieved from sysfs by CacheLocality.
/// This provides optimal anti-sharing at a fraction of the cost of a
/// cache miss.
///
/// When there are not as many stripes as processors, we try to optimally
/// place the cache sharing boundaries. This means that if you have 2
/// stripes and run on a dual-socket system, your 2 stripes will each get
/// all of the cores from a single socket. If you have 16 stripes on a
/// 16 core system plus hyperthreading (32 cpus), each core will get its
/// own stripe and there will be no cache sharing at all.
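///
/// As a concrete (illustrative) reading of the above: on that same
/// dual-socket, 16-core/32-cpu box, current(2) effectively answers
/// "which socket am I on?", while current(32) hands every hardware
/// thread its own stripe.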
///
/// AccessSpreader has a fallback mechanism for when __vdso_getcpu can't be
/// loaded, or for use during deterministic testing. Using sched_getcpu
/// or the getcpu syscall would negate the performance advantages of
/// access spreading, so we use a thread-local value and a shared atomic
/// counter to spread access out. On systems lacking both a fast getcpu()
/// and TLS, we hash the thread id to spread accesses.
///
/// AccessSpreader is templated on the template type that is used
/// to implement atomics, as a way to instantiate the underlying
/// heuristics differently for production use and deterministic unit
/// testing. See DeterministicScheduler for more. If you aren't using
/// DeterministicScheduler, you can just use the default template
/// parameter all of the time.
template <template <typename> class Atom = std::atomic>
struct AccessSpreader {
  /// Returns the stripe associated with the current CPU. The returned
  /// value will be < numStripes.
  static size_t current(size_t numStripes) {
    // widthAndCpuToStripe[0] will actually work okay (all zeros), but
    // something's wrong with the caller
    assert(numStripes > 0);

    unsigned cpu;
    getcpuFunc(&cpu, nullptr, nullptr);
    return widthAndCpuToStripe[std::min(size_t(kMaxCpus),
                                        numStripes)][cpu % kMaxCpus];
  }

 private:
  /// If there are more cpus than this nothing will crash, but there
  /// might be unnecessary sharing
  enum { kMaxCpus = 128 };

  typedef uint8_t CompactStripe;

  static_assert((kMaxCpus & (kMaxCpus - 1)) == 0,
                "kMaxCpus should be a power of two so modulo is fast");
  static_assert(kMaxCpus - 1 <= std::numeric_limits<CompactStripe>::max(),
                "stripeByCpu element type isn't wide enough");

  /// Points to the getcpu-like function we are using to obtain the
  /// current cpu. It should not be assumed that the returned cpu value
  /// is in range. We use a static for this so that we can prearrange a
  /// valid value in the pre-constructed state and avoid the need for a
  /// conditional on every subsequent invocation (not normally a big win,
  /// but 20% on some inner loops here).
  static Getcpu::Func getcpuFunc;

  /// For each level of splitting up to kMaxCpus, maps the cpu (mod
  /// kMaxCpus) to the stripe. Rather than performing any inequalities
  /// or modulo on the actual number of cpus, we just fill in the entire
  /// table.
  static CompactStripe widthAndCpuToStripe[kMaxCpus + 1][kMaxCpus];

  static bool initialized;

  /// Returns the best getcpu implementation for Atom
  static Getcpu::Func pickGetcpuFunc() {
    auto best = Getcpu::resolveVdsoFunc();
    return best ? best : &FallbackGetcpuType::getcpu;
  }

  /// Always claims to be on CPU zero, node zero
  static int degenerateGetcpu(unsigned* cpu, unsigned* node, void*) {
    if (cpu != nullptr) {
      *cpu = 0;
    }
    if (node != nullptr) {
      *node = 0;
    }
    return 0;
  }

  // The function to call for fast lookup of getcpu is a singleton, as
  // is the precomputed table of locality information. AccessSpreader
  // is used in very tight loops, however (we're trying to race an L1
  // cache miss!), so the normal singleton mechanisms are noticeably
  // expensive. Even a not-taken branch guarding access to getcpuFunc
  // slows AccessSpreader::current from 12 nanos to 14. As a result, we
  // populate the static members with simple (but valid) values that can
  // be filled in by the linker, and then follow up with a normal static
  // initializer call that puts in the proper version. This means that
  // when there are initialization order issues we will just observe a
  // zero stripe. Once a sanitizer gets smart enough to detect this as
  // a race or undefined behavior, we can annotate it.

  static bool initialize() {
    getcpuFunc = pickGetcpuFunc();

    auto& cacheLocality = CacheLocality::system<Atom>();
    auto n = cacheLocality.numCpus;
    for (size_t width = 0; width <= kMaxCpus; ++width) {
      auto numStripes = std::max(size_t{1}, width);
      for (size_t cpu = 0; cpu < kMaxCpus && cpu < n; ++cpu) {
        auto index = cacheLocality.localityIndexByCpu[cpu];
        assert(index < n);
        // as index goes from 0..n, post-transform value goes from
        // 0..numStripes
        widthAndCpuToStripe[width][cpu] =
            CompactStripe((index * numStripes) / n);
        assert(widthAndCpuToStripe[width][cpu] < numStripes);
      }
      // Replay the valid entries into the rest of the table, so that a
      // lookup with cpu % kMaxCpus always hits a valid stripe.
      for (size_t cpu = n; cpu < kMaxCpus; ++cpu) {
        widthAndCpuToStripe[width][cpu] = widthAndCpuToStripe[width][cpu - n];
      }
    }
    return true;
  }
};
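
// A worked instance of the table construction above (illustrative): with
// n = 8 cpus and width = 2 stripes, locality indices 0..3 map to stripe
// (index * 2) / 8 == 0 and indices 4..7 map to stripe 1, so the two
// stripes split along the widest cache-sharing boundary.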

template <template <typename> class Atom>
Getcpu::Func AccessSpreader<Atom>::getcpuFunc =
    AccessSpreader<Atom>::degenerateGetcpu;

template <template <typename> class Atom>
typename AccessSpreader<Atom>::CompactStripe
    AccessSpreader<Atom>::widthAndCpuToStripe[kMaxCpus + 1][kMaxCpus] = {};

template <template <typename> class Atom>
bool AccessSpreader<Atom>::initialized = AccessSpreader<Atom>::initialize();

// Suppress this instantiation in other translation units. It is
// instantiated in CacheLocality.cpp
extern template struct AccessSpreader<std::atomic>;

/**
 * A simple freelist allocator. Allocates things of size sz, from
 * slabs of size allocSize. Takes a lock on each
 * allocation/deallocation.
 */
class SimpleAllocator {
  std::mutex m_;
  uint8_t* mem_{nullptr};
  uint8_t* end_{nullptr};
  void* freelist_{nullptr};
  size_t allocSize_;
  size_t sz_;
  std::vector<void*> blocks_;

 public:
  SimpleAllocator(size_t allocSize, size_t sz);
  ~SimpleAllocator();
  void* allocateHard();

  // Inline fast-paths.
  void* allocate() {
    std::lock_guard<std::mutex> g(m_);
    // Freelist allocation.
    if (freelist_) {
      auto mem = freelist_;
      freelist_ = *static_cast<void**>(freelist_);
      return mem;
    }

    // Bump-ptr allocation.
    if (intptr_t(mem_) % 128 == 0) {
      // Avoid allocating pointers that may look like malloc
      // pointers.
      mem_ += std::min(sz_, alignof(std::max_align_t));
    }
    if (mem_ && (mem_ + sz_ <= end_)) {
      auto mem = mem_;
      mem_ += sz_;
      assert(intptr_t(mem) % 128 != 0);
      return mem;
    }

    return allocateHard();
  }

  void deallocate(void* mem) {
    std::lock_guard<std::mutex> g(m_);
    *static_cast<void**>(mem) = freelist_;
    freelist_ = mem;
  }
};
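
// Illustrative use (the names are examples, not part of this header):
//
//   SimpleAllocator alloc(4096, 32);  // 32-byte objects, 4096-byte slabs
//   void* p = alloc.allocate();
//   alloc.deallocate(p);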

/**
 * An allocator that can be used with CacheLocality to allocate
 * core-local memory.
 *
 * There is actually nothing special about the memory itself (it is
 * not bound to NUMA nodes or anything), but the allocator guarantees
 * that memory allocated from the same stripe will only come from cache
 * lines also allocated to the same stripe. This means multiple
 * things using CacheLocality can allocate memory in smaller-than
 * cacheline increments, and be assured that it won't cause more false
 * sharing than it otherwise would.
 *
 * Note that allocation and deallocation take a per-sizeclass lock.
 */
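
// An illustrative pattern (kStripes is a hypothetical constant): pick the
// stripe with AccessSpreader, then allocate from that stripe's allocator,
// so sub-cacheline objects from different stripes never share a line.
//
//   constexpr size_t kStripes = 8;
//   CoreAllocator<kStripes> coreAlloc;
//   auto* alloc = coreAlloc.get(AccessSpreader<>::current(kStripes));
//   void* p = alloc->allocate(24);  // served from the 32-byte size class
//   alloc->deallocate(p);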
template <size_t Stripes>
class CoreAllocator {
 public:
  class Allocator {
    static constexpr size_t AllocSize{4096};

    uint8_t sizeClass(size_t size) {
      if (size <= 8) {
        return 0;
      } else if (size <= 16) {
        return 1;
      } else if (size <= 32) {
        return 2;
      } else if (size <= 64) {
        return 3;
      } else { // punt to malloc.
        return 4;
      }
    }

    std::array<SimpleAllocator, 4> allocators_{
        {{AllocSize, 8}, {AllocSize, 16}, {AllocSize, 32}, {AllocSize, 64}}};

   public:
    void* allocate(size_t size) {
      auto cl = sizeClass(size);
      if (cl == 4) {
        static_assert(
            CacheLocality::kFalseSharingRange == 128,
            "kFalseSharingRange changed");
        // Align to a cacheline
        size = size + (CacheLocality::kFalseSharingRange - 1);
        size &= ~size_t(CacheLocality::kFalseSharingRange - 1);
        void* mem =
            detail::aligned_malloc(size, CacheLocality::kFalseSharingRange);
        if (!mem) {
          std::__throw_bad_alloc();
        }
        return mem;
      }
      return allocators_[cl].allocate();
    }

    void deallocate(void* mem) {
      if (!mem) {
        return;
      }

      // See if it came from this allocator or malloc.
      if (intptr_t(mem) % 128 != 0) {
        // Small allocations are never 128-byte aligned; the owning
        // SimpleAllocator's address is read from the base of its slab.
        auto addr =
            reinterpret_cast<void*>(intptr_t(mem) & ~intptr_t(AllocSize - 1));
        auto allocator = *static_cast<SimpleAllocator**>(addr);
        allocator->deallocate(mem);
      } else {
        detail::aligned_free(mem);
      }
    }
  };

  Allocator* get(size_t stripe) {
    assert(stripe < Stripes);
    return &allocators_[stripe];
  }

 private:
  Allocator allocators_[Stripes];
};

template <size_t Stripes>
typename CoreAllocator<Stripes>::Allocator* getCoreAllocator(size_t stripe) {
  // We cannot make sure that the allocator will be destroyed after
  // all the objects allocated with it, so we leak it.
  static Indestructible<CoreAllocator<Stripes>> allocator;
  return allocator->get(stripe);
}
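
// The following helper wraps a stripe's allocator in an StlAllocator so it
// can back standard containers. Illustrative use (kStripes and vec are
// hypothetical names):
//
//   constexpr size_t kStripes = 8;
//   auto a = getCoreAllocatorStl<int, kStripes>(
//       AccessSpreader<>::current(kStripes));
//   std::vector<int, decltype(a)> vec(a);
//   vec.push_back(42);  // storage comes from stripe-local cache lines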
template <typename T, size_t Stripes>
StlAllocator<typename CoreAllocator<Stripes>::Allocator, T> getCoreAllocatorStl(
    size_t stripe) {
  auto alloc = getCoreAllocator<Stripes>(stripe);
  return StlAllocator<typename CoreAllocator<Stripes>::Allocator, T>(alloc);
}

} // namespace detail
} // namespace folly