/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_DETAIL_CACHELOCALITY_H_
#define FOLLY_DETAIL_CACHELOCALITY_H_

#include <pthread.h>

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <new>
#include <string>
#include <type_traits>
#include <vector>

#include <folly/Hash.h>
#include <folly/Likely.h>
#include <folly/Portability.h>

namespace folly { namespace detail {

// This file contains several classes that might be useful if you are
// trying to dynamically optimize cache locality: CacheLocality reads
// cache sharing information from sysfs to determine how CPUs should be
// grouped to minimize contention, Getcpu provides fast access to the
// current CPU via __vdso_getcpu, and AccessSpreader uses these two to
// optimally spread accesses among a predetermined number of stripes.
//
// AccessSpreader<>::current(n) microbenchmarks at 22 nanos, which is
// substantially less than the cost of a cache miss.  This means that we
// can effectively use it to reduce cache line ping-pong on striped data
// structures such as IndexedMemPool or statistics counters.
//
// Because CacheLocality looks at all of the cache levels, it can be
// used for different levels of optimization.  AccessSpreader(2) does
// per-chip spreading on a dual socket system.  AccessSpreader(numCpus)
// does perfect per-cpu spreading.  AccessSpreader(numCpus / 2) does
// perfect L1 spreading in a system with hyperthreading enabled.
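//
// For example, a minimal sketch of striped counters (hypothetical;
// `counters` and `kStripes` are illustrative names, not part of this file,
// and each counter should be padded to avoid false sharing):
//
//   constexpr size_t kStripes = 8;
//   extern std::atomic<uint64_t> counters[kStripes];
//
//   void bump() {
//     counters[AccessSpreader<>::current(kStripes)].fetch_add(
//         1, std::memory_order_relaxed);
//   }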

struct CacheLocality {

  /// 1 more than the maximum value that can be returned from sched_getcpu
  /// or getcpu.  This is the number of hardware thread contexts provided
  /// by the processors
  size_t numCpus;

  /// Holds the number of caches present at each cache level (0 is
  /// the closest to the cpu).  This is the number of AccessSpreader
  /// stripes needed to avoid cross-cache communication at the specified
  /// layer.  numCachesByLevel.front() is the number of L1 caches and
  /// numCachesByLevel.back() is the number of last-level caches.
  std::vector<size_t> numCachesByLevel;

  /// A map from cpu (from sched_getcpu or getcpu) to an index in the
  /// range 0..numCpus-1, where neighboring locality indices are more
  /// likely to share caches than indices far away.  All of the members
  /// of a particular cache level will be contiguous in their locality
  /// index.  For example, if numCpus is 32 and numCachesByLevel.back()
  /// is 2, then cpus with a locality index < 16 will share one last-level
  /// cache and cpus with a locality index >= 16 will share the other.
  std::vector<size_t> localityIndexByCpu;

  /// Returns the best CacheLocality information available for the current
  /// system, cached for fast access.  This will be loaded from sysfs if
  /// possible, otherwise it will be correct in the number of CPUs but
  /// not in their sharing structure.
  ///
  /// If you are into yo dawgs, this is a shared cache of the local
  /// locality of the shared caches.
  ///
  /// The template parameter here is used to allow injection of a
  /// repeatable CacheLocality structure during testing.  Rather than
  /// inject the type of the CacheLocality provider into every data type
  /// that transitively uses it, all components select between the default
  /// sysfs implementation and a deterministic implementation by keying
  /// off the type of the underlying atomic.  See DeterministicScheduler.
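  ///
  /// A usage sketch (the variable names here are illustrative, not part
  /// of this API):
  ///
  ///   const auto& cacheLocality = CacheLocality::system<>();
  ///   size_t lastLevelCaches = cacheLocality.numCachesByLevel.back();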
  template <template<typename> class Atom = std::atomic>
  static const CacheLocality& system();

  /// Reads CacheLocality information from a tree structured like
  /// the sysfs filesystem.  The provided function will be evaluated
  /// for each sysfs file that needs to be queried.  The function
  /// should return a string containing the first line of the file
  /// (not including the newline), or an empty string if the file does
  /// not exist.  The function will be called with paths of the form
  /// /sys/devices/system/cpu/cpu*/cache/index*/{type,shared_cpu_list} .
  /// Throws an exception if no caches can be parsed at all.
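  ///
  /// For example, a test might back the mapping with an in-memory table
  /// (a sketch; `fakeSysfs` is an assumption, not part of this API):
  ///
  ///   std::map<std::string, std::string> fakeSysfs;  // path -> first line
  ///   auto cl = CacheLocality::readFromSysfsTree([&](std::string path) {
  ///     auto iter = fakeSysfs.find(path);
  ///     return iter == fakeSysfs.end() ? std::string() : iter->second;
  ///   });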
  static CacheLocality readFromSysfsTree(
      const std::function<std::string(std::string)>& mapping);

  /// Reads CacheLocality information from the real sysfs filesystem.
  /// Throws an exception if no cache information can be loaded.
  static CacheLocality readFromSysfs();

  /// Returns a usable (but probably not reflective of reality)
  /// CacheLocality structure with the specified number of cpus and a
  /// single cache level that associates one cpu per cache.
  static CacheLocality uniform(size_t numCpus);

  enum {
    /// Memory locations on the same cache line are subject to false
    /// sharing, which is very bad for performance.  Microbenchmarks
    /// indicate that pairs of cache lines also see interference under
    /// heavy use of atomic operations (observed for atomic increment on
    /// Sandy Bridge).  See FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
    kFalseSharingRange = 128
  };

  static_assert(kFalseSharingRange == 128,
      "FOLLY_ALIGN_TO_AVOID_FALSE_SHARING should track kFalseSharingRange");
};

// TODO replace __attribute__ with alignas and 128 with kFalseSharingRange

/// An attribute that will cause a variable or field to be aligned so that
/// it doesn't have false sharing with anything at a smaller memory address.
#define FOLLY_ALIGN_TO_AVOID_FALSE_SHARING FOLLY_ALIGNED(128)
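
/// For example (a sketch; `PaddedCounter` is hypothetical, not part of
/// this header):
///
///   struct PaddedCounter {
///     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> value;
///   };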

/// Holds a function pointer to the VDSO implementation of getcpu(2),
/// if available
struct Getcpu {
  /// Function pointer to a function with the same signature as getcpu(2).
  typedef int (*Func)(unsigned* cpu, unsigned* node, void* unused);

  /// Returns a pointer to the VDSO implementation of getcpu(2), if
  /// available, or nullptr otherwise
  static Func vdsoFunc();
};
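
/// A sketch of the fast path (vdsoFunc() may legitimately return nullptr,
/// for example on non-Linux systems, so callers need a fallback):
///
///   Getcpu::Func func = Getcpu::vdsoFunc();
///   unsigned cpu = 0;
///   if (func != nullptr) {
///     func(&cpu, nullptr, nullptr);
///   }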

#ifdef FOLLY_TLS
template <template<typename> class Atom>
struct SequentialThreadId {

  /// Returns the thread id assigned to the current thread
  static size_t get() {
    auto rv = currentId;
    if (UNLIKELY(rv == 0)) {
      rv = currentId = ++prevId;
    }
    return rv;
  }

 private:
  static Atom<size_t> prevId;

  static FOLLY_TLS size_t currentId;
};
#endif

struct HashingThreadId {
  static size_t get() {
    pthread_t pid = pthread_self();
    uint64_t id = 0;
    memcpy(&id, &pid, std::min(sizeof(pid), sizeof(id)));
    return hash::twang_32from64(id);
  }
};

/// A class that lazily binds a unique (for each implementation of Atom)
/// identifier to a thread.  This is a fallback mechanism for the access
/// spreader if __vdso_getcpu can't be loaded
template <typename ThreadId>
struct FallbackGetcpu {
  /// Fills the thread id into the cpu and node out params (if they
  /// are non-null).  This method is intended to act like getcpu when a
  /// fast-enough form of getcpu isn't available or isn't desired
  static int getcpu(unsigned* cpu, unsigned* node, void* /* unused */) {
    auto id = ThreadId::get();
    if (cpu != nullptr) {
      *cpu = id;
    }
    if (node != nullptr) {
      *node = id;
    }
    return 0;
  }
};

#ifdef FOLLY_TLS
typedef FallbackGetcpu<SequentialThreadId<std::atomic>> FallbackGetcpuType;
#else
typedef FallbackGetcpu<HashingThreadId> FallbackGetcpuType;
#endif

template <template<typename> class Atom, size_t kMaxCpus>
struct AccessSpreaderArray;

/// AccessSpreader arranges access to a striped data structure in such a
/// way that concurrently executing threads are likely to be accessing
/// different stripes.  It does NOT guarantee uncontended access.
/// Your underlying algorithm must be thread-safe without spreading; this
/// is merely an optimization.  AccessSpreader::current(n) is typically
/// much faster than a cache miss (22 nanos on my dev box, tested fast
/// in both 2.6 and 3.2 kernels).
///
/// You are free to create your own AccessSpreader-s or to cache the
/// results of AccessSpreader<>::shared(n), but you will probably want
/// to use one of the system-wide shared ones.  Calling .current() on
/// a particular AccessSpreader instance only saves about 1 nanosecond
/// over calling AccessSpreader<>::shared(n).
///
/// If available (and not using the deterministic testing implementation)
/// AccessSpreader uses the getcpu system call via VDSO and the
/// precise locality information retrieved from sysfs by CacheLocality.
/// This provides optimal anti-sharing at a fraction of the cost of a
/// cache miss.
///
/// When there are not as many stripes as processors, we try to optimally
/// place the cache sharing boundaries.  This means that if you have 2
/// stripes and run on a dual-socket system, your 2 stripes will each get
/// all of the cores from a single socket.  If you have 16 stripes on a
/// 16 core system plus hyperthreading (32 cpus), each core will get its
/// own stripe and there will be no cache sharing at all.
///
/// AccessSpreader has a fallback mechanism for when __vdso_getcpu can't
/// be loaded, or for use during deterministic testing.  Using sched_getcpu
/// or the getcpu syscall would negate the performance advantages of
/// access spreading, so we use a thread-local value and a shared atomic
/// counter to spread access out.
///
/// AccessSpreader is templated on the template type that is used
/// to implement atomics, as a way to instantiate the underlying
/// heuristics differently for production use and deterministic unit
/// testing.  See DeterministicScheduler for more.  If you aren't using
/// DeterministicScheduler, you can just use the default template
/// parameter all of the time.
template <template<typename> class Atom = std::atomic>
struct AccessSpreader {

  /// Returns a never-destructed shared AccessSpreader instance.
  /// numStripes should be > 0.
  static const AccessSpreader& shared(size_t numStripes) {
    // sharedInstances[0] actually has numStripes == 1
    assert(numStripes > 0);

    // the last shared element handles all large sizes
    return AccessSpreaderArray<Atom,kMaxCpus>::sharedInstance[
        std::min(size_t(kMaxCpus), numStripes)];
  }

  /// Returns the stripe associated with the current CPU, assuming
  /// that there are numStripes (non-zero) stripes.  Equivalent to
  /// AccessSpreader::shared(numStripes).current().
  static size_t current(size_t numStripes) {
    return shared(numStripes).current();
  }

  /// stripeByCore uses 1 stripe per L1 cache, according to
  /// CacheLocality::system<>().  Use stripeByCore.numStripes() to see
  /// its width, or stripeByCore.current() to get the current stripe
  static const AccessSpreader stripeByCore;

  /// stripeByChip uses 1 stripe per last-level cache, which is the fewest
  /// number of stripes for which off-chip communication can be avoided
  /// (assuming all caches are on-chip).  Use stripeByChip.numStripes()
  /// to see its width, or stripeByChip.current() to get the current stripe
  static const AccessSpreader stripeByChip;
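
  /// For example (a sketch; `PerChipStats` is hypothetical):
  ///
  ///   std::vector<PerChipStats> stats(
  ///       AccessSpreader<>::stripeByChip.numStripes());
  ///   stats[AccessSpreader<>::stripeByChip.current()].record();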

  /// Constructs an AccessSpreader that will return values from
  /// 0 to numStripes-1 (inclusive), precomputing the mapping
  /// from CPU to stripe.  There is no use in having more than
  /// CacheLocality::system<Atom>().localityIndexByCpu.size() stripes or
  /// kMaxCpus stripes
  explicit AccessSpreader(size_t spreaderNumStripes,
                          const CacheLocality& cacheLocality =
                              CacheLocality::system<Atom>(),
                          Getcpu::Func getcpuFunc = nullptr)
    : getcpuFunc_(getcpuFunc ? getcpuFunc
                             : pickGetcpuFunc(spreaderNumStripes))
    , numStripes_(spreaderNumStripes)
  {
    auto n = cacheLocality.numCpus;
    for (size_t cpu = 0; cpu < kMaxCpus && cpu < n; ++cpu) {
      auto index = cacheLocality.localityIndexByCpu[cpu];
      assert(index < n);
      // as index goes from 0..n, post-transform value goes from
      // 0..numStripes
      stripeByCpu[cpu] = (index * numStripes_) / n;
      assert(stripeByCpu[cpu] < numStripes_);
    }
    for (size_t cpu = n; cpu < kMaxCpus; ++cpu) {
      stripeByCpu[cpu] = stripeByCpu[cpu - n];
    }
  }
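
  /// As a worked instance of the mapping above: with n == 32 cpus and
  /// numStripes_ == 2, locality indices 0..15 give (index * 2) / 32 == 0
  /// and indices 16..31 give 1, so each stripe covers exactly one of the
  /// two last-level caches in the example from CacheLocality.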

  /// Returns 1 more than the maximum value that can be returned from
  /// current()
  size_t numStripes() const {
    return numStripes_;
  }

  /// Returns the stripe associated with the current CPU
  size_t current() const {
    unsigned cpu;
    getcpuFunc_(&cpu, nullptr, nullptr);
    return stripeByCpu[cpu % kMaxCpus];
  }

 private:

  /// If there are more cpus than this nothing will crash, but there
  /// might be unnecessary sharing
  enum { kMaxCpus = 128 };

  typedef uint8_t CompactStripe;

  static_assert((kMaxCpus & (kMaxCpus - 1)) == 0,
      "kMaxCpus should be a power of two so modulo is fast");
  static_assert(kMaxCpus - 1 <= std::numeric_limits<CompactStripe>::max(),
      "stripeByCpu element type isn't wide enough");

  /// Points to the getcpu-like function we are using to obtain the
  /// current cpu.  It should not be assumed that the returned cpu value
  /// is in range.  We use a member for this instead of a static so that
  /// this fetch preloads a prefix of the stripeByCpu array
  Getcpu::Func getcpuFunc_;

  /// A precomputed map from cpu to stripe.  Rather than add a layer of
  /// indirection requiring a dynamic bounds check and another cache miss,
  /// we always precompute the whole array
  CompactStripe stripeByCpu[kMaxCpus];

  size_t numStripes_;

  /// Returns the best getcpu implementation for this type and width
  /// of AccessSpreader
  static Getcpu::Func pickGetcpuFunc(size_t numStripes);
};

template <>
Getcpu::Func AccessSpreader<std::atomic>::pickGetcpuFunc(size_t);

/// An array of kMaxCpus+1 AccessSpreader<Atom> instances constructed
/// with default params, with the zero-th element having 1 stripe
template <template<typename> class Atom, size_t kMaxStripe>
struct AccessSpreaderArray {

  AccessSpreaderArray() {
    for (size_t i = 0; i <= kMaxStripe; ++i) {
      new (raw + i) AccessSpreader<Atom>(std::max(size_t(1), i));
    }
  }

  ~AccessSpreaderArray() {
    for (size_t i = 0; i <= kMaxStripe; ++i) {
      auto p = static_cast<AccessSpreader<Atom>*>(static_cast<void*>(raw + i));
      p->~AccessSpreader();
    }
  }

  AccessSpreader<Atom> const& operator[] (size_t index) const {
    return *static_cast<AccessSpreader<Atom> const*>(
        static_cast<void const*>(raw + index));
  }

 private:

  // AccessSpreader uses sharedInstance
  friend AccessSpreader<Atom>;

  static AccessSpreaderArray<Atom,kMaxStripe> sharedInstance;

  /// aligned_storage is uninitialized; we use placement new since there
  /// is no AccessSpreader default constructor
  typename std::aligned_storage<sizeof(AccessSpreader<Atom>),
                                CacheLocality::kFalseSharingRange>::type
      raw[kMaxStripe + 1];
};

} // namespace detail
} // namespace folly

#endif /* FOLLY_DETAIL_CACHELOCALITY_H_ */