/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_DETAIL_CACHELOCALITY_H_
#define FOLLY_DETAIL_CACHELOCALITY_H_

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <new>
#include <string>
#include <type_traits>
#include <vector>

#include <folly/Likely.h>
#include <folly/Portability.h>

namespace folly { namespace detail {

// This file contains several classes that might be useful if you are
// trying to dynamically optimize cache locality: CacheLocality reads
// cache sharing information from sysfs to determine how CPUs should be
// grouped to minimize contention, Getcpu provides fast access to the
// current CPU via __vdso_getcpu, and AccessSpreader uses these two to
// optimally spread accesses among a predetermined number of stripes.
//
// AccessSpreader<>::current(n) microbenchmarks at 22 nanos, which is
// substantially less than the cost of a cache miss.  This means that we
// can effectively use it to reduce cache line ping-pong on striped data
// structures such as IndexedMemPool or statistics counters.
//
// Because CacheLocality looks at all of the cache levels, it can be
// used for different levels of optimization.  AccessSpreader(2) does
// per-chip spreading on a dual socket system.  AccessSpreader(numCpus)
// does perfect per-cpu spreading.  AccessSpreader(numCpus / 2) does
// perfect L1 spreading in a system with hyperthreading enabled.
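//
// A minimal usage sketch (illustrative only; the stripe count of 8 is a
// hypothetical choice, not something this header prescribes):
//
//   std::atomic<uint64_t> counters[8];
//
//   void increment() {
//     // threads running on nearby CPUs map to the same stripe, so cache
//     // line ping-pong is limited to threads that already share a cache
//     counters[AccessSpreader<>::current(8)].fetch_add(
//         1, std::memory_order_relaxed);
//   }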

struct CacheLocality {

  /// 1 more than the maximum value that can be returned from sched_getcpu
  /// or getcpu.  This is the number of hardware thread contexts provided
  /// by the processors
  size_t numCpus;

  /// Holds the number of caches present at each cache level (0 is
  /// the closest to the cpu).  This is the number of AccessSpreader
  /// stripes needed to avoid cross-cache communication at the specified
  /// layer.  numCachesByLevel.front() is the number of L1 caches and
  /// numCachesByLevel.back() is the number of last-level caches.
  std::vector<size_t> numCachesByLevel;

  /// A map from cpu (from sched_getcpu or getcpu) to an index in the
  /// range 0..numCpus-1, where neighboring locality indices are more
  /// likely to share caches than indices far away.  All of the members
  /// of a particular cache level will be contiguous in their locality
  /// index.  For example, if numCpus is 32 and numCachesByLevel.back()
  /// is 2, then cpus with a locality index < 16 will share one last-level
  /// cache and cpus with a locality index >= 16 will share the other.
  std::vector<size_t> localityIndexByCpu;

  /// Returns the best CacheLocality information available for the current
  /// system, cached for fast access.  This will be loaded from sysfs if
  /// possible, otherwise it will be correct in the number of CPUs but
  /// not in their sharing structure.
  ///
  /// If you are into yo dawgs, this is a shared cache of the local
  /// locality of the shared caches.
  ///
  /// The template parameter here is used to allow injection of a
  /// repeatable CacheLocality structure during testing.  Rather than
  /// inject the type of the CacheLocality provider into every data type
  /// that transitively uses it, all components select between the default
  /// sysfs implementation and a deterministic implementation by keying
  /// off the type of the underlying atomic.  See DeterministicScheduler.
  template <template<typename> class Atom = std::atomic>
  static const CacheLocality& system();

  /// Reads CacheLocality information from a tree structured like
  /// the sysfs filesystem.  The provided function will be evaluated
  /// for each sysfs file that needs to be queried.  The function
  /// should return a string containing the first line of the file
  /// (not including the newline), or an empty string if the file does
  /// not exist.  The function will be called with paths of the form
  /// /sys/devices/system/cpu/cpu*/cache/index*/{type,shared_cpu_list} .
  /// Throws an exception if no caches can be parsed at all.
  static CacheLocality readFromSysfsTree(
      const std::function<std::string(std::string)>& mapping);
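
  // Illustrative sketch of a mapping function backed by an in-memory fake
  // sysfs, as a test might use (the fakeSysfs map here is hypothetical):
  //
  //   std::map<std::string, std::string> fakeSysfs = /* ... */;
  //   auto locality = CacheLocality::readFromSysfsTree(
  //       [&](std::string path) {
  //         auto iter = fakeSysfs.find(path);
  //         return iter == fakeSysfs.end() ? std::string() : iter->second;
  //       });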

  /// Reads CacheLocality information from the real sysfs filesystem.
  /// Throws an exception if no cache information can be loaded.
  static CacheLocality readFromSysfs();

  /// Returns a usable (but probably not reflective of reality)
  /// CacheLocality structure with the specified number of cpus and a
  /// single cache level that associates one cpu per cache.
  static CacheLocality uniform(size_t numCpus);
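
  // For example (illustrative), CacheLocality::uniform(32) yields
  // numCpus == 32, numCachesByLevel == {32}, and an identity
  // localityIndexByCpu mapping.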

  enum {
    /// Memory locations on the same cache line are subject to false
    /// sharing, which is very bad for performance.  Microbenchmarks
    /// indicate that pairs of cache lines also see interference under
    /// heavy use of atomic operations (observed for atomic increment on
    /// Sandy Bridge).  See FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
    kFalseSharingRange = 128
  };

  static_assert(kFalseSharingRange == 128,
      "FOLLY_ALIGN_TO_AVOID_FALSE_SHARING should track kFalseSharingRange");
};

// TODO replace __attribute__ with alignas and 128 with kFalseSharingRange

/// An attribute that will cause a variable or field to be aligned so that
/// it doesn't have false sharing with anything at a smaller memory address.
#define FOLLY_ALIGN_TO_AVOID_FALSE_SHARING __attribute__((__aligned__(128)))
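
// Illustrative use: keep two heavily-updated atomic counters from
// interfering with each other (Stats and its fields are hypothetical):
//
//   struct Stats {
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> hits;
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> misses;
//   };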

/// Holds a function pointer to the VDSO implementation of getcpu(2),
/// if available
struct Getcpu {
  /// Function pointer to a function with the same signature as getcpu(2).
  typedef int (*Func)(unsigned* cpu, unsigned* node, void* unused);

  /// Returns a pointer to the VDSO implementation of getcpu(2), if
  /// available, or nullptr otherwise
  static Func vdsoFunc();
};
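
// Illustrative call pattern: probe once for the VDSO implementation and
// fall back when it isn't available (the fallback shown is hypothetical):
//
//   unsigned cpu;
//   Getcpu::Func func = Getcpu::vdsoFunc();
//   if (func != nullptr && func(&cpu, nullptr, nullptr) == 0) {
//     // cpu now holds the current CPU index
//   } else {
//     // no fast getcpu; fall back to SequentialThreadId<Atom>::getcpu,
//     // declared below
//   }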

/// A class that lazily binds a unique (for each implementation of Atom)
/// identifier to a thread.  This is a fallback mechanism for the access
/// spreader if we are in testing (using DeterministicAtomic) or if
/// __vdso_getcpu can't be dynamically loaded
template <template<typename> class Atom>
struct SequentialThreadId {

  /// Returns the thread id assigned to the current thread
  static size_t get() {
    auto rv = currentId;
    if (UNLIKELY(rv == 0)) {
      rv = currentId = ++prevId;
    }
    return rv;
  }

  /// Fills the thread id into the cpu and node out params (if they
  /// are non-null).  This method is intended to act like getcpu when a
  /// fast-enough form of getcpu isn't available or isn't desired
  static int getcpu(unsigned* cpu, unsigned* node, void* unused) {
    auto id = get();
    if (cpu) {
      *cpu = id;
    }
    if (node) {
      *node = id;
    }
    return 0;
  }

 private:
  static Atom<size_t> prevId;

  static FOLLY_TLS size_t currentId;
};

template <template<typename> class Atom, size_t kMaxCpus>
struct AccessSpreaderArray;

/// AccessSpreader arranges access to a striped data structure in such a
/// way that concurrently executing threads are likely to be accessing
/// different stripes.  It does NOT guarantee uncontended access.
/// Your underlying algorithm must be thread-safe without spreading; this
/// is merely an optimization.  AccessSpreader::current(n) is typically
/// much faster than a cache miss (22 nanos on my dev box, tested fast
/// in both 2.6 and 3.2 kernels).
///
/// You are free to create your own AccessSpreader-s or to cache the
/// results of AccessSpreader<>::shared(n), but you will probably want
/// to use one of the system-wide shared ones.  Calling .current() on
/// a particular AccessSpreader instance only saves about 1 nanosecond
/// over calling AccessSpreader<>::shared(n).
///
/// If available (and not using the deterministic testing implementation)
/// AccessSpreader uses the getcpu system call via VDSO and the
/// precise locality information retrieved from sysfs by CacheLocality.
/// This provides optimal anti-sharing at a fraction of the cost of a
/// cache miss.
///
/// When there are not as many stripes as processors, we try to optimally
/// place the cache sharing boundaries.  This means that if you have 2
/// stripes and run on a dual-socket system, your 2 stripes will each get
/// all of the cores from a single socket.  If you have 16 stripes on a
/// 16 core system plus hyperthreading (32 cpus), each core will get its
/// own stripe and there will be no cache sharing at all.
///
/// AccessSpreader has a fallback mechanism for when __vdso_getcpu can't be
/// loaded, or for use during deterministic testing.  Using sched_getcpu or
/// the getcpu syscall would negate the performance advantages of access
/// spreading, so we use a thread-local value and a shared atomic counter
/// to spread access out.
///
/// AccessSpreader is templated on the template type that is used
/// to implement atomics, as a way to instantiate the underlying
/// heuristics differently for production use and deterministic unit
/// testing.  See DeterministicScheduler for more.  If you aren't using
/// DeterministicScheduler, you can just use the default template parameter
/// all of the time.
template <template<typename> class Atom = std::atomic>
struct AccessSpreader {

  /// Returns a never-destructed shared AccessSpreader instance.
  /// numStripes should be > 0.
  static const AccessSpreader& shared(size_t numStripes) {
    // sharedInstance[0] actually has numStripes == 1
    assert(numStripes > 0);

    // the last shared element handles all large sizes
    return AccessSpreaderArray<Atom,kMaxCpus>::sharedInstance[
        std::min(size_t(kMaxCpus), numStripes)];
  }

  /// Returns the stripe associated with the current CPU, assuming
  /// that there are numStripes (non-zero) stripes.  Equivalent to
  /// AccessSpreader::shared(numStripes).current().
  static size_t current(size_t numStripes) {
    return shared(numStripes).current();
  }
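
  // Illustrative: in a hot loop you can hoist the shared() lookup, which
  // saves about a nanosecond per call as noted above (kNumStripes and the
  // loop body here are hypothetical):
  //
  //   auto& spreader = AccessSpreader<>::shared(kNumStripes);
  //   while (workRemains) {
  //     stripes[spreader.current()].doWork();
  //   }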

  /// stripeByCore uses 1 stripe per L1 cache, according to
  /// CacheLocality::system<>().  Use stripeByCore.numStripes() to see
  /// its width, or stripeByCore.current() to get the current stripe
  static const AccessSpreader stripeByCore;

  /// stripeByChip uses 1 stripe per last-level cache, which is the fewest
  /// number of stripes for which off-chip communication can be avoided
  /// (assuming all caches are on-chip).  Use stripeByChip.numStripes()
  /// to see its width, or stripeByChip.current() to get the current stripe
  static const AccessSpreader stripeByChip;
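
  // Illustrative: sizing a striped structure so that each last-level
  // cache gets its own stripe (Widget here is hypothetical):
  //
  //   std::vector<Widget> widgets(
  //       AccessSpreader<>::stripeByChip.numStripes());
  //   widgets[AccessSpreader<>::stripeByChip.current()].poke();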

  /// Constructs an AccessSpreader that will return values from
  /// 0 to numStripes-1 (inclusive), precomputing the mapping
  /// from CPU to stripe.  There is no use in having more than
  /// CacheLocality::system<Atom>().localityIndexByCpu.size() stripes or
  /// kMaxCpus stripes
  explicit AccessSpreader(size_t spreaderNumStripes,
                          const CacheLocality& cacheLocality =
                              CacheLocality::system<Atom>(),
                          Getcpu::Func getcpuFunc = nullptr)
    : getcpuFunc_(getcpuFunc ? getcpuFunc : pickGetcpuFunc(spreaderNumStripes))
    , numStripes_(spreaderNumStripes)
  {
    auto n = cacheLocality.numCpus;
    for (size_t cpu = 0; cpu < kMaxCpus && cpu < n; ++cpu) {
      auto index = cacheLocality.localityIndexByCpu[cpu];
      assert(index < n);
      // as index goes from 0..n, post-transform value goes from
      // 0..numStripes
      stripeByCpu[cpu] = (index * numStripes_) / n;
      assert(stripeByCpu[cpu] < numStripes_);
    }
    for (size_t cpu = n; cpu < kMaxCpus; ++cpu) {
      stripeByCpu[cpu] = stripeByCpu[cpu - n];
    }
  }

  /// Returns 1 more than the maximum value that can be returned from
  /// current()
  size_t numStripes() const {
    return numStripes_;
  }

  /// Returns the stripe associated with the current CPU
  size_t current() const {
    unsigned cpu;
    getcpuFunc_(&cpu, nullptr, nullptr);
    return stripeByCpu[cpu % kMaxCpus];
  }

 private:

  /// If there are more cpus than this nothing will crash, but there
  /// might be unnecessary sharing
  enum { kMaxCpus = 128 };

  typedef uint8_t CompactStripe;

  static_assert((kMaxCpus & (kMaxCpus - 1)) == 0,
      "kMaxCpus should be a power of two so modulo is fast");
  static_assert(kMaxCpus - 1 <= std::numeric_limits<CompactStripe>::max(),
      "stripeByCpu element type isn't wide enough");

  /// Points to the getcpu-like function we are using to obtain the
  /// current cpu.  It should not be assumed that the returned cpu value
  /// is in range.  We use a member for this instead of a static so that
  /// this fetch preloads a prefix of the stripeByCpu array
  Getcpu::Func getcpuFunc_;

  /// A precomputed map from cpu to stripe.  Rather than add a layer of
  /// indirection requiring a dynamic bounds check and another cache miss,
  /// we always precompute the whole array
  CompactStripe stripeByCpu[kMaxCpus];

  size_t numStripes_;

  /// Returns the best getcpu implementation for this type and width
  /// of AccessSpreader
  static Getcpu::Func pickGetcpuFunc(size_t numStripes);
};

template <>
Getcpu::Func AccessSpreader<std::atomic>::pickGetcpuFunc(size_t);

/// An array of kMaxCpus+1 AccessSpreader<Atom> instances constructed
/// with default params, with the zero-th element having 1 stripe
template <template<typename> class Atom, size_t kMaxStripe>
struct AccessSpreaderArray {

  AccessSpreaderArray() {
    for (size_t i = 0; i <= kMaxStripe; ++i) {
      new (raw + i) AccessSpreader<Atom>(std::max(size_t(1), i));
    }
  }

  ~AccessSpreaderArray() {
    for (size_t i = 0; i <= kMaxStripe; ++i) {
      auto p = static_cast<AccessSpreader<Atom>*>(static_cast<void*>(raw + i));
      p->~AccessSpreader();
    }
  }

  AccessSpreader<Atom> const& operator[] (size_t index) const {
    return *static_cast<AccessSpreader<Atom> const*>(
        static_cast<void const*>(raw + index));
  }

 private:

  // AccessSpreader uses sharedInstance
  friend AccessSpreader<Atom>;

  static AccessSpreaderArray<Atom,kMaxStripe> sharedInstance;

  /// aligned_storage is uninitialized; we use placement new since there
  /// is no AccessSpreader default constructor
  typename std::aligned_storage<sizeof(AccessSpreader<Atom>),
                                CacheLocality::kFalseSharingRange>::type
      raw[kMaxStripe + 1];
};

}} // namespace detail, namespace folly

#endif /* FOLLY_DETAIL_CACHELOCALITY_H_ */