/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// @author Nathan Bronson (ngbronson@fb.com)

#include <type_traits>

#include <folly/Likely.h>
#include <folly/detail/CacheLocality.h>
#include <folly/detail/Futex.h>
#include <folly/portability/Asm.h>
#include <folly/portability/SysResource.h>
// SharedMutex is a reader-writer lock. It is small, very fast, scalable
// on multi-core, and suitable for use when readers or writers may block.
// Unlike most other reader-writer locks, its throughput with concurrent
// readers scales linearly; it is able to acquire and release the lock
// in shared mode without cache line ping-ponging. It is suitable for
// a wide range of lock hold times because it starts with spinning,
// proceeds to using sched_yield with a preemption heuristic, and then
// waits using futex and precise wakeups.
//
// SharedMutex provides all of the methods of folly::RWSpinLock,
// boost::shared_mutex, boost::upgrade_mutex, and C++14's
// std::shared_timed_mutex. All operations that can block are available
// in try, try-for, and try-until (system_clock or steady_clock) versions.
//
// SharedMutexReadPriority gives priority to readers,
// SharedMutexWritePriority gives priority to writers. SharedMutex is an
// alias for SharedMutexWritePriority, because writer starvation is more
// likely than reader starvation for the read-heavy workloads targeted
// by folly.
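//
// A minimal usage sketch (illustrative only; the map and the function
// names are hypothetical, not part of this header):
//
//   folly::SharedMutex mutex;
//   std::map<int, int> table; // protected by mutex
//
//   int readValue(int key) {
//     mutex.lock_shared(); // many readers may hold the lock at once
//     auto it = table.find(key);
//     int result = (it == table.end()) ? -1 : it->second;
//     mutex.unlock_shared();
//     return result;
//   }
//
//   void writeValue(int key, int value) {
//     mutex.lock(); // exclusive: blocks both readers and writers
//     table[key] = value;
//     mutex.unlock();
//   }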
//
// In my tests SharedMutex is as good or better than the other
// reader-writer locks in use at Facebook for almost all use cases,
// sometimes by a wide margin. (If it is rare that there are actually
// concurrent readers then RWSpinLock can be a few nanoseconds faster.)
// I compared it to folly::RWSpinLock, folly::RWTicketSpinLock64,
// boost::shared_mutex, pthread_rwlock_t, and a RWLock that internally uses
// spinlocks to guard state and pthread_mutex_t+pthread_cond_t to block.
// (Thrift's ReadWriteMutex is based underneath on pthread_rwlock_t.)
// It is generally as good or better than the rest when evaluating size,
// speed, scalability, or latency outliers. In the corner cases where
// it is not the fastest (such as single-threaded use or heavy write
// contention) it is never very much worse than the best. See the bottom
// of folly/test/SharedMutexTest.cpp for lots of microbenchmark results.
//
// Comparison to folly::RWSpinLock:
//
// * SharedMutex is faster than RWSpinLock when there are actually
//   concurrent read accesses (sometimes much faster), and ~5 nanoseconds
//   slower when there is not actually any contention. SharedMutex is
//   faster in every (benchmarked) scenario where the shared mode of
//   the lock is actually useful.
//
// * Concurrent shared access to SharedMutex scales linearly, while total
//   RWSpinLock throughput drops as more threads try to access the lock
//   in shared mode. Under very heavy read contention SharedMutex can
//   be two orders of magnitude faster than RWSpinLock (or any reader
//   writer lock that doesn't use striping or deferral).
//
// * SharedMutex can safely protect blocking calls, because after an
//   initial period of spinning it waits using futex().
//
// * RWSpinLock prioritizes readers; SharedMutex has both reader- and
//   writer-priority variants, but defaults to write priority.
//
// * RWSpinLock's upgradeable mode blocks new readers, while SharedMutex's
//   doesn't. Both semantics are reasonable. The boost documentation
//   doesn't explicitly talk about this behavior (except by omitting
//   any statement that those lock modes conflict), but the boost
//   implementations do allow new readers while the upgradeable mode
//   is held. See https://github.com/boostorg/thread/blob/master/
//   include/boost/thread/pthread/shared_mutex.hpp
//
// * RWSpinLock::UpgradedHolder maps to SharedMutex::UpgradeHolder
//   (UpgradeableHolder would be even more pedantically correct).
//   SharedMutex's holders have fewer methods (no reset) and are less
//   tolerant (promotion and downgrade crash if the donor doesn't own
//   the lock, and you must use the default constructor rather than
//   passing a nullptr to the pointer constructor).
//
// Both SharedMutex and RWSpinLock provide "exclusive", "upgrade",
// and "shared" modes. At all times num_threads_holding_exclusive +
// num_threads_holding_upgrade <= 1, and num_threads_holding_exclusive ==
// 0 || num_threads_holding_shared == 0. RWSpinLock has the additional
// constraint that num_threads_holding_shared cannot increase while
// num_threads_holding_upgrade is non-zero.
//
// Comparison to the internal RWLock:
//
// * SharedMutex doesn't allow a maximum reader count to be configured,
//   so it can't be used as a semaphore in the same way as RWLock.
//
// * SharedMutex is 4 bytes, RWLock is 256.
//
// * SharedMutex is as fast or faster than RWLock in all of my
//   microbenchmarks, and has positive rather than negative scalability.
//
// * RWLock and SharedMutex are both writer priority locks.
//
// * SharedMutex avoids latency outliers as well as RWLock.
//
// * SharedMutex uses different names (t != 0 below):
//
//   RWLock::lock(0)    => SharedMutex::lock()
//
//   RWLock::lock(t)    => SharedMutex::try_lock_for(milliseconds(t))
//
//   RWLock::tryLock()  => SharedMutex::try_lock()
//
//   RWLock::unlock()   => SharedMutex::unlock()
//
//   RWLock::enter(0)   => SharedMutex::lock_shared()
//
//   RWLock::enter(t)   =>
//       SharedMutex::try_lock_shared_for(milliseconds(t))
//
//   RWLock::tryEnter() => SharedMutex::try_lock_shared()
//
//   RWLock::leave()    => SharedMutex::unlock_shared()
//
// * RWLock allows the reader count to be adjusted by a value other
//   than 1 during enter() or leave(). SharedMutex doesn't currently
//   implement this feature.
//
// * RWLock's methods are marked const, SharedMutex's aren't.
//
// Reader-writer locks have the potential to allow concurrent access
// to shared read-mostly data, but in practice they often provide no
// improvement over a mutex. The problem is the cache coherence protocol
// of modern CPUs. Coherence is provided by making sure that when a cache
// line is written it is present in only one core's cache. Since a memory
// write is required to acquire a reader-writer lock in shared mode, the
// cache line holding the lock is invalidated in all of the other caches.
// This leads to cache misses when another thread wants to acquire or
// release the lock concurrently. When the RWLock is colocated with the
// data it protects (common), cache misses can also continue to occur when
// a thread that already holds the lock tries to read the protected data.
//
// Ideally, a reader-writer lock would allow multiple cores to acquire
// and release the lock in shared mode without incurring any cache misses.
// This requires that each core records its shared access in a cache line
// that isn't read or written by other read-locking cores. (Writers will
// have to check all of the cache lines.) Typical server hardware when
// this comment was written has 16 L1 caches and cache lines of 64 bytes,
// so a lock striped over all L1 caches would occupy a prohibitive 1024
// bytes. Nothing says that we need a separate set of per-core memory
// locations for each lock, however. Each SharedMutex instance is only
// 4 bytes, but all locks together share a 2K area in which they make a
// core-local record of lock acquisitions.
//
// SharedMutex's strategy of using a shared set of core-local stripes has
// a potential downside, because it means that acquisition of any lock in
// write mode can conflict with acquisition of any lock in shared mode.
// If a lock instance doesn't actually experience concurrency then this
// downside will outweigh the upside of improved scalability for readers.
// To avoid this problem we dynamically detect concurrent accesses to
// SharedMutex, and don't start using the deferred mode unless we actually
// observe concurrency. See kNumSharedToStartDeferring.
//
// It is explicitly allowed to call unlock_shared() from a different
// thread than lock_shared(), so long as they are properly paired.
// unlock_shared() needs to find the location at which lock_shared()
// recorded the lock, which might be in the lock itself or in any of
// the shared slots. If you can conveniently pass state from lock
// acquisition to release then the fastest mechanism is to std::move
// the SharedMutex::ReadHolder instance or a SharedMutex::Token (using
// lock_shared(Token&) and unlock_shared(Token&)). The guard or token
// will tell unlock_shared where in deferredReaders[] to look for the
// deferred lock. The Token-less version of unlock_shared() works in all
// cases, but is optimized for the common (no inter-thread handoff) case.
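//
// A sketch of the token fast path (the handoff mechanism between threads
// is hypothetical; the Token overloads are the ones declared below):
//
//   folly::SharedMutex mutex;
//   folly::SharedMutex::Token token;
//
//   mutex.lock_shared(token);   // records where this read lock was noted
//   ... read the protected data, possibly handing the token to another
//   ... thread along the way
//   mutex.unlock_shared(token); // releases exactly the recorded slot,
//                               // no deferredReaders[] search required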
//
// In both read- and write-priority mode, a waiting lock() (exclusive mode)
// only blocks readers after it has waited for an active upgrade lock to be
// released; until the upgrade lock is released (or upgraded or downgraded)
// readers will still be able to enter. Preferences about lock acquisition
// are not guaranteed to be enforced perfectly (even if they were, there
// is theoretically the chance that a thread could be arbitrarily suspended
// between calling lock() and SharedMutex code actually getting executed).
//
// try_*_for methods always try at least once, even if the duration
// is zero or negative. The duration type must be compatible with
// std::chrono::steady_clock. try_*_until methods also always try at
// least once. std::chrono::system_clock and std::chrono::steady_clock
// are supported.
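//
// A timed-acquisition sketch (the durations chosen here are arbitrary
// examples):
//
//   using namespace std::chrono;
//
//   if (mutex.try_lock_for(milliseconds(10))) { // tries at least once
//     ... exclusive work ...
//     mutex.unlock();
//   }
//
//   auto deadline = steady_clock::now() + milliseconds(5);
//   if (mutex.try_lock_shared_until(deadline)) {
//     ... shared work ...
//     mutex.unlock_shared();
//   }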
//
// If you have observed by profiling that your SharedMutex-s are getting
// cache misses on deferredReaders[] due to another SharedMutex user, then
// you can use the tag type to create your own instantiation of the type.
// The contention threshold (see kNumSharedToStartDeferring) should make
// this unnecessary in all but the most extreme cases. Make sure to check
// that the increased icache and dcache footprint of the tagged result is
// worth it.
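//
// For example, a subsystem that has measured such interference could give
// itself a private set of deferred-reader slots (MyAppTag is a hypothetical
// tag type; the remaining template arguments keep their defaults):
//
//   struct MyAppTag {};
//   using MyAppSharedMutex =
//       folly::SharedMutexImpl<false, MyAppTag>; // write priority, own slots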
//
// SharedMutex's use of thread local storage is an optimization, so
// for the case where thread local storage is not supported, define it
// away.

#ifndef FOLLY_SHAREDMUTEX_TLS
#define FOLLY_SHAREDMUTEX_TLS FOLLY_TLS
#define FOLLY_SHAREDMUTEX_TLS
namespace folly {

struct SharedMutexToken {
  enum class Type : uint16_t {

template <bool ReaderPriority,
          typename Tag_ = void,
          template <typename> class Atom = std::atomic,
          bool BlockImmediately = false>
class SharedMutexImpl {
  static constexpr bool kReaderPriority = ReaderPriority;

  typedef SharedMutexToken Token;

  constexpr SharedMutexImpl() : state_(0) {}
  SharedMutexImpl(const SharedMutexImpl&) = delete;
  SharedMutexImpl(SharedMutexImpl&&) = delete;
  SharedMutexImpl& operator=(const SharedMutexImpl&) = delete;
  SharedMutexImpl& operator=(SharedMutexImpl&&) = delete;

  // It is an error to destroy a SharedMutex that still has
  // any outstanding locks. This is checked if NDEBUG isn't defined.
  // SharedMutex's exclusive mode can be safely used to guard the lock's
  // own destruction. If, for example, you acquire the lock in exclusive
  // mode and then observe that the object containing the lock is no longer
  // needed, you can unlock() and then immediately destroy the lock.
  // See https://sourceware.org/bugzilla/show_bug.cgi?id=13690 for a
  // description about why this property needs to be explicitly mentioned.
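  //
  // A sketch of that pattern (the reference-count bookkeeping shown here
  // is hypothetical):
  //
  //   obj->mutex.lock();
  //   bool lastRef = (--obj->refCount == 0);
  //   obj->mutex.unlock();
  //   if (lastRef) {
  //     delete obj; // safe: unlock() has fully released the lock
  //   }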
    auto state = state_.load(std::memory_order_relaxed);
    if (UNLIKELY((state & kHasS) != 0)) {
      cleanupTokenlessSharedDeferred(state);

    // if a futexWait fails to go to sleep because the value has been
    // changed, we don't necessarily clean up the wait bits, so it is
    // possible they will be set here in a correct system
    assert((state & ~(kWaitingAny | kMayDefer)) == 0);
    if ((state & kMayDefer) != 0) {
      for (uint32_t slot = 0; slot < kMaxDeferredReaders; ++slot) {
        auto slotValue = deferredReader(slot)->load(std::memory_order_relaxed);
        assert(!slotValueIsThis(slotValue));

    (void)lockExclusiveImpl(kHasSolo, ctx);

    return lockExclusiveImpl(kHasSolo, ctx);
  template <class Rep, class Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>& duration) {
    WaitForDuration<Rep, Period> ctx(duration);
    return lockExclusiveImpl(kHasSolo, ctx);

  template <class Clock, class Duration>
  bool try_lock_until(
      const std::chrono::time_point<Clock, Duration>& absDeadline) {
    WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
    return lockExclusiveImpl(kHasSolo, ctx);

  void unlock() {
    // It is possible that we have a left-over kWaitingNotS if the last
    // unlock_shared() that let our matching lock() complete finished
    // releasing before lock()'s futexWait went to sleep. Clean it up now.
    auto state = (state_ &= ~(kWaitingNotS | kPrevDefer | kHasE));
    assert((state & ~kWaitingAny) == 0);
    wakeRegisteredWaiters(state, kWaitingE | kWaitingU | kWaitingS);
  // Managing the token yourself makes unlock_shared a bit faster

    (void)lockSharedImpl(nullptr, ctx);

  void lock_shared(Token& token) {
    (void)lockSharedImpl(&token, ctx);

  bool try_lock_shared() {
    return lockSharedImpl(nullptr, ctx);

  bool try_lock_shared(Token& token) {
    return lockSharedImpl(&token, ctx);

  template <class Rep, class Period>
  bool try_lock_shared_for(const std::chrono::duration<Rep, Period>& duration) {
    WaitForDuration<Rep, Period> ctx(duration);
    return lockSharedImpl(nullptr, ctx);

  template <class Rep, class Period>
  bool try_lock_shared_for(const std::chrono::duration<Rep, Period>& duration,
                           Token& token) {
    WaitForDuration<Rep, Period> ctx(duration);
    return lockSharedImpl(&token, ctx);

  template <class Clock, class Duration>
  bool try_lock_shared_until(
      const std::chrono::time_point<Clock, Duration>& absDeadline) {
    WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
    return lockSharedImpl(nullptr, ctx);

  template <class Clock, class Duration>
  bool try_lock_shared_until(
      const std::chrono::time_point<Clock, Duration>& absDeadline,
      Token& token) {
    WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
    return lockSharedImpl(&token, ctx);

  void unlock_shared() {
    auto state = state_.load(std::memory_order_acquire);

    // kPrevDefer can only be set if HasE or BegunE is set
    assert((state & (kPrevDefer | kHasE | kBegunE)) != kPrevDefer);

    // lock() strips kMayDefer immediately, but then copies it to
    // kPrevDefer so we can tell if the pre-lock() lock_shared() might
    // have deferred
    if ((state & (kMayDefer | kPrevDefer)) == 0 ||
        !tryUnlockTokenlessSharedDeferred()) {
      // Matching lock_shared() couldn't have deferred, or the deferred
      // lock has already been inlined by applyDeferredReaders()
      unlockSharedInline();

  void unlock_shared(Token& token) {
    assert(token.type_ == Token::Type::INLINE_SHARED ||
           token.type_ == Token::Type::DEFERRED_SHARED);

    if (token.type_ != Token::Type::DEFERRED_SHARED ||
        !tryUnlockSharedDeferred(token.slot_)) {
      unlockSharedInline();

    token.type_ = Token::Type::INVALID;
  void unlock_and_lock_shared() {
    // We can't use state_ -=, because we need to clear 2 bits (1 of which
    // has an uncertain initial state) and set 1 other. We might as well
    // clear the relevant wake bits at the same time. Note that since S
    // doesn't block the beginning of a transition to E (writer priority
    // can cut off new S, reader priority grabs BegunE and blocks deferred
    // S) we need to wake E as well.
    auto state = state_.load(std::memory_order_acquire);
    do {
      assert((state & ~(kWaitingAny | kPrevDefer)) == kHasE);
    } while (!state_.compare_exchange_strong(
        state, (state & ~(kWaitingAny | kPrevDefer | kHasE)) + kIncrHasS));
    if ((state & (kWaitingE | kWaitingU | kWaitingS)) != 0) {
      futexWakeAll(kWaitingE | kWaitingU | kWaitingS);

  void unlock_and_lock_shared(Token& token) {
    unlock_and_lock_shared();
    token.type_ = Token::Type::INLINE_SHARED;
  void lock_upgrade() {
    (void)lockUpgradeImpl(ctx);

  bool try_lock_upgrade() {
    return lockUpgradeImpl(ctx);

  template <class Rep, class Period>
  bool try_lock_upgrade_for(
      const std::chrono::duration<Rep, Period>& duration) {
    WaitForDuration<Rep, Period> ctx(duration);
    return lockUpgradeImpl(ctx);

  template <class Clock, class Duration>
  bool try_lock_upgrade_until(
      const std::chrono::time_point<Clock, Duration>& absDeadline) {
    WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
    return lockUpgradeImpl(ctx);

  void unlock_upgrade() {
    auto state = (state_ -= kHasU);
    assert((state & (kWaitingNotS | kHasSolo)) == 0);
    wakeRegisteredWaiters(state, kWaitingE | kWaitingU);

  void unlock_upgrade_and_lock() {
    // no waiting necessary, so waitMask is empty
    (void)lockExclusiveImpl(0, ctx);

  void unlock_upgrade_and_lock_shared() {
    auto state = (state_ -= kHasU - kIncrHasS);
    assert((state & (kWaitingNotS | kHasSolo)) == 0);
    wakeRegisteredWaiters(state, kWaitingE | kWaitingU);

  void unlock_upgrade_and_lock_shared(Token& token) {
    unlock_upgrade_and_lock_shared();
    token.type_ = Token::Type::INLINE_SHARED;
  void unlock_and_lock_upgrade() {
    // We can't use state_ -=, because we need to clear 2 bits (1 of
    // which has an uncertain initial state) and set 1 other. We might
    // as well clear the relevant wake bits at the same time.
    auto state = state_.load(std::memory_order_acquire);
    assert((state & ~(kWaitingAny | kPrevDefer)) == kHasE);
    auto after =
        (state & ~(kWaitingNotS | kWaitingS | kPrevDefer | kHasE)) + kHasU;
    if (state_.compare_exchange_strong(state, after)) {
      if ((state & kWaitingS) != 0) {
        futexWakeAll(kWaitingS);
  typedef typename folly::detail::Futex<Atom> Futex;

  // Internally we use four kinds of wait contexts. These are structs
  // that provide a doWait method that returns true if a futex wake
  // was issued that intersects with the waitMask, false if there was a
  // timeout and no more waiting should be performed. Spinning occurs
  // before the wait context is invoked.

  struct WaitForever {
    bool canBlock() { return true; }
    bool canTimeOut() { return false; }
    bool shouldTimeOut() { return false; }

    bool doWait(Futex& futex, uint32_t expected, uint32_t waitMask) {
      futex.futexWait(expected, waitMask);

  struct WaitNever {
    bool canBlock() { return false; }
    bool canTimeOut() { return true; }
    bool shouldTimeOut() { return true; }

    bool doWait(Futex& /* futex */,
                uint32_t /* expected */,
                uint32_t /* waitMask */) {
  template <class Rep, class Period>
  struct WaitForDuration {
    std::chrono::duration<Rep, Period> duration_;
    bool deadlineComputed_;
    std::chrono::steady_clock::time_point deadline_;

    explicit WaitForDuration(const std::chrono::duration<Rep, Period>& duration)
        : duration_(duration), deadlineComputed_(false) {}

    std::chrono::steady_clock::time_point deadline() {
      if (!deadlineComputed_) {
        deadline_ = std::chrono::steady_clock::now() + duration_;
        deadlineComputed_ = true;

    bool canBlock() { return duration_.count() > 0; }
    bool canTimeOut() { return true; }

    bool shouldTimeOut() {
      return std::chrono::steady_clock::now() > deadline();

    bool doWait(Futex& futex, uint32_t expected, uint32_t waitMask) {
      auto result = futex.futexWaitUntil(expected, deadline(), waitMask);
      return result != folly::detail::FutexResult::TIMEDOUT;

  template <class Clock, class Duration>
  struct WaitUntilDeadline {
    std::chrono::time_point<Clock, Duration> absDeadline_;

    bool canBlock() { return true; }
    bool canTimeOut() { return true; }
    bool shouldTimeOut() { return Clock::now() > absDeadline_; }

    bool doWait(Futex& futex, uint32_t expected, uint32_t waitMask) {
      auto result = futex.futexWaitUntil(expected, absDeadline_, waitMask);
      return result != folly::detail::FutexResult::TIMEDOUT;
  // S count needs to be on the end, because we explicitly allow it to
  // underflow. This can occur while we are in the middle of applying
  // deferred locks (we remove them from deferredReaders[] before
  // inlining them), or during token-less unlock_shared() if a racing
  // lock_shared();unlock_shared() moves the deferredReaders slot while
  // the first unlock_shared() is scanning. The former case is cleaned
  // up before we finish applying the locks. The latter case can persist
  // until destruction, when it is cleaned up.
  static constexpr uint32_t kIncrHasS = 1 << 10;
  static constexpr uint32_t kHasS = ~(kIncrHasS - 1);
  // If false, then there are definitely no deferred read locks for this
  // instance. Cleared after initialization and when exclusively locked.
  static constexpr uint32_t kMayDefer = 1 << 9;

  // lock() clears kMayDefer as soon as it starts draining readers (so
  // that it doesn't have to do a second CAS once drain completes), but
  // unlock_shared() still needs to know whether to scan deferredReaders[]
  // or not. We copy kMayDefer to kPrevDefer when setting kHasE or
  // kBegunE, and clear it when clearing those bits.
  static constexpr uint32_t kPrevDefer = 1 << 8;
  // Exclusive-locked blocks all read locks and write locks. This bit
  // may be set before all readers have finished, but in that case the
  // thread that sets it won't return to the caller until all read locks
  // have been released.
  static constexpr uint32_t kHasE = 1 << 7;

  // Exclusive-draining means that lock() is waiting for existing readers
  // to leave, but that new readers may still acquire shared access.
  // This is only used in reader priority mode. New readers during
  // drain must be inline. The difference between this and kHasU is that
  // kBegunE prevents kMayDefer from being set.
  static constexpr uint32_t kBegunE = 1 << 6;

  // At most one thread may have either exclusive or upgrade lock
  // ownership. Unlike exclusive mode, ownership of the lock in upgrade
  // mode doesn't preclude other threads holding the lock in shared mode.
  // boost's concept for this doesn't explicitly say whether new shared
  // locks can be acquired once lock_upgrade has succeeded, but doesn't
  // list that as disallowed. RWSpinLock disallows new read locks after
  // lock_upgrade has been acquired, but the boost implementation doesn't.
  // We choose the latter.
  static constexpr uint32_t kHasU = 1 << 5;
  // There are three states that we consider to be "solo", in that they
  // cannot coexist with other solo states. These are kHasE, kBegunE,
  // and kHasU. Note that S doesn't conflict with any of these, because
  // setting kHasE is only one of the two steps needed to actually
  // acquire the lock in exclusive mode (the other is draining the existing
  // S holders).
  static constexpr uint32_t kHasSolo = kHasE | kBegunE | kHasU;
  // Once a thread sets kHasE it needs to wait for the current readers
  // to exit the lock. We give this a separate wait identity from the
  // waiting to set kHasE so that we can perform partial wakeups (wake
  // one instead of wake all).
  static constexpr uint32_t kWaitingNotS = 1 << 4;

  // When waking writers we can either wake them all, in which case we
  // can clear kWaitingE, or we can call futexWake(1). futexWake tells
  // us if anybody woke up, but even if we detect that nobody woke up we
  // can't clear the bit after the fact without issuing another wakeup.
  // To avoid thundering herds when there are lots of pending lock()
  // without needing to call futexWake twice when there is only one
  // waiter, kWaitingE actually encodes if we have observed multiple
  // concurrent waiters. Tricky: ABA issues on futexWait mean that when
  // we see kWaitingESingle we can't assume that there is only one.
  static constexpr uint32_t kWaitingESingle = 1 << 2;
  static constexpr uint32_t kWaitingEMultiple = 1 << 3;
  static constexpr uint32_t kWaitingE = kWaitingESingle | kWaitingEMultiple;

  // kWaitingU is essentially a 1 bit saturating counter. It always
  // requires a wakeAll.
  static constexpr uint32_t kWaitingU = 1 << 1;

  // All blocked lock_shared() should be awoken, so it is correct (not
  // suboptimal) to wakeAll if there are any shared readers.
  static constexpr uint32_t kWaitingS = 1 << 0;

  // kWaitingAny is a mask of all of the bits that record the state of
  // threads, rather than the state of the lock. It is convenient to be
  // able to mask them off during asserts.
  static constexpr uint32_t kWaitingAny =
      kWaitingNotS | kWaitingE | kWaitingU | kWaitingS;
  // The reader count at which a reader will attempt to use the lock
  // in deferred mode. If this value is 2, then the second concurrent
  // reader will set kMayDefer and use deferredReaders[]. kMayDefer is
  // cleared during exclusive access, so this threshold must be reached
  // each time a lock is held in exclusive mode.
  static constexpr uint32_t kNumSharedToStartDeferring = 2;

  // The typical number of spins that a thread will wait for a state
  // transition. There is no bound on the number of threads that can wait
  // for a writer, so we are pretty conservative here to limit the chance
  // that we are starving the writer of CPU. Each spin is 6 or 7 nanos,
  // almost all of which is in the pause instruction.
  static constexpr uint32_t kMaxSpinCount = !BlockImmediately ? 1000 : 2;

  // The maximum number of soft yields before falling back to futex.
  // If the preemption heuristic is activated we will fall back before
  // this. A soft yield takes ~900 nanos (two sched_yield plus a call
  // to getrusage, with checks of the goal at each step). Soft yields
  // aren't compatible with deterministic execution under test (unlike
  // futexWaitUntil, which has a capricious but deterministic back end).
  static constexpr uint32_t kMaxSoftYieldCount = !BlockImmediately ? 1000 : 0;
  // If AccessSpreader assigns indexes from 0..k*n-1 on a system where some
  // level of the memory hierarchy is symmetrically divided into k pieces
  // (NUMA nodes, last-level caches, L1 caches, ...), then slot indexes
  // that are the same after integer division by k share that resource.
  // Our strategy for deferred readers is to probe up to numSlots/4 slots,
  // using the full granularity of AccessSpreader for the start slot
  // and then search outward. We can use AccessSpreader::current(n)
  // without managing our own spreader if kMaxDeferredReaders <=
  // AccessSpreader::kMaxCpus, which is currently 128.
  //
  // Our 2-socket E5-2660 machines have 8 L1 caches on each chip,
  // with 64 byte cache lines. That means we need 64*16 bytes of
  // deferredReaders[] to give each L1 its own playground. On x86_64
  // each DeferredReaderSlot is 8 bytes, so we need kMaxDeferredReaders
  // * kDeferredSeparationFactor >= 64 * 16 / 8 == 128. If
  // kDeferredSearchDistance * kDeferredSeparationFactor <=
  // 64 / 8 then we will search only within a single cache line, which
  // guarantees we won't have inter-L1 contention. We give ourselves
  // a factor of 2 on the core count, which should hold us for a couple
  // processor generations. deferredReaders[] is 2048 bytes currently.
  static constexpr uint32_t kMaxDeferredReaders = 64;
  static constexpr uint32_t kDeferredSearchDistance = 2;
  static constexpr uint32_t kDeferredSeparationFactor = 4;
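
  // With these values: 64 slots * 4 separation * 8 bytes per slot = 2048
  // bytes for deferredReaders[], matching the figure above, and a probe
  // span of kDeferredSearchDistance (2) * kDeferredSeparationFactor (4)
  // * 8 bytes = 64 bytes, i.e. exactly one cache line on that hardware.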
  static_assert(!(kMaxDeferredReaders & (kMaxDeferredReaders - 1)),
                "kMaxDeferredReaders must be a power of 2");
  static_assert(!(kDeferredSearchDistance & (kDeferredSearchDistance - 1)),
                "kDeferredSearchDistance must be a power of 2");

  // The number of deferred locks that can be simultaneously acquired
  // by a thread via the token-less methods without performing any heap
  // allocations. Each of these costs 3 pointers (24 bytes, probably)
  // per thread. There's not much point in making this larger than
  // kDeferredSearchDistance.
  static constexpr uint32_t kTokenStackTLSCapacity = 2;
  // We need to make sure that if there is a lock_shared()
  // and lock_shared(token) followed by unlock_shared() and
  // unlock_shared(token), the token-less unlock doesn't null
  // out deferredReaders[token.slot_]. If we allowed that, then
  // unlock_shared(token) wouldn't be able to assume that its lock
  // had been inlined by applyDeferredReaders when it finds that
  // deferredReaders[token.slot_] no longer points to this. We accomplish
  // this by stealing bit 0 from the pointer to record that the slot's
  // element has no token, hence our use of uintptr_t in deferredReaders[].
  static constexpr uintptr_t kTokenless = 0x1;

  // This is the starting location for Token-less unlock_shared().
  static FOLLY_SHAREDMUTEX_TLS uint32_t tls_lastTokenlessSlot;

  // Last deferred reader slot used.
  static FOLLY_SHAREDMUTEX_TLS uint32_t tls_lastDeferredReaderSlot;
  // Only indexes divisible by kDeferredSeparationFactor are used.
  // If any of those elements points to a SharedMutexImpl, then it
  // should be considered that there is a shared lock on that instance.

  typedef Atom<uintptr_t> DeferredReaderSlot;

  FOLLY_ALIGN_TO_AVOID_FALSE_SHARING static DeferredReaderSlot
      deferredReaders[kMaxDeferredReaders * kDeferredSeparationFactor];
  // Performs an exclusive lock, waiting for state_ & waitMask to be
  // zero first
  template <class WaitContext>
  bool lockExclusiveImpl(uint32_t preconditionGoalMask, WaitContext& ctx) {
    uint32_t state = state_.load(std::memory_order_acquire);
    if (LIKELY(
            (state & (preconditionGoalMask | kMayDefer | kHasS)) == 0 &&
            state_.compare_exchange_strong(state, (state | kHasE) & ~kHasU))) {

      return lockExclusiveImpl(state, preconditionGoalMask, ctx);
  template <class WaitContext>
  bool lockExclusiveImpl(uint32_t& state,
                         uint32_t preconditionGoalMask,
                         WaitContext& ctx) {
    if (UNLIKELY((state & preconditionGoalMask) != 0) &&
        !waitForZeroBits(state, preconditionGoalMask, kWaitingE, ctx) &&

    uint32_t after = (state & kMayDefer) == 0 ? 0 : kPrevDefer;
    if (!ReaderPriority || (state & (kMayDefer | kHasS)) == 0) {
      // Block readers immediately, either because we are in write
      // priority mode or because we can acquire the lock in one
      // step. Note that if state has kHasU, then we are doing an
      // unlock_upgrade_and_lock() and we should clear it (reader
      // priority branch also does this).
      after |= (state | kHasE) & ~(kHasU | kMayDefer);
    } else {
      after |= (state | kBegunE) & ~(kHasU | kMayDefer);
    }
    if (state_.compare_exchange_strong(state, after)) {
      // If we set kHasE (writer priority) then no new readers can
      // arrive. If we set kBegunE then they can still enter, but
      // they must be inline. Either way we need to either spin on
      // deferredReaders[] slots, or inline them so that we can wait on
      // kHasS to zero itself. deferredReaders[] is pointers, which on
      // x86_64 are bigger than futex() can handle, so we inline the
      // deferred locks instead of trying to futexWait on each slot.
      // Readers are responsible for rechecking state_ after recording
      // a deferred read to avoid atomicity problems between the state_
      // CAS and applyDeferredReader's reads of deferredReaders[].
      if (UNLIKELY((before & kMayDefer) != 0)) {
        applyDeferredReaders(state, ctx);

      assert((state & (kHasE | kBegunE)) != 0 && (state & kHasU) == 0);
      if (UNLIKELY((state & kHasS) != 0) &&
          !waitForZeroBits(state, kHasS, kWaitingNotS, ctx) &&

        // Ugh. We blocked new readers and other writers for a while,
        // but were unable to complete. Move on. On the plus side
        // we can clear kWaitingNotS because nobody else can piggyback
        // on it.
        state = (state_ &= ~(kPrevDefer | kHasE | kBegunE | kWaitingNotS));
        wakeRegisteredWaiters(state, kWaitingE | kWaitingU | kWaitingS);

      if (ReaderPriority && (state & kHasE) == 0) {
        assert((state & kBegunE) != 0);
        if (!state_.compare_exchange_strong(state,
                                            (state & ~kBegunE) | kHasE)) {
  template <class WaitContext>
  bool waitForZeroBits(uint32_t& state,
                       uint32_t goal,
                       uint32_t waitMask,
                       WaitContext& ctx) {
    uint32_t spinCount = 0;
    state = state_.load(std::memory_order_acquire);
    if ((state & goal) == 0) {

    asm_volatile_pause();
    if (UNLIKELY(spinCount >= kMaxSpinCount)) {
      return ctx.canBlock() &&
             yieldWaitForZeroBits(state, goal, waitMask, ctx);
  template <class WaitContext>
  bool yieldWaitForZeroBits(uint32_t& state,
                            uint32_t goal,
                            uint32_t waitMask,
                            WaitContext& ctx) {
    for (uint32_t yieldCount = 0; yieldCount < kMaxSoftYieldCount;
         ++yieldCount) {
      for (int softState = 0; softState < 3; ++softState) {
        std::this_thread::yield();

        getrusage(RUSAGE_THREAD, &usage);

        if (((state = state_.load(std::memory_order_acquire)) & goal) == 0) {

        if (ctx.shouldTimeOut()) {

      if (before >= 0 && usage.ru_nivcsw >= before + 2) {
        // One involuntary csw might just be occasional background work,
        // but if we get two in a row then we guess that there is someone
        // else who can profitably use this CPU. Fall back to futex

      before = usage.ru_nivcsw;

    return futexWaitForZeroBits(state, goal, waitMask, ctx);
  template <class WaitContext>
  bool futexWaitForZeroBits(uint32_t& state,
                            uint32_t goal,
                            uint32_t waitMask,
                            WaitContext& ctx) {
    assert(waitMask == kWaitingNotS || waitMask == kWaitingE ||
           waitMask == kWaitingU || waitMask == kWaitingS);

    state = state_.load(std::memory_order_acquire);
    if ((state & goal) == 0) {

    if (waitMask == kWaitingE) {
      if ((state & kWaitingESingle) != 0) {
        after |= kWaitingEMultiple;
      } else {
        after |= kWaitingESingle;

    // CAS is better than atomic |= here, because it lets us avoid
    // setting the wait flag when the goal is concurrently achieved
    if (after != state && !state_.compare_exchange_strong(state, after)) {

    if (!ctx.doWait(state_, after, waitMask)) {
  // Wakes up waiters registered in state_ as appropriate, clearing the
  // awaiting bits for anybody that was awoken. Tries to perform direct
  // single wakeup of an exclusive waiter if appropriate
  void wakeRegisteredWaiters(uint32_t& state, uint32_t wakeMask) {
    if (UNLIKELY((state & wakeMask) != 0)) {
      wakeRegisteredWaitersImpl(state, wakeMask);

  void wakeRegisteredWaitersImpl(uint32_t& state, uint32_t wakeMask) {
    // If there are multiple lock() pending only one of them will actually
    // get to wake up, so issuing futexWakeAll will make a thundering herd.
    // There's nothing stopping us from issuing futexWake(1) instead,
    // so long as the wait bits are still an accurate reflection of
    // the waiters. If we notice (via futexWake's return value) that
    // nobody woke up then we can try again with the normal wake-all path.
    // Note that we can't just clear the bits at that point; we need to
    // clear the bits and then issue another wakeup.
    //
    // It is possible that we wake an E waiter but an outside S grabs the
    // lock instead, at which point we should wake pending U and S waiters.
    // Rather than tracking state to make the failing E regenerate the
    // wakeup, we just disable the optimization in the case that there
    // are waiting U or S that we are eligible to wake.
    if ((wakeMask & kWaitingE) == kWaitingE &&
        (state & wakeMask) == kWaitingE &&
        state_.futexWake(1, kWaitingE) > 0) {
      // somebody woke up, so leave state_ as is and clear it later

    if ((state & wakeMask) != 0) {
      auto prev = state_.fetch_and(~wakeMask);
      if ((prev & wakeMask) != 0) {
        futexWakeAll(wakeMask);

      state = prev & ~wakeMask;

  void futexWakeAll(uint32_t wakeMask) {
    state_.futexWake(std::numeric_limits<int>::max(), wakeMask);
  DeferredReaderSlot* deferredReader(uint32_t slot) {
    return &deferredReaders[slot * kDeferredSeparationFactor];

  uintptr_t tokenfulSlotValue() { return reinterpret_cast<uintptr_t>(this); }

  uintptr_t tokenlessSlotValue() { return tokenfulSlotValue() | kTokenless; }

  bool slotValueIsThis(uintptr_t slotValue) {
    return (slotValue & ~kTokenless) == tokenfulSlotValue();
  // Clears any deferredReaders[] that point to this, adjusting the inline
  // shared lock count to compensate. Does some spinning and yielding
  // to avoid the work. Always finishes the application, even if ctx
  // times out.
  template <class WaitContext>
  void applyDeferredReaders(uint32_t& state, WaitContext& ctx) {
    uint32_t spinCount = 0;
    while (!slotValueIsThis(
        deferredReader(slot)->load(std::memory_order_acquire))) {
      if (++slot == kMaxDeferredReaders) {

      if (UNLIKELY(++spinCount >= kMaxSpinCount)) {
        applyDeferredReaders(state, ctx, slot);
  template <class WaitContext>
  void applyDeferredReaders(uint32_t& state, WaitContext& ctx, uint32_t slot) {
#ifdef RUSAGE_THREAD
    struct rusage usage;

    for (uint32_t yieldCount = 0; yieldCount < kMaxSoftYieldCount;
         ++yieldCount) {
      for (int softState = 0; softState < 3; ++softState) {
        if (softState < 2) {
          std::this_thread::yield();

#ifdef RUSAGE_THREAD
          getrusage(RUSAGE_THREAD, &usage);

        while (!slotValueIsThis(
            deferredReader(slot)->load(std::memory_order_acquire))) {
          if (++slot == kMaxDeferredReaders) {

        if (ctx.shouldTimeOut()) {
          // finish applying immediately on timeout

#ifdef RUSAGE_THREAD
      if (before >= 0 && usage.ru_nivcsw >= before + 2) {
        // heuristic says run queue is not empty

      before = usage.ru_nivcsw;

    uint32_t movedSlotCount = 0;
    for (; slot < kMaxDeferredReaders; ++slot) {
      auto slotPtr = deferredReader(slot);
      auto slotValue = slotPtr->load(std::memory_order_acquire);
      if (slotValueIsThis(slotValue) &&
          slotPtr->compare_exchange_strong(slotValue, 0)) {

    if (movedSlotCount > 0) {
      state = (state_ += movedSlotCount * kIncrHasS);

    assert((state & (kHasE | kBegunE)) != 0);

    // if state + kIncrHasS overflows (off the end of state) then either
    // we have 2^(32-9) readers (almost certainly an application bug)
    // or we had an underflow (also a bug)
    assert(state < state + kIncrHasS);
  // It is straightforward to make a token-less lock_shared() and
  // unlock_shared() either by making the token-less version always use
  // INLINE_SHARED mode or by removing the token version. Supporting
  // deferred operation for both types is trickier than it appears, because
  // the purpose of the token is so that unlock_shared doesn't have to
  // look in other slots for its deferred lock. Token-less unlock_shared
  // might place a deferred lock in one place and then release a different
  // slot that was originally used by the token-ful version. If this was
  // important we could solve the problem by differentiating the deferred
  // locks so that cross-variety release wouldn't occur. The best way
  // is probably to steal a bit from the pointer, making deferredLocks[]
  // an array of Atom<uintptr_t>.
  template <class WaitContext>
  bool lockSharedImpl(Token* token, WaitContext& ctx) {
    uint32_t state = state_.load(std::memory_order_relaxed);
    if ((state & (kHasS | kMayDefer | kHasE)) == 0 &&
        state_.compare_exchange_strong(state, state + kIncrHasS)) {
      if (token != nullptr) {
        token->type_ = Token::Type::INLINE_SHARED;

    return lockSharedImpl(state, token, ctx);

  template <class WaitContext>
  bool lockSharedImpl(uint32_t& state, Token* token, WaitContext& ctx);

  // Updates the state in/out argument as if the locks were made inline,
  // but does not update state_
  void cleanupTokenlessSharedDeferred(uint32_t& state) {
    for (uint32_t i = 0; i < kMaxDeferredReaders; ++i) {
      auto slotPtr = deferredReader(i);
      auto slotValue = slotPtr->load(std::memory_order_relaxed);
      if (slotValue == tokenlessSlotValue()) {
        slotPtr->store(0, std::memory_order_relaxed);

        if ((state & kHasS) == 0) {

  bool tryUnlockTokenlessSharedDeferred();

  bool tryUnlockSharedDeferred(uint32_t slot) {
    assert(slot < kMaxDeferredReaders);
    auto slotValue = tokenfulSlotValue();
    return deferredReader(slot)->compare_exchange_strong(slotValue, 0);
  uint32_t unlockSharedInline() {
    uint32_t state = (state_ -= kIncrHasS);
    assert((state & (kHasE | kBegunE | kMayDefer)) != 0 ||
           state < state + kIncrHasS);
    if ((state & kHasS) == 0) {
      // Only the second half of lock() can be blocked by a non-zero
      // reader count, so that's the only thing we need to wake
      wakeRegisteredWaiters(state, kWaitingNotS);

  template <class WaitContext>
  bool lockUpgradeImpl(WaitContext& ctx) {
      if (!waitForZeroBits(state, kHasSolo, kWaitingU, ctx)) {

    } while (!state_.compare_exchange_strong(state, state | kHasU));
  class ReadHolder {
   public:
    ReadHolder() : lock_(nullptr) {}

    explicit ReadHolder(const SharedMutexImpl* lock) : ReadHolder(*lock) {}

    explicit ReadHolder(const SharedMutexImpl& lock)
        : lock_(const_cast<SharedMutexImpl*>(&lock)) {
      lock_->lock_shared(token_);

    ReadHolder(ReadHolder&& rhs) noexcept : lock_(rhs.lock_),
                                            token_(rhs.token_) {
      rhs.lock_ = nullptr;

    // Downgrade from upgrade mode
    explicit ReadHolder(UpgradeHolder&& upgraded) : lock_(upgraded.lock_) {
      assert(upgraded.lock_ != nullptr);
      upgraded.lock_ = nullptr;
      lock_->unlock_upgrade_and_lock_shared(token_);

    // Downgrade from exclusive mode
    explicit ReadHolder(WriteHolder&& writer) : lock_(writer.lock_) {
      assert(writer.lock_ != nullptr);
      writer.lock_ = nullptr;
      lock_->unlock_and_lock_shared(token_);

    ReadHolder& operator=(ReadHolder&& rhs) noexcept {
      std::swap(lock_, rhs.lock_);
      std::swap(token_, rhs.token_);

    ReadHolder(const ReadHolder& rhs) = delete;
    ReadHolder& operator=(const ReadHolder& rhs) = delete;

      lock_->unlock_shared(token_);

   private:
    friend class UpgradeHolder;
    friend class WriteHolder;
    SharedMutexImpl* lock_;
    SharedMutexToken token_;
  class UpgradeHolder {
   public:
    UpgradeHolder() : lock_(nullptr) {}

    explicit UpgradeHolder(SharedMutexImpl* lock) : UpgradeHolder(*lock) {}

    explicit UpgradeHolder(SharedMutexImpl& lock) : lock_(&lock) {
      lock_->lock_upgrade();

    // Downgrade from exclusive mode
    explicit UpgradeHolder(WriteHolder&& writer) : lock_(writer.lock_) {
      assert(writer.lock_ != nullptr);
      writer.lock_ = nullptr;
      lock_->unlock_and_lock_upgrade();

    UpgradeHolder(UpgradeHolder&& rhs) noexcept : lock_(rhs.lock_) {
      rhs.lock_ = nullptr;

    UpgradeHolder& operator=(UpgradeHolder&& rhs) noexcept {
      std::swap(lock_, rhs.lock_);

    UpgradeHolder(const UpgradeHolder& rhs) = delete;
    UpgradeHolder& operator=(const UpgradeHolder& rhs) = delete;

      lock_->unlock_upgrade();

   private:
    friend class WriteHolder;
    friend class ReadHolder;
    SharedMutexImpl* lock_;
  class WriteHolder {
   public:
    WriteHolder() : lock_(nullptr) {}

    explicit WriteHolder(SharedMutexImpl* lock) : WriteHolder(*lock) {}

    explicit WriteHolder(SharedMutexImpl& lock) : lock_(&lock) {
      lock_->lock();

    // Promotion from upgrade mode
    explicit WriteHolder(UpgradeHolder&& upgrade) : lock_(upgrade.lock_) {
      assert(upgrade.lock_ != nullptr);
      upgrade.lock_ = nullptr;
      lock_->unlock_upgrade_and_lock();
    // It is intended that WriteHolder(ReadHolder&& rhs) does not exist.
    //
    // Shared locks (read) can not safely upgrade to unique locks (write).
    // That upgrade path is a well-known recipe for deadlock, so we explicitly
    // disallow it.
    //
    // If you need to do a conditional mutation, you have a few options:
    // 1. Check the condition under a shared lock and release it.
    //    Then maybe check the condition again under a unique lock and maybe do
    //    the mutation.
    // 2. Check the condition once under an upgradeable lock.
    //    Then maybe upgrade the lock to a unique lock and do the mutation
    //    (see the sketch below).
    // 3. Check the condition and maybe perform the mutation under a unique
    //    lock.
    //
    // Relevant upgradeable lock notes:
    // * At most one upgradeable lock can be held at a time for a given shared
    //   mutex, just like a unique lock.
    // * An upgradeable lock may be held concurrently with any number of shared
    //   locks.
    // * An upgradeable lock may be upgraded atomically to a unique lock.
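    //
    // A sketch of option 2 (the condition and mutation calls are
    // hypothetical placeholders):
    //
    //   folly::SharedMutex::UpgradeHolder upgrade(mutex); // readers continue
    //   if (needsMutation()) {
    //     // atomic promotion to exclusive mode
    //     folly::SharedMutex::WriteHolder writer(std::move(upgrade));
    //     doMutation();
    //   }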
    WriteHolder(WriteHolder&& rhs) noexcept : lock_(rhs.lock_) {
      rhs.lock_ = nullptr;

    WriteHolder& operator=(WriteHolder&& rhs) noexcept {
      std::swap(lock_, rhs.lock_);

    WriteHolder(const WriteHolder& rhs) = delete;
    WriteHolder& operator=(const WriteHolder& rhs) = delete;

   private:
    friend class ReadHolder;
    friend class UpgradeHolder;
    SharedMutexImpl* lock_;
  // Adapters for Synchronized<>
  friend void acquireRead(SharedMutexImpl& lock) { lock.lock_shared(); }
  friend void acquireReadWrite(SharedMutexImpl& lock) { lock.lock(); }
  friend void releaseRead(SharedMutexImpl& lock) { lock.unlock_shared(); }
  friend void releaseReadWrite(SharedMutexImpl& lock) { lock.unlock(); }
  friend bool acquireRead(SharedMutexImpl& lock, unsigned int ms) {
    return lock.try_lock_shared_for(std::chrono::milliseconds(ms));
  friend bool acquireReadWrite(SharedMutexImpl& lock, unsigned int ms) {
    return lock.try_lock_for(std::chrono::milliseconds(ms));
typedef SharedMutexImpl<true> SharedMutexReadPriority;
typedef SharedMutexImpl<false> SharedMutexWritePriority;
typedef SharedMutexWritePriority SharedMutex;
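
// A brief RAII sketch using the holder types and the SharedMutex alias
// above (SharedData and its members are hypothetical):
//
//   struct SharedData {
//     folly::SharedMutex mutex;
//     std::vector<int> values;
//   };
//
//   int sumValues(SharedData& d) {
//     folly::SharedMutex::ReadHolder guard(d.mutex); // lock_shared + token
//     int sum = 0;
//     for (int v : d.values) { sum += v; }
//     return sum; // released by ~ReadHolder
//   }
//
//   void appendValue(SharedData& d, int v) {
//     folly::SharedMutex::WriteHolder guard(d.mutex); // exclusive
//     d.values.push_back(v);
//   }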
// Prevent the compiler from instantiating these in other translation units.
// They are instantiated once in SharedMutex.cpp
extern template class SharedMutexImpl<true>;
extern template class SharedMutexImpl<false>;
template <
    bool ReaderPriority,
    typename Tag_,
    template <typename> class Atom,
    bool BlockImmediately>
typename SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
    DeferredReaderSlot
    SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
        deferredReaders[kMaxDeferredReaders * kDeferredSeparationFactor] = {};
template <
    bool ReaderPriority,
    typename Tag_,
    template <typename> class Atom,
    bool BlockImmediately>
FOLLY_SHAREDMUTEX_TLS uint32_t
    SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
        tls_lastTokenlessSlot = 0;
template <
    bool ReaderPriority,
    typename Tag_,
    template <typename> class Atom,
    bool BlockImmediately>
FOLLY_SHAREDMUTEX_TLS uint32_t
    SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
        tls_lastDeferredReaderSlot = 0;
template <
    bool ReaderPriority,
    typename Tag_,
    template <typename> class Atom,
    bool BlockImmediately>
bool SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
    tryUnlockTokenlessSharedDeferred() {
  auto bestSlot = tls_lastTokenlessSlot;
  for (uint32_t i = 0; i < kMaxDeferredReaders; ++i) {
    auto slotPtr = deferredReader(bestSlot ^ i);
    auto slotValue = slotPtr->load(std::memory_order_relaxed);
    if (slotValue == tokenlessSlotValue() &&
        slotPtr->compare_exchange_strong(slotValue, 0)) {
      tls_lastTokenlessSlot = bestSlot ^ i;
template <
    bool ReaderPriority,
    typename Tag_,
    template <typename> class Atom,
    bool BlockImmediately>
template <class WaitContext>
bool SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
    lockSharedImpl(uint32_t& state, Token* token, WaitContext& ctx) {
  if (UNLIKELY((state & kHasE) != 0) &&
      !waitForZeroBits(state, kHasE, kWaitingS, ctx) && ctx.canTimeOut()) {
  uint32_t slot = tls_lastDeferredReaderSlot;
  uintptr_t slotValue = 1; // any non-zero value will do

  bool canAlreadyDefer = (state & kMayDefer) != 0;
  bool aboveDeferThreshold =
      (state & kHasS) >= (kNumSharedToStartDeferring - 1) * kIncrHasS;
  bool drainInProgress = ReaderPriority && (state & kBegunE) != 0;
  if (canAlreadyDefer || (aboveDeferThreshold && !drainInProgress)) {
    /* Try using the most recent slot first. */
    slotValue = deferredReader(slot)->load(std::memory_order_relaxed);
    if (slotValue != 0) {
      // starting point for our empty-slot search, can change after
      // calling waitForZeroBits
      uint32_t bestSlot =
          (uint32_t)folly::detail::AccessSpreader<Atom>::current(
              kMaxDeferredReaders);

      // deferred readers are already enabled, or it is time to
      // enable them if we can find a slot
      for (uint32_t i = 0; i < kDeferredSearchDistance; ++i) {
        slot = bestSlot ^ i;
        assert(slot < kMaxDeferredReaders);
        slotValue = deferredReader(slot)->load(std::memory_order_relaxed);
        if (slotValue == 0) {
          tls_lastDeferredReaderSlot = slot;
  if (slotValue != 0) {
    // not yet deferred, or no empty slots
    if (state_.compare_exchange_strong(state, state + kIncrHasS)) {
      // successfully recorded the read lock inline
      if (token != nullptr) {
        token->type_ = Token::Type::INLINE_SHARED;

    // state is updated, try again

  // record that deferred readers might be in use if necessary
  if ((state & kMayDefer) == 0) {
    if (!state_.compare_exchange_strong(state, state | kMayDefer)) {
      // keep going if CAS failed because somebody else set the bit
      if ((state & (kHasE | kMayDefer)) != kMayDefer) {

    // state = state | kMayDefer;

  // try to use the slot
  bool gotSlot = deferredReader(slot)->compare_exchange_strong(
      slotValue,
      token == nullptr ? tokenlessSlotValue() : tokenfulSlotValue());

  // If we got the slot, we need to verify that an exclusive lock
  // didn't happen since we last checked. If we didn't get the slot we
  // need to recheck state_ anyway to make sure we don't waste too much
  // work. It is also possible that since we checked state_ someone
  // has acquired and released the write lock, clearing kMayDefer.
  // Both cases are covered by looking for the readers-possible bit,
  // because it is off when the exclusive lock bit is set.
  state = state_.load(std::memory_order_acquire);
  if (token == nullptr) {
    tls_lastTokenlessSlot = slot;

  if ((state & kMayDefer) != 0) {
    assert((state & kHasE) == 0);
    if (token != nullptr) {
      token->type_ = Token::Type::DEFERRED_SHARED;
      token->slot_ = (uint16_t)slot;

  // release the slot before retrying
  if (token == nullptr) {
    // We can't rely on slot. Token-less slot values can be freed by
    // any unlock_shared(), so we need to do the full deferredReader
    // search during unlock. Unlike unlock_shared(), we can't trust
    // kPrevDefer here. This deferred lock isn't visible to lock()
    // (that's the whole reason we're undoing it) so there might have
    // subsequently been an unlock() and lock() with no intervening
    // transition to deferred mode.
    if (!tryUnlockTokenlessSharedDeferred()) {
      unlockSharedInline();

    if (!tryUnlockSharedDeferred(slot)) {
      unlockSharedInline();

  // We got here not because the lock was unavailable, but because
  // we lost a compare-and-swap. Try-lock is typically allowed to
  // have spurious failures, but there is no lock efficiency gain
  // from exploiting that freedom here.

} // namespace folly