/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// @author Nathan Bronson (ngbronson@fb.com)
#pragma once

#include <stdint.h>

#include <atomic>
#include <cassert>
#include <chrono>
#include <limits>
#include <thread>
#include <type_traits>
#include <utility>

#include <folly/Likely.h>
#include <folly/Portability.h>
#include <folly/detail/CacheLocality.h>
#include <folly/detail/Futex.h>
#include <folly/portability/Asm.h>
#include <folly/portability/SysResource.h>
// SharedMutex is a reader-writer lock. It is small, very fast, scalable
// on multi-core, and suitable for use when readers or writers may block.
// Unlike most other reader-writer locks, its throughput with concurrent
// readers scales linearly; it is able to acquire and release the lock
// in shared mode without cache line ping-ponging. It is suitable for
// a wide range of lock hold times because it starts with spinning,
// proceeds to using sched_yield with a preemption heuristic, and then
// waits using futex and precise wakeups.
//
// SharedMutex provides all of the methods of folly::RWSpinLock,
// boost::shared_mutex, boost::upgrade_mutex, and C++14's
// std::shared_timed_mutex. All operations that can block are available
// in try, try-for, and try-until (system_clock or steady_clock) versions.
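//
// For example, a minimal timed-acquisition sketch (hedged; `mu` and the
// durations are illustrative, not part of this header):
//
//   folly::SharedMutex mu;
//   if (mu.try_lock_for(std::chrono::milliseconds(10))) {
//     // ... exclusive access ...
//     mu.unlock();
//   }
//   auto deadline = std::chrono::steady_clock::now() + std::chrono::seconds(1);
//   if (mu.try_lock_shared_until(deadline)) {
//     // ... shared access ...
//     mu.unlock_shared();
//   }
//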
// SharedMutexReadPriority gives priority to readers,
// SharedMutexWritePriority gives priority to writers. SharedMutex is an
// alias for SharedMutexWritePriority, because writer starvation is more
// likely than reader starvation for the read-heavy workloads targeted
// by SharedMutex.
// In my tests SharedMutex is as good or better than the other
// reader-writer locks in use at Facebook for almost all use cases,
// sometimes by a wide margin. (If it is rare that there are actually
// concurrent readers then RWSpinLock can be a few nanoseconds faster.)
// I compared it to folly::RWSpinLock, folly::RWTicketSpinLock64,
// boost::shared_mutex, pthread_rwlock_t, and a RWLock that internally uses
// spinlocks to guard state and pthread_mutex_t+pthread_cond_t to block.
// (Thrift's ReadWriteMutex is based underneath on pthread_rwlock_t.)
// It is generally as good or better than the rest when evaluating size,
// speed, scalability, or latency outliers. In the corner cases where
// it is not the fastest (such as single-threaded use or heavy write
// contention) it is never very much worse than the best. See the bottom
// of folly/test/SharedMutexTest.cpp for lots of microbenchmark results.
// Comparison to folly::RWSpinLock:
//
// * SharedMutex is faster than RWSpinLock when there are actually
//   concurrent read accesses (sometimes much faster), and ~5 nanoseconds
//   slower when there is not actually any contention. SharedMutex is
//   faster in every (benchmarked) scenario where the shared mode of
//   the lock is actually useful.
//
// * Concurrent shared access to SharedMutex scales linearly, while total
//   RWSpinLock throughput drops as more threads try to access the lock
//   in shared mode. Under very heavy read contention SharedMutex can
//   be two orders of magnitude faster than RWSpinLock (or any reader
//   writer lock that doesn't use striping or deferral).
//
// * SharedMutex can safely protect blocking calls, because after an
//   initial period of spinning it waits using futex().
//
// * RWSpinLock prioritizes readers, SharedMutex has both reader- and
//   writer-priority variants, but defaults to write priority.
//
// * RWSpinLock's upgradeable mode blocks new readers, while SharedMutex's
//   doesn't. Both semantics are reasonable. The boost documentation
//   doesn't explicitly talk about this behavior (except by omitting
//   any statement that those lock modes conflict), but the boost
//   implementations do allow new readers while the upgradeable mode
//   is held. See https://github.com/boostorg/thread/blob/master/
//   include/boost/thread/pthread/shared_mutex.hpp
//
// * RWSpinLock::UpgradedHolder maps to SharedMutex::UpgradeHolder
//   (UpgradeableHolder would be even more pedantically correct).
//   SharedMutex's holders have fewer methods (no reset) and are less
//   tolerant (promotion and downgrade crash if the donor doesn't own
//   the lock, and you must use the default constructor rather than
//   passing a nullptr to the pointer constructor).
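//
// A hedged sketch of holder use, including move-based promotion and
// downgrade (all of the holder types are defined later in this header):
//
//   folly::SharedMutex mu;
//   folly::SharedMutex::UpgradeHolder uh(mu);           // lock_upgrade()
//   folly::SharedMutex::WriteHolder wh(std::move(uh));  // promote
//   folly::SharedMutex::ReadHolder rh(std::move(wh));   // downgrade
//   // rh's destructor performs the final unlock_shared()
//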
// Both SharedMutex and RWSpinLock provide "exclusive", "upgrade",
// and "shared" modes. At all times num_threads_holding_exclusive +
// num_threads_holding_upgrade <= 1, and num_threads_holding_exclusive ==
// 0 || num_threads_holding_shared == 0. RWSpinLock has the additional
// constraint that num_threads_holding_shared cannot increase while
// num_threads_holding_upgrade is non-zero.
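//
// For example (a hedged sketch of the semantic difference just
// described, with two threads' calls interleaved):
//
//   folly::SharedMutex mu;
//   mu.lock_upgrade();    // thread A
//   mu.lock_shared();     // thread B: succeeds while A holds upgrade mode
//   mu.unlock_shared();   // thread B
//   mu.unlock_upgrade();  // thread A
//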
// Comparison to the internal RWLock:
//
// * SharedMutex doesn't allow a maximum reader count to be configured,
//   so it can't be used as a semaphore in the same way as RWLock.
//
// * SharedMutex is 4 bytes, RWLock is 256.
//
// * SharedMutex is as fast or faster than RWLock in all of my
//   microbenchmarks, and has positive rather than negative scalability.
//
// * RWLock and SharedMutex are both writer priority locks.
//
// * SharedMutex avoids latency outliers as well as RWLock.
//
// * SharedMutex uses different names (t != 0 below):
//
//     RWLock::lock(0)    => SharedMutex::lock()
//
//     RWLock::lock(t)    => SharedMutex::try_lock_for(milliseconds(t))
//
//     RWLock::tryLock()  => SharedMutex::try_lock()
//
//     RWLock::unlock()   => SharedMutex::unlock()
//
//     RWLock::enter(0)   => SharedMutex::lock_shared()
//
//     RWLock::enter(t)   =>
//         SharedMutex::try_lock_shared_for(milliseconds(t))
//
//     RWLock::tryEnter() => SharedMutex::try_lock_shared()
//
//     RWLock::leave()    => SharedMutex::unlock_shared()
//
// * RWLock allows the reader count to be adjusted by a value other
//   than 1 during enter() or leave(). SharedMutex doesn't currently
//   implement this feature.
//
// * RWLock's methods are marked const, SharedMutex's aren't.
// Reader-writer locks have the potential to allow concurrent access
// to shared read-mostly data, but in practice they often provide no
// improvement over a mutex. The problem is the cache coherence protocol
// of modern CPUs. Coherence is provided by making sure that when a cache
// line is written it is present in only one core's cache. Since a memory
// write is required to acquire a reader-writer lock in shared mode, the
// cache line holding the lock is invalidated in all of the other caches.
// This leads to cache misses when another thread wants to acquire or
// release the lock concurrently. When the RWLock is colocated with the
// data it protects (common), cache misses can also continue to occur when
// a thread that already holds the lock tries to read the protected data.
// Ideally, a reader-writer lock would allow multiple cores to acquire
// and release the lock in shared mode without incurring any cache misses.
// This requires that each core records its shared access in a cache line
// that isn't read or written by other read-locking cores. (Writers will
// have to check all of the cache lines.) Typical server hardware when
// this comment was written has 16 L1 caches and cache lines of 64 bytes,
// so a lock striped over all L1 caches would occupy a prohibitive 1024
// bytes. Nothing says that we need a separate set of per-core memory
// locations for each lock, however. Each SharedMutex instance is only
// 4 bytes, but all locks together share a 2K area in which they make a
// core-local record of lock acquisitions.
// SharedMutex's strategy of using a shared set of core-local stripes has
// a potential downside, because it means that acquisition of any lock in
// write mode can conflict with acquisition of any lock in shared mode.
// If a lock instance doesn't actually experience concurrency then this
// downside will outweigh the upside of improved scalability for readers.
// To avoid this problem we dynamically detect concurrent accesses to
// SharedMutex, and don't start using the deferred mode unless we actually
// observe concurrency. See kNumSharedToStartDeferring.
// It is explicitly allowed to call unlock_shared() from a different
// thread than lock_shared(), so long as they are properly paired.
// unlock_shared() needs to find the location at which lock_shared()
// recorded the lock, which might be in the lock itself or in any of
// the shared slots. If you can conveniently pass state from lock
// acquisition to release then the fastest mechanism is to std::move
// the SharedMutex::ReadHolder instance or a SharedMutex::Token (using
// lock_shared(Token&) and unlock_shared(Token&)). The guard or token
// will tell unlock_shared where in deferredReaders[] to look for the
// deferred lock. The Token-less version of unlock_shared() works in all
// cases, but is optimized for the common (no inter-thread handoff) case.
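//
// A hedged sketch of the inter-thread handoff described above (the
// thread labels are illustrative):
//
//   folly::SharedMutex mu;
//   folly::SharedMutex::Token token;
//   mu.lock_shared(token);   // thread A records the lock's slot in token
//   // ... transfer responsibility for the lock (and token) to thread B ...
//   mu.unlock_shared(token); // thread B releases without a full slot scan
//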
// In both read- and write-priority mode, a waiting lock() (exclusive mode)
// only blocks readers after it has waited for an active upgrade lock to be
// released; until the upgrade lock is released (or upgraded or downgraded)
// readers will still be able to enter. Preferences about lock acquisition
// are not guaranteed to be enforced perfectly (even if they were, there
// is theoretically the chance that a thread could be arbitrarily suspended
// between calling lock() and SharedMutex code actually getting executed).
// try_*_for methods always try at least once, even if the duration
// is zero or negative. The duration type must be compatible with
// std::chrono::steady_clock. try_*_until methods also always try at
// least once. std::chrono::system_clock and std::chrono::steady_clock
// are supported.
// If you have observed by profiling that your SharedMutex-s are getting
// cache misses on deferredReaders[] due to another SharedMutex user, then
// you can use the tag type plus the RWDEFERREDLOCK_DECLARE_STATIC_STORAGE
// macro to create your own instantiation of the type. The contention
// threshold (see kNumSharedToStartDeferring) should make this unnecessary
// in all but the most extreme cases. Make sure to check that the
// increased icache and dcache footprint of the tagged result is worth it.
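//
// A hedged sketch of such a tagged instantiation (`MyAppTag` is a
// hypothetical name, and the storage macro mentioned above still needs
// to be invoked in one translation unit):
//
//   struct MyAppTag {};
//   using MyAppSharedMutex =
//       folly::SharedMutexImpl<false, MyAppTag>; // false => write priority
//   // MyAppSharedMutex gets deferredReaders[] storage distinct from the
//   // default folly::SharedMutex instantiation.
//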
namespace folly {

struct SharedMutexToken {
  enum class Type : uint16_t {
    INVALID = 0,
    INLINE_SHARED,
    DEFERRED_SHARED,
  };

  Type type_;
  uint16_t slot_;
};
template <bool ReaderPriority,
          typename Tag_ = void,
          template <typename> class Atom = std::atomic,
          bool BlockImmediately = false>
class SharedMutexImpl {
 public:
  static constexpr bool kReaderPriority = ReaderPriority;

  typedef SharedMutexToken Token;

  class ReadHolder;
  class UpgradeHolder;
  class WriteHolder;

  constexpr SharedMutexImpl() : state_(0) {}

  SharedMutexImpl(const SharedMutexImpl&) = delete;
  SharedMutexImpl(SharedMutexImpl&&) = delete;
  SharedMutexImpl& operator=(const SharedMutexImpl&) = delete;
  SharedMutexImpl& operator=(SharedMutexImpl&&) = delete;
  // It is an error to destroy a SharedMutex that still has
  // any outstanding locks. This is checked if NDEBUG isn't defined.
  // SharedMutex's exclusive mode can be safely used to guard the lock's
  // own destruction. If, for example, you acquire the lock in exclusive
  // mode and then observe that the object containing the lock is no longer
  // needed, you can unlock() and then immediately destroy the lock.
  // See https://sourceware.org/bugzilla/show_bug.cgi?id=13690 for a
  // description about why this property needs to be explicitly mentioned.
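  //
  // A hedged sketch of that pattern (`node` and `removeFromIndex()` are
  // hypothetical, and it assumes no other thread can still be waiting
  // on node->mutex_ once the node is unreachable):
  //
  //   node->mutex_.lock();
  //   bool unreachable = removeFromIndex(node);
  //   node->mutex_.unlock();
  //   if (unreachable) {
  //     delete node; // safe only because unlock() has fully completed
  //   }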
  ~SharedMutexImpl() {
    auto state = state_.load(std::memory_order_relaxed);
    if (UNLIKELY((state & kHasS) != 0)) {
      cleanupTokenlessSharedDeferred(state);
    }

#ifndef NDEBUG
    // if a futexWait fails to go to sleep because the value has been
    // changed, we don't necessarily clean up the wait bits, so it is
    // possible they will be set here in a correct system
    assert((state & ~(kWaitingAny | kMayDefer)) == 0);
    if ((state & kMayDefer) != 0) {
      for (uint32_t slot = 0; slot < kMaxDeferredReaders; ++slot) {
        auto slotValue = deferredReader(slot)->load(std::memory_order_relaxed);
        assert(!slotValueIsThis(slotValue));
      }
    }
#endif
  }
  void lock() {
    WaitForever ctx;
    (void)lockExclusiveImpl(kHasSolo, ctx);
  }

  bool try_lock() {
    WaitNever ctx;
    return lockExclusiveImpl(kHasSolo, ctx);
  }

  template <class Rep, class Period>
  bool try_lock_for(const std::chrono::duration<Rep, Period>& duration) {
    WaitForDuration<Rep, Period> ctx(duration);
    return lockExclusiveImpl(kHasSolo, ctx);
  }

  template <class Clock, class Duration>
  bool try_lock_until(
      const std::chrono::time_point<Clock, Duration>& absDeadline) {
    WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
    return lockExclusiveImpl(kHasSolo, ctx);
  }
  void unlock() {
    // It is possible that we have a left-over kWaitingNotS if the last
    // unlock_shared() that let our matching lock() complete finished
    // releasing before lock()'s futexWait went to sleep. Clean it up now
    auto state = (state_ &= ~(kWaitingNotS | kPrevDefer | kHasE));
    assert((state & ~kWaitingAny) == 0);
    wakeRegisteredWaiters(state, kWaitingE | kWaitingU | kWaitingS);
  }
  // Managing the token yourself makes unlock_shared a bit faster

  void lock_shared() {
    WaitForever ctx;
    (void)lockSharedImpl(nullptr, ctx);
  }

  void lock_shared(Token& token) {
    WaitForever ctx;
    (void)lockSharedImpl(&token, ctx);
  }

  bool try_lock_shared() {
    WaitNever ctx;
    return lockSharedImpl(nullptr, ctx);
  }

  bool try_lock_shared(Token& token) {
    WaitNever ctx;
    return lockSharedImpl(&token, ctx);
  }
  template <class Rep, class Period>
  bool try_lock_shared_for(const std::chrono::duration<Rep, Period>& duration) {
    WaitForDuration<Rep, Period> ctx(duration);
    return lockSharedImpl(nullptr, ctx);
  }

  template <class Rep, class Period>
  bool try_lock_shared_for(const std::chrono::duration<Rep, Period>& duration,
                           Token& token) {
    WaitForDuration<Rep, Period> ctx(duration);
    return lockSharedImpl(&token, ctx);
  }

  template <class Clock, class Duration>
  bool try_lock_shared_until(
      const std::chrono::time_point<Clock, Duration>& absDeadline) {
    WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
    return lockSharedImpl(nullptr, ctx);
  }

  template <class Clock, class Duration>
  bool try_lock_shared_until(
      const std::chrono::time_point<Clock, Duration>& absDeadline,
      Token& token) {
    WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
    return lockSharedImpl(&token, ctx);
  }
  void unlock_shared() {
    auto state = state_.load(std::memory_order_acquire);

    // kPrevDefer can only be set if HasE or BegunE is set
    assert((state & (kPrevDefer | kHasE | kBegunE)) != kPrevDefer);

    // lock() strips kMayDefer immediately, but then copies it to
    // kPrevDefer so we can tell if the pre-lock() lock_shared() might
    // have deferred
    if ((state & (kMayDefer | kPrevDefer)) == 0 ||
        !tryUnlockTokenlessSharedDeferred()) {
      // Matching lock_shared() couldn't have deferred, or the deferred
      // lock has already been inlined by applyDeferredReaders()
      unlockSharedInline();
    }
  }
  void unlock_shared(Token& token) {
    assert(token.type_ == Token::Type::INLINE_SHARED ||
           token.type_ == Token::Type::DEFERRED_SHARED);

    if (token.type_ != Token::Type::DEFERRED_SHARED ||
        !tryUnlockSharedDeferred(token.slot_)) {
      unlockSharedInline();
    }

    token.type_ = Token::Type::INVALID;
  }
  void unlock_and_lock_shared() {
    // We can't use state_ -=, because we need to clear 2 bits (1 of which
    // has an uncertain initial state) and set 1 other. We might as well
    // clear the relevant wake bits at the same time. Note that since S
    // doesn't block the beginning of a transition to E (writer priority
    // can cut off new S, reader priority grabs BegunE and blocks deferred
    // S) we need to wake E as well.
    auto state = state_.load(std::memory_order_acquire);
    do {
      assert((state & ~(kWaitingAny | kPrevDefer)) == kHasE);
    } while (!state_.compare_exchange_strong(
        state, (state & ~(kWaitingAny | kPrevDefer | kHasE)) + kIncrHasS));
    if ((state & (kWaitingE | kWaitingU | kWaitingS)) != 0) {
      futexWakeAll(kWaitingE | kWaitingU | kWaitingS);
    }
  }

  void unlock_and_lock_shared(Token& token) {
    unlock_and_lock_shared();
    token.type_ = Token::Type::INLINE_SHARED;
  }
  void lock_upgrade() {
    WaitForever ctx;
    (void)lockUpgradeImpl(ctx);
  }

  bool try_lock_upgrade() {
    WaitNever ctx;
    return lockUpgradeImpl(ctx);
  }

  template <class Rep, class Period>
  bool try_lock_upgrade_for(
      const std::chrono::duration<Rep, Period>& duration) {
    WaitForDuration<Rep, Period> ctx(duration);
    return lockUpgradeImpl(ctx);
  }

  template <class Clock, class Duration>
  bool try_lock_upgrade_until(
      const std::chrono::time_point<Clock, Duration>& absDeadline) {
    WaitUntilDeadline<Clock, Duration> ctx{absDeadline};
    return lockUpgradeImpl(ctx);
  }
  void unlock_upgrade() {
    auto state = (state_ -= kHasU);
    assert((state & (kWaitingNotS | kHasSolo)) == 0);
    wakeRegisteredWaiters(state, kWaitingE | kWaitingU);
  }

  void unlock_upgrade_and_lock() {
    // no waiting necessary, so waitMask is empty
    WaitForever ctx;
    (void)lockExclusiveImpl(0, ctx);
  }

  void unlock_upgrade_and_lock_shared() {
    auto state = (state_ -= kHasU - kIncrHasS);
    assert((state & (kWaitingNotS | kHasSolo)) == 0);
    wakeRegisteredWaiters(state, kWaitingE | kWaitingU);
  }

  void unlock_upgrade_and_lock_shared(Token& token) {
    unlock_upgrade_and_lock_shared();
    token.type_ = Token::Type::INLINE_SHARED;
  }
  void unlock_and_lock_upgrade() {
    // We can't use state_ -=, because we need to clear 2 bits (1 of
    // which has an uncertain initial state) and set 1 other. We might
    // as well clear the relevant wake bits at the same time.
    auto state = state_.load(std::memory_order_acquire);
    while (true) {
      assert((state & ~(kWaitingAny | kPrevDefer)) == kHasE);
      auto after =
          (state & ~(kWaitingNotS | kWaitingS | kPrevDefer | kHasE)) + kHasU;
      if (state_.compare_exchange_strong(state, after)) {
        if ((state & kWaitingS) != 0) {
          futexWakeAll(kWaitingS);
        }
        return;
      }
    }
  }
 private:
  typedef typename folly::detail::Futex<Atom> Futex;
  // Internally we use four kinds of wait contexts. These are structs
  // that provide a doWait method that returns true if a futex wake
  // was issued that intersects with the waitMask, false if there was a
  // timeout and no more waiting should be performed. Spinning occurs
  // before the wait context is invoked.

  struct WaitForever {
    bool canBlock() { return true; }
    bool canTimeOut() { return false; }
    bool shouldTimeOut() { return false; }

    bool doWait(Futex& futex, uint32_t expected, uint32_t waitMask) {
      futex.futexWait(expected, waitMask);
      return true;
    }
  };

  struct WaitNever {
    bool canBlock() { return false; }
    bool canTimeOut() { return true; }
    bool shouldTimeOut() { return true; }

    bool doWait(Futex& /* futex */,
                uint32_t /* expected */,
                uint32_t /* waitMask */) {
      return false;
    }
  };
  template <class Rep, class Period>
  struct WaitForDuration {
    std::chrono::duration<Rep, Period> duration_;
    bool deadlineComputed_;
    std::chrono::steady_clock::time_point deadline_;

    explicit WaitForDuration(const std::chrono::duration<Rep, Period>& duration)
        : duration_(duration), deadlineComputed_(false) {}

    std::chrono::steady_clock::time_point deadline() {
      if (!deadlineComputed_) {
        deadline_ = std::chrono::steady_clock::now() + duration_;
        deadlineComputed_ = true;
      }
      return deadline_;
    }

    bool canBlock() { return duration_.count() > 0; }
    bool canTimeOut() { return true; }

    bool shouldTimeOut() {
      return std::chrono::steady_clock::now() > deadline();
    }

    bool doWait(Futex& futex, uint32_t expected, uint32_t waitMask) {
      auto result = futex.futexWaitUntil(expected, deadline(), waitMask);
      return result != folly::detail::FutexResult::TIMEDOUT;
    }
  };
  template <class Clock, class Duration>
  struct WaitUntilDeadline {
    std::chrono::time_point<Clock, Duration> absDeadline_;

    bool canBlock() { return true; }
    bool canTimeOut() { return true; }
    bool shouldTimeOut() { return Clock::now() > absDeadline_; }

    bool doWait(Futex& futex, uint32_t expected, uint32_t waitMask) {
      auto result = futex.futexWaitUntil(expected, absDeadline_, waitMask);
      return result != folly::detail::FutexResult::TIMEDOUT;
    }
  };

  // 32 bits of state
  Futex state_;
  // S count needs to be on the end, because we explicitly allow it to
  // underflow. This can occur while we are in the middle of applying
  // deferred locks (we remove them from deferredReaders[] before
  // inlining them), or during token-less unlock_shared() if a racing
  // lock_shared();unlock_shared() moves the deferredReaders slot while
  // the first unlock_shared() is scanning. The former case is cleaned
  // up before we finish applying the locks. The latter case can persist
  // until destruction, when it is cleaned up.
  static constexpr uint32_t kIncrHasS = 1 << 10;
  static constexpr uint32_t kHasS = ~(kIncrHasS - 1);

  // If false, then there are definitely no deferred read locks for this
  // instance. Cleared after initialization and when exclusively locked.
  static constexpr uint32_t kMayDefer = 1 << 9;
  // lock() clears kMayDefer as soon as it starts draining readers (so
  // that it doesn't have to do a second CAS once drain completes), but
  // unlock_shared() still needs to know whether to scan deferredReaders[]
  // or not. We copy kMayDefer to kPrevDefer when setting kHasE or
  // kBegunE, and clear it when clearing those bits.
  static constexpr uint32_t kPrevDefer = 1 << 8;
  // Exclusive-locked blocks all read locks and write locks. This bit
  // may be set before all readers have finished, but in that case the
  // thread that sets it won't return to the caller until all read locks
  // have been released.
  static constexpr uint32_t kHasE = 1 << 7;

  // Exclusive-draining means that lock() is waiting for existing readers
  // to leave, but that new readers may still acquire shared access.
  // This is only used in reader priority mode. New readers during
  // drain must be inline. The difference between this and kHasU is that
  // kBegunE prevents kMayDefer from being set.
  static constexpr uint32_t kBegunE = 1 << 6;
  // At most one thread may have either exclusive or upgrade lock
  // ownership. Unlike exclusive mode, ownership of the lock in upgrade
  // mode doesn't preclude other threads holding the lock in shared mode.
  // boost's concept for this doesn't explicitly say whether new shared
  // locks can be acquired once lock_upgrade has succeeded, but doesn't
  // list that as disallowed. RWSpinLock disallows new read locks after
  // lock_upgrade has been acquired, but the boost implementation doesn't.
  // We choose the latter.
  static constexpr uint32_t kHasU = 1 << 5;

  // There are three states that we consider to be "solo", in that they
  // cannot coexist with other solo states. These are kHasE, kBegunE,
  // and kHasU. Note that S doesn't conflict with any of these, because
  // setting the kHasE is only one of the two steps needed to actually
  // acquire the lock in exclusive mode (the other is draining the
  // existing S holders).
  static constexpr uint32_t kHasSolo = kHasE | kBegunE | kHasU;
  // Once a thread sets kHasE it needs to wait for the current readers
  // to exit the lock. We give this a separate wait identity from the
  // waiting to set kHasE so that we can perform partial wakeups (wake
  // one instead of wake all).
  static constexpr uint32_t kWaitingNotS = 1 << 4;

  // When waking writers we can either wake them all, in which case we
  // can clear kWaitingE, or we can call futexWake(1). futexWake tells
  // us if anybody woke up, but even if we detect that nobody woke up we
  // can't clear the bit after the fact without issuing another wakeup.
  // To avoid thundering herds when there are lots of pending lock()
  // without needing to call futexWake twice when there is only one
  // waiter, kWaitingE actually encodes if we have observed multiple
  // concurrent waiters. Tricky: ABA issues on futexWait mean that when
  // we see kWaitingESingle we can't assume that there is only one.
  static constexpr uint32_t kWaitingESingle = 1 << 2;
  static constexpr uint32_t kWaitingEMultiple = 1 << 3;
  static constexpr uint32_t kWaitingE = kWaitingESingle | kWaitingEMultiple;

  // kWaitingU is essentially a 1 bit saturating counter. It always
  // requires a wakeAll.
  static constexpr uint32_t kWaitingU = 1 << 1;

  // All blocked lock_shared() should be awoken, so it is correct (not
  // suboptimal) to wakeAll if there are any shared readers.
  static constexpr uint32_t kWaitingS = 1 << 0;

  // kWaitingAny is a mask of all of the bits that record the state of
  // threads, rather than the state of the lock. It is convenient to be
  // able to mask them off during asserts.
  static constexpr uint32_t kWaitingAny =
      kWaitingNotS | kWaitingE | kWaitingU | kWaitingS;
  // The reader count at which a reader will attempt to use the lock
  // in deferred mode. If this value is 2, then the second concurrent
  // reader will set kMayDefer and use deferredReaders[]. kMayDefer is
  // cleared during exclusive access, so this threshold must be reached
  // each time a lock is held in exclusive mode.
  static constexpr uint32_t kNumSharedToStartDeferring = 2;

  // The typical number of spins that a thread will wait for a state
  // transition. There is no bound on the number of threads that can wait
  // for a writer, so we are pretty conservative here to limit the chance
  // that we are starving the writer of CPU. Each spin is 6 or 7 nanos,
  // almost all of which is in the pause instruction.
  static constexpr uint32_t kMaxSpinCount = !BlockImmediately ? 1000 : 2;

  // The maximum number of soft yields before falling back to futex.
  // If the preemption heuristic is activated we will fall back before
  // this. A soft yield takes ~900 nanos (two sched_yield plus a call
  // to getrusage, with checks of the goal at each step). Soft yields
  // aren't compatible with deterministic execution under test (unlike
  // futexWaitUntil, which has a capricious but deterministic back end).
  static constexpr uint32_t kMaxSoftYieldCount = !BlockImmediately ? 1000 : 0;
  // If AccessSpreader assigns indexes from 0..k*n-1 on a system where some
  // level of the memory hierarchy is symmetrically divided into k pieces
  // (NUMA nodes, last-level caches, L1 caches, ...), then slot indexes
  // that are the same after integer division by k share that resource.
  // Our strategy for deferred readers is to probe up to numSlots/4 slots,
  // using the full granularity of AccessSpreader for the start slot
  // and then search outward. We can use AccessSpreader::current(n)
  // without managing our own spreader if kMaxDeferredReaders <=
  // AccessSpreader::kMaxCpus, which is currently 128.
  //
  // Our 2-socket E5-2660 machines have 8 L1 caches on each chip,
  // with 64 byte cache lines. That means we need 64*16 bytes of
  // deferredReaders[] to give each L1 its own playground. On x86_64
  // each DeferredReaderSlot is 8 bytes, so we need kMaxDeferredReaders
  // * kDeferredSeparationFactor >= 64 * 16 / 8 == 128. If
  // kDeferredSearchDistance * kDeferredSeparationFactor <=
  // 64 / 8 then we will search only within a single cache line, which
  // guarantees we won't have inter-L1 contention. We give ourselves
  // a factor of 2 on the core count, which should hold us for a couple
  // processor generations. deferredReaders[] is 2048 bytes currently.
  static constexpr uint32_t kMaxDeferredReaders = 64;
  static constexpr uint32_t kDeferredSearchDistance = 2;
  static constexpr uint32_t kDeferredSeparationFactor = 4;
  static_assert(!(kMaxDeferredReaders & (kMaxDeferredReaders - 1)),
                "kMaxDeferredReaders must be a power of 2");
  static_assert(!(kDeferredSearchDistance & (kDeferredSearchDistance - 1)),
                "kDeferredSearchDistance must be a power of 2");
  // The number of deferred locks that can be simultaneously acquired
  // by a thread via the token-less methods without performing any heap
  // allocations. Each of these costs 3 pointers (24 bytes, probably)
  // per thread. There's not much point in making this larger than
  // kDeferredSearchDistance.
  static constexpr uint32_t kTokenStackTLSCapacity = 2;

  // We need to make sure that if there is a lock_shared()
  // and lock_shared(token) followed by unlock_shared() and
  // unlock_shared(token), the token-less unlock doesn't null
  // out deferredReaders[token.slot_]. If we allowed that, then
  // unlock_shared(token) wouldn't be able to assume that its lock
  // had been inlined by applyDeferredReaders when it finds that
  // deferredReaders[token.slot_] no longer points to this. We accomplish
  // this by stealing bit 0 from the pointer to record that the slot's
  // element has no token, hence our use of uintptr_t in deferredReaders[].
  static constexpr uintptr_t kTokenless = 0x1;

  // This is the starting location for Token-less unlock_shared().
  static FOLLY_TLS uint32_t tls_lastTokenlessSlot;
  // Only indexes divisible by kDeferredSeparationFactor are used.
  // If any of those elements points to a SharedMutexImpl, then it
  // should be considered that there is a shared lock on that instance.
 public:
  typedef Atom<uintptr_t> DeferredReaderSlot;

 private:
  FOLLY_ALIGN_TO_AVOID_FALSE_SHARING static DeferredReaderSlot deferredReaders
      [kMaxDeferredReaders * kDeferredSeparationFactor];
  // Performs an exclusive lock, waiting for state_ & waitMask to be
  // zero first
  template <class WaitContext>
  bool lockExclusiveImpl(uint32_t preconditionGoalMask, WaitContext& ctx) {
    uint32_t state = state_.load(std::memory_order_acquire);
    if (LIKELY((state & (preconditionGoalMask | kMayDefer | kHasS)) == 0 &&
               state_.compare_exchange_strong(state,
                                              (state | kHasE) & ~kHasU))) {
      return true;
    } else {
      return lockExclusiveImpl(state, preconditionGoalMask, ctx);
    }
  }
  template <class WaitContext>
  bool lockExclusiveImpl(uint32_t& state,
                         uint32_t preconditionGoalMask,
                         WaitContext& ctx) {
    while (true) {
      if (UNLIKELY((state & preconditionGoalMask) != 0) &&
          !waitForZeroBits(state, preconditionGoalMask, kWaitingE, ctx) &&
          ctx.canTimeOut()) {
        return false;
      }

      uint32_t after = (state & kMayDefer) == 0 ? 0 : kPrevDefer;
      if (!ReaderPriority || (state & (kMayDefer | kHasS)) == 0) {
        // Block readers immediately, either because we are in write
        // priority mode or because we can acquire the lock in one
        // step. Note that if state has kHasU, then we are doing an
        // unlock_upgrade_and_lock() and we should clear it (reader
        // priority branch also does this).
        after |= (state | kHasE) & ~(kHasU | kMayDefer);
      } else {
        after |= (state | kBegunE) & ~(kHasU | kMayDefer);
      }
      if (state_.compare_exchange_strong(state, after)) {
        auto before = state;
        state = after;

        // If we set kHasE (writer priority) then no new readers can
        // arrive. If we set kBegunE then they can still enter, but
        // they must be inline. Either way we need to either spin on
        // deferredReaders[] slots, or inline them so that we can wait on
        // kHasS to zero itself. deferredReaders[] is pointers, which on
        // x86_64 are bigger than futex() can handle, so we inline the
        // deferred locks instead of trying to futexWait on each slot.
        // Readers are responsible for rechecking state_ after recording
        // a deferred read to avoid atomicity problems between the state_
        // CAS and applyDeferredReader's reads of deferredReaders[].
        if (UNLIKELY((before & kMayDefer) != 0)) {
          applyDeferredReaders(state, ctx);
        }
        while (true) {
          assert((state & (kHasE | kBegunE)) != 0 && (state & kHasU) == 0);
          if (UNLIKELY((state & kHasS) != 0) &&
              !waitForZeroBits(state, kHasS, kWaitingNotS, ctx) &&
              ctx.canTimeOut()) {
            // Ugh. We blocked new readers and other writers for a while,
            // but were unable to complete. Move on. On the plus side
            // we can clear kWaitingNotS because nobody else can piggyback
            // on it.
            state = (state_ &= ~(kPrevDefer | kHasE | kBegunE | kWaitingNotS));
            wakeRegisteredWaiters(state, kWaitingE | kWaitingU | kWaitingS);
            return false;
          }

          if (ReaderPriority && (state & kHasE) == 0) {
            assert((state & kBegunE) != 0);
            if (!state_.compare_exchange_strong(state,
                                                (state & ~kBegunE) | kHasE)) {
              continue;
            }
          }

          return true;
        }
      }
    }
  }
  template <class WaitContext>
  bool waitForZeroBits(uint32_t& state,
                       uint32_t goal,
                       uint32_t waitMask,
                       WaitContext& ctx) {
    uint32_t spinCount = 0;
    while (true) {
      state = state_.load(std::memory_order_acquire);
      if ((state & goal) == 0) {
        return true;
      }
      asm_volatile_pause();
      ++spinCount;
      if (UNLIKELY(spinCount >= kMaxSpinCount)) {
        return ctx.canBlock() &&
               yieldWaitForZeroBits(state, goal, waitMask, ctx);
      }
    }
  }
  template <class WaitContext>
  bool yieldWaitForZeroBits(uint32_t& state,
                            uint32_t goal,
                            uint32_t waitMask,
                            WaitContext& ctx) {
#ifdef RUSAGE_THREAD
    struct rusage usage;
    long before = -1;
#endif
    for (uint32_t yieldCount = 0; yieldCount < kMaxSoftYieldCount;
         ++yieldCount) {
      for (int softState = 0; softState < 3; ++softState) {
        if (softState < 2) {
          std::this_thread::yield();
        } else {
#ifdef RUSAGE_THREAD
          getrusage(RUSAGE_THREAD, &usage);
#endif
        }
        if (((state = state_.load(std::memory_order_acquire)) & goal) == 0) {
          return true;
        }
        if (ctx.shouldTimeOut()) {
          return false;
        }
      }
#ifdef RUSAGE_THREAD
      if (before >= 0 && usage.ru_nivcsw >= before + 2) {
        // One involuntary csw might just be occasional background work,
        // but if we get two in a row then we guess that there is someone
        // else who can profitably use this CPU. Fall back to futex
        break;
      }
      before = usage.ru_nivcsw;
#endif
    }
    return futexWaitForZeroBits(state, goal, waitMask, ctx);
  }
  template <class WaitContext>
  bool futexWaitForZeroBits(uint32_t& state,
                            uint32_t goal,
                            uint32_t waitMask,
                            WaitContext& ctx) {
    assert(waitMask == kWaitingNotS || waitMask == kWaitingE ||
           waitMask == kWaitingU || waitMask == kWaitingS);

    while (true) {
      state = state_.load(std::memory_order_acquire);
      if ((state & goal) == 0) {
        return true;
      }

      auto after = state;
      if (waitMask == kWaitingE) {
        if ((state & kWaitingESingle) != 0) {
          after |= kWaitingEMultiple;
        } else {
          after |= kWaitingESingle;
        }
      } else {
        after |= waitMask;
      }

      // CAS is better than atomic |= here, because it lets us avoid
      // setting the wait flag when the goal is concurrently achieved
      if (after != state && !state_.compare_exchange_strong(state, after)) {
        continue;
      }

      if (!ctx.doWait(state_, after, waitMask)) {
        // timed out
        return false;
      }
    }
  }
  // Wakes up waiters registered in state_ as appropriate, clearing the
  // awaiting bits for anybody that was awoken. Tries to perform direct
  // single wakeup of an exclusive waiter if appropriate
  void wakeRegisteredWaiters(uint32_t& state, uint32_t wakeMask) {
    if (UNLIKELY((state & wakeMask) != 0)) {
      wakeRegisteredWaitersImpl(state, wakeMask);
    }
  }
  void wakeRegisteredWaitersImpl(uint32_t& state, uint32_t wakeMask) {
    // If there are multiple lock() pending only one of them will actually
    // get to wake up, so issuing futexWakeAll will make a thundering herd.
    // There's nothing stopping us from issuing futexWake(1) instead,
    // so long as the wait bits are still an accurate reflection of
    // the waiters. If we notice (via futexWake's return value) that
    // nobody woke up then we can try again with the normal wake-all path.
    // Note that we can't just clear the bits at that point; we need to
    // clear the bits and then issue another wakeup.
    //
    // It is possible that we wake an E waiter but an outside S grabs the
    // lock instead, at which point we should wake pending U and S waiters.
    // Rather than tracking state to make the failing E regenerate the
    // wakeup, we just disable the optimization in the case that there
    // are waiting U or S that we are eligible to wake.
    if ((wakeMask & kWaitingE) == kWaitingE &&
        (state & wakeMask) == kWaitingE &&
        state_.futexWake(1, kWaitingE) > 0) {
      // somebody woke up, so leave state_ as is and clear it later
      return;
    }

    if ((state & wakeMask) != 0) {
      auto prev = state_.fetch_and(~wakeMask);
      if ((prev & wakeMask) != 0) {
        futexWakeAll(wakeMask);
      }
      state = prev & ~wakeMask;
    }
  }
  void futexWakeAll(uint32_t wakeMask) {
    state_.futexWake(std::numeric_limits<int>::max(), wakeMask);
  }

  DeferredReaderSlot* deferredReader(uint32_t slot) {
    return &deferredReaders[slot * kDeferredSeparationFactor];
  }

  uintptr_t tokenfulSlotValue() { return reinterpret_cast<uintptr_t>(this); }

  uintptr_t tokenlessSlotValue() { return tokenfulSlotValue() | kTokenless; }

  bool slotValueIsThis(uintptr_t slotValue) {
    return (slotValue & ~kTokenless) == tokenfulSlotValue();
  }
  // Clears any deferredReaders[] that point to this, adjusting the inline
  // shared lock count to compensate. Does some spinning and yielding
  // to avoid the work. Always finishes the application, even if ctx
  // times out.
  template <class WaitContext>
  void applyDeferredReaders(uint32_t& state, WaitContext& ctx) {
    uint32_t slot = 0;

    uint32_t spinCount = 0;
    while (true) {
      while (!slotValueIsThis(
          deferredReader(slot)->load(std::memory_order_acquire))) {
        if (++slot == kMaxDeferredReaders) {
          return;
        }
      }
      asm_volatile_pause();
      if (UNLIKELY(++spinCount >= kMaxSpinCount)) {
        applyDeferredReaders(state, ctx, slot);
        return;
      }
    }
  }
  template <class WaitContext>
  void applyDeferredReaders(uint32_t& state, WaitContext& ctx, uint32_t slot) {
#ifdef RUSAGE_THREAD
    struct rusage usage;
    long before = -1;
#endif
    for (uint32_t yieldCount = 0; yieldCount < kMaxSoftYieldCount;
         ++yieldCount) {
      for (int softState = 0; softState < 3; ++softState) {
        if (softState < 2) {
          std::this_thread::yield();
        } else {
#ifdef RUSAGE_THREAD
          getrusage(RUSAGE_THREAD, &usage);
#endif
        }
        while (!slotValueIsThis(
            deferredReader(slot)->load(std::memory_order_acquire))) {
          if (++slot == kMaxDeferredReaders) {
            return;
          }
        }
        if (ctx.shouldTimeOut()) {
          // finish applying immediately on timeout
          break;
        }
      }
#ifdef RUSAGE_THREAD
      if (before >= 0 && usage.ru_nivcsw >= before + 2) {
        // heuristic says run queue is not empty
        break;
      }
      before = usage.ru_nivcsw;
#endif
    }

    uint32_t movedSlotCount = 0;
    for (; slot < kMaxDeferredReaders; ++slot) {
      auto slotPtr = deferredReader(slot);
      auto slotValue = slotPtr->load(std::memory_order_acquire);
      if (slotValueIsThis(slotValue) &&
          slotPtr->compare_exchange_strong(slotValue, 0)) {
        ++movedSlotCount;
      }
    }

    if (movedSlotCount > 0) {
      state = (state_ += movedSlotCount * kIncrHasS);
    }
    assert((state & (kHasE | kBegunE)) != 0);

    // if state + kIncrHasS overflows (off the end of state) then either
    // we have 2^(32-10) readers (almost certainly an application bug)
    // or we had an underflow (also a bug)
    assert(state < state + kIncrHasS);
  }
  // It is straightforward to make a token-less lock_shared() and
  // unlock_shared() either by making the token-less version always use
  // INLINE_SHARED mode or by removing the token version. Supporting
  // deferred operation for both types is trickier than it appears, because
  // the purpose of the token is so that unlock_shared doesn't have to
  // look in other slots for its deferred lock. Token-less unlock_shared
  // might place a deferred lock in one place and then release a different
  // slot that was originally used by the token-ful version. If this was
  // important we could solve the problem by differentiating the deferred
  // locks so that cross-variety release wouldn't occur. The best way
  // is probably to steal a bit from the pointer, making deferredLocks[]
  // an array of Atom<uintptr_t>.
  template <class WaitContext>
  bool lockSharedImpl(Token* token, WaitContext& ctx) {
    uint32_t state = state_.load(std::memory_order_relaxed);
    if ((state & (kHasS | kMayDefer | kHasE)) == 0 &&
        state_.compare_exchange_strong(state, state + kIncrHasS)) {
      if (token != nullptr) {
        token->type_ = Token::Type::INLINE_SHARED;
      }
      return true;
    }
    return lockSharedImpl(state, token, ctx);
  }
  template <class WaitContext>
  bool lockSharedImpl(uint32_t& state, Token* token, WaitContext& ctx);

  // Updates the state in/out argument as if the locks were made inline,
  // but does not update state_
  void cleanupTokenlessSharedDeferred(uint32_t& state) {
    for (uint32_t i = 0; i < kMaxDeferredReaders; ++i) {
      auto slotPtr = deferredReader(i);
      auto slotValue = slotPtr->load(std::memory_order_relaxed);
      if (slotValue == tokenlessSlotValue()) {
        slotPtr->store(0, std::memory_order_relaxed);
        state += kIncrHasS;
        if ((state & kHasS) == 0) {
          break;
        }
      }
    }
  }

  bool tryUnlockTokenlessSharedDeferred();
  bool tryUnlockSharedDeferred(uint32_t slot) {
    assert(slot < kMaxDeferredReaders);
    auto slotValue = tokenfulSlotValue();
    return deferredReader(slot)->compare_exchange_strong(slotValue, 0);
  }

  uint32_t unlockSharedInline() {
    uint32_t state = (state_ -= kIncrHasS);
    assert((state & (kHasE | kBegunE | kMayDefer)) != 0 ||
           state < state + kIncrHasS);
    if ((state & kHasS) == 0) {
      // Only the second half of lock() can be blocked by a non-zero
      // reader count, so that's the only thing we need to wake
      wakeRegisteredWaiters(state, kWaitingNotS);
    }
    return state;
  }
  template <class WaitContext>
  bool lockUpgradeImpl(WaitContext& ctx) {
    uint32_t state;
    do {
      if (!waitForZeroBits(state, kHasSolo, kWaitingU, ctx)) {
        return false;
      }
    } while (!state_.compare_exchange_strong(state, state | kHasU));
    return true;
  }
 public:
  class ReadHolder {
   public:
    ReadHolder() : lock_(nullptr) {}

    explicit ReadHolder(const SharedMutexImpl* lock) : ReadHolder(*lock) {}

    explicit ReadHolder(const SharedMutexImpl& lock)
        : lock_(const_cast<SharedMutexImpl*>(&lock)) {
      lock_->lock_shared(token_);
    }

    ReadHolder(ReadHolder&& rhs) noexcept : lock_(rhs.lock_),
                                            token_(rhs.token_) {
      rhs.lock_ = nullptr;
    }

    // Downgrade from upgrade mode
    explicit ReadHolder(UpgradeHolder&& upgraded) : lock_(upgraded.lock_) {
      assert(upgraded.lock_ != nullptr);
      upgraded.lock_ = nullptr;
      lock_->unlock_upgrade_and_lock_shared(token_);
    }

    // Downgrade from exclusive mode
    explicit ReadHolder(WriteHolder&& writer) : lock_(writer.lock_) {
      assert(writer.lock_ != nullptr);
      writer.lock_ = nullptr;
      lock_->unlock_and_lock_shared(token_);
    }

    ReadHolder& operator=(ReadHolder&& rhs) noexcept {
      std::swap(lock_, rhs.lock_);
      std::swap(token_, rhs.token_);
      return *this;
    }

    ReadHolder(const ReadHolder& rhs) = delete;
    ReadHolder& operator=(const ReadHolder& rhs) = delete;

    ~ReadHolder() {
      if (lock_) {
        lock_->unlock_shared(token_);
      }
    }

   private:
    friend class UpgradeHolder;
    friend class WriteHolder;
    SharedMutexImpl* lock_;
    SharedMutexToken token_;
  };
  class UpgradeHolder {
   public:
    UpgradeHolder() : lock_(nullptr) {}

    explicit UpgradeHolder(SharedMutexImpl* lock) : UpgradeHolder(*lock) {}

    explicit UpgradeHolder(SharedMutexImpl& lock) : lock_(&lock) {
      lock_->lock_upgrade();
    }

    // Downgrade from exclusive mode
    explicit UpgradeHolder(WriteHolder&& writer) : lock_(writer.lock_) {
      assert(writer.lock_ != nullptr);
      writer.lock_ = nullptr;
      lock_->unlock_and_lock_upgrade();
    }

    UpgradeHolder(UpgradeHolder&& rhs) noexcept : lock_(rhs.lock_) {
      rhs.lock_ = nullptr;
    }

    UpgradeHolder& operator=(UpgradeHolder&& rhs) noexcept {
      std::swap(lock_, rhs.lock_);
      return *this;
    }

    UpgradeHolder(const UpgradeHolder& rhs) = delete;
    UpgradeHolder& operator=(const UpgradeHolder& rhs) = delete;

    ~UpgradeHolder() {
      if (lock_) {
        lock_->unlock_upgrade();
      }
    }

   private:
    friend class WriteHolder;
    friend class ReadHolder;
    SharedMutexImpl* lock_;
  };
  class WriteHolder {
   public:
    WriteHolder() : lock_(nullptr) {}

    explicit WriteHolder(SharedMutexImpl* lock) : WriteHolder(*lock) {}

    explicit WriteHolder(SharedMutexImpl& lock) : lock_(&lock) {
      lock_->lock();
    }

    // Promotion from upgrade mode
    explicit WriteHolder(UpgradeHolder&& upgrade) : lock_(upgrade.lock_) {
      assert(upgrade.lock_ != nullptr);
      upgrade.lock_ = nullptr;
      lock_->unlock_upgrade_and_lock();
    }
    // It is intended that WriteHolder(ReadHolder&&) does not exist.
    //
    // Shared locks (read) cannot safely upgrade to unique locks (write).
    // That upgrade path is a well-known recipe for deadlock, so we
    // explicitly disallow it.
    //
    // If you need to do a conditional mutation, you have a few options:
    // 1. Check the condition under a shared lock and release it.
    //    Then maybe check the condition again under a unique lock and
    //    maybe do the mutation.
    // 2. Check the condition once under an upgradeable lock.
    //    Then maybe upgrade the lock to a unique lock and do the mutation.
    // 3. Check the condition and maybe perform the mutation under a
    //    unique lock.
    //
    // Relevant upgradeable lock notes (see also the sketch below):
    // * At most one upgradeable lock can be held at a time for a given
    //   shared mutex, just like a unique lock.
    // * An upgradeable lock may be held concurrently with any number of
    //   shared locks.
    // * An upgradeable lock may be upgraded atomically to a unique lock.
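    //
    // A hedged sketch of option 2 above (`cond()` and `mutate()` are
    // hypothetical helpers):
    //
    //   folly::SharedMutex mu;
    //   folly::SharedMutex::UpgradeHolder uh(mu); // readers may continue
    //   if (cond()) {
    //     folly::SharedMutex::WriteHolder wh(std::move(uh)); // atomic upgrade
    //     mutate();
    //   }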
    WriteHolder(WriteHolder&& rhs) noexcept : lock_(rhs.lock_) {
      rhs.lock_ = nullptr;
    }

    WriteHolder& operator=(WriteHolder&& rhs) noexcept {
      std::swap(lock_, rhs.lock_);
      return *this;
    }

    WriteHolder(const WriteHolder& rhs) = delete;
    WriteHolder& operator=(const WriteHolder& rhs) = delete;

    ~WriteHolder() {
      if (lock_) {
        lock_->unlock();
      }
    }

   private:
    friend class ReadHolder;
    friend class UpgradeHolder;
    SharedMutexImpl* lock_;
  };
  // Adapters for Synchronized<>
  friend void acquireRead(SharedMutexImpl& lock) { lock.lock_shared(); }
  friend void acquireReadWrite(SharedMutexImpl& lock) { lock.lock(); }
  friend void releaseRead(SharedMutexImpl& lock) { lock.unlock_shared(); }
  friend void releaseReadWrite(SharedMutexImpl& lock) { lock.unlock(); }
  friend bool acquireRead(SharedMutexImpl& lock, unsigned int ms) {
    return lock.try_lock_shared_for(std::chrono::milliseconds(ms));
  }
  friend bool acquireReadWrite(SharedMutexImpl& lock, unsigned int ms) {
    return lock.try_lock_for(std::chrono::milliseconds(ms));
  }
};
typedef SharedMutexImpl<true> SharedMutexReadPriority;
typedef SharedMutexImpl<false> SharedMutexWritePriority;
typedef SharedMutexWritePriority SharedMutex;

// Prevent the compiler from instantiating these in other translation units.
// They are instantiated once in SharedMutex.cpp
extern template class SharedMutexImpl<true>;
extern template class SharedMutexImpl<false>;
template <bool ReaderPriority,
          typename Tag_,
          template <typename> class Atom,
          bool BlockImmediately>
typename SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
    DeferredReaderSlot
    SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
        deferredReaders[kMaxDeferredReaders * kDeferredSeparationFactor] = {};
template <bool ReaderPriority,
          typename Tag_,
          template <typename> class Atom,
          bool BlockImmediately>
FOLLY_TLS uint32_t
    SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
        tls_lastTokenlessSlot = 0;
template <bool ReaderPriority,
          typename Tag_,
          template <typename> class Atom,
          bool BlockImmediately>
bool SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
    tryUnlockTokenlessSharedDeferred() {
  auto bestSlot = tls_lastTokenlessSlot;
  for (uint32_t i = 0; i < kMaxDeferredReaders; ++i) {
    auto slotPtr = deferredReader(bestSlot ^ i);
    auto slotValue = slotPtr->load(std::memory_order_relaxed);
    if (slotValue == tokenlessSlotValue() &&
        slotPtr->compare_exchange_strong(slotValue, 0)) {
      tls_lastTokenlessSlot = bestSlot ^ i;
      return true;
    }
  }
  return false;
}
template <bool ReaderPriority,
          typename Tag_,
          template <typename> class Atom,
          bool BlockImmediately>
template <class WaitContext>
bool SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
    lockSharedImpl(uint32_t& state, Token* token, WaitContext& ctx) {
  while (true) {
    if (UNLIKELY((state & kHasE) != 0) &&
        !waitForZeroBits(state, kHasE, kWaitingS, ctx) && ctx.canTimeOut()) {
      return false;
    }

    uint32_t slot;
    uintptr_t slotValue = 1; // any non-zero value will do

    bool canAlreadyDefer = (state & kMayDefer) != 0;
    bool aboveDeferThreshold =
        (state & kHasS) >= (kNumSharedToStartDeferring - 1) * kIncrHasS;
    bool drainInProgress = ReaderPriority && (state & kBegunE) != 0;
    if (canAlreadyDefer || (aboveDeferThreshold && !drainInProgress)) {
      // starting point for our empty-slot search, can change after
      // calling waitForZeroBits
      uint32_t bestSlot =
          (uint32_t)folly::detail::AccessSpreader<Atom>::current(
              kMaxDeferredReaders);

      // deferred readers are already enabled, or it is time to
      // enable them if we can find a slot
      for (uint32_t i = 0; i < kDeferredSearchDistance; ++i) {
        slot = bestSlot ^ i;
        assert(slot < kMaxDeferredReaders);
        slotValue = deferredReader(slot)->load(std::memory_order_relaxed);
        if (slotValue == 0) {
          // found empty slot
          break;
        }
      }
    }

    if (slotValue != 0) {
      // not yet deferred, or no empty slots
      if (state_.compare_exchange_strong(state, state + kIncrHasS)) {
        // successfully recorded the read lock inline
        if (token != nullptr) {
          token->type_ = Token::Type::INLINE_SHARED;
        }
        return true;
      }
      // state is updated, try again
      continue;
    }

    // record that deferred readers might be in use if necessary
    if ((state & kMayDefer) == 0) {
      if (!state_.compare_exchange_strong(state, state | kMayDefer)) {
        // keep going if CAS failed because somebody else set the bit
        // for us
        if ((state & (kHasE | kMayDefer)) != kMayDefer) {
          continue;
        }
      }
      // state = state | kMayDefer;
    }

    // try to use the slot
    bool gotSlot = deferredReader(slot)->compare_exchange_strong(
        slotValue,
        token == nullptr ? tokenlessSlotValue() : tokenfulSlotValue());

    // If we got the slot, we need to verify that an exclusive lock
    // didn't happen since we last checked. If we didn't get the slot we
    // need to recheck state_ anyway to make sure we don't waste too much
    // work. It is also possible that since we checked state_ someone
    // has acquired and released the write lock, clearing kMayDefer.
    // Both cases are covered by looking for the readers-possible bit,
    // because it is off when the exclusive lock bit is set.
    state = state_.load(std::memory_order_acquire);

    if (!gotSlot) {
      continue;
    }
    if (token == nullptr) {
      tls_lastTokenlessSlot = slot;
    }

    if ((state & kMayDefer) != 0) {
      // success
      assert((state & kHasE) == 0);
      if (token != nullptr) {
        token->type_ = Token::Type::DEFERRED_SHARED;
        token->slot_ = (uint16_t)slot;
      }
      return true;
    }

    // release the slot before retrying
    if (token == nullptr) {
      // We can't rely on slot. Token-less slot values can be freed by
      // any unlock_shared(), so we need to do the full deferredReader
      // search during unlock. Unlike unlock_shared(), we can't trust
      // kPrevDefer here. This deferred lock isn't visible to lock()
      // (that's the whole reason we're undoing it) so there might have
      // subsequently been an unlock() and lock() with no intervening
      // transition to deferred mode.
      if (!tryUnlockTokenlessSharedDeferred()) {
        unlockSharedInline();
      }
    } else {
      if (!tryUnlockSharedDeferred(slot)) {
        unlockSharedInline();
      }
    }

    // We got here not because the lock was unavailable, but because
    // we lost a compare-and-swap. Try-lock is typically allowed to
    // have spurious failures, but there is no lock efficiency gain
    // from exploiting that freedom here.
  }
}

} // namespace folly