}
template <class WaitContext>
- bool lockSharedImpl(uint32_t& state, Token* token, WaitContext& ctx) {
- while (true) {
- if (UNLIKELY((state & kHasE) != 0) &&
- !waitForZeroBits(state, kHasE, kWaitingS, ctx) && ctx.canTimeOut()) {
- return false;
- }
-
- uint32_t slot;
- uintptr_t slotValue = 1; // any non-zero value will do
-
- bool canAlreadyDefer = (state & kMayDefer) != 0;
- bool aboveDeferThreshold =
- (state & kHasS) >= (kNumSharedToStartDeferring - 1) * kIncrHasS;
- bool drainInProgress = ReaderPriority && (state & kBegunE) != 0;
- if (canAlreadyDefer || (aboveDeferThreshold && !drainInProgress)) {
- // starting point for our empty-slot search, can change after
- // calling waitForZeroBits
- uint32_t bestSlot =
- (uint32_t)folly::detail::AccessSpreader<Atom>::current(
- kMaxDeferredReaders);
-
- // deferred readers are already enabled, or it is time to
- // enable them if we can find a slot
- for (uint32_t i = 0; i < kDeferredSearchDistance; ++i) {
- slot = bestSlot ^ i;
- assert(slot < kMaxDeferredReaders);
- slotValue = deferredReader(slot)->load(std::memory_order_relaxed);
- if (slotValue == 0) {
- // found empty slot
- break;
- }
- }
- }
-
- if (slotValue != 0) {
- // not yet deferred, or no empty slots
- if (state_.compare_exchange_strong(state, state + kIncrHasS)) {
- // successfully recorded the read lock inline
- if (token != nullptr) {
- token->type_ = Token::Type::INLINE_SHARED;
- }
- return true;
- }
- // state is updated, try again
- continue;
- }
-
- // record that deferred readers might be in use if necessary
- if ((state & kMayDefer) == 0) {
- if (!state_.compare_exchange_strong(state, state | kMayDefer)) {
- // keep going if CAS failed because somebody else set the bit
- // for us
- if ((state & (kHasE | kMayDefer)) != kMayDefer) {
- continue;
- }
- }
- // state = state | kMayDefer;
- }
-
- // try to use the slot
- bool gotSlot = deferredReader(slot)->compare_exchange_strong(
- slotValue,
- token == nullptr ? tokenlessSlotValue() : tokenfulSlotValue());
-
- // If we got the slot, we need to verify that an exclusive lock
- // didn't happen since we last checked. If we didn't get the slot we
- // need to recheck state_ anyway to make sure we don't waste too much
- // work. It is also possible that since we checked state_ someone
- // has acquired and released the write lock, clearing kMayDefer.
- // Both cases are covered by looking for the readers-possible bit,
- // because it is off when the exclusive lock bit is set.
- state = state_.load(std::memory_order_acquire);
-
- if (!gotSlot) {
- continue;
- }
-
- if (token == nullptr) {
- tls_lastTokenlessSlot = slot;
- }
-
- if ((state & kMayDefer) != 0) {
- assert((state & kHasE) == 0);
- // success
- if (token != nullptr) {
- token->type_ = Token::Type::DEFERRED_SHARED;
- token->slot_ = (uint16_t)slot;
- }
- return true;
- }
-
- // release the slot before retrying
- if (token == nullptr) {
- // We can't rely on slot. Token-less slot values can be freed by
- // any unlock_shared(), so we need to do the full deferredReader
- // search during unlock. Unlike unlock_shared(), we can't trust
- // kPrevDefer here. This deferred lock isn't visible to lock()
- // (that's the whole reason we're undoing it) so there might have
- // subsequently been an unlock() and lock() with no intervening
- // transition to deferred mode.
- if (!tryUnlockTokenlessSharedDeferred()) {
- unlockSharedInline();
- }
- } else {
- if (!tryUnlockSharedDeferred(slot)) {
- unlockSharedInline();
- }
- }
-
- // We got here not because the lock was unavailable, but because
- // we lost a compare-and-swap. Try-lock is typically allowed to
- // have spurious failures, but there is no lock efficiency gain
- // from exploiting that freedom here.
- }
- }
+ bool lockSharedImpl(uint32_t& state, Token* token, WaitContext& ctx);
// Updates the state in/out argument as if the locks were made inline,
// but does not update state_
}
}
- bool tryUnlockTokenlessSharedDeferred() {
- auto bestSlot = tls_lastTokenlessSlot;
- for (uint32_t i = 0; i < kMaxDeferredReaders; ++i) {
- auto slotPtr = deferredReader(bestSlot ^ i);
- auto slotValue = slotPtr->load(std::memory_order_relaxed);
- if (slotValue == tokenlessSlotValue() &&
- slotPtr->compare_exchange_strong(slotValue, 0)) {
- tls_lastTokenlessSlot = bestSlot ^ i;
- return true;
- }
- }
- return false;
- }
+ bool tryUnlockTokenlessSharedDeferred();
  bool tryUnlockSharedDeferred(uint32_t slot) {
    assert(slot < kMaxDeferredReaders);
    auto slotValue = tokenfulSlotValue();
    return deferredReader(slot)->compare_exchange_strong(slotValue, 0);
  }
};
+typedef SharedMutexImpl<true> SharedMutexReadPriority;
+typedef SharedMutexImpl<false> SharedMutexWritePriority;
+typedef SharedMutexWritePriority SharedMutex;
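+
+// A minimal usage sketch of the token-based read-lock path (assumes
+// folly's SharedMutexToken alias for the Token type used above):
+//
+//   folly::SharedMutex mu;
+//   folly::SharedMutexToken token;
+//   mu.lock_shared(token);   // shared lock, possibly via a deferred slot
+//   // ... reader critical section ...
+//   mu.unlock_shared(token); // the token makes the slot lookup O(1)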
+
+// Prevent the compiler from implicitly instantiating these in other
+// translation units; they are explicitly instantiated once in SharedMutex.cpp
+extern template class SharedMutexImpl<true>;
+extern template class SharedMutexImpl<false>;
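+//
+// SharedMutex.cpp is expected to hold the matching explicit instantiation
+// definitions, i.e. (a sketch of that file's contents):
+//
+//   template class SharedMutexImpl<true>;
+//   template class SharedMutexImpl<false>;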
+
template <
    bool ReaderPriority,
    typename Tag_,
    template <typename> class Atom,
    bool BlockImmediately>
FOLLY_TLS uint32_t
SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
    tls_lastTokenlessSlot = 0;
-typedef SharedMutexImpl<true> SharedMutexReadPriority;
-typedef SharedMutexImpl<false> SharedMutexWritePriority;
-typedef SharedMutexWritePriority SharedMutex;
+template <
+ bool ReaderPriority,
+ typename Tag_,
+ template <typename> class Atom,
+ bool BlockImmediately>
+bool SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
+ tryUnlockTokenlessSharedDeferred() {
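+ // Tokenless deferred locks are interchangeable, so freeing any slot that
+ // holds tokenlessSlotValue() is enough.  Start the search at the slot
+ // this thread used most recently; the XOR walk fans out from there.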
+ auto bestSlot = tls_lastTokenlessSlot;
+ for (uint32_t i = 0; i < kMaxDeferredReaders; ++i) {
+ auto slotPtr = deferredReader(bestSlot ^ i);
+ auto slotValue = slotPtr->load(std::memory_order_relaxed);
+ if (slotValue == tokenlessSlotValue() &&
+ slotPtr->compare_exchange_strong(slotValue, 0)) {
+ tls_lastTokenlessSlot = bestSlot ^ i;
+ return true;
+ }
+ }
+ return false;
+}
-// Prevent the compiler from instantiating these in other translation units.
-// They are instantiated once in SharedMutex.cpp
-extern template class SharedMutexImpl<true>;
-extern template class SharedMutexImpl<false>;
+template <
+ bool ReaderPriority,
+ typename Tag_,
+ template <typename> class Atom,
+ bool BlockImmediately>
+template <class WaitContext>
+bool SharedMutexImpl<ReaderPriority, Tag_, Atom, BlockImmediately>::
+ lockSharedImpl(uint32_t& state, Token* token, WaitContext& ctx) {
+ while (true) {
+ if (UNLIKELY((state & kHasE) != 0) &&
+ !waitForZeroBits(state, kHasE, kWaitingS, ctx) && ctx.canTimeOut()) {
+ return false;
+ }
+
+ uint32_t slot;
+ uintptr_t slotValue = 1; // any non-zero value will do
+
+ bool canAlreadyDefer = (state & kMayDefer) != 0;
+ bool aboveDeferThreshold =
+ (state & kHasS) >= (kNumSharedToStartDeferring - 1) * kIncrHasS;
+ bool drainInProgress = ReaderPriority && (state & kBegunE) != 0;
+ if (canAlreadyDefer || (aboveDeferThreshold && !drainInProgress)) {
+ // starting point for our empty-slot search, can change after
+ // calling waitForZeroBits
+ uint32_t bestSlot =
+ (uint32_t)folly::detail::AccessSpreader<Atom>::current(
+ kMaxDeferredReaders);
+
+ // deferred readers are already enabled, or it is time to
+ // enable them if we can find a slot
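+ // (e.g. if bestSlot == 5 the probe order is 5, 4, 7, 6, ...:
+ // bestSlot ^ i keeps successive probes near bestSlot)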
+ for (uint32_t i = 0; i < kDeferredSearchDistance; ++i) {
+ slot = bestSlot ^ i;
+ assert(slot < kMaxDeferredReaders);
+ slotValue = deferredReader(slot)->load(std::memory_order_relaxed);
+ if (slotValue == 0) {
+ // found empty slot
+ break;
+ }
+ }
+ }
+
+ if (slotValue != 0) {
+ // not yet deferred, or no empty slots
+ if (state_.compare_exchange_strong(state, state + kIncrHasS)) {
+ // successfully recorded the read lock inline
+ if (token != nullptr) {
+ token->type_ = Token::Type::INLINE_SHARED;
+ }
+ return true;
+ }
+ // state is updated, try again
+ continue;
+ }
+
+ // record that deferred readers might be in use if necessary
+ if ((state & kMayDefer) == 0) {
+ if (!state_.compare_exchange_strong(state, state | kMayDefer)) {
+ // keep going if CAS failed because somebody else set the bit
+ // for us
+ if ((state & (kHasE | kMayDefer)) != kMayDefer) {
+ continue;
+ }
+ }
+ // note: on a successful CAS the local copy of state still lacks
+ // kMayDefer; that's fine, state is reloaded from state_ below
+ }
+
+ // try to use the slot
+ bool gotSlot = deferredReader(slot)->compare_exchange_strong(
+ slotValue,
+ token == nullptr ? tokenlessSlotValue() : tokenfulSlotValue());
+
+ // If we got the slot, we need to verify that an exclusive lock
+ // didn't happen since we last checked. If we didn't get the slot we
+ // need to recheck state_ anyway to make sure we don't waste too much
+ // work. It is also possible that since we checked state_ someone
+ // has acquired and released the write lock, clearing kMayDefer.
+ // Both cases are covered by looking for the readers-possible bit,
+ // because it is off when the exclusive lock bit is set.
+ state = state_.load(std::memory_order_acquire);
+
+ if (!gotSlot) {
+ continue;
+ }
+
+ if (token == nullptr) {
+ tls_lastTokenlessSlot = slot;
+ }
+
+ if ((state & kMayDefer) != 0) {
+ assert((state & kHasE) == 0);
+ // success
+ if (token != nullptr) {
+ token->type_ = Token::Type::DEFERRED_SHARED;
+ token->slot_ = (uint16_t)slot;
+ }
+ return true;
+ }
+
+ // release the slot before retrying
+ if (token == nullptr) {
+ // We can't rely on slot. Token-less slot values can be freed by
+ // any unlock_shared(), so we need to do the full deferredReader
+ // search during unlock. Unlike unlock_shared(), we can't trust
+ // kPrevDefer here. This deferred lock isn't visible to lock()
+ // (that's the whole reason we're undoing it) so there might have
+ // subsequently been an unlock() and lock() with no intervening
+ // transition to deferred mode.
+ if (!tryUnlockTokenlessSharedDeferred()) {
+ unlockSharedInline();
+ }
+ } else {
+ if (!tryUnlockSharedDeferred(slot)) {
+ unlockSharedInline();
+ }
+ }
+
+ // We got here not because the lock was unavailable, but because
+ // we lost a compare-and-swap. Try-lock is typically allowed to
+ // have spurious failures, but there is no lock efficiency gain
+ // from exploiting that freedom here.
+ }
+}
} // namespace folly