From d383a1167cca0599b5ca9053f4f5ade55a7b50a5 Mon Sep 17 00:00:00 2001
From: Orvid King
Date: Fri, 31 Jul 2015 12:43:41 -0700
Subject: [PATCH] Generalize FOLLY_SPIN_WAIT to use Portability methods

Summary: Improves MSVC support.

Closes #274

Note that this diff has been heavily modified from @Orvid's original PR
by @yfeldblum and @sgolemon

Reviewed By: @yfeldblum

Differential Revision: D2284035

Pulled By: @sgolemon
---
 folly/AtomicHashArray-inl.h    | 17 ++++++++---------
 folly/AtomicHashMap-inl.h      |  8 +++-----
 folly/detail/AtomicHashUtils.h | 38 +++++++++++++++++++++-----------------
 3 files changed, 32 insertions(+), 31 deletions(-)

diff --git a/folly/AtomicHashArray-inl.h b/folly/AtomicHashArray-inl.h
index d455bbce..6ab54850 100644
--- a/folly/AtomicHashArray-inl.h
+++ b/folly/AtomicHashArray-inl.h
@@ -114,10 +114,11 @@ insertInternal(KeyT key_in, T&& value) {
         // another thread now does ++numPendingEntries_, we expect it
         // to pass the isFull_.load() test above. (It shouldn't insert
         // a new entry.)
-        FOLLY_SPIN_WAIT(
-          isFull_.load(std::memory_order_acquire) != NO_PENDING_INSERTS
-            && numPendingEntries_.readFull() != 0
-        );
+        detail::atomic_hash_spin_wait([&] {
+          return
+            (isFull_.load(std::memory_order_acquire) != NO_PENDING_INSERTS) &&
+            (numPendingEntries_.readFull() != 0);
+        });
         isFull_.store(NO_PENDING_INSERTS, std::memory_order_release);
 
         if (relaxedLoadKey(*cell) == kEmptyKey_) {
@@ -161,9 +162,9 @@ insertInternal(KeyT key_in, T&& value) {
     }
     DCHECK(relaxedLoadKey(*cell) != kEmptyKey_);
     if (kLockedKey_ == acquireLoadKey(*cell)) {
-      FOLLY_SPIN_WAIT(
-        kLockedKey_ == acquireLoadKey(*cell)
-      );
+      detail::atomic_hash_spin_wait([&] {
+        return kLockedKey_ == acquireLoadKey(*cell);
+      });
     }
 
     const KeyT thisKey = acquireLoadKey(*cell);
@@ -397,5 +398,3 @@ struct AtomicHashArray::aha_iterator
 }; // aha_iterator
 
 } // namespace folly
-
-#undef FOLLY_SPIN_WAIT
diff --git a/folly/AtomicHashMap-inl.h b/folly/AtomicHashMap-inl.h
index 4752cdd3..4f7c6a12 100644
--- a/folly/AtomicHashMap-inl.h
+++ b/folly/AtomicHashMap-inl.h
@@ -130,9 +130,9 @@ insertInternal(key_type key, T&& value) {
       } else {
         // If we lost the race, we'll have to wait for the next map to get
        // allocated before doing any insertion here.
-        FOLLY_SPIN_WAIT(
-          nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire)
-        );
+        detail::atomic_hash_spin_wait([&] {
+          return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
+        });
       }
 
       // Relaxed is ok here because either we just created this map, or we
@@ -427,5 +427,3 @@ struct AtomicHashMap::ahm_iterator
 }; // ahm_iterator
 
 } // namespace folly
-
-#undef FOLLY_SPIN_WAIT
diff --git a/folly/detail/AtomicHashUtils.h b/folly/detail/AtomicHashUtils.h
index a896ba3a..ebc96562 100644
--- a/folly/detail/AtomicHashUtils.h
+++ b/folly/detail/AtomicHashUtils.h
@@ -14,25 +14,29 @@
  * limitations under the License.
  */
 
+#ifndef incl_FOLLY_ATOMIC_HASH_UTILS_H
+#define incl_FOLLY_ATOMIC_HASH_UTILS_H
+
+#include <folly/Portability.h>
+#include <thread>
+
 // Some utilities used by AtomicHashArray and AtomicHashMap
 //
-// Note: no include guard; different -inl.h files include this and
-// undef it more than once in a translation unit.
 
-// override-include-guard
-#if !(defined(__x86__) || defined(__i386__) || FOLLY_X64)
-#define FOLLY_SPIN_WAIT(condition)              \
-  for (int counter = 0; condition; ++counter) { \
-    if (counter < 10000) continue;              \
-    pthread_yield();                            \
-  }
-#else
-#define FOLLY_SPIN_WAIT(condition)              \
-  for (int counter = 0; condition; ++counter) { \
-    if (counter < 10000) {                      \
-      asm volatile("pause");                    \
-      continue;                                 \
-    }                                           \
-    pthread_yield();                            \
+namespace folly { namespace detail {
+
+template <typename Cond>
+void atomic_hash_spin_wait(Cond condition) {
+  constexpr size_t kPauseLimit = 10000;
+  for (size_t i = 0; condition(); ++i) {
+    if (i < kPauseLimit) {
+      folly::asm_pause();
+    } else {
+      std::this_thread::yield();
+    }
   }
+}
+
+}} // namespace folly::detail
+
 #endif
-- 
2.34.1
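
Usage note (not part of the patch): the new helper takes any nullary callable
returning bool and spins while it returns true, issuing a CPU pause for the
first 10000 iterations and yielding the thread afterwards. A minimal sketch of
a standalone caller against the patched header; the `ready` flag and
`waitForReady` function are illustrative, not part of folly:

    #include <atomic>
    #include <folly/detail/AtomicHashUtils.h>

    std::atomic<bool> ready{false};

    void waitForReady() {
      // Spins while the lambda returns true: folly::asm_pause() for the
      // first 10000 iterations, then std::this_thread::yield() thereafter.
      folly::detail::atomic_hash_spin_wait([&] {
        return !ready.load(std::memory_order_acquire);
      });
    }

This mirrors how the AtomicHashArray/AtomicHashMap call sites above wrap
their former FOLLY_SPIN_WAIT conditions in lambdas, which lets the
platform-specific pause/yield choice live in one ordinary function instead
of a macro defined and #undef'd per translation unit.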