/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * N.B. You most likely do _not_ want to use RWSpinLock or any other
 * kind of spinlock. Use SharedMutex instead.
 *
 * In short, spinlocks in preemptive multi-tasking operating systems
 * have serious problems and fast mutexes like SharedMutex are almost
 * certainly the better choice, because letting the OS scheduler put a
 * thread to sleep is better for system responsiveness and throughput
 * than wasting a timeslice repeatedly querying a lock held by a
 * thread that's blocked, and you can't prevent userspace
 * programs from using a timeslice while they hold a spinlock.
 *
 * Spinlocks in an operating system kernel make much more sense than
 * they do in userspace.
 * -------------------------------------------------------------------
 *
 * Two Read-Write spin lock implementations.
 *
 * Ref: http://locklessinc.com/articles/locks
 *
 * Both locks here are faster than pthread_rwlock and have very low
 * overhead (usually 20-30ns). They don't use any system mutexes and
 * are very compact (4/8 bytes), so are suitable for per-instance
 * based locking, particularly when contention is not expected.
 * For a spinlock, RWSpinLock is a reasonable choice. (See the note
 * above for why a spin lock is frequently a bad idea generally.)
 * RWSpinLock has minimal overhead, and comparable contention
 * performance when the number of competing threads is less than or
 * equal to the number of logical CPUs. Even as the number of
 * threads gets larger, RWSpinLock can still be very competitive in
 * READ, although it is slower on WRITE, and also inherently unfair
 * to writers.
 *
 * RWTicketSpinLock shows more balanced READ/WRITE performance. If
 * your application really needs a lot more threads, and a
 * higher-priority writer, prefer one of the RWTicketSpinLock locks.
 * Caveats:
 *
 * RWTicketSpinLock locks can only be used with GCC on x86/x86-64
 * based systems.
 *
 * RWTicketSpinLock<32> only allows up to 2^8 - 1 concurrent
 * readers and writers.
 *
 * RWTicketSpinLock<64> only allows up to 2^16 - 1 concurrent
 * readers and writers.
 *
 * RWTicketSpinLock<..., true> (kFavorWriter = true, that is, strict
 * writer priority) is NOT reentrant, even for lock_shared().
 * The lock will not grant any new shared (read) accesses while a thread
 * attempting to acquire the lock in write mode is blocked. (That is,
 * if the lock is held in shared mode by N threads, and a thread attempts
 * to acquire it in write mode, no one else can acquire it in shared mode
 * until these N threads release the lock and then the blocked thread
 * acquires and releases the exclusive lock.) This also applies for
 * attempts to reacquire the lock in shared mode by threads that already
 * hold it in shared mode, making the lock non-reentrant.
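 *
 * For example (a hedged sketch, not code from this file): with strict
 * writer priority, a single thread can deadlock itself if a writer
 * arrives between its two lock_shared() calls, because the second
 * shared acquisition is fenced out along with all new readers:
 *
 *   folly::RWTicketSpinLockT<32, true> lock;
 *   lock.lock_shared();  // first read acquisition succeeds
 *   // ... another thread calls lock.lock() and begins to wait ...
 *   lock.lock_shared();  // spins forever: no new shared grants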
 *
 * RWSpinLock handles 2^30 - 1 concurrent readers.
 *
 * @author Xin Liu <xliux@fb.com>
 */

#pragma once

/*
========================================================================
Benchmark on (Intel(R) Xeon(R) CPU L5630 @ 2.13GHz)  8 cores(16 HTs)
========================================================================

------------------------------------------------------------------------------
1. Single thread benchmark (read/write lock + unlock overhead)
Benchmark                                       Iters   Total t   t/iter iter/sec
-------------------------------------------------------------------------------
*      BM_RWSpinLockRead                       100000  1.786 ms  17.86 ns   53.4M
+30.5% BM_RWSpinLockWrite                      100000  2.331 ms  23.31 ns  40.91M
+85.7% BM_RWTicketSpinLock32Read               100000  3.317 ms  33.17 ns  28.75M
+96.0% BM_RWTicketSpinLock32Write              100000    3.5 ms     35 ns  27.25M
+85.6% BM_RWTicketSpinLock64Read               100000  3.315 ms  33.15 ns  28.77M
+96.0% BM_RWTicketSpinLock64Write              100000    3.5 ms     35 ns  27.25M
+85.7% BM_RWTicketSpinLock32FavorWriterRead    100000  3.317 ms  33.17 ns  28.75M
+29.7% BM_RWTicketSpinLock32FavorWriterWrite   100000  2.316 ms  23.16 ns  41.18M
+85.3% BM_RWTicketSpinLock64FavorWriterRead    100000  3.309 ms  33.09 ns  28.82M
+30.2% BM_RWTicketSpinLock64FavorWriterWrite   100000  2.325 ms  23.25 ns  41.02M
+ 175% BM_PThreadRWMutexRead                   100000  4.917 ms  49.17 ns   19.4M
+ 166% BM_PThreadRWMutexWrite                  100000  4.757 ms  47.57 ns  20.05M
------------------------------------------------------------------------------
2. Contention Benchmark: 90% read, 10% write
Benchmark                   hits      average    min      max        sigma
------------------------------------------------------------------------------
---------- 8 threads ------------
RWSpinLock       Write      142666    220ns     78ns     40.8us    269ns
RWSpinLock       Read      1282297    222ns     80ns     37.7us    248ns
RWTicketSpinLock Write       85692    209ns     71ns     17.9us    252ns
RWTicketSpinLock Read       769571    215ns     78ns     33.4us    251ns
pthread_rwlock_t Write       84248   2.48us     99ns      269us   8.19us
pthread_rwlock_t Read       761646    933ns    101ns      374us   3.25us

---------- 16 threads ------------
RWSpinLock       Write      124236    237ns     78ns      261us    801ns
RWSpinLock       Read      1115807    236ns     78ns     2.27ms   2.17us
RWTicketSpinLock Write       81781    231ns     71ns     31.4us    351ns
RWTicketSpinLock Read       734518    238ns     78ns     73.6us    379ns
pthread_rwlock_t Write       83363   7.12us     99ns      785us   28.1us
pthread_rwlock_t Read       754978   2.18us    101ns     1.02ms   14.3us

---------- 50 threads ------------
RWSpinLock       Write      131142   1.37us     82ns     7.53ms   68.2us
RWSpinLock       Read      1181240    262ns     78ns     6.62ms   12.7us
RWTicketSpinLock Write       83045    397ns     73ns     7.01ms   31.5us
RWTicketSpinLock Read       744133    386ns     78ns       11ms   31.4us
pthread_rwlock_t Write       80849    112us    103ns     4.52ms    263us
pthread_rwlock_t Read       728698     24us    101ns     7.28ms    194us
*/

#include <folly/Portability.h>
#include <folly/portability/Asm.h>
#if defined(__GNUC__) && (defined(__i386) || FOLLY_X64 || defined(ARCH_K8))
#define RW_SPINLOCK_USE_X86_INTRINSIC_
#include <x86intrin.h>
#elif defined(_MSC_VER) && defined(FOLLY_X64)
#define RW_SPINLOCK_USE_X86_INTRINSIC_
#else
#undef RW_SPINLOCK_USE_X86_INTRINSIC_
#endif
// iOS doesn't define _mm_cvtsi64_si128 and friends
#if (FOLLY_SSE >= 2) && !FOLLY_MOBILE
#define RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
#else
#undef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
#endif
#include <atomic>
#include <thread>
#include <utility>

#include <glog/logging.h>

#include <folly/Likely.h>
/*
 * A simple, small (4-bytes), but unfair rwlock. Use it when you want
 * a nice writer and don't expect a lot of write/read contention, or
 * when you need small rwlocks since you are creating a large number
 * of them.
 *
 * Note that the unfairness here is extreme: if the lock is
 * continually accessed for read, writers will never get a chance. If
 * the lock can be that highly contended this class is probably not an
 * ideal choice anyway.
 *
 * It currently implements most of the Lockable, SharedLockable and
 * UpgradeLockable concepts except the TimedLockable related locking/unlocking
 * interfaces.
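 *
 * A minimal usage sketch (an illustration, assuming <mutex> and
 * <shared_mutex> are available; RWSpinLock satisfies the concepts the
 * standard guards require):
 *
 *   folly::RWSpinLock lock;
 *
 *   {
 *     std::lock_guard<folly::RWSpinLock> g(lock);   // exclusive
 *     // ... mutate the protected state ...
 *   }
 *   {
 *     std::shared_lock<folly::RWSpinLock> g(lock);  // shared
 *     // ... read the protected state ...
 *   }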
 */
class RWSpinLock {
  enum : int32_t { READER = 4, UPGRADED = 2, WRITER = 1 };
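
  // Layout sketch (derived from the constants above): bits_ packs a reader
  // count in the high bits plus two flag bits, i.e.
  //   bits_ == (#readers * READER) | (UPGRADED if an upgrader waits)
  //            | (WRITER if a writer holds the lock)
  // e.g. three readers plus a pending upgrader => bits_ == 3 * 4 + 2 == 14.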
 public:
  constexpr RWSpinLock() : bits_(0) {}

  RWSpinLock(RWSpinLock const&) = delete;
  RWSpinLock& operator=(RWSpinLock const&) = delete;
  // Lockable Concept
  void lock() {
    uint_fast32_t count = 0;
    while (!LIKELY(try_lock())) {
      if (++count > 1000) std::this_thread::yield();
    }
  }

  // Writer is responsible for clearing up both the UPGRADED and WRITER bits.
  void unlock() {
    static_assert(READER > WRITER + UPGRADED, "wrong bits!");
    bits_.fetch_and(~(WRITER | UPGRADED), std::memory_order_release);
  }
  // SharedLockable Concept
  void lock_shared() {
    uint_fast32_t count = 0;
    while (!LIKELY(try_lock_shared())) {
      if (++count > 1000) std::this_thread::yield();
    }
  }

  void unlock_shared() {
    bits_.fetch_add(-READER, std::memory_order_release);
  }
  // Downgrade the lock from writer status to reader status.
  void unlock_and_lock_shared() {
    bits_.fetch_add(READER, std::memory_order_acquire);
    unlock();
  }
  // UpgradeLockable Concept
  void lock_upgrade() {
    uint_fast32_t count = 0;
    while (!try_lock_upgrade()) {
      if (++count > 1000) std::this_thread::yield();
    }
  }

  void unlock_upgrade() {
    bits_.fetch_add(-UPGRADED, std::memory_order_acq_rel);
  }
  // unlock upgrade and try to acquire write lock
  void unlock_upgrade_and_lock() {
    int64_t count = 0;
    while (!try_unlock_upgrade_and_lock()) {
      if (++count > 1000) std::this_thread::yield();
    }
  }

  // unlock upgrade and read lock atomically
  void unlock_upgrade_and_lock_shared() {
    bits_.fetch_add(READER - UPGRADED, std::memory_order_acq_rel);
  }
  // write unlock and upgrade lock atomically
  void unlock_and_lock_upgrade() {
    // need to do it in two steps here -- as the UPGRADED bit might be OR-ed
    // at the same time when other threads are trying to do try_lock_upgrade().
    bits_.fetch_or(UPGRADED, std::memory_order_acquire);
    bits_.fetch_add(-WRITER, std::memory_order_release);
  }
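
  // A typical upgrade sequence (a sketch; single upgrader assumed, and
  // `writeNeeded` is a hypothetical predicate):
  //
  //   lock.lock_upgrade();               // fence out new readers
  //   if (writeNeeded) {
  //     lock.unlock_upgrade_and_lock();  // spins until the last reader leaves
  //     // ... mutate ...
  //     lock.unlock();                   // clears both WRITER and UPGRADED
  //   } else {
  //     lock.unlock_upgrade();
  //   }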
  // Attempt to acquire writer permission. Return false if we didn't get it.
  bool try_lock() {
    int32_t expect = 0;
    return bits_.compare_exchange_strong(expect, WRITER,
        std::memory_order_acq_rel);
  }
  // Try to get reader permission on the lock. This can fail if we
  // find out someone is a writer or upgrader.
  // Setting the UPGRADED bit would allow a writer-to-be to indicate
  // its intention to write and block any new readers while waiting
  // for existing readers to finish and release their read locks. This
  // helps avoid starving writers (promoted from upgraders).
  bool try_lock_shared() {
    // fetch_add is considerably (100%) faster than compare_exchange,
    // so here we are optimizing for the common (lock success) case.
    int32_t value = bits_.fetch_add(READER, std::memory_order_acquire);
    if (UNLIKELY(value & (WRITER | UPGRADED))) {
      bits_.fetch_add(-READER, std::memory_order_release);
      return false;
    }
    return true;
  }
  // try to unlock upgrade and write lock atomically
  bool try_unlock_upgrade_and_lock() {
    int32_t expect = UPGRADED;
    return bits_.compare_exchange_strong(expect, WRITER,
        std::memory_order_acq_rel);
  }
  // try to acquire an upgradable lock.
  bool try_lock_upgrade() {
    int32_t value = bits_.fetch_or(UPGRADED, std::memory_order_acquire);

    // Note: when failed, we cannot flip the UPGRADED bit back,
    // as in this case there is either another upgrade lock or a write lock.
    // If it's a write lock, the bit will get cleared up when that lock's done
    // with unlock().
    return ((value & (UPGRADED | WRITER)) == 0);
  }
  // mainly for debugging purposes.
  int32_t bits() const { return bits_.load(std::memory_order_acquire); }

  class ReadHolder;
  class UpgradedHolder;
  class WriteHolder;
  class ReadHolder {
   public:
    explicit ReadHolder(RWSpinLock* lock) : lock_(lock) {
      if (lock_) lock_->lock_shared();
    }

    explicit ReadHolder(RWSpinLock& lock) : lock_(&lock) {
      lock_->lock_shared();
    }

    ReadHolder(ReadHolder&& other) noexcept : lock_(other.lock_) {
      other.lock_ = nullptr;
    }

    explicit ReadHolder(UpgradedHolder&& upgraded) : lock_(upgraded.lock_) {
      upgraded.lock_ = nullptr;
      if (lock_) lock_->unlock_upgrade_and_lock_shared();
    }

    explicit ReadHolder(WriteHolder&& writer) : lock_(writer.lock_) {
      writer.lock_ = nullptr;
      if (lock_) lock_->unlock_and_lock_shared();
    }
    ReadHolder& operator=(ReadHolder&& other) {
      using std::swap;
      swap(lock_, other.lock_);
      return *this;
    }

    ReadHolder(const ReadHolder& other) = delete;
    ReadHolder& operator=(const ReadHolder& other) = delete;
    ~ReadHolder() { if (lock_) lock_->unlock_shared(); }

    void reset(RWSpinLock* lock = nullptr) {
      if (lock == lock_) return;
      if (lock_) lock_->unlock_shared();
      lock_ = lock;
      if (lock_) lock_->lock_shared();
    }
    void swap(ReadHolder* other) {
      std::swap(lock_, other->lock_);
    }

   private:
    friend class UpgradedHolder;
    friend class WriteHolder;
    RWSpinLock* lock_;
  };
  class UpgradedHolder {
   public:
    explicit UpgradedHolder(RWSpinLock* lock) : lock_(lock) {
      if (lock_) lock_->lock_upgrade();
    }

    explicit UpgradedHolder(RWSpinLock& lock) : lock_(&lock) {
      lock_->lock_upgrade();
    }

    explicit UpgradedHolder(WriteHolder&& writer) {
      lock_ = writer.lock_;
      writer.lock_ = nullptr;
      if (lock_) lock_->unlock_and_lock_upgrade();
    }

    UpgradedHolder(UpgradedHolder&& other) noexcept : lock_(other.lock_) {
      other.lock_ = nullptr;
    }
    UpgradedHolder& operator=(UpgradedHolder&& other) {
      using std::swap;
      swap(lock_, other.lock_);
      return *this;
    }

    UpgradedHolder(const UpgradedHolder& other) = delete;
    UpgradedHolder& operator=(const UpgradedHolder& other) = delete;

    ~UpgradedHolder() { if (lock_) lock_->unlock_upgrade(); }
    void reset(RWSpinLock* lock = nullptr) {
      if (lock == lock_) return;
      if (lock_) lock_->unlock_upgrade();
      lock_ = lock;
      if (lock_) lock_->lock_upgrade();
    }

    void swap(UpgradedHolder* other) {
      using std::swap;
      swap(lock_, other->lock_);
    }

   private:
    friend class WriteHolder;
    friend class ReadHolder;
    RWSpinLock* lock_;
  };
  class WriteHolder {
   public:
    explicit WriteHolder(RWSpinLock* lock) : lock_(lock) {
      if (lock_) lock_->lock();
    }

    explicit WriteHolder(RWSpinLock& lock) : lock_(&lock) {
      lock_->lock();
    }

    // promoted from an upgrade lock holder
    explicit WriteHolder(UpgradedHolder&& upgraded) {
      lock_ = upgraded.lock_;
      upgraded.lock_ = nullptr;
      if (lock_) lock_->unlock_upgrade_and_lock();
    }

    WriteHolder(WriteHolder&& other) noexcept : lock_(other.lock_) {
      other.lock_ = nullptr;
    }
    WriteHolder& operator=(WriteHolder&& other) {
      using std::swap;
      swap(lock_, other.lock_);
      return *this;
    }

    WriteHolder(const WriteHolder& other) = delete;
    WriteHolder& operator=(const WriteHolder& other) = delete;

    ~WriteHolder() { if (lock_) lock_->unlock(); }
    void reset(RWSpinLock* lock = nullptr) {
      if (lock == lock_) return;
      if (lock_) lock_->unlock();
      lock_ = lock;
      if (lock_) lock_->lock();
    }

    void swap(WriteHolder* other) {
      using std::swap;
      swap(lock_, other->lock_);
    }

   private:
    friend class ReadHolder;
    friend class UpgradedHolder;
    RWSpinLock* lock_;
  };
 private:
  std::atomic<int32_t> bits_;
};
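
// Example use of the RAII holders above (a sketch; the protected data and
// the decision to write are hypothetical):
//
//   folly::RWSpinLock lock;
//
//   {
//     folly::RWSpinLock::ReadHolder guard(&lock);
//     // ... read the shared state ...
//   } // ~ReadHolder() calls unlock_shared()
//
//   {
//     folly::RWSpinLock::UpgradedHolder ug(&lock);
//     // ... inspect; decide we must write ...
//     folly::RWSpinLock::WriteHolder wg(std::move(ug)); // promote in place
//     // ... mutate the shared state ...
//   }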
#ifdef RW_SPINLOCK_USE_X86_INTRINSIC_
// A more balanced Read-Write spin lock implemented based on GCC intrinsics.

namespace detail {
template <size_t kBitWidth> struct RWTicketIntTrait {
  static_assert(kBitWidth == 32 || kBitWidth == 64,
      "bit width has to be either 32 or 64");
};
template <>
struct RWTicketIntTrait<64> {
  typedef uint64_t FullInt;
  typedef uint32_t HalfInt;
  typedef uint16_t QuarterInt;

#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
  static __m128i make128(const uint16_t v[4]) {
    return _mm_set_epi16(0, 0, 0, 0,
        short(v[3]), short(v[2]), short(v[1]), short(v[0]));
  }
  static inline __m128i fromInteger(uint64_t from) {
    return _mm_cvtsi64_si128(int64_t(from));
  }
  static inline uint64_t toInteger(__m128i in) {
    return uint64_t(_mm_cvtsi128_si64(in));
  }
  static inline uint64_t addParallel(__m128i in, __m128i kDelta) {
    return toInteger(_mm_add_epi16(in, kDelta));
  }
#endif
};
template <>
struct RWTicketIntTrait<32> {
  typedef uint32_t FullInt;
  typedef uint16_t HalfInt;
  typedef uint8_t QuarterInt;

#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
  static __m128i make128(const uint8_t v[4]) {
    return _mm_set_epi8(
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        char(v[3]), char(v[2]), char(v[1]), char(v[0]));
  }
  static inline __m128i fromInteger(uint32_t from) {
    return _mm_cvtsi32_si128(int32_t(from));
  }
  static inline uint32_t toInteger(__m128i in) {
    return uint32_t(_mm_cvtsi128_si32(in));
  }
  static inline uint32_t addParallel(__m128i in, __m128i kDelta) {
    return toInteger(_mm_add_epi8(in, kDelta));
  }
#endif
};
} // namespace detail
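
// A note on the SSE path in the traits above (an illustration, not extra
// API): packing the whole ticket into an XMM register lets two QuarterInt
// fields be bumped with a single vector add instead of two separate
// read-modify-write steps, e.g.
//
//   QuarterInt d[4] = {1, 1, 0, 0};  // +1 write, +1 read, users untouched
//   t.whole = IntTraitType::addParallel(
//       IntTraitType::fromInteger(t.whole), IntTraitType::make128(d));
//
// which is how unlock() and try_lock_shared() below use them.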
template <size_t kBitWidth, bool kFavorWriter = false>
class RWTicketSpinLockT {
  typedef detail::RWTicketIntTrait<kBitWidth> IntTraitType;
  typedef typename detail::RWTicketIntTrait<kBitWidth>::FullInt FullInt;
  typedef typename detail::RWTicketIntTrait<kBitWidth>::HalfInt HalfInt;
  typedef typename detail::RWTicketIntTrait<kBitWidth>::QuarterInt
      QuarterInt;

  union RWTicket {
    constexpr RWTicket() : whole(0) {}
    FullInt whole;
    HalfInt readWrite;
    __extension__ struct {
      QuarterInt write;
      QuarterInt read;
      QuarterInt users;
    };
  } ticket;

 private: // Some x64-specific utilities for atomic access to ticket.
  template <class T> static T load_acquire(T* addr) {
    T t = *addr; // acquire barrier
    asm_volatile_memory();
    return t;
  }

  template <class T>
  static void store_release(T* addr, T v) {
    asm_volatile_memory();
    *addr = v; // release barrier
  }
 public:
  constexpr RWTicketSpinLockT() {}

  RWTicketSpinLockT(RWTicketSpinLockT const&) = delete;
  RWTicketSpinLockT& operator=(RWTicketSpinLockT const&) = delete;

  void lock() {
    if (kFavorWriter) {
      writeLockAggressive();
    } else {
      writeLockNice();
    }
  }
  /*
   * Both try_lock and try_lock_shared diverge in our implementation from the
   * lock algorithm described in the link above.
   *
   * In the read case, it is undesirable that the readers could wait
   * for another reader (before increasing ticket.read in the other
   * implementation). Our approach gives up on
   * first-come-first-serve, but our benchmarks showed improved
   * performance for both readers and writers under heavily contended
   * cases, particularly when the number of threads exceeds the number
   * of logical CPUs.
   *
   * We have writeLockAggressive() using the original implementation
   * for a writer, which gives some advantage to the writer over the
   * readers---for that path it is guaranteed that the writer will
   * acquire the lock after all the existing readers exit.
   */
  bool try_lock() {
    RWTicket t;
    FullInt old = t.whole = load_acquire(&ticket.whole);
    if (t.users != t.write) return false;
    ++t.users;
    return __sync_bool_compare_and_swap(&ticket.whole, old, t.whole);
  }
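
  // Worked example (a sketch): starting from write == read == users == 0,
  // try_lock() sees users == write, takes ticket 0 by bumping users to 1,
  // and owns the lock; unlock() then bumps write (and read) to 1, so the
  // holder of the next ticket -- a queued writer or reader -- may proceed.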
  /*
   * Call this if you want to prioritize writer to avoid starvation.
   * Unlike writeLockNice, immediately acquires the write lock when
   * the existing readers (arriving before the writer) finish their
   * turns.
   */
  void writeLockAggressive() {
    // std::this_thread::yield() is needed here to avoid a pathology if the
    // number of threads attempting concurrent writes is >= the number of real
    // cores allocated to this process. This is less likely than the
    // corresponding situation in lock_shared(), but we still want to
    // avoid it.
    uint_fast32_t count = 0;
    QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
    while (val != load_acquire(&ticket.write)) {
      asm_volatile_pause();
      if (UNLIKELY(++count > 1000)) std::this_thread::yield();
    }
  }
  // Call this when the writer should be nicer to the readers.
  void writeLockNice() {
    // Here it doesn't cpu-relax the writer.
    //
    // This is because usually we have many more readers than the
    // writers, so the writer has less chance to get the lock when
    // there are a lot of competing readers. The aggressive spinning
    // can help to avoid starving writers.
    //
    // We don't worry about std::this_thread::yield() here because the caller
    // has already explicitly abandoned fairness.
    while (!try_lock()) {}
  }
  // Atomically unlock the write-lock from writer and acquire the read-lock.
  void unlock_and_lock_shared() {
    QuarterInt val = __sync_fetch_and_add(&ticket.read, 1);
    (void)val;
  }
  // Release writer permission on the lock.
  void unlock() {
    RWTicket t;
    t.whole = load_acquire(&ticket.whole);
    FullInt old = t.whole;

#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
    // SSE2 can reduce the lock and unlock overhead by 10%
    static const QuarterInt kDeltaBuf[4] = {1, 1, 0, 0}; // write/read/user
    static const __m128i kDelta = IntTraitType::make128(kDeltaBuf);
    __m128i m = IntTraitType::fromInteger(old);
    t.whole = IntTraitType::addParallel(m, kDelta);
#else
    ++t.read;
    ++t.write;
#endif
    store_release(&ticket.readWrite, t.readWrite);
  }
  void lock_shared() {
    // std::this_thread::yield() is important here because we can't grab the
    // shared lock if there is a pending writeLockAggressive, so we
    // need to let threads that already have a shared lock complete.
    uint_fast32_t count = 0;
    while (!LIKELY(try_lock_shared())) {
      asm_volatile_pause();
      if (UNLIKELY((++count & 1023) == 0)) std::this_thread::yield();
    }
  }
  bool try_lock_shared() {
    RWTicket t, old;
    old.whole = t.whole = load_acquire(&ticket.whole);
    old.users = old.read;
#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
    // SSE2 may reduce the total lock and unlock overhead by 10%
    static const QuarterInt kDeltaBuf[4] = {0, 1, 1, 0}; // write/read/user
    static const __m128i kDelta = IntTraitType::make128(kDeltaBuf);
    __m128i m = IntTraitType::fromInteger(old.whole);
    t.whole = IntTraitType::addParallel(m, kDelta);
#else
    ++t.read;
    ++t.users;
#endif
    return __sync_bool_compare_and_swap(&ticket.whole, old.whole, t.whole);
  }
  void unlock_shared() {
    __sync_fetch_and_add(&ticket.write, 1);
  }
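
  // Note (an observation on the code above): a departing reader bumps
  // `write`, not `read`; `read` was already advanced on entry, so advancing
  // `write` is what lets a waiting writer's ticket finally match.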
  typedef RWTicketSpinLockT<kBitWidth, kFavorWriter> RWSpinLock;
  class ReadHolder {
   public:
    ReadHolder(ReadHolder const&) = delete;
    ReadHolder& operator=(ReadHolder const&) = delete;

    explicit ReadHolder(RWSpinLock* lock) : lock_(lock) {
      if (lock_) lock_->lock_shared();
    }

    explicit ReadHolder(RWSpinLock& lock) : lock_(&lock) {
      if (lock_) lock_->lock_shared();
    }

    // atomically unlock the write-lock from writer and acquire the read-lock
    explicit ReadHolder(WriteHolder* writer) : lock_(nullptr) {
      std::swap(this->lock_, writer->lock_);
      if (lock_) {
        lock_->unlock_and_lock_shared();
      }
    }
    ~ReadHolder() {
      if (lock_) lock_->unlock_shared();
    }

    void reset(RWSpinLock* lock = nullptr) {
      if (lock_) lock_->unlock_shared();
      lock_ = lock;
      if (lock_) lock_->lock_shared();
    }

    void swap(ReadHolder* other) {
      std::swap(this->lock_, other->lock_);
    }

   private:
    RWSpinLock* lock_;
  };
  class WriteHolder {
   public:
    WriteHolder(WriteHolder const&) = delete;
    WriteHolder& operator=(WriteHolder const&) = delete;

    explicit WriteHolder(RWSpinLock* lock) : lock_(lock) {
      if (lock_) lock_->lock();
    }

    explicit WriteHolder(RWSpinLock& lock) : lock_(&lock) {
      if (lock_) lock_->lock();
    }
    ~WriteHolder() {
      if (lock_) lock_->unlock();
    }

    void reset(RWSpinLock* lock = nullptr) {
      if (lock == lock_) return;
      if (lock_) lock_->unlock();
      lock_ = lock;
      if (lock_) lock_->lock();
    }

    void swap(WriteHolder* other) {
      std::swap(this->lock_, other->lock_);
    }

   private:
    friend class ReadHolder;
    RWSpinLock* lock_;
  };
};

typedef RWTicketSpinLockT<32> RWTicketSpinLock32;
typedef RWTicketSpinLockT<64> RWTicketSpinLock64;
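
// Usage sketch (illustrative only): the ticket locks expose the same basic
// interface as RWSpinLock, so they can be swapped in where more balanced
// reader/writer throughput is needed:
//
//   folly::RWTicketSpinLock32 lock;
//   lock.lock();          // exclusive; uses writeLockNice() by default
//   lock.unlock();
//   lock.lock_shared();   // shared
//   lock.unlock_shared();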
#endif // RW_SPINLOCK_USE_X86_INTRINSIC_
#ifdef RW_SPINLOCK_USE_X86_INTRINSIC_
#undef RW_SPINLOCK_USE_X86_INTRINSIC_
#endif