#include <cds/algo/backoff_strategy.h>
#include <atomic>
#include <cassert>
#include <cstddef>

namespace cds_others {
// NOTE: this BackoffTraits declaration is reconstructed (an assumption); the
// original fragment contains only the two definitions of its static bounds.
struct BackoffTraits : public cds::backoff::exponential_const_traits {
  static size_t lower_bound;
  static size_t upper_bound;
};
size_t BackoffTraits::lower_bound = 16;
size_t BackoffTraits::upper_bound = 1024;
// Forward declarations
struct mcs_node;
struct mcs_mutex;

// Per-thread queue node: 'next' links to the successor in the wait queue,
// 'gate' is 1 while its owner must keep spinning.
struct mcs_node {
  std::atomic<mcs_node *> next;
  std::atomic<int> gate;
};
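
// Protocol overview (summary of the code below): lock() appends the caller's
// node to the queue with an atomic exchange on m_tail.  With no predecessor
// the lock is taken immediately; otherwise the caller links itself behind the
// predecessor and spins on its own 'gate'.  unlock() either resets m_tail to
// nullptr when nobody is queued, or hands the lock over by clearing the
// successor's 'gate'.  Each thread spins only on its own node, which is the
// defining property of an MCS-style queue lock.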

struct mcs_mutex {
  // tail is null when the lock is not held
  std::atomic<mcs_node *> m_tail;

  mcs_mutex() { m_tail.store(nullptr); }
  ~mcs_mutex() { assert(m_tail.load() == nullptr); }

  // RAII guard: acquires the lock in its constructor, releases it in its
  // destructor; the queue node lives on the caller's stack.
  class guard {
  public:
    mcs_mutex *m_t;
    mcs_node m_node; // node held on the stack

    // Call the wrapper (instrument every lock/unlock)
    guard(mcs_mutex *t) : m_t(t) { t->lock(this); }
    ~guard() { m_t->unlock(this); }
  };

  void lock(guard *I) {
    mcs_node *me = &(I->m_node);

    // set up my node; not published yet, so relaxed stores are enough:
    me->next.store(nullptr, std::memory_order_relaxed);
    me->gate.store(1, std::memory_order_relaxed);

    // publish my node as the new tail:
    mcs_node *pred = m_tail.exchange(me, std::memory_order_acq_rel);
    if (pred != nullptr) {
      // (*1) race window: the unlock of pred can already see me in the tail
      // before I have filled in pred->next below.

      // This store must be release: it keeps my gate = 1 store above visible
      // to the predecessor before it can follow 'next' and clear my gate;
      // with a relaxed store the predecessor's 0 could be ordered before my
      // 1, and this lock() would spin forever.
      // publish me to the previous lock holder:
      pred->next.store(me, std::memory_order_release);

      // (*2) pred is not touched any more; it may already have unlocked and
      // its stack node may be gone.

      // now this is the spin:
      // wait for the predecessor to clear my flag.
      // NOTE: this back-off spin body is assumed from BackoffTraits above.
      cds::backoff::exponential<BackoffTraits> back_off;
      while (me->gate.load(std::memory_order_acquire)) {
        back_off();
      }
    }
  }

  void unlock(guard *I) {
    mcs_node *me = &(I->m_node);

    mcs_node *next = me->next.load(std::memory_order_acquire);
    if (next == nullptr) {
      mcs_node *tail_was_me = me;

      // This was mo_acq_rel, which is stronger than necessary: release is
      // enough, because the next lock() acquires m_tail in its exchange.
      if (m_tail.compare_exchange_strong(tail_was_me, nullptr,
                                         std::memory_order_release)) {
        // got null in tail, mutex is unlocked
        return;
      }

      // (*1) catch the race: a successor has already swapped itself into
      // m_tail but has not yet filled in my 'next', so wait for it.
      // NOTE: this back-off spin body is assumed, as in lock().
      cds::backoff::exponential<BackoffTraits> back_off;
      while ((next = me->next.load(std::memory_order_acquire)) == nullptr) {
        back_off();
      }
    }

    // (*2) the successor's store to my 'next' is done and, per (*2) in
    // lock(), it never touches my node again, so no other locker can be
    // viewing my node any more.

    // let the next waiter in:
    next->gate.store(0, std::memory_order_release);
  }
};

} // namespace cds_others
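
// Usage sketch (not part of the original header). It shows how the RAII
// guard is meant to be used: constructing an mcs_mutex::guard enqueues the
// calling thread and blocks until it owns the lock; leaving the scope
// releases it. The macro, function, and variable names below are
// illustrative assumptions, not part of the library.
#ifdef MCS_LOCK_USAGE_EXAMPLE
#include <thread>
#include <vector>

static cds_others::mcs_mutex g_mutex;
static int g_counter = 0; // protected by g_mutex

static void worker() {
  for (int i = 0; i < 100000; ++i) {
    cds_others::mcs_mutex::guard hold(&g_mutex); // acquire
    ++g_counter;                                 // critical section
  }                                              // release on scope exit
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i)
    threads.emplace_back(worker);
  for (auto &t : threads)
    t.join();
  return g_counter == 4 * 100000 ? 0 : 1; // 0 on success
}
#endif // MCS_LOCK_USAGE_EXAMPLE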