#include <cds/algo/backoff_strategy.h>

// Forward declaration

std::atomic<mcs_node *> next;
std::atomic<int> gate;

// tail is null when lock is not held
std::atomic<mcs_node *> m_tail;

mcs_mutex() { m_tail.store(nullptr); }
~mcs_mutex() { assert(m_tail.load() == nullptr); }

mcs_node m_node; // node held on the stack

// Call the wrapper (instrument every lock/unlock)
guard(mcs_mutex *t) : m_t(t) { t->lock(this); }
~guard() { m_t->unlock(this); }
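// guard is an RAII wrapper: constructing it acquires the mutex and destroying
// it releases it, and the MCS queue node itself lives inside the guard
// (typically on the locking thread's stack), so no allocation is needed.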
mcs_node *me = &(I->m_node);

// not published yet so relaxed :
me->next.store(nullptr, std::memory_order_relaxed);
me->gate.store(1, std::memory_order_relaxed);
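// gate == 1 means this node is still waiting for the lock; the predecessor
// hands the lock over by storing 0 into this gate in its unlock() (see below).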

// publish my node as the new tail :
mcs_node *pred = m_tail.exchange(me, std::memory_order_acq_rel);
if (pred != nullptr) {
// (*1) race here :
// unlock of pred can see me in the tail before I fill next

// If this store were relaxed, the store of 0 to gate could be ordered
// before my store of 1 above, and that lock would never end.
// publish me to previous lock-holder :
pred->next.store(me, std::memory_order_release);

// (*2) pred not touched any more

// now this is the spin -
// wait for the predecessor to clear my gate flag -
while (me->gate.load(std::memory_order_acquire)) {
void unlock(guard *I) {
mcs_node *me = &(I->m_node);
mcs_node *next = me->next.load(std::memory_order_acquire);
if (next == nullptr) {
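// No successor is visible yet: either the queue is really empty (then the
// CAS below swings m_tail back to nullptr and unlock is done), or a racing
// lock() has already swapped itself into the tail but has not yet published
// its node through next - this is the (*1) race handled below.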
mcs_node *tail_was_me = me;

// This was mo_acq_rel, which is stronger than necessary
if (m_tail.compare_exchange_strong(tail_was_me, nullptr,
                                   std::memory_order_release)) {
  // got null in tail, mutex is unlocked
  return;
}

// (*1) catch the race :
// spin until the racing lock() publishes its node through next
while ((next = me->next.load(std::memory_order_acquire)) == nullptr) {
}

// (*2) - store to next must be done,
// so no locker can be viewing my node any more
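// The release store below pairs with the acquire load in the successor's
// spin loop, so everything done in this critical section happens-before
// the successor's critical section.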
next->gate.store(0, std::memory_order_release);

} // namespace cds_others
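
// A minimal usage sketch (illustrative only, not part of the lock itself; the
// worker function, iteration count and shared counter below are assumptions
// made up for this example):
//
//   cds_others::mcs_mutex m;
//   int counter = 0;
//
//   void worker() {
//     for (int i = 0; i < 100000; ++i) {
//       cds_others::mcs_mutex::guard g(&m); // lock() runs in the constructor
//       ++counter;                          // critical section
//     }                                     // unlock() runs in the destructor
//   }
//
//   // With two std::thread objects running worker() and joined afterwards,
//   // counter ends up at exactly 200000: every increment is protected by the
//   // MCS lock.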