// Per-waiter queue link: each locker publishes itself here so the current
// owner can hand the lock directly to its successor (see lock()/unlock()).
// NULL while this node is the queue tail (no successor yet).
// NOTE(review): the `gate` flag member used by lock()/unlock() is declared
// outside this excerpt — confirm it is a std::atomic<int> (or similar).
7 std::atomic<mcs_node *> next;
18 // tail is null when lock is not held
// Tail of the waiter queue: points at the most recently arrived locker's
// node. Lockers enqueue with exchange(); unlock() CASes it back to NULL
// when there is no successor.
19 std::atomic<mcs_node *> m_tail;
// Sanity check that no thread still holds (or is queued on) the mutex.
// NOTE(review): presumably this line sits in ~mcs_mutex() — the enclosing
// function is outside this excerpt; confirm.
25 ASSERT( m_tail.load() == NULL );
// RAII scope guard: the MCS queue node lives on the caller's stack inside
// the guard, so lock acquisition needs no heap allocation. The guard must
// therefore outlive the critical section (it does, by construction).
31 mcs_node m_node; // node held on the stack
// Acquires on construction, releases on destruction.
// NOTE(review): single-argument constructor is not `explicit` — allows an
// implicit mcs_mutex* -> guard conversion; consider flagging upstream.
33 guard(mcs_mutex * t) : m_t(t) { t->lock(this); }
34 ~guard() { m_t->unlock(this); }
// Acquire the lock, queuing on the guard's stack-resident node.
// Protocol: initialize my node, atomically swap myself into m_tail to
// enqueue, link myself into my predecessor's `next`, then spin on my own
// `gate` flag until the predecessor's unlock() clears it.
// NOTE(review): fragmentary excerpt — the fast path taken when the
// exchange returns a NULL predecessor (lock was free), the spin-loop body,
// and the closing braces are on lines not shown here.
37 void lock(guard * I) {
38 mcs_node * me = &(I->m_node);
// Initialize my node before it becomes visible to other threads, hence
// relaxed ordering is sufficient here (publication below provides the
// release). gate==1 means "still waiting"; my predecessor stores 0 to it.
// NOTE(review): std::mo_relaxed / std::mo_acquire etc. are project
// aliases for std::memory_order_* declared outside this excerpt.
41 // not published yet so relaxed :
42 me->next.store(NULL, std::mo_relaxed );
43 me->gate.store(1, std::mo_relaxed );
// Enqueue: acq_rel so my node's initialization is released to whoever
// reads the tail, and I acquire the predecessor's node state.
45 // publish my node as the new tail :
46 mcs_node * pred = m_tail.exchange(me, std::mo_acq_rel);
// Window (*1): from here until the store below, unlock() on `pred` can
// observe me in m_tail while pred->next is still NULL — unlock() spins
// through that window (see unlock()).
49 // unlock of pred can see me in the tail before I fill next
// Release so the predecessor's load(acquire) of `next` sees my
// initialized node.
51 // publish me to previous lock-holder :
52 pred->next.store(me, std::mo_release );
// (*2): after this store I never touch pred again, so pred's stack node
// may be destroyed as soon as its owner leaves its critical section.
54 // (*2) pred not touched any more
// Spin until the predecessor hands off by storing 0 to my gate
// (unlock(), last line of this excerpt). acquire pairs with that
// release store so the critical section sees all of the predecessor's
// writes. rl::linear_backoff is a project/relacy backoff helper.
56 // now this is the spin -
57 // wait on predecessor setting my flag -
58 rl::linear_backoff bo;
59 while ( me->gate.load(std::mo_acquire) ) {
// Release the lock. If I have no successor, CAS m_tail from me back to
// NULL (fully unlocked). Otherwise wait out the (*1) race window until my
// successor has linked itself into me->next, then hand off by clearing
// the successor's gate.
// NOTE(review): fragmentary excerpt — the early return after a successful
// CAS, the spin-loop body re-loading `next`, and the closing braces are
// on lines not shown here.
65 void unlock(guard * I) {
66 mcs_node * me = &(I->m_node);
// acquire pairs with the successor's release store to me->next in lock().
68 mcs_node * next = me->next.load(std::mo_acquire);
// NOTE(review): presumably a `if (next == NULL)` guard (not shown)
// surrounds this CAS — a non-null `next` means a successor already
// linked in and the CAS would be pointless; confirm against the full
// source.
71 mcs_node * tail_was_me = me;
// release publishes my critical-section writes to the next locker that
// swaps into the (now NULL) tail.
72 if ( m_tail.compare_exchange_strong(
73 tail_was_me,NULL,std::mo_release) ) {
74 // got null in tail, mutex is unlocked
// CAS failed => some thread exchanged itself into m_tail after me but
// may not yet have stored itself into me->next (race window (*1) in
// lock()) — spin until the link appears.
78 // (*1) catch the race :
79 rl::linear_backoff bo;
81 next = me->next.load(std::mo_acquire);
// (*2): the successor's store to me->next is complete, so no other
// thread will touch my (stack-resident) node after this point — safe
// for my guard to be destroyed.
88 // (*2) - store to next must be done,
89 // so no locker can be viewing my node any more
// Hand-off: release store pairs with the successor's gate.load(acquire)
// spin in lock(), transferring ownership and my writes to it.
92 next->gate.store( 0, std::mo_release );