9 std::atomic<mcs_node *> next;	// successor in the MCS wait queue; set NULL by the owner in lock(), filled in by the next locker
10 std::atomic<int> gate;	// spin flag: 1 = still waiting on predecessor, 0 = lock handed to this node (cleared by predecessor's unlock())
20 // tail is null when lock is not held;
// otherwise it points at the most recent locker's stack node —
// lock() swaps itself in with exchange(), unlock() CASes itself back to NULL.
21 std::atomic<mcs_node *> m_tail;
// NOTE(review): disabled sanity check — presumably asserts the queue is empty
// at destruction time; confirm against the elided surrounding code.
27 //ASSERT( m_tail.load() == NULL );
30 // Each thread will have their own guard.
// RAII scope guard: holds the per-thread queue node on the caller's stack,
// acquires the mutex in the constructor and releases it in the destructor,
// so the lock is held exactly for the guard's lifetime.
34 mcs_node m_node; // node held on the stack
36 guard(mcs_mutex * t) : m_t(t) { t->lock(this); }   // acquire on construction
37 ~guard() { m_t->unlock(this); }                    // release on destruction
// MCS acquire: atomically append our stack node to the tail of the wait
// queue.  If there was a predecessor, link ourselves into pred->next and
// spin on our own `gate` flag until the predecessor's unlock() clears it.
// The wildcard(N) markers are memory-order placeholders for the SCFence
// inference tool; the trailing comment on each gives the expected order.
// NOTE(review): lines elided from this view (between 49 and 52) presumably
// hold the `if (pred)` fast path for an uncontended acquire — confirm.
40 void lock(guard * I) {
41 mcs_node * me = &(I->m_node);
// Initialize our node before it becomes reachable by other threads:
44 // not published yet so relaxed :
45 me->next.store(NULL, wildcard(1) ); // relaxed
46 me->gate.store(1, wildcard(2) ); // relaxed
48 // publish my node as the new tail :
49 mcs_node * pred = m_tail.exchange(me, wildcard(3)); // acq_rel
// Race window: after the exchange above but before the store below,
52 // unlock of pred can see me in the tail before I fill next
// — unlock() handles that by spinning until next becomes non-null (*1).
54 // publish me to previous lock-holder :
55 pred->next.store(me, wildcard(4) ); // release
57 // (*2) pred not touched any more
// — after this store we never dereference pred again, so its stack node
//   may be reclaimed by its owner as soon as it sees our link.
59 // now this is the spin -
60 // wait on predecessor setting my flag -
// Spin until pred's unlock() stores 0 into our gate; the acquire load
// is what synchronizes-with the predecessor's critical section.
61 rl::linear_backoff bo;
63 while ( (my_gate = me->gate.load(wildcard(5))) ) { // acquire
// MCS release: if we have no visible successor, try to CAS the tail from
// our own node back to NULL (queue empty => mutex fully released).  If the
// CAS fails, another locker has already exchanged itself into the tail but
// may not have linked pred->next yet (see race note (*1) in lock()), so we
// spin until our `next` pointer becomes visible, then hand the lock over
// by clearing the successor's gate.
69 void unlock(guard * I) {
70 mcs_node * me = &(I->m_node);
72 mcs_node * next = me->next.load(wildcard(6)); // acquire
// CAS needs a mutable expected-value slot; keep `me` itself untouched.
75 mcs_node * tail_was_me = me;
77 // SCFence infers acq_rel / release !!!
78 // Could be release if wildcard(8) is acquire
79 if ( (success = m_tail.compare_exchange_strong(
80 tail_was_me,NULL,wildcard(7))) ) { // acq_rel
81 // got null in tail, mutex is unlocked
// CAS failed: a successor exists but hasn't published itself into our
// `next` field yet — spin until the link appears.
85 // (*1) catch the race :
86 rl::linear_backoff bo;
88 // SCFence infers relaxed / acquire!!!
89 // Could be relaxed if wildcard(7) is acq_rel
90 next = me->next.load(wildcard(8)); // acquire
97 // (*2) - store to next must be done,
98 // so no locker can be viewing my node any more
// Hand the lock to the successor: the release store to its gate is what
// its spinning acquire load in lock() synchronizes-with.
101 next->gate.store( 0, wildcard(9) ); // release