add seqlock
[model-checker-benchmarks.git] / mcs-lock / mcs-lock-wildcard.h
1 // mcs on stack
2
3 #include <stdatomic.h>
4 #include <unrelacy.h>
5 #include "wildcard.h" 
6
7
// One queue node per waiter. The node lives on the waiting thread's own
// stack (inside mcs_mutex::guard), so no dynamic allocation is needed.
struct mcs_node {
        std::atomic<mcs_node *> next; // next waiter in the queue, or null if I am the tail
        std::atomic<int> gate;        // 1 while this waiter must spin; 0 once the lock is handed over

        mcs_node() {
                // nullptr rather than the integer literal 0 for the pointer store.
                // The node is not yet shared, so the (seq_cst) default order is harmless here.
                next.store(nullptr);
                gate.store(0);
        }
};
17
18 struct mcs_mutex {
19 public:
20         // tail is null when lock is not held
21         std::atomic<mcs_node *> m_tail;
22
23         mcs_mutex() {
24                 m_tail.store( NULL);
25         }
26         ~mcs_mutex() {
27                 //ASSERT( m_tail.load() == NULL );
28         }
29
30         // Each thread will have their own guard.
31         class guard {
32         public:
33                 mcs_mutex * m_t;
34                 mcs_node    m_node; // node held on the stack
35
36                 guard(mcs_mutex * t) : m_t(t) { t->lock(this); }
37                 ~guard() { m_t->unlock(this); }
38         };
39
40         void lock(guard * I) {
41                 mcs_node * me = &(I->m_node);
42
43                 // set up my node :
44                 // not published yet so relaxed :
45                 me->next.store(NULL, wildcard(1) ); // relaxed
46                 me->gate.store(1, wildcard(2) ); // relaxed
47
48                 // publish my node as the new tail :
49                 mcs_node * pred = m_tail.exchange(me, wildcard(3)); // acq_rel
50                 if ( pred != NULL ) {
51                         // (*1) race here
52                         // unlock of pred can see me in the tail before I fill next
53
54                         // publish me to previous lock-holder :
55                         pred->next.store(me, wildcard(4) ); // release
56
57                         // (*2) pred not touched any more       
58
59                         // now this is the spin -
60                         // wait on predecessor setting my flag -
61                         rl::linear_backoff bo;
62                         int my_gate = 1;
63                         while ( (my_gate = me->gate.load(wildcard(5))) ) { // acquire
64                                 thrd_yield();
65                         }
66                 }
67         }
68
69         void unlock(guard * I) {
70                 mcs_node * me = &(I->m_node);
71
72                 mcs_node * next = me->next.load(wildcard(6)); // acquire
73                 if ( next == NULL )
74                 {
75                         mcs_node * tail_was_me = me;
76                         bool success;
77                         // FIXME: SCFence infers acq_rel / release
78                         // Could be release if wildcard(8) is acquire
79                         if ( (success = m_tail.compare_exchange_strong(
80                                 tail_was_me,NULL,wildcard(7))) ) { // acq_rel
81                                 // got null in tail, mutex is unlocked
82                                 return;
83                         }
84
85                         // (*1) catch the race :
86                         rl::linear_backoff bo;
87                         for(;;) {
88                                 // FIXME: SCFence infers relaxed / acquire
89                                 // Could be relaxed if wildcard(7) is acq_rel
90                                 next = me->next.load(wildcard(8)); // acquire
91                                 if ( next != NULL )
92                                         break;
93                                 thrd_yield();
94                         }
95                 }
96
97                 // (*2) - store to next must be done,
98                 //  so no locker can be viewing my node any more        
99
100                 // let next guy in :
101                 next->gate.store( 0, wildcard(9) ); // release
102         }
103 };