#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>
#include "wildcard.h"

#include "librace.h"

/* wildcard(N) stands for a memory-order parameter to be inferred by the
 * model checker; the "// relaxed" and "// acquire"/"// release" notes below
 * record the inferred orders for each wildcard. */

#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

/** Example implementation of the Linux rwlock, along with a two-thread test
 *  driver (a sketch of such a driver is given at the end of this file). */

/**
	Properties to check:
	1. At most 1 thread can acquire the write lock, and while it holds the
		write lock, no other thread can acquire any lock (read or write).
	2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
	3. A read_unlock releases 1 read lock, and a write_unlock releases the
		write lock. They cannot release a lock that they did not acquire.
	###
	4. The read lock and the write lock cannot be held at the same time.
	5. The happens-before relationships should be checked and guaranteed to
		be the following:
		a. read_unlock hb-> write_lock
		b. write_unlock hb-> write_lock
		c. write_unlock hb-> read_lock
*/

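/*
 * Worked example of the counter encoding (a reading of the code below, not
 * part of the original annotations): the counter starts at RW_LOCK_BIAS
 * (0x00100000). Each reader subtracts 1 and a writer subtracts the whole
 * RW_LOCK_BIAS, so with no writer and N readers the counter holds
 * RW_LOCK_BIAS - N. For instance, after two successful read_lock() calls
 * the counter is 0x000FFFFE; a write_lock() attempt then sees a prior value
 * of 0x000FFFFE != RW_LOCK_BIAS and must back off. A counter value <= 0
 * means a writer has claimed the bias.
 */
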
/**
	Interesting points for locks:
	a. If the user calls unlock() before any lock(), the model checker will
	report a failure. In that case we cannot say the data structure itself is
	buggy; how do we distinguish such misuse from a real data structure bug?
	b. We should specify that, for a specific thread, successful locks and
	unlocks always come in pairs. We could make this an auxiliary check, since
	it is an extra rule for how the interfaces should be called (a sketch of
	such a check follows this comment).
*/

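#include <assert.h>

/*
 * A minimal sketch of the auxiliary pairing check suggested in (b) above,
 * assuming C11 thread-local storage; these helpers are hypothetical and not
 * part of the original benchmark or its annotations. Each thread tracks its
 * own lock depth so that an unlock() without a matching earlier successful
 * lock() is flagged.
 */
static _Thread_local int __lock_depth_hint = 0;

static inline void pairing_note_lock(void)
{
	__lock_depth_hint++;
}

static inline void pairing_note_unlock(void)
{
	/* Fires on the unlock()-before-lock() misuse described in (a). */
	assert(__lock_depth_hint > 0);
	__lock_depth_hint--;
}
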
/**
	@Begin
	@Global_define:
		bool __writer_lock_acquired = false;
		int __reader_lock_cnt = 0;

	@Happens_before:
		# Since commit_point_set has no ID attached, A -> B means that for any B,
		# the previous A happens before B.
		Read_Unlock -> Write_Lock
		Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

		Write_Unlock -> Write_Lock
		Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

		Write_Unlock -> Read_Lock
		Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
	@End
*/

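/*
 * Concrete instance of rule (a) above, as an illustration rather than part
 * of the spec: if thread T1 performs Read_Unlock and thread T2 subsequently
 * commits Write_Lock, then everything T1 did inside its read critical
 * section is visible to T2 once T2 holds the write lock.
 */
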
typedef union {
	atomic_int lock;
} rwlock_t;

/* Note: both checks below are only advisory; the answer can be stale by the
 * time the caller acts on it, so callers must still use lock()/trylock(). */
static inline int read_can_lock(rwlock_t *lock)
{
	// relaxed
	return atomic_load_explicit(&lock->lock, wildcard(1)) > 0;
}

static inline int write_can_lock(rwlock_t *lock)
{
	// relaxed
	return atomic_load_explicit(&lock->lock, wildcard(2)) == RW_LOCK_BIAS;
}


/**
	@Begin
	@Interface: Read_Lock
	@Commit_point_set:
		Read_Lock_Success_1 | Read_Lock_Success_2
	@Check:
		!__writer_lock_acquired
	@Action:
		@Code:
		__reader_lock_cnt++;
	@End
*/
static inline void read_lock(rwlock_t *rw)
{
	/* Optimistically take a reader slot by decrementing the counter. */
	// acquire
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, wildcard(3));
	/**
		@Begin
		@Commit_point_define_check: __ATOMIC_RET__ > 0
		@Label: Read_Lock_Success_1
		@End
	*/
	while (priorvalue <= 0) {
		/* A writer held the lock: undo the decrement, then spin until
		 * the lock looks free before trying again. */
		// relaxed
		atomic_fetch_add_explicit(&rw->lock, 1, wildcard(4));
		// relaxed
		while (atomic_load_explicit(&rw->lock, wildcard(5)) <= 0) {
			thrd_yield();
		}
		// acquire
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, wildcard(6));
		/**
			@Begin
			@Commit_point_define_check: __ATOMIC_RET__ > 0
			@Label: Read_Lock_Success_2
			@End
		*/
	}
}


/**
	@Begin
	@Interface: Write_Lock
	@Commit_point_set:
		Write_Lock_Success_1 | Write_Lock_Success_2
	@Check:
		!__writer_lock_acquired && __reader_lock_cnt == 0
	@Action:
		@Code:
		__writer_lock_acquired = true;
	@End
*/
static inline void write_lock(rwlock_t *rw)
{
	/* Try to take the whole bias: this only succeeds if the lock is free. */
	// acquire
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(7));
	/**
		@Begin
		@Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
		@Label: Write_Lock_Success_1
		@End
	*/
	while (priorvalue != RW_LOCK_BIAS) {
		/* Readers or another writer were present: undo, spin, retry. */
		// relaxed
		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(8));
		// relaxed
		while (atomic_load_explicit(&rw->lock, wildcard(9)) != RW_LOCK_BIAS) {
			thrd_yield();
		}
		// acquire
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(10));
		/**
			@Begin
			@Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
			@Label: Write_Lock_Success_2
			@End
		*/
	}
}

/**
	@Begin
	@Interface: Read_Trylock
	@Commit_point_set:
		Read_Trylock_Point
	@Condition:
		__writer_lock_acquired == false
	@HB_condition:
		HB_Read_Trylock_Succ :: __RET__ == 1
	@Action:
		@Code:
		if (COND_SAT)
			__reader_lock_cnt++;
	@Post_check:
		COND_SAT ? __RET__ == 1 : __RET__ == 0
	@End
*/
static inline int read_trylock(rwlock_t *rw)
{
	// acquire
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, wildcard(11));
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Read_Trylock_Point
		@End
	*/
	if (priorvalue > 0)
		return 1;

	/* A writer held the lock: undo the decrement and report failure. */
	// relaxed
	atomic_fetch_add_explicit(&rw->lock, 1, wildcard(12));
	return 0;
}

/**
	@Begin
	@Interface: Write_Trylock
	@Commit_point_set:
		Write_Trylock_Point
	@Condition:
		!__writer_lock_acquired && __reader_lock_cnt == 0
	@HB_condition:
		HB_Write_Trylock_Succ :: __RET__ == 1
	@Action:
		@Code:
		if (COND_SAT)
			__writer_lock_acquired = true;
	@Post_check:
		COND_SAT ? __RET__ == 1 : __RET__ == 0
	@End
*/
static inline int write_trylock(rwlock_t *rw)
{
	// acquire
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(13));
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Write_Trylock_Point
		@End
	*/
	if (priorvalue == RW_LOCK_BIAS)
		return 1;

	/* The lock was not free: undo the subtraction and report failure. */
	// relaxed
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(14));
	return 0;
}

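/*
 * Usage sketch (hypothetical, not part of the benchmark): write_can_lock()
 * is only a hint, so a writer can spin on it as a cheap backoff and rely on
 * write_trylock() for the actual atomic acquisition.
 */
static inline void write_lock_with_backoff(rwlock_t *rw)
{
	while (!write_trylock(rw)) {
		/* Wait until the lock at least looks free before retrying. */
		while (!write_can_lock(rw))
			thrd_yield();
	}
}
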
/**
	@Begin
	@Interface: Read_Unlock
	@Commit_point_set: Read_Unlock_Point
	@Check:
		__reader_lock_cnt > 0 && !__writer_lock_acquired
	@Action:
		@Code:
		__reader_lock_cnt--;
	@End
*/
static inline void read_unlock(rwlock_t *rw)
{
	/* Return the reader slot taken in read_lock()/read_trylock(). */
	// release
	atomic_fetch_add_explicit(&rw->lock, 1, wildcard(15));
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Read_Unlock_Point
		@End
	*/
}

/**
	@Begin
	@Interface: Write_Unlock
	@Commit_point_set: Write_Unlock_Point
	@Check:
		__reader_lock_cnt == 0 && __writer_lock_acquired
	@Action:
		@Code:
		__writer_lock_acquired = false;
	@End
*/
static inline void write_unlock(rwlock_t *rw)
{
	/* Give back the full bias taken in write_lock()/write_trylock(). */
	// release
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(16));
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Write_Unlock_Point
		@End
	*/
}
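
/*
 * A minimal sketch of the two-thread test driver mentioned at the top of
 * this file, assuming CDSChecker conventions: a user_main() entry point,
 * thrd_create(&t, (thrd_start_t)f, arg)/thrd_join(t) from its threads.h,
 * and load_32()/store_32() from librace.h. The thread bodies here are
 * illustrative, not the original benchmark's.
 */
static rwlock_t mylock;
static int shareddata;

static void reader_thread(void *arg)
{
	(void)arg;
	read_lock(&mylock);
	load_32(&shareddata);		/* race-instrumented read */
	read_unlock(&mylock);
}

static void writer_thread(void *arg)
{
	(void)arg;
	write_lock(&mylock);
	store_32(&shareddata, 42);	/* race-instrumented write */
	write_unlock(&mylock);
}

int user_main(int argc, char **argv)
{
	thrd_t t1, t2;
	(void)argc;
	(void)argv;

	atomic_init(&mylock.lock, RW_LOCK_BIAS);	/* the lock starts free */
	thrd_create(&t1, (thrd_start_t)reader_thread, NULL);
	thrd_create(&t2, (thrd_start_t)writer_thread, NULL);
	thrd_join(t1);
	thrd_join(t2);
	return 0;
}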