#include <stdio.h>
#include <threads.h>
#include <stdatomic.h>

#include "librace.h"

#define RW_LOCK_BIAS            0x00100000
#define WRITE_LOCK_CMP          RW_LOCK_BIAS

/** Example implementation of the Linux rw lock along with a 2-thread test
 *  driver... */

/**
	Properties to check:
	1. At most 1 thread can hold the write lock, and while it does, no
		other thread can acquire any lock (read or write).
	2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
	3. A read_unlock releases 1 read lock, and a write_unlock releases the
	write lock. They cannot release a lock that they did not acquire.
	###
	4. The read lock and the write lock cannot be held at the same time.
	5. The following happens-before relationships should be checked and
	guaranteed:
		a. read_unlock hb-> write_lock
		b. write_unlock hb-> write_lock
		c. write_unlock hb-> read_lock
*/

/**
	Interesting points for locks:
	a. If a user calls unlock() before any lock(), the model checker will report
	a failure. In that case we cannot say the data structure itself is buggy, so
	how do we tell such a usage error apart from a real data structure bug?
	b. We should specify that, for a specific thread, successful locks and
	unlocks always come in pairs. We can make this an auxiliary check, since it
	is an extra rule for how the interfaces should be called (see the wrapper
	sketch after write_unlock below).
*/

/**
	@Begin
	@Global_define:
		bool __writer_lock_acquired = false;
		int __reader_lock_cnt = 0;

	@Happens_before:
		# Since commit_point_set has no ID attached, A -> B means that for any B,
		# the previous A happens before B.
		Read_Unlock -> Write_Lock
		Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

		Write_Unlock -> Write_Lock
		Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)

		Write_Unlock -> Read_Lock
		Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
	@End
*/

/**
	*/

typedef union {
	atomic_int lock;
} rwlock_t;

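/*
 * The lock state is a single counter initialized to RW_LOCK_BIAS: each
 * reader subtracts 1 and a writer subtracts the full RW_LOCK_BIAS.  The
 * lock is therefore free when the counter equals RW_LOCK_BIAS, readers are
 * present while it is strictly between 0 and RW_LOCK_BIAS, and it drops to
 * 0 (or transiently below, under contention) while a writer holds it.
 */
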
static inline int read_can_lock(rwlock_t *lock)
{
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
}

static inline int write_can_lock(rwlock_t *lock)
{
	return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
}


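/*
 * read_lock() optimistically subtracts 1 with acquire ordering.  If the
 * prior value was not positive, a writer holds (or is acquiring) the lock:
 * the decrement is undone, the thread spins (yielding) until the counter
 * becomes positive again, and then it retries.
 */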
/**
	@Begin
	@Interface: Read_Lock
	@Commit_point_set:
		Read_Lock_Success_1 | Read_Lock_Success_2
	@Check:
		!__writer_lock_acquired
	@Action:
		@Code:
		__reader_lock_cnt++;
	@End
*/
static inline void read_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: __ATOMIC_RET__ > 0
		@Label: Read_Lock_Success_1
		@End
	*/
	while (priorvalue <= 0) {
		atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
			thrd_yield();
		}
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: __ATOMIC_RET__ > 0
			@Label: Read_Lock_Success_2
			@End
		*/
	}
}


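/*
 * write_lock() subtracts the full RW_LOCK_BIAS with acquire ordering.  If
 * the prior value was not exactly RW_LOCK_BIAS, a reader or another writer
 * is still present: the subtraction is undone, the thread spins (yielding)
 * until the counter returns to RW_LOCK_BIAS, and then it retries.
 */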
/**
	@Begin
	@Interface: Write_Lock
	@Commit_point_set:
		Write_Lock_Success_1 | Write_Lock_Success_2
	@Check:
		!__writer_lock_acquired && __reader_lock_cnt == 0
	@Action:
		@Code:
		__writer_lock_acquired = true;
	@End
*/
static inline void write_lock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
		@Label: Write_Lock_Success_1
		@End
	*/
	while (priorvalue != RW_LOCK_BIAS) {
		atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
			thrd_yield();
		}
		priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
		/**
			@Begin
			@Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
			@Label: Write_Lock_Success_2
			@End
		*/
	}
}

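/*
 * read_trylock() makes a single attempt: it subtracts 1, and if the prior
 * value was positive the read lock has been acquired and 1 is returned;
 * otherwise the decrement is undone and 0 is returned.
 */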
/**
	@Begin
	@Interface: Read_Trylock
	@Commit_point_set:
		Read_Trylock_Point
	@Condition:
		__writer_lock_acquired == false
	@HB_condition:
		HB_Read_Trylock_Succ :: __RET__ == 1
	@Action:
		@Code:
		if (COND_SAT)
			__reader_lock_cnt++;
	@Post_check:
		COND_SAT ? __RET__ == 1 : __RET__ == 0
	@End
*/
static inline int read_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Read_Trylock_Point
		@End
	*/
	if (priorvalue > 0)
		return 1;

	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
	return 0;
}

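/*
 * write_trylock() makes a single attempt: it subtracts RW_LOCK_BIAS, and if
 * the prior value was exactly RW_LOCK_BIAS the write lock has been acquired
 * and 1 is returned; otherwise the subtraction is undone and 0 is returned.
 */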
/**
	@Begin
	@Interface: Write_Trylock
	@Commit_point_set:
		Write_Trylock_Point
	@Condition:
		!__writer_lock_acquired && __reader_lock_cnt == 0
	@HB_condition:
		HB_Write_Trylock_Succ :: __RET__ == 1
	@Action:
		@Code:
		if (COND_SAT)
			__writer_lock_acquired = true;
	@Post_check:
		COND_SAT ? __RET__ == 1 : __RET__ == 0
	@End
*/
static inline int write_trylock(rwlock_t *rw)
{
	int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Write_Trylock_Point
		@End
	*/
	if (priorvalue == RW_LOCK_BIAS)
		return 1;

	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
	return 0;
}

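/*
 * read_unlock() adds 1 back and write_unlock() adds RW_LOCK_BIAS back, both
 * with release ordering so that the critical section happens before any
 * subsequent acquisition of the lock.
 */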
/**
	@Begin
	@Interface: Read_Unlock
	@Commit_point_set: Read_Unlock_Point
	@Check:
		__reader_lock_cnt > 0 && !__writer_lock_acquired
	@Action:
		@Code:
		__reader_lock_cnt--;
	@End
*/
static inline void read_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Read_Unlock_Point
		@End
	*/
}

/**
	@Begin
	@Interface: Write_Unlock
	@Commit_point_set: Write_Unlock_Point
	@Check:
		__reader_lock_cnt == 0 && __writer_lock_acquired
	@Action:
		@Code:
		__writer_lock_acquired = false;
	@End
*/
static inline void write_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
	/**
		@Begin
		@Commit_point_define_check: true
		@Label: Write_Unlock_Point
		@End
	*/
}

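/*
 * A minimal sketch (not part of the original benchmark) of the auxiliary
 * pairing check mentioned at the top of this file: per-thread counters
 * wrapped around the lock/unlock calls, so that an unlock without a matching
 * successful lock in the same thread is reported as an interface-usage error
 * rather than as a data-structure bug.  The wrapper names and thread-local
 * counters below are hypothetical and are not used by the test driver.
 */
static _Thread_local int held_read_locks = 0;
static _Thread_local int held_write_locks = 0;

static inline void checked_read_lock(rwlock_t *rw)
{
	read_lock(rw);
	held_read_locks++;
}

static inline void checked_read_unlock(rwlock_t *rw)
{
	if (held_read_locks == 0)
		printf("read_unlock() without a matching read_lock() in this thread\n");
	else
		held_read_locks--;
	read_unlock(rw);
}

static inline void checked_write_lock(rwlock_t *rw)
{
	write_lock(rw);
	held_write_locks++;
}

static inline void checked_write_unlock(rwlock_t *rw)
{
	if (held_write_locks == 0)
		printf("write_unlock() without a matching write_lock() in this thread\n");
	else
		held_write_locks--;
	write_unlock(rw);
}
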
rwlock_t mylock;
int shareddata;

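/*
 * Test driver: each of the two threads first performs a read-locked load of
 * shareddata and then a write-locked store to it.
 */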
static void a(void *obj)
{
	int i;
	for (i = 0; i < 2; i++) {
		if ((i % 2) == 0) {
			read_lock(&mylock);
			load_32(&shareddata);
			read_unlock(&mylock);
		} else {
			write_lock(&mylock);
			store_32(&shareddata, (unsigned int)i);
			write_unlock(&mylock);
		}
	}
}

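/*
 * A hypothetical reader body (not wired into user_main) illustrating the
 * trylock variant: instead of blocking inside read_lock(), it retries
 * read_trylock() and yields while the lock is unavailable.  Purely an
 * interface illustration, not part of the original benchmark.
 */
static void trylock_reader(void *obj)
{
	while (!read_trylock(&mylock))
		thrd_yield();
	load_32(&shareddata);
	read_unlock(&mylock);
}
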
int user_main(int argc, char **argv)
{
	thrd_t t1, t2;
	atomic_init(&mylock.lock, RW_LOCK_BIAS);

	thrd_create(&t1, (thrd_start_t)&a, NULL);
	thrd_create(&t2, (thrd_start_t)&a, NULL);

	thrd_join(t1);
	thrd_join(t2);

	return 0;
}