7 #define RW_LOCK_BIAS 0x00100000
8 #define WRITE_LOCK_CMP RW_LOCK_BIAS
14 /** Example implementation of linux rw lock along with 2 thread test
19 1. At most 1 thread can acquire the write lock, and at the same time, no
20 other threads can acquire any lock (including read/write lock).
21 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
22 3. A read_unlock release 1 read lock, and a write_unlock release the write
23 lock. They can not release a lock that they don't acquire.
25 4. Read_lock and write_lock can not be grabbed at the same time.
26 5. Happens-before relationships should be checked and guaranteed, which
27 should be as follows:
28 a. read_unlock hb-> write_lock
29 b. write_unlock hb-> write_lock
30 c. write_unlock hb-> read_lock
34 Interesting point for locks:
35 a. If the users unlock() before any lock(), then the model checker will fail.
36 For this case, we cannot say that the data structure is buggy; how can we
37 distinguish this situation from a real data-structure bug?
38 b. We should specify that for a specific thread, successful locks and
39 unlocks should always come in pairs. We could make this check as an
40 auxiliary check since it is an extra rule for how the interfaces should be called.
49 bool writer_lock_acquired;
52 writer_lock_acquired = false;
55 # Since commit_point_set has no ID attached, A -> B means that for any B,
56 # the previous A happens before B.
57 Write_Unlock -> Write_Lock
58 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
60 Write_Unlock -> Read_Lock
61 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
65 static inline int read_can_lock(rwlock_t *lock)
67 return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
70 static inline int write_can_lock(rwlock_t *lock)
72 return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
80 Read_Lock_Success_1 | Read_Lock_Success_2
87 static inline void read_lock(rwlock_t *rw)
89 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
92 @Commit_point_define_check: priorvalue > 0
93 @Label:Read_Lock_Success_1
96 while (priorvalue <= 0) {
97 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
98 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
101 priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
104 @Commit_point_define_check: priorvalue > 0
105 @Label:Read_Lock_Success_2
114 @Interface: Write_Lock
116 Write_Lock_Success_1 | Write_Lock_Success_2
118 !writer_lock_acquired && reader_lock_cnt == 0
120 writer_lock_acquired = true;
123 static inline void write_lock(rwlock_t *rw)
125 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
128 @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
129 @Label: Write_Lock_Success_1
132 while (priorvalue != RW_LOCK_BIAS) {
133 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
134 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
137 priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
140 @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
141 @Label: Write_Lock_Success_2
149 @Interface: Read_Trylock
151 Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
153 writer_lock_acquired == false
155 HB_Read_Trylock_Succ :: __RET__ == 1
160 __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
163 static inline int read_trylock(rwlock_t *rw)
165 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
168 @Potential_commit_point_define: true
169 @Label: Potential_Read_Trylock_Point
172 if (priorvalue > 0) {
175 @Commit_point_define: true
176 @Potential_commit_point_label: Potential_Read_Trylock_Point
177 @Label: Read_Trylock_Succ_Point
184 @Commit_point_define: true
185 @Potential_commit_point_label: Potential_Read_Trylock_Point
186 @Label: Read_Trylock_Fail_Point
189 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
195 @Interface: Write_Trylock
197 Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
199 !writer_lock_acquired && reader_lock_cnt == 0
201 HB_Write_Trylock_Succ :: __RET__ == 1
204 writer_lock_acquired = true;
206 __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
209 static inline int write_trylock(rwlock_t *rw)
211 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
214 @Potential_commit_point_define: true
215 @Label: Potential_Write_Trylock_Point
218 if (priorvalue == RW_LOCK_BIAS) {
221 @Commit_point_define: true
222 @Potential_commit_point_label: Potential_Write_Trylock_Point
223 @Label: Write_Trylock_Succ_Point
230 @Commit_point_define: true
231 @Potential_commit_point_label: Potential_Write_Trylock_Point
232 @Label: Write_Trylock_Fail_Point
235 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
241 @Interface: Read_Unlock
242 @Commit_point_set: Read_Unlock_Point
244 reader_lock_cnt > 0 && !writer_lock_acquired
249 static inline void read_unlock(rwlock_t *rw)
251 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
254 @Commit_point_define_check: true
255 @Label: Read_Unlock_Point
262 @Interface: Write_Unlock
263 @Commit_point_set: Write_Unlock_Point
265 reader_lock_cnt == 0 && writer_lock_acquired
267 writer_lock_acquired = false;
270 static inline void write_unlock(rwlock_t *rw)
272 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
275 @Commit_point_define_check: true
276 @Label: Write_Unlock_Point
284 static void a(void *obj)
287 for(i = 0; i < 2; i++) {
290 load_32(&shareddata);
291 read_unlock(&mylock);
294 store_32(&shareddata,(unsigned int)i);
295 write_unlock(&mylock);
300 int user_main(int argc, char **argv)
308 atomic_init(&mylock.lock, RW_LOCK_BIAS);
310 thrd_create(&t1, (thrd_start_t)&a, NULL);
311 thrd_create(&t2, (thrd_start_t)&a, NULL);