/* Value of the lock word when the lock is free. Each reader subtracts 1
 * and a writer subtracts the entire bias, so the word equals RW_LOCK_BIAS
 * exactly when no one holds the lock, and stays positive while only
 * readers hold it. */
7 #define RW_LOCK_BIAS 0x00100000
/* Comparison value used to test write-lock availability. */
8 #define WRITE_LOCK_CMP RW_LOCK_BIAS
14 /** Example implementation of linux rw lock along with 2 thread test
19 1. At most 1 thread can acquire the write lock, and at the same time, no
20 other threads can acquire any lock (including read/write lock).
21 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
22 3. A read_unlock release 1 read lock, and a write_unlock release the write
23 lock. They can not release a lock that they don't acquire.
25 4. Read_lock and write_lock can not be grabbed at the same time.
26 5. Happens-before relationships should be checked and guaranteed, which
27 should be as the following:
28 a. read_unlock hb-> write_lock
29 b. write_unlock hb-> write_lock
30 c. write_unlock hb-> read_lock
34 Interesting point for locks:
35 a. If the users unlock() before any lock(), then the model checker will fail.
36 For this case, we cannot say that the data structure is buggy; how can we
37 distinguish this case from a real data-structure bug?
38 b. We should specify that for a specific thread, successful locks and
39 unlocks should always come in pairs. We could make this check as an
40 auxiliary check since it is an extra rule for how the interfaces should be called.
47 bool writer_lock_acquired;
50 writer_lock_acquired = false;
53 # Since commit_point_set has no ID attached, A -> B means that for any B,
54 # the previous A happens before B.
55 Read_Unlock -> Write_Lock
56 Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
58 Write_Unlock -> Write_Lock
59 Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
61 Write_Unlock -> Read_Lock
62 Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
66 static inline int read_can_lock(rwlock_t *lock)
68 return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
71 static inline int write_can_lock(rwlock_t *lock)
73 return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
81 Read_Lock_Success_1 | Read_Lock_Success_2
/* Acquire a read lock, spinning until a reader slot is available.
 * NOTE(review): callers must pair each read_lock with exactly one
 * read_unlock (see note b in the header comment). */
88 static inline void read_lock(rwlock_t *rw)
/* Optimistically claim a slot; a positive prior value means no writer
 * held the lock, so the fast path succeeded. Acquire so a successful
 * lock synchronizes with the previous write_unlock's release. */
90 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
/* CDSSpec commit point: fast-path acquisition succeeded. */
93 @Commit_point_define_check: priorvalue > 0
94 @Label:Read_Lock_Success_1
/* Slow path: a writer holds (or is contending for) the lock. */
97 while (priorvalue <= 0) {
/* Undo the optimistic decrement before spinning, so waiting readers
 * do not distort the lock word. Relaxed: this is only a roll-back. */
98 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
/* Read-only spin until the lock word looks positive again. */
99 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
/* Retry the decrement; acquire ordering as on the fast path. */
102 priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
/* CDSSpec commit point: retry acquisition succeeded. */
105 @Commit_point_define_check: priorvalue > 0
106 @Label:Read_Lock_Success_2
115 @Interface: Write_Lock
117 Write_Lock_Success_1 | Write_Lock_Success_2
119 !writer_lock_acquired && reader_lock_cnt == 0
121 writer_lock_acquired = true;
/* Acquire the write lock, spinning until the lock is completely free.
 * Subtracting the full bias drives the lock word to 0 on success,
 * which simultaneously excludes readers and other writers. */
124 static inline void write_lock(rwlock_t *rw)
/* Optimistically subtract the whole bias; prior value == RW_LOCK_BIAS
 * means the lock was free. Acquire pairs with read_unlock/write_unlock
 * release (hb rules a and b in the header comment). */
126 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
/* CDSSpec commit point: fast-path acquisition succeeded. */
129 @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
130 @Label: Write_Lock_Success_1
/* Slow path: readers or another writer hold (part of) the bias. */
133 while (priorvalue != RW_LOCK_BIAS) {
/* Roll back the optimistic subtraction before spinning. Relaxed:
 * this only restores the word, it publishes nothing. */
134 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
/* Read-only spin until the lock word holds the full bias again. */
135 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
/* Retry the subtraction; acquire ordering as on the fast path. */
138 priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
/* CDSSpec commit point: retry acquisition succeeded. */
141 @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
142 @Label: Write_Lock_Success_2
150 @Interface: Read_Trylock
154 writer_lock_acquired == false
156 HB_Read_Trylock_Succ :: __RET__ == 1
161 __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
/* Single non-blocking attempt to take a read lock.
 * NOTE(review): the return statements are not visible in this chunk;
 * presumably it returns 1 when priorvalue > 0 and 0 otherwise — confirm
 * against the full file. */
164 static inline int read_trylock(rwlock_t *rw)
/* One-shot optimistic decrement; acquire so a successful trylock
 * synchronizes with the prior write_unlock's release. */
166 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
/* CDSSpec commit point: the attempt (success or failure) is decided here. */
169 @Commit_point_define_check: true
170 @Label:Read_Trylock_Point
/* Failure path: roll back the decrement and report failure. Relaxed:
 * nothing was acquired, so no ordering needs to be published. */
176 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
182 @Interface: Write_Trylock
186 !writer_lock_acquired && reader_lock_cnt == 0
188 HB_Write_Trylock_Succ :: __RET__ == 1
191 writer_lock_acquired = true;
193 __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
/* Single non-blocking attempt to take the write lock.
 * NOTE(review): the return statements are not visible in this chunk;
 * presumably it returns 1 when priorvalue == RW_LOCK_BIAS and 0
 * otherwise — confirm against the full file. */
196 static inline int write_trylock(rwlock_t *rw)
/* One-shot optimistic subtraction of the full bias; acquire pairs with
 * the releasing unlocks when the attempt succeeds. */
198 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
/* CDSSpec commit point: the attempt (success or failure) is decided here. */
201 @Commit_point_define_check: true
202 @Label: Write_Trylock_Point
/* Success: the lock was free (word held the full bias). */
205 if (priorvalue == RW_LOCK_BIAS)
/* Failure path: roll back the subtraction. Relaxed: nothing acquired. */
208 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
214 @Interface: Read_Unlock
215 @Commit_point_set: Read_Unlock_Point
217 reader_lock_cnt > 0 && !writer_lock_acquired
/* Release one read lock by returning a single reader slot.
 * Must only be called by a thread that currently holds a read lock. */
222 static inline void read_unlock(rwlock_t *rw)
/* Release ordering makes this thread's critical-section reads visible
 * to the next write_lock acquirer (hb rule a in the header comment). */
224 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
/* CDSSpec commit point: the lock is released here. */
227 @Commit_point_define_check: true
228 @Label: Read_Unlock_Point
235 @Interface: Write_Unlock
236 @Commit_point_set: Write_Unlock_Point
238 reader_lock_cnt == 0 && writer_lock_acquired
240 writer_lock_acquired = false;
/* Release the write lock by restoring the full bias to the lock word.
 * Must only be called by the thread that holds the write lock. */
243 static inline void write_unlock(rwlock_t *rw)
/* Release ordering publishes the writer's critical-section stores to
 * subsequent lock acquirers (hb rules b and c in the header comment). */
245 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
/* CDSSpec commit point: the lock is released here. */
248 @Commit_point_define_check: true
249 @Label: Write_Unlock_Point
/* Thread body shared by both test threads: alternately reads and writes
 * shareddata under the corresponding lock.
 * NOTE(review): the matching read_lock()/write_lock() calls are not
 * visible in this chunk — the unlocks below imply they precede the
 * accesses; confirm against the full file. */
257 static void a(void *obj)
260 for(i = 0; i < 2; i++) {
/* Read the shared variable (under a read lock, per the unlock below). */
263 load_32(&shareddata);
264 read_unlock(&mylock);
/* Write the loop index (under a write lock, per the unlock below). */
267 store_32(&shareddata,(unsigned int)i);
268 write_unlock(&mylock);
/* Model-checker entry point: initialize the lock to its free value and
 * run two identical threads against it.
 * NOTE(review): the thrd_join calls are not visible in this chunk —
 * confirm the threads are joined in the full file. */
273 int user_main(int argc, char **argv)
/* The lock starts free: the word holds the full bias. */
281 atomic_init(&mylock.lock, RW_LOCK_BIAS);
/* Both threads run the same read-then-write body a(). */
283 thrd_create(&t1, (thrd_start_t)&a, NULL);
284 thrd_create(&t2, (thrd_start_t)&a, NULL);