/*
 * Lock counter scheme: a free lock holds the full bias value.  Each reader
 * subtracts 1 and each writer subtracts the whole bias, so the counter is
 * RW_LOCK_BIAS when unlocked, in (0, RW_LOCK_BIAS) while read-held, and
 * <= 0 while write-held.  A writer may proceed only when it observes the
 * full bias (i.e. no readers and no writer).
 */
#define RW_LOCK_BIAS 0x00100000
#define WRITE_LOCK_CMP RW_LOCK_BIAS
/** Example implementation of the Linux rw lock along with a 2-thread test.

	Properties to check:
	1. At most 1 thread can acquire the write lock, and at the same time, no
	other threads can acquire any lock (including read/write lock).
	2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
	3. A read_unlock releases 1 read lock, and a write_unlock releases the
	write lock. They can not release a lock that they don't acquire.
	4. Read_lock and write_lock can not be grabbed at the same time.
	5. Happens-before relationships should be checked and guaranteed, which
	should be as the following:
		a. read_unlock hb-> write_lock
		b. write_unlock hb-> write_lock
		c. write_unlock hb-> read_lock

	Interesting points for locks:
	a. If the users unlock() before any lock(), then the model checker will
	fail. For this case, we can not say that the data structure is buggy;
	how can we distinguish that from a real data-structure bug?
	b. We should specify that for a specific thread, successful locks and
	unlocks should always come in pairs. We could make this an auxiliary
	check since it is an extra rule for how the interfaces should be called.
*/
/*
 * Specification auxiliary state (read/written only by the checker's
 * @Check/@Action annotations, never by the lock implementation itself).
 */
bool __writer_lock_acquired = false;	/* true while a writer holds the lock */
int __reader_lock_cnt = 0;		/* number of readers currently holding the lock */
/**
	Happens-before ordering required by the specification.
	# Since commit_point_set has no ID attached, A -> B means that for any B,
	# the previous A happens before B.
		Read_Unlock -> Write_Lock
		Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
		Write_Unlock -> Write_Lock
		Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
		Write_Unlock -> Read_Lock
		Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
*/
67 static inline int read_can_lock(rwlock_t *lock)
70 return atomic_load_explicit(&lock->lock, wildcard(1)) > 0;
73 static inline int write_can_lock(rwlock_t *lock)
76 return atomic_load_explicit(&lock->lock, wildcard(2)) == RW_LOCK_BIAS;
84 Read_Lock_Success_1 | Read_Lock_Success_2
86 !__writer_lock_acquired
92 static inline void read_lock(rwlock_t *rw)
95 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, wildcard(3));
98 @Commit_point_define_check: __ATOMIC_RET__ > 0
99 @Label:Read_Lock_Success_1
102 while (priorvalue <= 0) {
104 atomic_fetch_add_explicit(&rw->lock, 1, wildcard(4));
106 while (atomic_load_explicit(&rw->lock, wildcard(5)) <= 0) {
110 priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, wildcard(6));
113 @Commit_point_define_check: __ATOMIC_RET__ > 0
114 @Label:Read_Lock_Success_2
123 @Interface: Write_Lock
125 Write_Lock_Success_1 | Write_Lock_Success_2
127 !__writer_lock_acquired && __reader_lock_cnt == 0
130 __writer_lock_acquired = true;
133 static inline void write_lock(rwlock_t *rw)
136 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(7));
139 @Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
140 @Label: Write_Lock_Success_1
143 while (priorvalue != RW_LOCK_BIAS) {
145 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(8));
147 while (atomic_load_explicit(&rw->lock, wildcard(9)) != RW_LOCK_BIAS) {
151 priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(10));
154 @Commit_point_define_check: __ATOMIC_RET__ == RW_LOCK_BIAS
155 @Label: Write_Lock_Success_2
163 @Interface: Read_Trylock
167 __writer_lock_acquired == false
169 HB_Read_Trylock_Succ :: __RET__ == 1
175 COND_SAT ? __RET__ == 1 : __RET__ == 0
178 static inline int read_trylock(rwlock_t *rw)
181 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, wildcard(11));
184 @Commit_point_define_check: true
185 @Label:Read_Trylock_Point
192 atomic_fetch_add_explicit(&rw->lock, 1, wildcard(12));
198 @Interface: Write_Trylock
202 !__writer_lock_acquired && __reader_lock_cnt == 0
204 HB_Write_Trylock_Succ :: __RET__ == 1
208 __writer_lock_acquired = true;
210 COND_SAT ? __RET__ == 1 : __RET__ == 0
213 static inline int write_trylock(rwlock_t *rw)
216 int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(13));
219 @Commit_point_define_check: true
220 @Label: Write_Trylock_Point
223 if (priorvalue == RW_LOCK_BIAS)
227 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(14));
233 @Interface: Read_Unlock
234 @Commit_point_set: Read_Unlock_Point
236 __reader_lock_cnt > 0 && !__writer_lock_acquired
242 static inline void read_unlock(rwlock_t *rw)
245 atomic_fetch_add_explicit(&rw->lock, 1, wildcard(15));
248 @Commit_point_define_check: true
249 @Label: Read_Unlock_Point
256 @Interface: Write_Unlock
257 @Commit_point_set: Write_Unlock_Point
259 reader_lock_cnt == 0 && writer_lock_acquired
262 __writer_lock_acquired = false;
266 static inline void write_unlock(rwlock_t *rw)
269 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, wildcard(16));
272 @Commit_point_define_check: true
273 @Label: Write_Unlock_Point