/* linuxrwlocks.h — rw-lock barrier with weaker memory-order inferences; notes in comments.
 * Origin: model-checker-benchmarks.git / linuxrwlocks / linuxrwlocks.h */
#include <stdio.h>
#include <stdatomic.h>
#include <threads.h>	/* thrd_yield() — used by the spin loops below */

#include "librace.h"
6 #define RW_LOCK_BIAS            0x00100000
7 #define WRITE_LOCK_CMP          RW_LOCK_BIAS
8
9 /** Example implementation of linux rw lock along with 2 thread test
10  *  driver... */
11
/** Linux-style reader/writer lock state.
 *  A single atomic counter encodes both roles: it starts at RW_LOCK_BIAS;
 *  each reader subtracts 1, a writer subtracts the full RW_LOCK_BIAS.
 *  So value == RW_LOCK_BIAS means free, value > 0 means readers may enter,
 *  value <= 0 means a writer holds (or is contending for) the lock. */
typedef union {
	atomic_int lock;
} rwlock_t;
15
16 static inline int read_can_lock(rwlock_t *lock)
17 {
18         return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
19 }
20
21 static inline int write_can_lock(rwlock_t *lock)
22 {
23         return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
24 }
25
26 static inline void read_lock(rwlock_t *rw)
27 {
28         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
29         while (priorvalue <= 0) {
30                 atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
31                 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
32                         thrd_yield();
33                 }
34                 priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
35         }
36 }
37
38 static inline void write_lock(rwlock_t *rw)
39 {
40         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
41         while (priorvalue != RW_LOCK_BIAS) {
42                 atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
43                 while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
44                         thrd_yield();
45                 }
46                 priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
47         }
48 }
49
50 static inline int read_trylock(rwlock_t *rw)
51 {
52         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
53         if (priorvalue > 0)
54                 return 1;
55
56         atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
57         return 0;
58 }
59
60 static inline int write_trylock(rwlock_t *rw)
61 {
62         int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
63         if (priorvalue == RW_LOCK_BIAS)
64                 return 1;
65
66         atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
67         return 0;
68 }
69
/** Release a shared (reader) hold on @rw by adding the reader unit back.
 *  memory_order_release pairs with the acquire RMWs in the lock/trylock
 *  paths on the same counter. */
static inline void read_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
}
74
/** Release an exclusive (writer) hold on @rw by restoring the full bias.
 *  memory_order_release publishes the writer's critical-section stores to
 *  the next thread whose acquire RMW observes the freed lock. */
static inline void write_unlock(rwlock_t *rw)
{
	atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
}