include ../benchmarks.mk
-TESTNAME = linuxrwlocks
+TESTNAME = linuxrwlocks testcase1 testcase2
all: $(TESTNAME)
-$(TESTNAME): $(TESTNAME).c
+$(TESTNAME): % : %.c
$(CC) -o $@ $< $(CFLAGS) $(LDFLAGS)
clean:
@Happens_before:
# Since commit_point_set has no ID attached, A -> B means that for any B,
# the previous A happens before B.
- Read_Unlock -> Read_Lock
+ #Read_Unlock -> Read_Lock
Read_Unlock -> Write_Lock
Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
- Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
+ #Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
Write_Unlock -> Write_Lock
Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
Write_Unlock -> Read_Lock
Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
+
+ @Commutativity: Read_Lock <-> Read_Lock: true
+ @Commutativity: Read_Lock <-> Read_Unlock: true
+ @Commutativity: Read_Lock <-> Read_Trylock: true
+ @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
+
+ @Commutativity: Read_Unlock <-> Read_Unlock: true
+ @Commutativity: Read_Unlock <-> Read_Trylock: true
+ @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
+
+ @Commutativity: Read_Trylock <-> Read_Trylock: true
+ @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
+ @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
+ @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
+
+ @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
+ @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
+ @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
@End
*/
*/
static inline void read_lock(rwlock_t *rw)
{
+
+ /********** Admissibility **********/
int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
/**
@Begin
while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
thrd_yield();
}
+ /********** Admissibility **********/
priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
/**
@Begin
*/
static inline void write_lock(rwlock_t *rw)
{
+ /********** Admissibility **********/
int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
/**
@Begin
while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
thrd_yield();
}
+ /********** Admissibility **********/
priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
/**
@Begin
//@Condition: writer_lock_acquired == false
@HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
@Action:
- if (__COND_SAT__)
+ if (__RET__)
reader_lock_cnt++;
- @Post_check: __COND_SAT__ ? __RET__ == 1 : __RET__ == 0
+ @Post_check: __RET__ == !writer_lock_acquired || !__RET__
@End
*/
static inline int read_trylock(rwlock_t *rw)
{
+ /********** Admissibility **********/
int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
/**
@Begin
*/
static inline int write_trylock(rwlock_t *rw)
{
+ /********** Admissibility **********/
int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
//model_print("write_trylock: priorvalue: %d\n", priorvalue);
/**
*/
static inline void read_unlock(rwlock_t *rw)
{
+ /********** Admissibility **********/
atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
/**
@Begin
*/
static inline void write_unlock(rwlock_t *rw)
{
+ /********** Admissibility **********/
atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
/**
@Begin
static void a(void *obj)
{
- int i;
- for(i = 0; i < 2; i++) {
- if ((i % 2) == 0) {
- read_lock(&mylock);
- //load_32(&shareddata);
- //printf("%d\n", shareddata);
- read_unlock(&mylock);
- } else {
- write_lock(&mylock);
- //store_32(&shareddata,(unsigned int)i);
- shareddata = 47;
- write_unlock(&mylock);
- }
- }
+ read_lock(&mylock);
+ //load_32(&shareddata);
+ //printf("%d\n", shareddata);
+ read_unlock(&mylock);
+
+ write_lock(&mylock);
+ //store_32(&shareddata,(unsigned int)i);
+ shareddata = 47;
+ write_unlock(&mylock);
}
static void b(void *obj)
{
- int i;
- for(i = 0; i < 2; i++) {
- if ((i % 2) == 0) {
- if (read_trylock(&mylock) == 1) {
- //printf("%d\n", shareddata);
- read_unlock(&mylock);
- }
- } else {
- if (write_trylock(&mylock) == 1) {
- //shareddata = 47;
- write_unlock(&mylock);
- }
- }
+ if (read_trylock(&mylock) == 1) {
+ //printf("%d\n", shareddata);
+ read_unlock(&mylock);
+ }
+
+ if (write_trylock(&mylock) == 1) {
+ //shareddata = 47;
+ write_unlock(&mylock);
}
}
--- /dev/null
+#include <stdio.h>
+#include <threads.h>
+#include <stdatomic.h>
+
+#include <spec_lib.h>
+#include <stdlib.h>
+#include <cdsannotate.h>
+#include <specannotation.h>
+#include <model_memory.h>
+#include "common.h"
+
+#include "librace.h"
+
+#define RW_LOCK_BIAS 0x00100000
+#define WRITE_LOCK_CMP RW_LOCK_BIAS
+
+typedef union {
+ atomic_int lock;
+} rwlock_t;
+
+/** Example implementation of linux rw lock along with 2 thread test
+ * driver... */
+
+/**
+ Properties to check:
+ 1. At most 1 thread can acquire the write lock, and at the same time, no
+ other threads can acquire any lock (including read/write lock).
+ 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
+ 3. A read_unlock release 1 read lock, and a write_unlock release the write
+ lock. They can not release a lock that they don't acquire.
+ ###
+ 4. Read_lock and write_lock can not be grabbed at the same time.
+ 5. Happens-before relationship should be checked and guaranteed, which
+ should be as the following:
+ a. read_unlock hb-> write_lock
+ b. write_unlock hb-> write_lock
+ c. write_unlock hb-> read_lock
+*/
+
+/**
+ Interesting point for locks:
+ a. If the users unlock() before any lock(), then the model checker will fail.
+ For this case, we can not say that the data structure is buggy, how can we
+ tell them from a real data structure bug???
+ b. We should specify that for a specific thread, successful locks and
+ unlocks should always come in pairs. We could make this check as an
+ auxiliary check since it is an extra rule for how the interfaces should be called.
+*/
+
+/**
+ @Begin
+ @Options:
+ LANG = C;
+ @Global_define:
+ @DeclareVar:
+ bool writer_lock_acquired;
+ int reader_lock_cnt;
+ @InitVar:
+ writer_lock_acquired = false;
+ reader_lock_cnt = 0;
+ @Happens_before:
+ # Since commit_point_set has no ID attached, A -> B means that for any B,
+ # the previous A happens before B.
+ #Read_Unlock -> Read_Lock
+ Read_Unlock -> Write_Lock
+ Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
+ #Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
+
+ Write_Unlock -> Write_Lock
+ Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
+
+ Write_Unlock -> Read_Lock
+ Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
+
+ @Commutativity: Read_Lock <-> Read_Lock: true
+ @Commutativity: Read_Lock <-> Read_Unlock: true
+ @Commutativity: Read_Lock <-> Read_Trylock: true
+ @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
+
+ @Commutativity: Read_Unlock <-> Read_Unlock: true
+ @Commutativity: Read_Unlock <-> Read_Trylock: true
+ @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
+
+ @Commutativity: Read_Trylock <-> Read_Trylock: true
+ @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
+ @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
+ @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
+
+ @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
+ @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
+ @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
+ @End
+*/
+
+static inline int read_can_lock(rwlock_t *lock)
+{
+ return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
+}
+
+static inline int write_can_lock(rwlock_t *lock)
+{
+ return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
+}
+
+
+/**
+ @Begin
+ @Interface: Read_Lock
+ @Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
+ @Check: !writer_lock_acquired
+ @Action: reader_lock_cnt++;
+ @End
+*/
+static inline void read_lock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue > 0
+ @Label:Read_Lock_Success_1
+ @End
+ */
+ while (priorvalue <= 0) {
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
+ while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
+ thrd_yield();
+ }
+ priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue > 0
+ @Label:Read_Lock_Success_2
+ @End
+ */
+ }
+}
+
+
+/**
+ @Begin
+ @Interface: Write_Lock
+ @Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
+ @Check: !writer_lock_acquired && reader_lock_cnt == 0
+ @Action: writer_lock_acquired = true;
+ @End
+*/
+static inline void write_lock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
+ @Label: Write_Lock_Success_1
+ @End
+ */
+ while (priorvalue != RW_LOCK_BIAS) {
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
+ while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
+ thrd_yield();
+ }
+ priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
+ @Label: Write_Lock_Success_2
+ @End
+ */
+ }
+}
+
+/**
+ @Begin
+ @Interface: Read_Trylock
+ @Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
+ //@Condition: writer_lock_acquired == false
+ @HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
+ @Action:
+ if (__RET__)
+ reader_lock_cnt++;
+ @Post_check: __RET__ == !writer_lock_acquired || !__RET__
+ @End
+*/
+static inline int read_trylock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ /**
+ @Begin
+ @Potential_commit_point_define: true
+ @Label: Potential_Read_Trylock_Point
+ @End
+ */
+ if (priorvalue > 0) {
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Read_Trylock_Point
+ @Label: Read_Trylock_Succ_Point
+ @End
+ */
+ return 1;
+ }
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Read_Trylock_Point
+ @Label: Read_Trylock_Fail_Point
+ @End
+ */
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
+ return 0;
+}
+
+/**
+ @Begin
+ @Interface: Write_Trylock
+ @Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
+ @HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
+ @Action:
+ if (__RET__ == 1)
+ writer_lock_acquired = true;
+ @End
+*/
+static inline int write_trylock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ //model_print("write_trylock: priorvalue: %d\n", priorvalue);
+ /**
+ @Begin
+ @Potential_commit_point_define: true
+ @Label: Potential_Write_Trylock_Point
+ @End
+ */
+ if (priorvalue == RW_LOCK_BIAS) {
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Write_Trylock_Point
+ @Label: Write_Trylock_Succ_Point
+ @End
+ */
+ return 1;
+ }
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Write_Trylock_Point
+ @Label: Write_Trylock_Fail_Point
+ @End
+ */
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
+ return 0;
+}
+
+/**
+ @Begin
+ @Interface: Read_Unlock
+ @Commit_point_set: Read_Unlock_Point
+ @Check: reader_lock_cnt > 0 && !writer_lock_acquired
+ @Action: reader_lock_cnt--;
+ @End
+*/
+static inline void read_unlock(rwlock_t *rw)
+{
+ /********** Admissibility **********/
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
+ /**
+ @Begin
+ @Commit_point_define_check: true
+ @Label: Read_Unlock_Point
+ @End
+ */
+}
+
+/**
+ @Begin
+ @Interface: Write_Unlock
+ @Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
+ @Check: reader_lock_cnt == 0 && writer_lock_acquired
+ @Action: writer_lock_acquired = false;
+ @End
+*/
+static inline void write_unlock(rwlock_t *rw)
+{
+ /********** Admissibility **********/
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
+ /**
+ @Begin
+ @Commit_point_define_check: true
+ @Label: Write_Unlock_Point
+ @End
+ */
+
+ /**
+ //@Begin
+ @Commit_point_clear: true
+ @Label: Write_Unlock_Clear
+ @End
+ */
+}
+
+rwlock_t mylock;
+int shareddata;
+
+static void a(void *obj)
+{
+ write_lock(&mylock);
+ //store_32(&shareddata,(unsigned int)i);
+ shareddata = 47;
+ write_unlock(&mylock);
+}
+
+static void b(void *obj)
+{
+ if (read_trylock(&mylock)) {
+ //load_32(&shareddata);
+ //printf("%d\n", shareddata);
+ read_unlock(&mylock);
+ }
+}
+
+int user_main(int argc, char **argv)
+{
+ /**
+ @Begin
+ @Entry_point
+ @End
+ */
+ thrd_t t1, t2;
+ atomic_init(&mylock.lock, RW_LOCK_BIAS);
+
+ thrd_create(&t1, (thrd_start_t)&a, NULL);
+ thrd_create(&t2, (thrd_start_t)&b, NULL);
+
+ thrd_join(t1);
+ thrd_join(t2);
+
+ return 0;
+}
--- /dev/null
+#include <stdio.h>
+#include <threads.h>
+#include <stdatomic.h>
+
+#include <spec_lib.h>
+#include <stdlib.h>
+#include <cdsannotate.h>
+#include <specannotation.h>
+#include <model_memory.h>
+#include "common.h"
+
+#include "librace.h"
+
+#define RW_LOCK_BIAS 0x00100000
+#define WRITE_LOCK_CMP RW_LOCK_BIAS
+
+typedef union {
+ atomic_int lock;
+} rwlock_t;
+
+/** Example implementation of linux rw lock along with 2 thread test
+ * driver... */
+
+/**
+ Properties to check:
+ 1. At most 1 thread can acquire the write lock, and at the same time, no
+ other threads can acquire any lock (including read/write lock).
+ 2. At most RW_LOCK_BIAS threads can successfully acquire the read lock.
+ 3. A read_unlock release 1 read lock, and a write_unlock release the write
+ lock. They can not release a lock that they don't acquire.
+ ###
+ 4. Read_lock and write_lock can not be grabbed at the same time.
+ 5. Happens-before relationship should be checked and guaranteed, which
+ should be as the following:
+ a. read_unlock hb-> write_lock
+ b. write_unlock hb-> write_lock
+ c. write_unlock hb-> read_lock
+*/
+
+/**
+ Interesting point for locks:
+ a. If the users unlock() before any lock(), then the model checker will fail.
+ For this case, we can not say that the data structure is buggy, how can we
+ tell them from a real data structure bug???
+ b. We should specify that for a specific thread, successful locks and
+ unlocks should always come in pairs. We could make this check as an
+ auxiliary check since it is an extra rule for how the interfaces should be called.
+*/
+
+/**
+ @Begin
+ @Options:
+ LANG = C;
+ @Global_define:
+ @DeclareVar:
+ bool writer_lock_acquired;
+ int reader_lock_cnt;
+ @InitVar:
+ writer_lock_acquired = false;
+ reader_lock_cnt = 0;
+ @Happens_before:
+ # Since commit_point_set has no ID attached, A -> B means that for any B,
+ # the previous A happens before B.
+ #Read_Unlock -> Read_Lock
+ Read_Unlock -> Write_Lock
+ Read_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
+ #Read_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
+
+ Write_Unlock -> Write_Lock
+ Write_Unlock -> Write_Trylock(HB_Write_Trylock_Succ)
+
+ Write_Unlock -> Read_Lock
+ Write_Unlock -> Read_Trylock(HB_Read_Trylock_Succ)
+
+ @Commutativity: Read_Lock <-> Read_Lock: true
+ @Commutativity: Read_Lock <-> Read_Unlock: true
+ @Commutativity: Read_Lock <-> Read_Trylock: true
+ @Commutativity: Read_Lock <-> Write_Trylock: !_Method2.__RET__
+
+ @Commutativity: Read_Unlock <-> Read_Unlock: true
+ @Commutativity: Read_Unlock <-> Read_Trylock: true
+ @Commutativity: Read_Unlock <-> Write_Trylock: !_Method2.__RET__
+
+ @Commutativity: Read_Trylock <-> Read_Trylock: true
+ @Commutativity: Read_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
+ @Commutativity: Read_Trylock <-> Write_Lock: !_Method1.__RET__
+ @Commutativity: Read_Trylock <-> Write_Unlock: !_Method1.__RET__
+
+ @Commutativity: Write_Trylock <-> Write_Trylock: !_Method1.__RET__ || !_Method2.__RET__
+ @Commutativity: Write_Trylock <-> Write_Unlock: !_Method1.__RET__
+ @Commutativity: Write_Trylock <-> Write_Lock: !_Method1.__RET__
+ @End
+*/
+
+static inline int read_can_lock(rwlock_t *lock)
+{
+ return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
+}
+
+static inline int write_can_lock(rwlock_t *lock)
+{
+ return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
+}
+
+
+/**
+ @Begin
+ @Interface: Read_Lock
+ @Commit_point_set: Read_Lock_Success_1 | Read_Lock_Success_2
+ @Check: !writer_lock_acquired
+ @Action: reader_lock_cnt++;
+ @End
+*/
+static inline void read_lock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue > 0
+ @Label:Read_Lock_Success_1
+ @End
+ */
+ while (priorvalue <= 0) {
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
+ while (atomic_load_explicit(&rw->lock, memory_order_relaxed) <= 0) {
+ thrd_yield();
+ }
+ priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue > 0
+ @Label:Read_Lock_Success_2
+ @End
+ */
+ }
+}
+
+
+/**
+ @Begin
+ @Interface: Write_Lock
+ @Commit_point_set: Write_Lock_Success_1 | Write_Lock_Success_2
+ @Check: !writer_lock_acquired && reader_lock_cnt == 0
+ @Action: writer_lock_acquired = true;
+ @End
+*/
+static inline void write_lock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
+ @Label: Write_Lock_Success_1
+ @End
+ */
+ while (priorvalue != RW_LOCK_BIAS) {
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
+ while (atomic_load_explicit(&rw->lock, memory_order_relaxed) != RW_LOCK_BIAS) {
+ thrd_yield();
+ }
+ priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ /**
+ @Begin
+ @Commit_point_define_check: priorvalue == RW_LOCK_BIAS
+ @Label: Write_Lock_Success_2
+ @End
+ */
+ }
+}
+
+/**
+ @Begin
+ @Interface: Read_Trylock
+ @Commit_point_set: Read_Trylock_Succ_Point | Read_Trylock_Fail_Point
+ //@Condition: writer_lock_acquired == false
+ @HB_condition: HB_Read_Trylock_Succ :: __RET__ == 1
+ @Action:
+ if (__RET__)
+ reader_lock_cnt++;
+ @Post_check: __RET__ == !writer_lock_acquired || !__RET__
+ @End
+*/
+static inline int read_trylock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ /**
+ @Begin
+ @Potential_commit_point_define: true
+ @Label: Potential_Read_Trylock_Point
+ @End
+ */
+ if (priorvalue > 0) {
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Read_Trylock_Point
+ @Label: Read_Trylock_Succ_Point
+ @End
+ */
+ return 1;
+ }
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Read_Trylock_Point
+ @Label: Read_Trylock_Fail_Point
+ @End
+ */
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
+ return 0;
+}
+
+/**
+ @Begin
+ @Interface: Write_Trylock
+ @Commit_point_set: Write_Trylock_Succ_Point | Write_Trylock_Fail_Point
+ @HB_condition: HB_Write_Trylock_Succ :: __RET__ == 1
+ @Action:
+ if (__RET__ == 1)
+ writer_lock_acquired = true;
+ @End
+*/
+static inline int write_trylock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ //model_print("write_trylock: priorvalue: %d\n", priorvalue);
+ /**
+ @Begin
+ @Potential_commit_point_define: true
+ @Label: Potential_Write_Trylock_Point
+ @End
+ */
+ if (priorvalue == RW_LOCK_BIAS) {
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Write_Trylock_Point
+ @Label: Write_Trylock_Succ_Point
+ @End
+ */
+ return 1;
+ }
+ /**
+ @Begin
+ @Commit_point_define: true
+ @Potential_commit_point_label: Potential_Write_Trylock_Point
+ @Label: Write_Trylock_Fail_Point
+ @End
+ */
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
+ return 0;
+}
+
+/**
+ @Begin
+ @Interface: Read_Unlock
+ @Commit_point_set: Read_Unlock_Point
+ @Check: reader_lock_cnt > 0 && !writer_lock_acquired
+ @Action: reader_lock_cnt--;
+ @End
+*/
+static inline void read_unlock(rwlock_t *rw)
+{
+ /********** Admissibility **********/
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
+ /**
+ @Begin
+ @Commit_point_define_check: true
+ @Label: Read_Unlock_Point
+ @End
+ */
+}
+
+/**
+ @Begin
+ @Interface: Write_Unlock
+ @Commit_point_set: Write_Unlock_Point | Write_Unlock_Clear
+ @Check: reader_lock_cnt == 0 && writer_lock_acquired
+ @Action: writer_lock_acquired = false;
+ @End
+*/
+static inline void write_unlock(rwlock_t *rw)
+{
+ /********** Admissibility **********/
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
+ /**
+ @Begin
+ @Commit_point_define_check: true
+ @Label: Write_Unlock_Point
+ @End
+ */
+
+ /**
+ //@Begin
+ @Commit_point_clear: true
+ @Label: Write_Unlock_Clear
+ @End
+ */
+}
+
+rwlock_t mylock;
+int shareddata;
+
+static void a(void *obj)
+{
+ write_lock(&mylock);
+ //store_32(&shareddata,(unsigned int)i);
+ shareddata = 47;
+ write_unlock(&mylock);
+}
+
+static void b(void *obj)
+{
+ if (write_trylock(&mylock)) {
+ //store_32(&shareddata,(unsigned int)i);
+ shareddata = 47;
+ write_unlock(&mylock);
+ }
+
+ read_lock(&mylock);
+ //load_32(&shareddata);
+ //printf("%d\n", shareddata);
+ read_unlock(&mylock);
+}
+
+int user_main(int argc, char **argv)
+{
+ /**
+ @Begin
+ @Entry_point
+ @End
+ */
+ thrd_t t1, t2;
+ atomic_init(&mylock.lock, RW_LOCK_BIAS);
+
+ thrd_create(&t1, (thrd_start_t)&a, NULL);
+ thrd_create(&t2, (thrd_start_t)&b, NULL);
+
+ thrd_join(t1);
+ thrd_join(t2);
+
+ return 0;
+}
concurrent-hashmap seqlock spsc-example spsc-queue-scfence \
treiber-stack
-DIRS := ms-queue concurrent-hashmap
+DIRS := ms-queue concurrent-hashmap linuxrwlocks
.PHONY: $(DIRS)
--- /dev/null
+include ../benchmarks.mk
+
+TESTNAME = linuxrwlocks testcase1 testcase2
+
+all: $(TESTNAME)
+
+$(TESTNAME): % : %.c
+ $(CC) -o $@ $< $(CFLAGS) $(LDFLAGS)
+
+clean:
+ rm -f $(TESTNAME) *.o
public static void main(String[] argvs) {
String homeDir = Environment.HOME_DIRECTORY;
-// File[] srcLinuxRWLocks = { new File(homeDir
-// + "/benchmark/linuxrwlocks/linuxrwlocks.c") };
+ File[] srcLinuxRWLock1 = {
+ new File(homeDir
+ + "/benchmark/linuxrwlocks/linuxrwlocks.c") };
+ File[] srcLinuxRWLock2 = {
+ new File(homeDir
+ + "/benchmark/linuxrwlocks/testcase1.c") };
+ File[] srcLinuxRWLock3 = {
+ new File(homeDir
+ + "/benchmark/linuxrwlocks/testcase2.c") };
//
File[] srcHashtable = {
new File(homeDir
// File[] srcRCU = { new File(homeDir
// + "/benchmark/read-copy-update/rcu.cc") };
//
-// File[] srcTrylock = { new File(homeDir
-// + "/benchmark/trylock/trylock.c") };
+ File[] srcTrylock = { new File(homeDir
+ + "/benchmark/trylock/trylock.c") };
// File[] srcDeque = {
// new File(homeDir + "/benchmark/chase-lev-deque-bugfix/deque.c"),
// File[][] sources = { srcLinuxRWLocks, srcMSQueue, srcRCU,
// srcDeque, srcMCSLock, srcSPSCQueue, srcMPMCQueue, srcHashtable };
- File[][] sources = {srcMSQueue, srcHashtable};
+ File[][] sources = {srcLinuxRWLock1, srcLinuxRWLock2, srcLinuxRWLock3};
// Compile all the benchmarks
for (int i = 0; i < sources.length; i++) {
CodeGenerator gen = new CodeGenerator(sources[i]);
for (int i = 0; i < rules.size(); i++) {
CommutativityRule rule = rules.get(i);
- String interfaceNumBefore = Integer
- .toString(semantics.interface2Num.get(rule.method1));
+ Integer method = semantics.interface2Num.get(rule.method1);
+ if (method == null) {
+ System.out.println("Wrong method label in commutativity rule: " + rule.method1);
+ }
+ String interfaceNumBefore = Integer.toString(method);
String interfaceNumAfter = Integer.toString(semantics.interface2Num
.get(rule.method2));
String conditionFuncName = "CommutativityCondition" + i;