return is_write() || is_rmwr();
}
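+/** @return True if this action is a thread yield */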
+bool ModelAction::is_yield() const
+{
+ return type == THREAD_YIELD;
+}
+
bool ModelAction::is_rmwr() const
{
return type == ATOMIC_RMWR;
bool is_uninitialized() const;
bool is_read() const;
bool is_write() const;
+ bool is_yield() const;
bool could_be_write() const;
bool is_rmwr() const;
bool is_rmwc() const;
-/** A no-op, for now */
+/** Inform the model checker that this thread has yielded */
void thrd_yield(void)
{
- //model->switch_to_master(new ModelAction(THREAD_YIELD, std::memory_order_seq_cst, thread_current(), VALUE_NONE));
+ model->switch_to_master(new ModelAction(THREAD_YIELD, std::memory_order_seq_cst, thread_current(), VALUE_NONE));
}
thrd_t thrd_current(void)
params->maxreads = 0;
params->maxfuturedelay = 100;
params->fairwindow = 0;
+ params->yieldon = false;
params->enabledcount = 1;
params->bound = 0;
params->maxfuturevalues = 0;
"-f Specify a fairness window in which actions that are\n"
" enabled sufficiently many times should receive\n"
" priority for execution. Default: %d\n"
+"-y Turn on yield support. Default: %d\n"
"-e Enabled count. Default: %d\n"
"-b Upper length bound. Default: %d\n"
"-v Print verbose execution information.\n"
"-- Program arguments follow.\n\n",
-params->maxreads, params->maxfuturevalues, params->maxfuturedelay, params->expireslop, params->fairwindow, params->enabledcount, params->bound);
+params->maxreads, params->maxfuturevalues, params->maxfuturedelay, params->expireslop, params->fairwindow, params->yieldon, params->enabledcount, params->bound);
exit(EXIT_SUCCESS);
}
static void parse_options(struct model_params *params, int argc, char **argv)
{
- const char *shortopts = "hm:M:s:S:f:e:b:v";
+ const char *shortopts = "hym:M:s:S:f:e:b:v";
int opt;
bool error = false;
while (!error && (opt = getopt(argc, argv, shortopts)) != -1) {
case 'v':
params->verbose = 1;
break;
+ case 'y':
+ params->yieldon = true;
+ break;
default: /* '?' */
error = true;
break;
if (unfair)
continue;
}
+
+ /* See if CHESS-like yield fairness allows this thread to run now */
+ if (model->params.yieldon) {
+ bool unfair = false;
+ for (int t = 0; t < node->get_num_threads(); t++) {
+ thread_id_t tother = int_to_id(t);
+ /* tid must wait while any enabled thread holds yield priority over it */
+ if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
+ unfair = true;
+ break;
+ }
+ }
+ if (unfair)
+ continue;
+ }
+
/* Cache the latest backtracking point */
set_latest_backtrack(prev);
wake_up_sleeping_actions(curr);
+ /* Compute fairness information for CHESS yield algorithm */
+ if (model->params.yieldon) {
+ curr->get_node()->update_yield(scheduler);
+ }
+
/* Add the action to lists before any other model-checking tasks */
if (!second_part_of_rmw)
add_action_to_lists(curr);
struct model_params {
int maxreads;
int maxfuturedelay;
+ bool yieldon; /* enable CHESS-like yield-based fairness support */
unsigned int fairwindow;
unsigned int enabledcount;
unsigned int bound;
relseq_break_writes(),
relseq_break_index(0),
misc_index(0),
- misc_max(0)
+ misc_max(0),
+ yield_data(NULL)
{
ASSERT(act);
act->set_node(this);
}
}
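+/** @return The yield state bits for the thread pair (tid1, tid2); pairs
+ * outside the allocated matrix conservatively read as YIELD_S | YIELD_D */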
+int Node::get_yield_data(int tid1, int tid2) const
+{
+ if (tid1 < num_threads && tid2 < num_threads)
+ return yield_data[YIELD_INDEX(tid1, tid2, num_threads)];
+ else
+ return YIELD_S | YIELD_D;
+}
+
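+/**
+ * Recomputes this node's yield fairness data from the parent node's data,
+ * following the CHESS-style yield algorithm: for each thread pair (u, v)
+ * we track whether v stayed enabled, was disabled by u, or got scheduled
+ * since u's last yield, and a yield by u while v has starved grants v
+ * priority over u.
+ * @param scheduler The Scheduler used to query which threads are enabled
+ */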
+void Node::update_yield(Scheduler *scheduler)
+{
+ yield_data = (int *) model_calloc(1, sizeof(int) * num_threads * num_threads);
+ //handle base case: at the root, mark every pair as already scheduled
+ //and disabled so that an initial yield grants no priority
+ if (parent == NULL) {
+ for(int i = 0; i < num_threads * num_threads; i++) {
+ yield_data[i] = YIELD_S | YIELD_D;
+ }
+ return;
+ }
+ int curr_tid = id_to_int(action->get_tid());
+
+ for(int u = 0; u < num_threads; u++) {
+ for(int v = 0; v < num_threads; v++) {
+ int yield_state = parent->get_yield_data(u, v);
+ bool next_enabled = scheduler->is_enabled(int_to_id(v));
+ bool curr_enabled = parent->is_enabled(int_to_id(v));
+ if (!next_enabled) {
+ //Compute intersection of ES and E: v is disabled now, so it has
+ //not remained continuously enabled since u's last yield
+ yield_state &= ~YIELD_E;
+ //Check to see if the current action just disabled thread v
+ if (u == curr_tid && curr_enabled)
+ yield_state |= YIELD_D;
+ }
+ yield_data[YIELD_INDEX(u, v, num_threads)] = yield_state;
+ }
+ //curr_tid just ran: mark it scheduled (YIELD_S) and clear its pending
+ //priority (YIELD_P), since it has now had a turn
+ yield_data[YIELD_INDEX(u, curr_tid, num_threads)] = (yield_data[YIELD_INDEX(u, curr_tid, num_threads)] & ~YIELD_P) | YIELD_S;
+ }
+ //handle curr.yield(t) part of computation: the current action is a
+ //yield by curr_tid, so decide which threads gain priority over it
+ if (action->is_yield()) {
+ for(int v = 0; v < num_threads; v++) {
+ int yield_state = yield_data[YIELD_INDEX(curr_tid, v, num_threads)];
+ //v stayed enabled (or was disabled by curr_tid) yet was never
+ //scheduled since the previous yield, so v now gains priority
+ if ((yield_state & (YIELD_E | YIELD_D)) && (!(yield_state & YIELD_S)))
+ yield_state |= YIELD_P;
+ //begin a fresh epoch for this pair, keeping only the priority bit
+ yield_state &= YIELD_P;
+ if (scheduler->is_enabled(int_to_id(v))) {
+ yield_state |= YIELD_E;
+ }
+ yield_data[YIELD_INDEX(curr_tid, v, num_threads)] = yield_state;
+ }
+ }
+}
+
/** @brief Node destructor */
Node::~Node()
{
delete action;
if (enabled_array)
model_free(enabled_array);
+ if (yield_data)
+ model_free(yield_data);
}
/** Prints debugging info for the ModelAction associated with this Node */
return fairness[id_to_int(tid)].priority;
}
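+/**
+ * Check yield-based priority between two threads: returns true when the
+ * YIELD_P bit is set for the pair (tid1, tid2), meaning tid1 yielded while
+ * tid2 was enabled but starved, so tid1 should not run before tid2 does.
+ */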
+bool Node::has_priority_over(thread_id_t tid1, thread_id_t tid2) const
+{
+ return get_yield_data(id_to_int(tid1), id_to_int(tid2)) & YIELD_P;
+}
+
/*********************************** read from ********************************/
/**
READ_FROM_NONE,
} read_from_type_t;
+/* Per-pair yield state for the CHESS yield algorithm, indexed by
+ * (tid1, tid2): E - tid2 has stayed enabled since tid1's last yield;
+ * D - tid1 disabled tid2; S - tid2 has been scheduled since that yield;
+ * P - tid2 holds priority, so tid1 may not run while tid2 is enabled. */
+#define YIELD_E 1
+#define YIELD_D 2
+#define YIELD_S 4
+#define YIELD_P 8
+#define YIELD_INDEX(tid1, tid2, num_threads) ((tid1) * (num_threads) + (tid2))
+
+
/**
* @brief A single node in a NodeStack
*
ModelAction * get_action() const { return action; }
bool has_priority(thread_id_t tid) const;
+ void update_yield(Scheduler *);
+ bool has_priority_over(thread_id_t tid1, thread_id_t tid2) const;
int get_num_threads() const { return num_threads; }
/** @return the parent Node to this Node; that is, the action that
* occurred previously in the stack. */
MEMALLOC
private:
void explore(thread_id_t tid);
-
+ int get_yield_data(int tid1, int tid2) const;
bool read_from_past_empty() const;
bool increment_read_from_past();
bool read_from_promise_empty() const;
int misc_index;
int misc_max;
+ int * yield_data;
};
typedef std::vector< Node *, ModelAlloc< Node * > > node_list_t;
Thread * Scheduler::select_next_thread()
{
int old_curr_thread = curr_thread_index;
- bool have_enabled_thread_with_priority = false;
Node *n = model->get_curr_node();
- for (int i = 0; i < enabled_len; i++) {
- thread_id_t tid = int_to_id(i);
- if (n->has_priority(tid)) {
- DEBUG("Node (tid %d) has priority\n", i);
- if (enabled[i] != THREAD_DISABLED)
- have_enabled_thread_with_priority = true;
+ bool have_enabled_thread_with_priority = false;
+ if (model->params.fairwindow != 0) {
+ for (int i = 0; i < enabled_len; i++) {
+ thread_id_t tid = int_to_id(i);
+ if (n->has_priority(tid)) {
+ DEBUG("Node (tid %d) has priority\n", i);
+ if (enabled[i] != THREAD_DISABLED)
+ have_enabled_thread_with_priority = true;
+ }
}
- }
+ }
for (int i = 0; i < enabled_len; i++) {
curr_thread_index = (old_curr_thread + i + 1) % enabled_len;
thread_id_t curr_tid = int_to_id(curr_thread_index);
+ if (model->params.yieldon) {
+ /* Under yield fairness, skip a thread while any other enabled
+ thread holds yield priority over it */
+ bool bad_thread = false;
+ for (int j = 0; j < enabled_len; j++) {
+ thread_id_t tother = int_to_id(j);
+ if ((enabled[j] != THREAD_DISABLED) && n->has_priority_over(curr_tid, tother)) {
+ bad_thread = true;
+ break;
+ }
+ }
+ if (bad_thread)
+ continue;
+ }
+
if (enabled[curr_thread_index] == THREAD_ENABLED &&
(!have_enabled_thread_with_priority || n->has_priority(curr_tid))) {
return model->get_thread(curr_tid);
}
}
+
/* No thread was enabled */
return NULL;
}
--- /dev/null
+#include <stdio.h>
+#include <threads.h>
+#include <stdatomic.h>
+
+#include "librace.h"
+
+#define RW_LOCK_BIAS 0x00100000
+#define WRITE_LOCK_CMP RW_LOCK_BIAS
+
+/** Example implementation of the Linux reader-writer lock, along with a
+ * two-thread test driver... */
+
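+/* The thrd_yield() calls in the spin loops below let the model checker's
+ * yield-based fairness support (enabled with -y) deprioritize a spinning
+ * thread until the thread it is waiting on has had a chance to run. */
+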
+typedef union {
+ atomic_int lock;
+} rwlock_t;
+
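+/* The lock count starts at RW_LOCK_BIAS. Each reader subtracts 1 and a
+ * writer subtracts the entire bias, so the count is positive exactly when
+ * no writer holds the lock and equals RW_LOCK_BIAS when the lock is free. */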
+static inline int read_can_lock(rwlock_t *lock)
+{
+ return atomic_load_explicit(&lock->lock, memory_order_relaxed) > 0;
+}
+
+static inline int write_can_lock(rwlock_t *lock)
+{
+ return atomic_load_explicit(&lock->lock, memory_order_relaxed) == RW_LOCK_BIAS;
+}
+
+static inline void read_lock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ while (priorvalue <= 0) {
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
+ do {
+ priorvalue = atomic_load_explicit(&rw->lock, memory_order_relaxed);
+ if (priorvalue > 0)
+ break;
+ thrd_yield();
+ } while (true);
+ priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ }
+}
+
+static inline void write_lock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ while (priorvalue != RW_LOCK_BIAS) {
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
+ do {
+ priorvalue = atomic_load_explicit(&rw->lock, memory_order_relaxed);
+ if (priorvalue == RW_LOCK_BIAS)
+ break;
+ thrd_yield();
+ } while (true);
+ priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ }
+}
+
+static inline int read_trylock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_acquire);
+ if (priorvalue > 0)
+ return 1;
+
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_relaxed);
+ return 0;
+}
+
+static inline int write_trylock(rwlock_t *rw)
+{
+ int priorvalue = atomic_fetch_sub_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_acquire);
+ if (priorvalue == RW_LOCK_BIAS)
+ return 1;
+
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_relaxed);
+ return 0;
+}
+
+static inline void read_unlock(rwlock_t *rw)
+{
+ atomic_fetch_add_explicit(&rw->lock, 1, memory_order_release);
+}
+
+static inline void write_unlock(rwlock_t *rw)
+{
+ atomic_fetch_add_explicit(&rw->lock, RW_LOCK_BIAS, memory_order_release);
+}
+
+rwlock_t mylock;
+int shareddata;
+
+static void a(void *obj)
+{
+ int i;
+ for(i = 0; i < 2; i++) {
+ if ((i % 2) == 0) {
+ read_lock(&mylock);
+ load_32(&shareddata);
+ read_unlock(&mylock);
+ } else {
+ write_lock(&mylock);
+ store_32(&shareddata,(unsigned int)i);
+ write_unlock(&mylock);
+ }
+ }
+}
+
+int user_main(int argc, char **argv)
+{
+ thrd_t t1, t2;
+ atomic_init(&mylock.lock, RW_LOCK_BIAS);
+
+ thrd_create(&t1, (thrd_start_t)&a, NULL);
+ thrd_create(&t2, (thrd_start_t)&a, NULL);
+
+ thrd_join(t1);
+ thrd_join(t2);
+
+ return 0;
+}