#define ACTION_INITIAL_CLOCK 0
-ModelAction::ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value) :
+/**
+ * @brief Construct a new ModelAction
+ *
+ * @param type The type of action
+ * @param order The memory order of this action. A "don't care" for non-ATOMIC
+ * actions (e.g., THREAD_* or MODEL_* actions).
+ * @param loc The location that this action acts upon
+ * @param value (optional) A value associated with the action (e.g., the value
+ * read or written). Defaults to a given macro constant, for debugging purposes.
+ * @param thread (optional) The Thread in which this action occurred. If NULL
+ * (default), then a Thread is assigned according to the scheduler.
+ */
+ModelAction::ModelAction(action_type_t type, memory_order order, void *loc,
+ uint64_t value, Thread *thread) :
type(type),
order(order),
location(loc),
seq_number(ACTION_INITIAL_CLOCK),
cv(NULL)
{
- Thread *t = thread_current();
+ Thread *t = thread ? thread : thread_current();
this->tid = t->get_id();
}
seq_number = num;
}
+/** @return True if this is a special release sequence fixup action
+ *  (MODEL_FIXUP_RELSEQ), generated by the model checker itself */
+bool ModelAction::is_relseq_fixup() const
+{
+	return type == MODEL_FIXUP_RELSEQ;
+}
+
bool ModelAction::is_mutex_op() const
{
return type == ATOMIC_LOCK || type == ATOMIC_TRYLOCK || type == ATOMIC_UNLOCK;
{
const char *type_str, *mo_str;
switch (this->type) {
+ case MODEL_FIXUP_RELSEQ:
+ type_str = "relseq fixup";
+ break;
case THREAD_CREATE:
type_str = "thread create";
break;
#include "modeltypes.h"
class ClockVector;
+class Thread;
using std::memory_order;
using std::memory_order_relaxed;
/** @brief Represents an action type, identifying one of several types of
* ModelAction */
typedef enum action_type {
+ MODEL_FIXUP_RELSEQ, /**< Special ModelAction: finalize a release
+ * sequence */
THREAD_CREATE, /**< A thread creation action */
THREAD_START, /**< First action in each thread */
THREAD_YIELD, /**< A thread yield action */
*/
class ModelAction {
public:
- ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value = VALUE_NONE);
+ ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value = VALUE_NONE, Thread *thread = NULL);
~ModelAction();
void print() const;
void copy_from_new(ModelAction *newaction);
void set_seq_number(modelclock_t num);
void set_try_lock(bool obtainedlock);
+ bool is_relseq_fixup() const;
bool is_mutex_op() const;
bool is_lock() const;
bool is_trylock() const;
priv = (struct model_snapshot_members *)calloc(1, sizeof(*priv));
/* First thread created will have id INITIAL_THREAD_ID */
priv->next_thread_id = INITIAL_THREAD_ID;
+
+ /* Initialize a model-checker thread, for special ModelActions */
+ model_thread = new Thread(get_next_id());
+ thread_map->put(id_to_int(model_thread->get_id()), model_thread);
}
/** @brief Destructor */
/* The next node will try to read from a different future value. */
tid = next->get_tid();
node_stack->pop_restofstack(2);
+ } else if (nextnode->increment_relseq_break()) {
+ /* The next node will try to resolve a release sequence differently */
+ tid = next->get_tid();
+ node_stack->pop_restofstack(2);
} else {
/* Make a different thread execute for next step */
Node *node = nextnode->get_parent();
return updated;
}
+/**
+ * @brief Process the current action for release sequence fixup activity
+ *
+ * Performs model-checker release sequence fixups for the current action,
+ * forcing a single pending release sequence to break (with a given, potential
+ * "loose" write) or to complete (i.e., synchronize). If a pending release
+ * sequence forms a complete release sequence, then we must perform the fixup
+ * synchronization, mo_graph additions, etc.
+ *
+ * @param curr The current action; must be a release sequence fixup action
+ * @param work_queue The work queue to which to add work items as they are
+ * generated
+ */
+void ModelChecker::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
+{
+	/* The Node holds the chosen "breaking" write (or NULL to synchronize) */
+	const ModelAction *write = curr->get_node()->get_relseq_break();
+	/* Fix up only the most recently pending release sequence; it is
+	 * removed from the pending list here */
+	struct release_seq *sequence = pending_rel_seqs->back();
+	pending_rel_seqs->pop_back();
+	ASSERT(sequence);
+	ModelAction *acquire = sequence->acquire;
+	const ModelAction *rf = sequence->rf;
+	const ModelAction *release = sequence->release;
+	ASSERT(acquire);
+	ASSERT(release);
+	ASSERT(rf);
+	ASSERT(release->same_thread(rf));
+
+	if (write == NULL) {
+		/**
+		 * @todo Forcing a synchronization requires that we set
+		 * modification order constraints. For instance, we can't allow
+		 * a fixup sequence in which two separate read-acquire
+		 * operations read from the same sequence, where the first one
+		 * synchronizes and the other doesn't. Essentially, we can't
+		 * allow any writes to insert themselves between 'release' and
+		 * 'rf'
+		 */
+
+		/* Must synchronize */
+		if (!acquire->synchronize_with(release)) {
+			set_bad_synchronization();
+			return;
+		}
+		/* Re-check all pending release sequences */
+		work_queue->push_back(CheckRelSeqWorkEntry(NULL));
+		/* Re-check act for mo_graph edges */
+		work_queue->push_back(MOEdgeWorkEntry(acquire));
+
+		/* propagate synchronization to later actions */
+		action_list_t::reverse_iterator rit = action_trace->rbegin();
+		for (; (*rit) != acquire; rit++) {
+			ModelAction *propagate = *rit;
+			if (acquire->happens_before(propagate)) {
+				propagate->synchronize_with(acquire);
+				/* Re-check 'propagate' for mo_graph edges */
+				work_queue->push_back(MOEdgeWorkEntry(propagate));
+			}
+		}
+	} else {
+		/* Break release sequence with new edges:
+		 * release --mo--> write --mo--> rf */
+		mo_graph->addEdge(release, write);
+		mo_graph->addEdge(write, rf);
+	}
+
+	/* See if we have realized a data race */
+	if (checkDataRaces())
+		set_assert();
+}
+
/**
* Initialize the current action by performing one or more of the following
* actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
*/
if (newcurr->is_write())
compute_promises(newcurr);
+ else if (newcurr->is_relseq_fixup())
+ compute_relseq_breakwrites(newcurr);
}
return newcurr;
}
if (act->is_mutex_op() && process_mutex(act))
update_all = true;
+ if (act->is_relseq_fixup())
+ process_relseq_fixup(curr, &work_queue);
+
if (update_all)
work_queue.push_back(CheckRelSeqWorkEntry(NULL));
else if (update)
if ((!parnode->backtrack_empty() ||
!currnode->read_from_empty() ||
!currnode->future_value_empty() ||
- !currnode->promise_empty())
+ !currnode->promise_empty() ||
+ !currnode->relseq_break_empty())
&& (!priv->next_backtrack ||
*curr > *priv->next_backtrack)) {
priv->next_backtrack = curr;
th->is_complete())
future_ordered = true;
+ ASSERT(!th->is_model_thread() || future_ordered);
+
for (rit = list->rbegin(); rit != list->rend(); rit++) {
const ModelAction *act = *rit;
/* Reach synchronization -> this thread is complete */
}
}
+/**
+ * Compute the set of writes that may break the current pending release
+ * sequence. This information is extracted from previous release sequence
+ * calculations.
+ *
+ * @param curr The current ModelAction. Must be a release sequence fixup
+ * action.
+ */
+void ModelChecker::compute_relseq_breakwrites(ModelAction *curr)
+{
+	if (pending_rel_seqs->empty())
+		return;
+
+	/* Only the most recently pending release sequence is considered;
+	 * process_relseq_fixup() likewise pops from the back of the list */
+	struct release_seq *pending = pending_rel_seqs->back();
+	for (unsigned int i = 0; i < pending->writes.size(); i++) {
+		const ModelAction *write = pending->writes[i];
+		curr->get_node()->add_relseq_break(write);
+	}
+
+	/* NULL means don't break the sequence; just synchronize */
+	curr->get_node()->add_relseq_break(NULL);
+}
+
/**
* Build up an initial set of all past writes that this 'read' action may read
* from. This set is determined by the clock vector's "happens before"
if (has_asserted())
return false;
- Thread *curr = thread_current();
+ Thread *curr = priv->current_action ? get_thread(priv->current_action) : NULL;
if (curr) {
if (curr->get_state() == THREAD_READY) {
ASSERT(priv->current_action);
DEBUG("(%d, %d)\n", curr ? id_to_int(curr->get_id()) : -1,
next ? id_to_int(next->get_id()) : -1);
+ /*
+ * Launch end-of-execution release sequence fixups only when there are:
+ *
+ * (1) no more user threads to run (or when execution replay chooses
+ * the 'model_thread')
+ * (2) pending release sequences
+ * (3) pending assertions (i.e., data races)
+ * (4) no pending promises
+ */
+ if (!pending_rel_seqs->empty() && (!next || next->is_model_thread()) &&
+ isfinalfeasible() && !unrealizedraces.empty()) {
+		printf("*** WARNING: release sequence fixup action (%zu pending release sequences) ***\n",
+ pending_rel_seqs->size());
+ ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
+ std::memory_order_seq_cst, NULL, VALUE_NONE,
+ model_thread);
+ set_current_action(fixup);
+ return true;
+ }
+
/* next == NULL -> don't take any more steps */
if (!next)
return false;
bool process_write(ModelAction *curr);
bool process_mutex(ModelAction *curr);
bool process_thread_action(ModelAction *curr);
+ void process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue);
bool check_action_enabled(ModelAction *curr);
bool take_step();
void reset_to_initial_state();
bool resolve_promises(ModelAction *curr);
void compute_promises(ModelAction *curr);
+ void compute_relseq_breakwrites(ModelAction *curr);
void check_curr_backtracking(ModelAction * curr);
void add_action_to_lists(ModelAction *act);
* together for efficiency and maintainability. */
struct model_snapshot_members *priv;
+	/** A special model-checker Thread; used for associating with
+	 * model-checker-related ModelActions */
+ Thread *model_thread;
+
/**
* @brief The modification order graph
*
may_read_from(),
read_from_index(0),
future_values(),
- future_index(-1)
+ future_index(-1),
+ relseq_break_writes(),
+ relseq_break_index(0)
{
if (act) {
act->set_node(this);
return false;
}
+/**
+ * Add a write ModelAction to the set of writes that may break the release
+ * sequence. This is used during replay exploration of pending release
+ * sequences. This Node must correspond to a release sequence fixup action.
+ *
+ * @param write The write that may break the release sequence. NULL means we
+ * allow the release sequence to synchronize.
+ */
+void Node::add_relseq_break(const ModelAction *write)
+{
+	/* Alternatives are explored in insertion order via relseq_break_index */
+	relseq_break_writes.push_back(write);
+}
+
+/**
+ * Get the write that may break the current pending release sequence,
+ * according to the replay / divergence pattern.
+ *
+ * @return A write that may break the release sequence. If NULL, that means
+ * the release sequence should not be broken.
+ */
+const ModelAction * Node::get_relseq_break()
+{
+	if (relseq_break_index < (int)relseq_break_writes.size())
+		return relseq_break_writes[relseq_break_index];
+	else
+		return NULL;
+	/* Note: an out-of-range index yields NULL, the same value as the
+	 * explicit NULL entry appended by add_relseq_break(NULL) — both mean
+	 * "synchronize, don't break" */
+}
+
+/**
+ * Increments the index into the relseq_break_writes set to explore the next
+ * item.
+ * @return Returns false if we have explored all values.
+ */
+bool Node::increment_relseq_break()
+{
+	DBG();
+	/* NOTE(review): clearing promises discards any promise-exploration
+	 * state at this node when diverging here — presumably it must be
+	 * recomputed on replay; confirm this mirrors the other increment_*
+	 * methods' handling */
+	promises.clear();
+	if (relseq_break_index < ((int)relseq_break_writes.size())) {
+		relseq_break_index++;
+		return (relseq_break_index < ((int)relseq_break_writes.size()));
+	}
+	return false;
+}
+
+/**
+ * @return True if all writes that may break the release sequence have been
+ * explored
+ */
+bool Node::relseq_break_empty() {
+	/* "Empty" once positioned at the final alternative: index + 1 >= size
+	 * means no further entries remain beyond the current one. NOTE(review):
+	 * this assumes the NULL ("just synchronize") entry is always appended
+	 * last — confirm against add_relseq_break() callers */
+	return ((relseq_break_index + 1) >= ((int)relseq_break_writes.size()));
+}
+
void Node::explore(thread_id_t tid)
{
int i = id_to_int(tid);
bool increment_promise();
bool promise_empty();
+ void add_relseq_break(const ModelAction *write);
+ const ModelAction * get_relseq_break();
+ bool increment_relseq_break();
+ bool relseq_break_empty();
+
void print();
void print_may_read_from();
std::vector< struct future_value, ModelAlloc<struct future_value> > future_values;
std::vector< promise_t, ModelAlloc<promise_t> > promises;
int future_index;
+
+ std::vector< const ModelAction *, ModelAlloc<const ModelAction *> > relseq_break_writes;
+ int relseq_break_index;
};
typedef std::vector< Node *, ModelAlloc< Node * > > node_list_t;
void Scheduler::add_thread(Thread *t)
{
	DEBUG("thread %d\n", id_to_int(t->get_id()));
+	/* The special model-checker thread is bookkeeping-only and must never
+	 * be handed to the scheduler */
+	ASSERT(!t->is_model_thread());
	set_enabled(t, THREAD_ENABLED);
}
*/
void Scheduler::wake(Thread *t)
{
+	/* The model-checker thread never sleeps, so it can never be woken */
+	ASSERT(!t->is_model_thread());
+	/* NOTE(review): setting THREAD_DISABLED while marking the thread
+	 * THREAD_READY looks inverted for a wake() (more like sleep()
+	 * semantics) — confirm intended */
	set_enabled(t, THREAD_DISABLED);
	t->set_state(THREAD_READY);
}
return NULL;
}
}
+ } else if (t->is_model_thread()) {
+ /* model-checker threads never run */
+ t = NULL;
} else {
curr_thread_index = id_to_int(t->get_id());
}
*/
Thread * Scheduler::get_current_thread() const
{
+	/* The special model-checker thread is never scheduled, so it can
+	 * never be the "current" thread */
+	ASSERT(!current || !current->is_model_thread());
	return current;
}
#include "stdatomic.h"
atomic_int x;
+int var = 0;
static void a(void *obj)
{
+	/* Plain (modeled, non-atomic) store to the shared 'var' — presumably
+	 * added to exercise data-race detection alongside the release
+	 * sequence below; confirm against the test's intent */
+	store_32(&var, 1);
	atomic_store_explicit(&x, *((int *)obj), memory_order_release);
	atomic_store_explicit(&x, *((int *)obj) + 1, memory_order_relaxed);
}
{
int r = atomic_load_explicit(&x, memory_order_acquire);
printf("r = %u\n", r);
+ store_32(&var, 3);
}
static void b1(void *obj)
int i = 7;
int r = atomic_load_explicit(&x, memory_order_acquire);
printf("r = %u\n", r);
+ store_32(&var, 2);
thrd_create(&t3, (thrd_start_t)&a, &i);
thrd_create(&t4, (thrd_start_t)&b2, NULL);
thrd_join(t3);
#include "stdatomic.h"
atomic_int x;
+int var = 0;
static void a(void *obj)
{
+	/* Plain (modeled, non-atomic) store to the shared 'var' — presumably
+	 * added to exercise data-race detection alongside the release
+	 * sequence below; confirm against the test's intent */
+	store_32(&var, 1);
	atomic_store_explicit(&x, 1, memory_order_release);
	atomic_store_explicit(&x, 42, memory_order_relaxed);
}
{
int r = atomic_load_explicit(&x, memory_order_acquire);
printf("r = %u\n", r);
+ printf("load %d\n", load_32(&var));
}
static void c(void *obj)
* @brief Thread functions.
*/
+#include <string.h>
+
#include "libthreads.h"
#include "common.h"
#include "threads.h"
}
}
+/**
+ * @brief Construct a new model-checker Thread
+ *
+ * A model-checker Thread is used for accounting purposes only. It will never
+ * have its own stack, and it should never be inserted into the Scheduler.
+ *
+ * @param tid The thread ID to assign
+ */
+Thread::Thread(thread_id_t tid) :
+	parent(NULL),
+	creation(NULL),
+	pending(NULL),
+	start_routine(NULL),
+	arg(NULL),
+	stack(NULL),
+	user_thread(NULL),
+	id(tid),
+	state(THREAD_READY), /* NOTE: this thread never executes, so READY is
+			      * only nominal — confirm no code depends on it */
+	wait_list(),
+	last_action_val(0),
+	model_thread(true)
+{
+	/* Zero the execution context; since this Thread has no stack and
+	 * never runs, the context is never used */
+	memset(&context, 0, sizeof(context));
+}
+
/**
* Construct a new thread.
* @param t The thread identifier of the newly created thread.
user_thread(t),
state(THREAD_CREATED),
wait_list(),
- last_action_val(VALUE_NONE)
+ last_action_val(VALUE_NONE),
+ model_thread(false)
{
int ret;
/** @brief A Thread is created for each user-space thread */
class Thread {
public:
+ Thread(thread_id_t tid);
Thread(thrd_t *t, void (*func)(void *), void *a);
~Thread();
void complete();
return ret;
}
+ bool is_model_thread() { return model_thread; }
+
friend void thread_startup();
SNAPSHOTALLOC
* @see Thread::get_return_value()
*/
uint64_t last_action_val;
+
+ /** @brief Is this Thread a special model-checker thread? */
+ const bool model_thread;
};
Thread * thread_current();