* (default), then a Thread is assigned according to the scheduler.
*/
ModelAction::ModelAction(action_type_t type, memory_order order, void *loc,
- uint64_t value, Thread *thread) :
+ uint64_t value, Thread *thread) :
type(type),
order(order),
original_order(order),
* (default), then a Thread is assigned according to the scheduler.
*/
ModelAction::ModelAction(action_type_t type, memory_order order, void *loc,
- uint64_t value, int size) :
+ uint64_t value, int size) :
type(type),
order(order),
original_order(order),
*/
/*
- if (cv)
- delete cv; */
+ if (cv)
+ delete cv; */
}
int ModelAction::getSize() const {
- return size;
+ return size;
}
void ModelAction::copy_from_new(ModelAction *newaction)
ASSERT(is_read());
if (reads_from)
return reads_from->get_write_value();
- return VALUE_NONE; /* Only for new actions with no reads-from */
+ return VALUE_NONE; /* Only for new actions with no reads-from */
}
/**
reads_from = act;
- if (act->is_uninitialized()) { // WL
+ if (act->is_uninitialized()) { // WL
uint64_t val = *((uint64_t *) location);
ModelAction * act_initialized = (ModelAction *)act;
act_initialized->set_value(val);
// disabled by WL, because LLVM IR is unable to detect atomic init
/* model->assert_bug("May read from uninitialized atomic:\n"
- " action %d, thread %d, location %p (%s, %s)",
- seq_number, id_to_int(tid), location,
- get_type_str(), get_mo_str());
-*/
+ " action %d, thread %d, location %p (%s, %s)",
+ seq_number, id_to_int(tid), location,
+ get_type_str(), get_mo_str());
+ */
}
}
const char * ModelAction::get_type_str() const
{
switch (this->type) {
- case THREAD_CREATE: return "thread create";
- case THREAD_START: return "thread start";
- case THREAD_YIELD: return "thread yield";
- case THREAD_JOIN: return "thread join";
- case THREAD_FINISH: return "thread finish";
-
- case PTHREAD_CREATE: return "pthread create";
- case PTHREAD_JOIN: return "pthread join";
-
- case ATOMIC_UNINIT: return "uninitialized";
- case ATOMIC_READ: return "atomic read";
- case ATOMIC_WRITE: return "atomic write";
- case ATOMIC_RMW: return "atomic rmw";
- case ATOMIC_FENCE: return "fence";
- case ATOMIC_RMWR: return "atomic rmwr";
- case ATOMIC_RMWRCAS: return "atomic rmwrcas";
- case ATOMIC_RMWC: return "atomic rmwc";
- case ATOMIC_INIT: return "init atomic";
- case ATOMIC_LOCK: return "lock";
- case ATOMIC_UNLOCK: return "unlock";
- case ATOMIC_TRYLOCK: return "trylock";
- case ATOMIC_WAIT: return "wait";
- case ATOMIC_NOTIFY_ONE: return "notify one";
- case ATOMIC_NOTIFY_ALL: return "notify all";
- case ATOMIC_ANNOTATION: return "annotation";
- default: return "unknown type";
+ case THREAD_CREATE: return "thread create";
+ case THREAD_START: return "thread start";
+ case THREAD_YIELD: return "thread yield";
+ case THREAD_JOIN: return "thread join";
+ case THREAD_FINISH: return "thread finish";
+
+ case PTHREAD_CREATE: return "pthread create";
+ case PTHREAD_JOIN: return "pthread join";
+
+ case ATOMIC_UNINIT: return "uninitialized";
+ case ATOMIC_READ: return "atomic read";
+ case ATOMIC_WRITE: return "atomic write";
+ case ATOMIC_RMW: return "atomic rmw";
+ case ATOMIC_FENCE: return "fence";
+ case ATOMIC_RMWR: return "atomic rmwr";
+ case ATOMIC_RMWRCAS: return "atomic rmwrcas";
+ case ATOMIC_RMWC: return "atomic rmwc";
+ case ATOMIC_INIT: return "init atomic";
+ case ATOMIC_LOCK: return "lock";
+ case ATOMIC_UNLOCK: return "unlock";
+ case ATOMIC_TRYLOCK: return "trylock";
+ case ATOMIC_WAIT: return "wait";
+ case ATOMIC_NOTIFY_ONE: return "notify one";
+ case ATOMIC_NOTIFY_ALL: return "notify all";
+ case ATOMIC_ANNOTATION: return "annotation";
+ default: return "unknown type";
};
}
const char * ModelAction::get_mo_str() const
{
switch (this->order) {
- case std::memory_order_relaxed: return "relaxed";
- case std::memory_order_acquire: return "acquire";
- case std::memory_order_release: return "release";
- case std::memory_order_acq_rel: return "acq_rel";
- case std::memory_order_seq_cst: return "seq_cst";
- default: return "unknown";
+ case std::memory_order_relaxed: return "relaxed";
+ case std::memory_order_acquire: return "acquire";
+ case std::memory_order_release: return "release";
+ case std::memory_order_acq_rel: return "acq_rel";
+ case std::memory_order_seq_cst: return "seq_cst";
+ default: return "unknown";
}
}
const char *type_str = get_type_str(), *mo_str = get_mo_str();
model_print("%-4d %-2d %-14s %7s %14p %-#18" PRIx64,
- seq_number, id_to_int(tid), type_str, mo_str, location, get_return_value());
+ seq_number, id_to_int(tid), type_str, mo_str, location, get_return_value());
if (is_read()) {
if (reads_from)
model_print(" %-3d", reads_from->get_seq_number());
hash ^= id_to_int(tid) << 6;
if (is_read()) {
- if (reads_from)
- hash ^= reads_from->get_seq_number();
- hash ^= get_reads_from_value();
+ if (reads_from)
+ hash ^= reads_from->get_seq_number();
+ hash ^= get_reads_from_value();
}
return hash;
}
#include "classlist.h"
namespace cdsc {
- class mutex;
+class mutex;
}
using std::memory_order;
/** @brief Represents an action type, identifying one of several types of
* ModelAction */
typedef enum action_type {
- THREAD_CREATE, /**< A thread creation action */
- THREAD_START, /**< First action in each thread */
- THREAD_YIELD, /**< A thread yield action */
- THREAD_JOIN, /**< A thread join action */
- THREAD_FINISH, /**< A thread completion action */
- PTHREAD_CREATE, /**< A pthread creation action */
- PTHREAD_JOIN, /**< A pthread join action */
-
- ATOMIC_UNINIT, /**< Represents an uninitialized atomic */
- ATOMIC_READ, /**< An atomic read action */
- ATOMIC_WRITE, /**< An atomic write action */
- ATOMIC_RMWR, /**< The read part of an atomic RMW action */
- ATOMIC_RMWRCAS, /**< The read part of an atomic RMW action */
- ATOMIC_RMW, /**< The write part of an atomic RMW action */
- ATOMIC_RMWC, /**< Convert an atomic RMW action into a READ */
- ATOMIC_INIT, /**< Initialization of an atomic object (e.g.,
- * atomic_init()) */
- ATOMIC_FENCE, /**< A fence action */
- ATOMIC_LOCK, /**< A lock action */
- ATOMIC_TRYLOCK, /**< A trylock action */
- ATOMIC_UNLOCK, /**< An unlock action */
- ATOMIC_NOTIFY_ONE, /**< A notify_one action */
- ATOMIC_NOTIFY_ALL, /**< A notify all action */
- ATOMIC_WAIT, /**< A wait action */
- ATOMIC_ANNOTATION, /**< An annotation action to pass information
- to a trace analysis */
+ THREAD_CREATE, /**< A thread creation action */
+ THREAD_START, /**< First action in each thread */
+ THREAD_YIELD, /**< A thread yield action */
+ THREAD_JOIN, /**< A thread join action */
+ THREAD_FINISH, /**< A thread completion action */
+ PTHREAD_CREATE, /**< A pthread creation action */
+ PTHREAD_JOIN, /**< A pthread join action */
+
+ ATOMIC_UNINIT, /**< Represents an uninitialized atomic */
+ ATOMIC_READ, /**< An atomic read action */
+ ATOMIC_WRITE, /**< An atomic write action */
+ ATOMIC_RMWR, /**< The read part of an atomic RMW action */
+ ATOMIC_RMWRCAS, /**< The read part of an atomic RMW action */
+ ATOMIC_RMW, /**< The write part of an atomic RMW action */
+ ATOMIC_RMWC, /**< Convert an atomic RMW action into a READ */
+ ATOMIC_INIT, /**< Initialization of an atomic object (e.g.,
+ * atomic_init()) */
+ ATOMIC_FENCE, /**< A fence action */
+ ATOMIC_LOCK, /**< A lock action */
+ ATOMIC_TRYLOCK, /**< A trylock action */
+ ATOMIC_UNLOCK, /**< An unlock action */
+ ATOMIC_NOTIFY_ONE, /**< A notify_one action */
+ ATOMIC_NOTIFY_ALL, /**< A notify all action */
+ ATOMIC_WAIT, /**< A wait action */
+ ATOMIC_ANNOTATION, /**< An annotation action to pass information
+ to a trace analysis */
NOOP
} action_type_t;
class ModelAction {
public:
ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value = VALUE_NONE, Thread *thread = NULL);
- ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value, int size);
+ ModelAction(action_type_t type, memory_order order, void *loc, uint64_t value, int size);
~ModelAction();
void print() const;
bool is_yield() const;
bool could_be_write() const;
bool is_rmwr() const;
- bool is_rmwrcas() const;
+ bool is_rmwrcas() const;
bool is_rmwc() const;
bool is_rmw() const;
bool is_fence() const;
bool same_thread(const ModelAction *act) const;
bool is_conflicting_lock(const ModelAction *act) const;
bool could_synchronize_with(const ModelAction *act) const;
- int getSize() const;
-
+ int getSize() const;
+
Thread * get_thread_operand() const;
void create_cv(const ModelAction *parent = NULL);
/* to accommodate pthread create and join */
Thread * thread_operand;
- void set_thread_operand(Thread *th) { thread_operand = th; }
+ void set_thread_operand(Thread *th) { thread_operand = th; }
private:
const char * get_type_str() const;
/** @brief The value written (for write or RMW; undefined for read) */
uint64_t value;
- union {
- /**
- * @brief The store that this action reads from
- *
- * Only valid for reads
- */
- const ModelAction *reads_from;
- int size;
- };
+ union {
+ /**
+ * @brief The store that this action reads from
+ *
+ * Only valid for reads
+ */
+ const ModelAction *reads_from;
+ int size;
+ };
/** @brief The last fence release from the same thread */
const ModelAction *last_fence_release;
ClockVector *cv;
};
-#endif /* __ACTION_H__ */
+#endif/* __ACTION_H__ */
msg = (char *)snapshot_malloc(strlen(fmt) + strlen(str));
sprintf(msg, fmt, str);
}
- ~bug_message() { if (msg) snapshot_free(msg); }
+ ~bug_message() { if (msg) snapshot_free(msg);}
char *msg;
void print() { model_print("%s", msg); }
SNAPSHOTALLOC
};
-#endif /* __BUGMESSAGE_H__ */
+#endif/* __BUGMESSAGE_H__ */
bool changed = false;
if (cv->num_threads > num_threads) {
clock = (modelclock_t *)snapshot_realloc(clock, cv->num_threads * sizeof(modelclock_t));
- for (int i = num_threads; i < cv->num_threads; i++)
+ for (int i = num_threads;i < cv->num_threads;i++)
clock[i] = 0;
num_threads = cv->num_threads;
}
/* Element-wise maximum */
- for (int i = 0; i < cv->num_threads; i++)
+ for (int i = 0;i < cv->num_threads;i++)
if (cv->clock[i] > clock[i]) {
clock[i] = cv->clock[i];
changed = true;
}
-
+
return changed;
}
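/*
 * A worked sketch of the merge above, with made-up clock values: merging
 * this = (2, 7) with cv = (5, 3) grows component 0, yielding (5, 7) and
 * returning true; merging (5, 7) with (4, 7) changes nothing and returns
 * false.
 */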
{
int i;
model_print("(");
- for (i = 0; i < num_threads; i++)
+ for (i = 0;i < num_threads;i++)
model_print("%2u%s", clock[i], (i == num_threads - 1) ? ")\n" : ", ");
}
int num_threads;
};
-#endif /* __CLOCKVECTOR_H__ */
+#endif/* __CLOCKVECTOR_H__ */
* of the RMW action w/o a write.
*/
uint64_t model_rmwrcas_action(void *obj, memory_order ord, uint64_t oldval, int size) {
- return model->switch_to_master(new ModelAction(ATOMIC_RMWRCAS, ord, obj, oldval, size));
+ return model->switch_to_master(new ModelAction(ATOMIC_RMWRCAS, ord, obj, oldval, size));
}
/** Performs the write part of a RMW action. */
}
// cds atomic loads
-uint8_t cds_atomic_load8(void * obj, int atomic_index) {
+uint8_t cds_atomic_load8(void * obj, int atomic_index) {
return (uint8_t) ( model->switch_to_master(new ModelAction(ATOMIC_READ, orders[atomic_index], obj)) );
}
uint16_t cds_atomic_load16(void * obj, int atomic_index) {
// cds atomic stores
void cds_atomic_store8(void * obj, int atomic_index, uint8_t val) {
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
+ model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store16(void * obj, int atomic_index, uint16_t val) {
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
+ model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store32(void * obj, int atomic_index, uint32_t val) {
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
+ model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, (uint64_t) val));
}
void cds_atomic_store64(void * obj, int atomic_index, uint64_t val) {
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, val));
+ model->switch_to_master(new ModelAction(ATOMIC_WRITE, orders[atomic_index], obj, val));
}
/*
-#define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val ) \
-({ \
- uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index); \
- uint##size##_t _copy = _old; \
- _copy __op__ ( uint##size##_t ) _val; \
- model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); \
- return _old; \
-})*/
+ #define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val ) \
+ ({ \
+ uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index); \
+ uint##size##_t _copy = _old; \
+ _copy __op__ ( uint##size##_t ) _val; \
+ model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); \
+ return _old; \
+ })*/
#define _ATOMIC_RMW_(__op__, size, addr, atomic_index, val ) \
-({ \
- uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index); \
- uint##size##_t _copy = _old; \
- uint##size##_t _val = val; \
- _copy __op__ _val; \
- model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); \
- return _old; \
-})
+ ({ \
+ uint ## size ## _t _old = model_rmwr_action_helper(addr, atomic_index); \
+ uint ## size ## _t _copy = _old; \
+ uint ## size ## _t _val = val; \
+ _copy __op__ _val; \
+ model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); \
+ return _old; \
+ })
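/*
 * For reference, a hand expansion of the macro above (illustrative, not
 * part of the build): _ATOMIC_RMW_( +=, 8, addr, atomic_index, val )
 * becomes roughly
 *
 *   uint8_t _old = model_rmwr_action_helper(addr, atomic_index);   // read half (ATOMIC_RMWR)
 *   uint8_t _copy = _old;
 *   uint8_t _val = val;
 *   _copy += _val;                                                 // apply the operation
 *   model_rmw_action_helper(addr, atomic_index, (uint64_t) _copy); // write half (ATOMIC_RMW)
 *   return _old;                                                   // fetch_add returns the old value
 *
 * which is exactly the shape of each cds_atomic_* body below.
 */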
// cds atomic exchange
uint8_t cds_atomic_exchange8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( = , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( =, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_exchange16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( = , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( =, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_exchange32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( = , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( =, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_exchange64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( = , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( =, 64, addr, atomic_index, val);
}
// cds atomic fetch add
uint8_t cds_atomic_fetch_add8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( += , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( +=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_add16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( += , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( +=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_add32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( += , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( +=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_add64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( += , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( +=, 64, addr, atomic_index, val);
}
// cds atomic fetch sub
uint8_t cds_atomic_fetch_sub8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( -= , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( -=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_sub16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( -= , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( -=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_sub32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( -= , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( -=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_sub64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( -= , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( -=, 64, addr, atomic_index, val);
}
// cds atomic fetch and
uint8_t cds_atomic_fetch_and8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( &= , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( &=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_and16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( &= , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( &=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_and32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( &= , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( &=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_and64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( &= , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( &=, 64, addr, atomic_index, val);
}
// cds atomic fetch or
uint8_t cds_atomic_fetch_or8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( |= , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( |=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_or16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( |= , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( |=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_or32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( |= , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( |=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_or64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( |= , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( |=, 64, addr, atomic_index, val);
}
// cds atomic fetch xor
uint8_t cds_atomic_fetch_xor8(void* addr, int atomic_index, uint8_t val) {
- _ATOMIC_RMW_( ^= , 8, addr, atomic_index, val);
+ _ATOMIC_RMW_( ^=, 8, addr, atomic_index, val);
}
uint16_t cds_atomic_fetch_xor16(void* addr, int atomic_index, uint16_t val) {
- _ATOMIC_RMW_( ^= , 16, addr, atomic_index, val);
+ _ATOMIC_RMW_( ^=, 16, addr, atomic_index, val);
}
uint32_t cds_atomic_fetch_xor32(void* addr, int atomic_index, uint32_t val) {
- _ATOMIC_RMW_( ^= , 32, addr, atomic_index, val);
+ _ATOMIC_RMW_( ^=, 32, addr, atomic_index, val);
}
uint64_t cds_atomic_fetch_xor64(void* addr, int atomic_index, uint64_t val) {
- _ATOMIC_RMW_( ^= , 64, addr, atomic_index, val);
+ _ATOMIC_RMW_( ^=, 64, addr, atomic_index, val);
}
// cds atomic compare and exchange
-// In order to accomodate the LLVM PASS, the return values are not true or false.
+// To accommodate the LLVM pass, the return values are not true or false.
#define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
#define _ATOMIC_CMPSWP_(size, addr, expected, desired, atomic_index) \
-({ \
- uint##size##_t _desired = desired; \
- uint##size##_t _expected = expected; \
- uint##size##_t _old = model_rmwr_action_helper(addr, atomic_index); \
- if (_old == _expected ) { \
- model_rmw_action_helper(addr, atomic_index, (uint64_t) _desired ); return _expected; } \
- else { \
- model_rmwc_action_helper(addr, atomic_index); _expected = _old; return _old; } \
-})
+ ({ \
+ uint ## size ## _t _desired = desired; \
+ uint ## size ## _t _expected = expected; \
+ uint ## size ## _t _old = model_rmwr_action_helper(addr, atomic_index); \
+ if (_old == _expected ) { \
+ model_rmw_action_helper(addr, atomic_index, (uint64_t) _desired ); return _expected; } \
+ else { \
+ model_rmwc_action_helper(addr, atomic_index); _expected = _old; return _old; } \
+ })
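/*
 * Hand expansion of the CAS macro for size 8 (illustrative only). Note
 * that, per the comment above, success returns _expected and failure
 * returns the observed _old value rather than a bool:
 *
 *   uint8_t _desired = desired;
 *   uint8_t _expected = expected;
 *   uint8_t _old = model_rmwr_action_helper(addr, atomic_index);
 *   if (_old == _expected) {       // success: commit desired
 *           model_rmw_action_helper(addr, atomic_index, (uint64_t) _desired);
 *           return _expected;
 *   } else {                       // failure: demote the RMW to a read
 *           model_rmwc_action_helper(addr, atomic_index);
 *           _expected = _old;
 *           return _old;
 *   }
 */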
// expected is supposed to be a pointer to an address, but the CmpOperand
-// extracted from LLVM IR is an integer type.
+// extracted from LLVM IR is an integer type.
-uint8_t cds_atomic_compare_exchange8(void* addr, uint8_t expected,
- uint8_t desired, int atomic_index_succ, int atomic_index_fail ) {
- _ATOMIC_CMPSWP_(8, addr, expected, desired, atomic_index_succ );
+uint8_t cds_atomic_compare_exchange8(void* addr, uint8_t expected,
+ uint8_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(8, addr, expected, desired, atomic_index_succ );
}
uint16_t cds_atomic_compare_exchange16(void* addr, uint16_t expected,
- uint16_t desired, int atomic_index_succ, int atomic_index_fail ) {
- _ATOMIC_CMPSWP_(16, addr, expected, desired, atomic_index_succ );
+ uint16_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(16, addr, expected, desired, atomic_index_succ );
}
-uint32_t cds_atomic_compare_exchange32(void* addr, uint32_t expected,
- uint32_t desired, int atomic_index_succ, int atomic_index_fail ) {
- _ATOMIC_CMPSWP_(32, addr, expected, desired, atomic_index_succ );
+uint32_t cds_atomic_compare_exchange32(void* addr, uint32_t expected,
+ uint32_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(32, addr, expected, desired, atomic_index_succ );
}
-uint64_t cds_atomic_compare_exchange64(void* addr, uint64_t expected,
- uint64_t desired, int atomic_index_succ, int atomic_index_fail ) {
- _ATOMIC_CMPSWP_(64, addr, expected, desired, atomic_index_succ );
+uint64_t cds_atomic_compare_exchange64(void* addr, uint64_t expected,
+ uint64_t desired, int atomic_index_succ, int atomic_index_fail ) {
+ _ATOMIC_CMPSWP_(64, addr, expected, desired, atomic_index_succ );
}
// cds atomic thread fence
}
/*
-#define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
+ #define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
__typeof__(__e__) __q__ = (__e__); \
__typeof__(__m__) __v__ = (__m__); \
else { model_rmwc_action((void *)__p__, __x__); *__q__ = __t__; __r__ = false;} \
__r__; })
-#define _ATOMIC_FENCE_( __x__ ) \
+ #define _ATOMIC_FENCE_( __x__ ) \
({ model_fence_action(__x__);})
-*/
+ */
/*
-#define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
+ #define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ ) \
({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__); \
__typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
__typeof__(__m__) __v__ = (__m__); \
__copy__ __o__ __v__; \
model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__); \
__old__ = __old__; Silence clang (-Wunused-value) \
- })
-*/
+ })
+ */
model_print("\nDumping stack trace (%d frames):\n", size);
- for (i = 0; i < size; i++)
+ for (i = 0;i < size;i++)
model_print("\t%s\n", strings[i]);
free(strings);
-#endif /* CONFIG_STACKTRACE */
+#endif/* CONFIG_STACKTRACE */
}
void assert_hook(void)
if (!expr) {
char msg[100];
sprintf(msg, "Program has hit assertion in file %s at line %d\n",
- file, line);
+ file, line);
model->assert_user_bug(msg);
}
}
#ifndef CONFIG_DEBUG
-static int fd_user_out; /**< @brief File descriptor from which to read user program output */
+static int fd_user_out; /**< @brief File descriptor from which to read user program output */
/**
* @brief Setup output redirecting
{
fflush(stdout);
char buf[200];
- while (read_to_buf(fd_user_out, buf, sizeof(buf)));
+ while (read_to_buf(fd_user_out, buf, sizeof(buf))) ;
}
/** @brief Print out any pending program output */
model_print("---- END PROGRAM OUTPUT ----\n");
}
-#endif /* ! CONFIG_DEBUG */
+#endif/* ! CONFIG_DEBUG */
extern int model_out;
extern int switch_alloc;
-#define model_print(fmt, ...) do { switch_alloc = 1; dprintf(model_out, fmt, ##__VA_ARGS__); switch_alloc = 0; } while (0)
+#define model_print(fmt, ...) do { switch_alloc = 1; dprintf(model_out, fmt, ## __VA_ARGS__); switch_alloc = 0; } while (0)
#ifdef CONFIG_DEBUG
-#define DEBUG(fmt, ...) do { model_print("*** %15s:%-4d %25s() *** " fmt, __FILE__, __LINE__, __func__, ##__VA_ARGS__); } while (0)
+#define DEBUG(fmt, ...) do { model_print("*** %15s:%-4d %25s() *** " fmt, __FILE__, __LINE__, __func__, ## __VA_ARGS__); } while (0)
#define DBG() DEBUG("\n")
#define DBG_ENABLED() (1)
#else
#ifdef CONFIG_ASSERT
#define ASSERT(expr) \
-do { \
- if (!(expr)) { \
- fprintf(stderr, "Error: assertion failed in %s at line %d\n", __FILE__, __LINE__); \
- /* print_trace(); // Trace printing may cause dynamic memory allocation */ \
- assert_hook(); \
- exit(EXIT_FAILURE); \
- } \
-} while (0)
+ do { \
+ if (!(expr)) { \
+ fprintf(stderr, "Error: assertion failed in %s at line %d\n", __FILE__, __LINE__); \
+ /* print_trace(); // Trace printing may cause dynamic memory allocation */ \
+ assert_hook(); \
+ exit(EXIT_FAILURE); \
+ } \
+ } while (0)
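/*
 * Usage sketch: ASSERT(ptr != NULL); on failure prints the file and line
 * to stderr, calls assert_hook() (handy as a breakpoint target), and
 * exits instead of raising SIGABRT the way plain assert() would.
 */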
#else
#define ASSERT(expr) \
do { } while (0)
-#endif /* CONFIG_ASSERT */
+#endif/* CONFIG_ASSERT */
#define error_msg(...) fprintf(stderr, "Error: " __VA_ARGS__)
void print_trace(void);
-#endif /* __COMMON_H__ */
+#endif/* __COMMON_H__ */
namespace cdsc {
condition_variable::condition_variable() {
-
+
}
condition_variable::~condition_variable() {
-
+
}
void condition_variable::notify_one() {
/** Turn on debugging. */
/* #ifndef CONFIG_DEBUG
- #define CONFIG_DEBUG
- #endif
+ #define CONFIG_DEBUG
+ #endif
- #ifndef CONFIG_ASSERT
- #define CONFIG_ASSERT
- #endif
-*/
+ #ifndef CONFIG_ASSERT
+ #define CONFIG_ASSERT
+ #endif
+ */
/** Turn on support for dumping cyclegraphs as dot files at each
* printed summary.*/
#else
#define BIT48 0
#endif
-#endif /* BIT48 */
+#endif/* BIT48 */
/** Snapshotting configurables */
-/**
+/**
* If USE_MPROTECT_SNAPSHOT=2, then snapshot by tuned mmap() algorithm
* If USE_MPROTECT_SNAPSHOT=1, then snapshot by using mmap() and mprotect()
* If USE_MPROTECT_SNAPSHOT=0, then snapshot by using fork() */
return 0;
}
-#endif /* MAC */
+#endif/* MAC */
int model_swapcontext(ucontext_t *oucp, ucontext_t *ucp);
-#else /* !MAC */
+#else /* !MAC */
static inline int model_swapcontext(ucontext_t *oucp, ucontext_t *ucp)
-{
+{
return swapcontext(oucp, ucp);
}
-#endif /* !MAC */
+#endif/* !MAC */
-#endif /* __CONTEXT_H__ */
+#endif/* __CONTEXT_H__ */
if (!hasCycles)
hasCycles = checkReachable(tonode, fromnode);
} else
- return false; /* No new edge */
+ return false; /* No new edge */
/*
* If the fromnode has a rmwnode that is not the tonode, we should
* (2) the fromnode is the new node and therefore it should not
* have any outgoing edges.
*/
- for (unsigned int i = 0; i < fromnode->getNumEdges(); i++) {
+ for (unsigned int i = 0;i < fromnode->getNumEdges();i++) {
CycleNode *tonode = fromnode->getEdge(i);
if (tonode != rmwnode) {
if (rmwnode->addEdge(tonode))
static void print_node(FILE *file, const CycleNode *node, int label)
{
- const ModelAction *act = node->getAction();
- modelclock_t idx = act->get_seq_number();
- fprintf(file, "N%u", idx);
- if (label)
- fprintf(file, " [label=\"N%u, T%u\"]", idx, act->get_tid());
+ const ModelAction *act = node->getAction();
+ modelclock_t idx = act->get_seq_number();
+ fprintf(file, "N%u", idx);
+ if (label)
+ fprintf(file, " [label=\"N%u, T%u\"]", idx, act->get_tid());
}
static void print_edge(FILE *file, const CycleNode *from, const CycleNode *to, const char *prop)
void CycleGraph::dumpNodes(FILE *file) const
{
- for (unsigned int i = 0; i < nodeList.size(); i++) {
+ for (unsigned int i = 0;i < nodeList.size();i++) {
CycleNode *n = nodeList[i];
print_node(file, n, 1);
fprintf(file, ";\n");
if (n->getRMW())
print_edge(file, n, n->getRMW(), "style=dotted");
- for (unsigned int j = 0; j < n->getNumEdges(); j++)
+ for (unsigned int j = 0;j < n->getNumEdges();j++)
print_edge(file, n, n->getEdge(j), NULL);
}
}
queue->pop_back();
if (node == to)
return true;
- for (unsigned int i = 0; i < node->getNumEdges(); i++) {
+ for (unsigned int i = 0;i < node->getNumEdges();i++) {
CycleNode *next = node->getEdge(i);
if (!discovered->contains(next)) {
discovered->put(next, next);
}
/* Instantiate four forms of CycleGraph::checkReachable */
template bool CycleGraph::checkReachable(const ModelAction *from,
- const ModelAction *to) const;
+ const ModelAction *to) const;
/** @brief Begin a new sequence of graph additions which can be rolled back */
void CycleGraph::startChanges()
/** Rollback changes to the previous commit. */
void CycleGraph::rollbackChanges()
{
- for (unsigned int i = 0; i < rollbackvector.size(); i++)
+ for (unsigned int i = 0;i < rollbackvector.size();i++)
rollbackvector[i]->removeEdge();
- for (unsigned int i = 0; i < rmwrollbackvector.size(); i++)
+ for (unsigned int i = 0;i < rmwrollbackvector.size();i++)
rmwrollbackvector[i]->clearRMW();
hasCycles = oldCycles;
template <typename T>
static bool vector_remove_node(SnapVector<T>& v, const T n)
{
- for (unsigned int i = 0; i < v.size(); i++) {
+ for (unsigned int i = 0;i < v.size();i++) {
if (v[i] == n) {
v.erase(v.begin() + i);
return true;
*/
bool CycleNode::addEdge(CycleNode *node)
{
- for (unsigned int i = 0; i < edges.size(); i++)
+ for (unsigned int i = 0;i < edges.size();i++)
if (edges[i] == node)
return false;
edges.push_back(node);
/** @brief A graph of Model Actions for tracking cycles. */
class CycleGraph {
- public:
+public:
CycleGraph();
~CycleGraph();
CycleNode * getNode_noCreate(const ModelAction *act) const;
SNAPSHOTALLOC
- private:
+private:
bool addNodeEdge(CycleNode *fromnode, CycleNode *tonode);
void putNode(const ModelAction *act, CycleNode *node);
CycleNode * getNode(const ModelAction *act);
* @brief A node within a CycleGraph; corresponds either to one ModelAction
*/
class CycleNode {
- public:
+public:
CycleNode(const ModelAction *act);
bool addEdge(CycleNode *node);
CycleNode * getEdge(unsigned int i) const;
const ModelAction * getAction() const { return action; }
SNAPSHOTALLOC
- private:
+private:
/** @brief The ModelAction that this node represents */
const ModelAction *action;
CycleNode *hasRMW;
};
-#endif /* __CYCLEGRAPH_H__ */
+#endif/* __CYCLEGRAPH_H__ */
* @return true if the current clock allows a race with the event at clock2/tid2
*/
static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
- modelclock_t clock2, thread_id_t tid2)
+ modelclock_t clock2, thread_id_t tid2)
{
return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
}
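/*
 * Example with hypothetical clocks: for clock1 = (3, 5) owned by tid1 = 0
 * and an event by tid2 = 1 at clock2 = 6, getClock(tid2) = 5 <= 6, so
 * thread 0 has not synchronized past that event and the two may race;
 * with clock2 = 4 the event already happens-before and cannot race.
 */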
if (get_execution()->isfeasibleprefix()) {
bool race_asserted = false;
/* Prune the non-racing unrealized dataraces */
- for (unsigned i = 0; i < unrealizedraces->size(); i++) {
+ for (unsigned i = 0;i < unrealizedraces->size();i++) {
struct DataRace *race = (*unrealizedraces)[i];
if (clock_may_race(race->newaction->get_cv(), race->newaction->get_tid(), race->oldclock, race->oldthread)) {
assert_race(race);
void assert_race(struct DataRace *race)
{
model->assert_bug(
- "Data race detected @ address %p:\n"
- " Access 1: %5s in thread %2d @ clock %3u\n"
- " Access 2: %5s in thread %2d @ clock %3u",
- race->address,
- race->isoldwrite ? "write" : "read",
- id_to_int(race->oldthread),
- race->oldclock,
- race->isnewwrite ? "write" : "read",
- id_to_int(race->newaction->get_tid()),
- race->newaction->get_seq_number()
+ "Data race detected @ address %p:\n"
+ " Access 1: %5s in thread %2d @ clock %3u\n"
+ " Access 2: %5s in thread %2d @ clock %3u",
+ race->address,
+ race->isoldwrite ? "write" : "read",
+ id_to_int(race->oldthread),
+ race->oldclock,
+ race->isnewwrite ? "write" : "read",
+ id_to_int(race->newaction->get_tid()),
+ race->newaction->get_seq_number()
);
}
/* Check for datarace against last read. */
- for (int i = 0; i < record->numReads; i++) {
+ for (int i = 0;i < record->numReads;i++) {
modelclock_t readClock = record->readClock[i];
thread_id_t readThread = record->thread[i];
/* Note that readClock can't actually be zero here, so it could be
- optimized. */
+ optimized. */
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* We have a datarace */
int copytoindex = 0;
- for (int i = 0; i < record->numReads; i++) {
+ for (int i = 0;i < record->numReads;i++) {
modelclock_t readClock = record->readClock[i];
thread_id_t readThread = record->thread[i];
/* Note that this is not really a datarace check, as reads cannot
- actually race. It is just determining that this read subsumes
- another in the sense that either this read races or neither
- read races. Note that readClock can't actually be zero, so it
- could be optimized. */
+ actually race. It is just determining that this read subsumes
+ another in the sense that either this read races or neither
+ read races. Note that readClock can't actually be zero, so it
+ could be optimized. */
if (clock_may_race(currClock, thread, readClock, readThread)) {
/* Still need this read in vector */
struct DataRace {
/* Clock and thread associated with first action. This won't change in
- response to synchronization. */
+ response to synchronization. */
thread_id_t oldthread;
modelclock_t oldclock;
bool isoldwrite;
/* Model action associated with second action. This could change as
- a result of synchronization. */
+ a result of synchronization. */
ModelAction *newaction;
/* Record whether this is a write, so we can tell the user. */
bool isnewwrite;
#define MAXREADVECTOR (READMASK-1)
#define MAXWRITEVECTOR (WRITEMASK-1)
-#endif /* __DATARACE_H__ */
+#endif/* __DATARACE_H__ */
#include "bugmessage.h"
#include "fuzzer.h"
-#define INITIAL_THREAD_ID 0
+#define INITIAL_THREAD_ID 0
/**
* Structure for holding small ModelChecker members that should be snapshotted
{ }
~model_snapshot_members() {
- for (unsigned int i = 0; i < bugs.size(); i++)
+ for (unsigned int i = 0;i < bugs.size();i++)
delete bugs[i];
bugs.clear();
}
/** @brief Constructor */
ModelExecution::ModelExecution(ModelChecker *m,
- Scheduler *scheduler,
- NodeStack *node_stack) :
+ Scheduler *scheduler,
+ NodeStack *node_stack) :
model(m),
params(NULL),
scheduler(scheduler),
action_trace(),
- thread_map(2), /* We'll always need at least 2 threads */
+ thread_map(2), /* We'll always need at least 2 threads */
pthread_map(0),
pthread_counter(0),
obj_map(),
thrd_last_action(1),
thrd_last_fence_release(),
node_stack(node_stack),
- priv(new struct model_snapshot_members()),
- mo_graph(new CycleGraph()),
+ priv(new struct model_snapshot_members ()),
+ mo_graph(new CycleGraph()),
fuzzer(new Fuzzer())
{
/* Initialize a model-checker thread, for special ModelActions */
/** @brief Destructor */
ModelExecution::~ModelExecution()
{
- for (unsigned int i = 0; i < get_num_threads(); i++)
+ for (unsigned int i = 0;i < get_num_threads();i++)
delete get_thread(int_to_id(i));
delete mo_graph;
void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
{
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *thr = get_thread(int_to_id(i));
if (scheduler->is_sleep_set(thr)) {
if (should_wake_up(curr, thr))
bool ModelExecution::is_deadlocked() const
{
bool blocking_threads = false;
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
thread_id_t tid = int_to_id(i);
if (is_enabled(tid))
return false;
*/
bool ModelExecution::is_complete_execution() const
{
- for (unsigned int i = 0; i < get_num_threads(); i++)
+ for (unsigned int i = 0;i < get_num_threads();i++)
if (is_enabled(int_to_id(i)))
return false;
return true;
*/
bool ModelExecution::process_read(ModelAction *curr, ModelVector<ModelAction *> * rf_set)
{
- while(true) {
-
- int index = fuzzer->selectWrite(curr, rf_set);
- const ModelAction *rf = (*rf_set)[index];
-
-
- ASSERT(rf);
-
- mo_graph->startChanges();
- bool updated = r_modification_order(curr, rf);
- if (!is_infeasible()) {
- read_from(curr, rf);
- mo_graph->commitChanges();
- get_thread(curr)->set_return_value(curr->get_return_value());
- return updated;
- }
- mo_graph->rollbackChanges();
- (*rf_set)[index] = rf_set->back();
- rf_set->pop_back();
- }
+ while(true) {
+
+ int index = fuzzer->selectWrite(curr, rf_set);
+ const ModelAction *rf = (*rf_set)[index];
+
+
+ ASSERT(rf);
+
+ mo_graph->startChanges();
+ bool updated = r_modification_order(curr, rf);
+ if (!is_infeasible()) {
+ read_from(curr, rf);
+ mo_graph->commitChanges();
+ get_thread(curr)->set_return_value(curr->get_return_value());
+ return updated;
+ }
+ mo_graph->rollbackChanges();
+ (*rf_set)[index] = rf_set->back();
+ rf_set->pop_back();
+ }
}
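/*
 * Sketch of the loop above on a hypothetical rf_set {W1, W2, W3}: the
 * fuzzer picks a candidate write at random; if reading from it would make
 * the execution infeasible, the rollback path swaps it with the back of
 * the vector, pops it, and retries with the remaining candidates.
 */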
/**
}
get_thread(curr)->set_return_value(1);
}
- //otherwise fall into the lock case
+ //otherwise fall into the lock case
case ATOMIC_LOCK: {
if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
assert_bug("Lock access before initialization");
case ATOMIC_WAIT:
case ATOMIC_UNLOCK: {
/* wake up the other threads */
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
Thread *curr_thrd = get_thread(curr);
if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
state->locked = NULL;
if (!curr->is_wait())
- break; /* The rest is only for ATOMIC_WAIT */
+ break; /* The rest is only for ATOMIC_WAIT */
break;
}
case ATOMIC_NOTIFY_ALL: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
//activate all the waiting threads
- for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
+ for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
scheduler->wake(get_thread(*rit));
}
waiters->clear();
action_list_t *list = &action_trace;
action_list_t::reverse_iterator rit;
/* Find X : is_read(X) && X --sb-> curr */
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
if (act == curr)
continue;
/* Establish hypothetical release sequences */
rel_heads_list_t release_heads;
get_release_seq_heads(curr, act, &release_heads);
- for (unsigned int i = 0; i < release_heads.size(); i++)
+ for (unsigned int i = 0;i < release_heads.size();i++)
synchronize(release_heads[i], curr);
if (release_heads.size() != 0)
updated = true;
break;
}
case PTHREAD_CREATE: {
- (*(uint32_t *)curr->get_location()) = pthread_counter++;
+ (*(uint32_t *)curr->get_location()) = pthread_counter++;
struct pthread_params *params = (struct pthread_params *)curr->get_value();
Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
add_thread(th);
th->set_creation(curr);
- if ( pthread_map.size() < pthread_counter )
- pthread_map.resize( pthread_counter );
+ if ( pthread_map.size() < pthread_counter )
+ pthread_map.resize( pthread_counter );
pthread_map[ pthread_counter-1 ] = th;
break;
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
+ updated = true; /* trigger rel-seq checks */
break;
}
case PTHREAD_JOIN: {
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
- updated = true; /* trigger rel-seq checks */
- break; // WL: to be add (modified)
+ updated = true; /* trigger rel-seq checks */
+ break; // WL: to be added (modified)
}
case THREAD_FINISH: {
Thread *th = get_thread(curr);
/* Wake up any joining threads */
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *waiting = get_thread(int_to_id(i));
if (waiting->waiting_on() == th &&
waiting->get_pending()->is_thread_join())
scheduler->wake(waiting);
}
th->complete();
- updated = true; /* trigger rel-seq checks */
+ updated = true; /* trigger rel-seq checks */
break;
}
case THREAD_START: {
newcurr->create_cv(get_parent_action(newcurr->get_tid()));
*curr = newcurr;
- return false; /* Action was explored previously */
+ return false; /* Action was explored previously */
} else {
newcurr = *curr;
/* Assign most recent release fence */
newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
- return true; /* This was a new ModelAction */
+ return true; /* This was a new ModelAction */
}
}
rel_heads_list_t release_heads;
get_release_seq_heads(act, act, &release_heads);
int num_heads = release_heads.size();
- for (unsigned int i = 0; i < release_heads.size(); i++)
+ for (unsigned int i = 0;i < release_heads.size();i++)
if (!synchronize(release_heads[i], act))
num_heads--;
return num_heads > 0;
ModelVector<ModelAction *> * rf_set = NULL;
/* Build may_read_from set for newly-created actions */
if (newly_explored && curr->is_read())
- rf_set = build_may_read_from(curr);
+ rf_set = build_may_read_from(curr);
process_thread_action(curr);
-
+
if (curr->is_read() && !second_part_of_rmw) {
- process_read(curr, rf_set);
- delete rf_set;
+ process_read(curr, rf_set);
+ delete rf_set;
} else {
- ASSERT(rf_set == NULL);
+ ASSERT(rf_set == NULL);
}
-
+
if (curr->is_write())
- process_write(curr);
-
+ process_write(curr);
+
if (curr->is_fence())
- process_fence(curr);
-
+ process_fence(curr);
+
if (curr->is_mutex_op())
- process_mutex(curr);
+ process_mutex(curr);
return curr;
}
*/
bool ModelExecution::isfeasibleprefix() const
{
- return !is_infeasible();
+ return !is_infeasible();
}
/**
bool ModelExecution::is_infeasible() const
{
return mo_graph->checkForCycles() ||
- priv->bad_synchronization ||
- priv->bad_sc_read;
+ priv->bad_synchronization ||
+ priv->bad_sc_read;
}
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction *lastread = get_last_action(act->get_tid());
lastread->process_rmw(act);
if (act->is_rmw()) {
- mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
+ mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
mo_graph->commitChanges();
}
return lastread;
last_sc_write = get_last_seq_cst_write(curr);
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
/* Last SC fence in thread i */
ModelAction *last_sc_fence_thread_local = NULL;
if (int_to_id((int)i) != curr->get_tid())
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Skip curr */
}
/* C++, Section 29.3 statement 4 */
else if (act->is_seqcst() && last_sc_fence_local &&
- *act < *last_sc_fence_local) {
+ *act < *last_sc_fence_local) {
added = mo_graph->addEdge(act, rf) || added;
break;
}
/* C++, Section 29.3 statement 6 */
else if (last_sc_fence_thread_before &&
- *act < *last_sc_fence_thread_before) {
+ *act < *last_sc_fence_thread_before) {
added = mo_graph->addEdge(act, rf) || added;
break;
}
} else {
const ModelAction *prevrf = act->get_reads_from();
if (!prevrf->equals(rf))
- added = mo_graph->addEdge(prevrf, rf) || added;
+ added = mo_graph->addEdge(prevrf, rf) || added;
}
break;
}
if (curr->is_seqcst()) {
/* We have to at least see the last sequentially consistent write,
- so we are initialized. */
+ so we are initialized. */
ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
if (last_seq_cst != NULL) {
added = mo_graph->addEdge(last_seq_cst, curr) || added;
ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
/* Last SC fence in thread i, before last SC fence in current thread */
ModelAction *last_sc_fence_thread_before = NULL;
if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
if (act == curr) {
/*
added = mo_graph->addEdge(act, curr) || added;
else if (act->is_read()) {
//if previous read accessed a null, just keep going
- added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
+ added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
}
break;
} else if (act->is_read() && !act->could_synchronize_with(curr) &&
- !act->same_thread(curr)) {
+ !act->same_thread(curr)) {
/* We have an action that:
(1) did not happen before us
(2) is a read and we are a write
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
unsigned int i;
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
const ModelAction *write_after_read = NULL;
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Don't disallow due to act == reader */
* false otherwise
*/
bool ModelExecution::release_seq_heads(const ModelAction *rf,
- rel_heads_list_t *release_heads) const
+ rel_heads_list_t *release_heads) const
{
/* Only check for release sequences if there are no cycles */
if (mo_graph->checkForCycles())
return false;
- for ( ; rf != NULL; rf = rf->get_reads_from()) {
+ for ( ;rf != NULL;rf = rf->get_reads_from()) {
ASSERT(rf->is_write());
if (rf->is_release())
else if (rf->get_last_fence_release())
release_heads->push_back(rf->get_last_fence_release());
if (!rf->is_rmw())
- break; /* End of RMW chain */
+ break; /* End of RMW chain */
/** @todo Need to be smarter here... In the linux lock
* example, this will run to the beginning of the program for
/* acq_rel RMW is a sufficient stopping condition */
if (rf->is_acquire() && rf->is_release())
- return true; /* complete */
+ return true; /* complete */
};
- ASSERT(rf); // Needs to be real write
+ ASSERT(rf); // Needs to be real write
if (rf->is_release())
- return true; /* complete */
+ return true; /* complete */
/* else relaxed write
* - check for fence-release in the same thread (29.8, stmt. 3)
if (fence_release)
release_heads->push_back(fence_release);
- return true; /* complete */
+ return true; /* complete */
}
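/*
 * Illustrative chain (hypothetical actions): if an acquire load reads
 * from RMW2, which reads from RMW1, which reads from a release store W,
 * the loop above follows reads-from backwards RMW2 -> RMW1 -> W and
 * collects W (plus any applicable fence-releases) as release-sequence
 * heads.
 */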
/**
* @see ModelExecution::release_seq_heads
*/
void ModelExecution::get_release_seq_heads(ModelAction *acquire,
- ModelAction *read, rel_heads_list_t *release_heads)
+ ModelAction *read, rel_heads_list_t *release_heads)
{
const ModelAction *rf = read->get_reads_from();
action_list_t *list = obj_map.get(location);
/* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); (*rit) != curr; rit++)
+ for (rit = list->rbegin();(*rit) != curr;rit++)
;
- rit++; /* Skip past curr */
- for ( ; rit != list->rend(); rit++)
+ rit++; /* Skip past curr */
+ for ( ;rit != list->rend();rit++)
if ((*rit)->is_write() && (*rit)->is_seqcst())
return *rit;
return NULL;
action_list_t::reverse_iterator rit = list->rbegin();
if (before_fence) {
- for (; rit != list->rend(); rit++)
+ for (;rit != list->rend();rit++)
if (*rit == before_fence)
break;
rit++;
}
- for (; rit != list->rend(); rit++)
+ for (;rit != list->rend();rit++)
if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
return *rit;
return NULL;
action_list_t *list = obj_map.get(location);
/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++)
+ for (rit = list->rbegin();rit != list->rend();rit++)
if ((*rit)->is_unlock() || (*rit)->is_wait())
return *rit;
return NULL;
}
bool valequals(uint64_t val1, uint64_t val2, int size) {
- switch(size) {
- case 1:
- return ((uint8_t)val1) == ((uint8_t)val2);
- case 2:
- return ((uint16_t)val1) == ((uint16_t)val2);
- case 4:
- return ((uint32_t)val1) == ((uint32_t)val2);
- case 8:
- return val1==val2;
- default:
- ASSERT(0);
- return false;
- }
+ switch(size) {
+ case 1:
+ return ((uint8_t)val1) == ((uint8_t)val2);
+ case 2:
+ return ((uint16_t)val1) == ((uint16_t)val2);
+ case 4:
+ return ((uint32_t)val1) == ((uint32_t)val2);
+ case 8:
+ return val1==val2;
+ default:
+ ASSERT(0);
+ return false;
+ }
}
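/*
 * Example: valequals(0x1ff, 0xff, 1) is true, since only the low byte is
 * compared for a 1-byte access, whereas valequals(0x1ff, 0xff, 2)
 * compares 16 bits and is false.
 */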
/**
last_sc_write = get_last_seq_cst_write(curr);
ModelVector<ModelAction *> * rf_set = new ModelVector<ModelAction *>();
-
+
/* Iterate over all threads */
- for (i = 0; i < thrd_lists->size(); i++) {
+ for (i = 0;i < thrd_lists->size();i++) {
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
- for (rit = list->rbegin(); rit != list->rend(); rit++) {
+ for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Only consider 'write' actions */
/* Need to check whether we will have two RMW reading from the same value */
if (curr->is_rmwr()) {
- /* It is okay if we have a failing CAS */
- if (!curr->is_rmwrcas() ||
- valequals(curr->get_value(), act->get_value(), curr->getSize())) {
- //Need to make sure we aren't the second RMW
- CycleNode * node = mo_graph->getNode_noCreate(act);
- if (node != NULL && node->getRMW() != NULL) {
- //we are the second RMW
- allow_read = false;
+ /* It is okay if we have a failing CAS */
+ if (!curr->is_rmwrcas() ||
+ valequals(curr->get_value(), act->get_value(), curr->getSize())) {
+ //Need to make sure we aren't the second RMW
+ CycleNode * node = mo_graph->getNode_noCreate(act);
+ if (node != NULL && node->getRMW() != NULL) {
+ //we are the second RMW
+ allow_read = false;
+ }
}
- }
}
-
+
if (allow_read) {
/* Only add feasible reads */
mo_graph->startChanges();
r_modification_order(curr, act);
if (!is_infeasible())
- rf_set->push_back(act);
+ rf_set->push_back(act);
mo_graph->rollbackChanges();
}
unsigned int hash = 0;
- for (it = list->begin(); it != list->end(); it++) {
+ for (it = list->begin();it != list->end();it++) {
const ModelAction *act = *it;
if (act->get_seq_number() > 0)
act->print();
mo_graph->dumpNodes(file);
ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
- for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
+ for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
ModelAction *act = *it;
if (act->is_read()) {
mo_graph->dot_print_node(file, act);
mo_graph->dot_print_edge(file,
- act->get_reads_from(),
- act,
- "label=\"rf\", color=red, weight=2");
+ act->get_reads_from(),
+ act,
+ "label=\"rf\", color=red, weight=2");
}
if (thread_array[act->get_tid()]) {
mo_graph->dot_print_edge(file,
- thread_array[id_to_int(act->get_tid())],
- act,
- "label=\"sb\", color=blue, weight=400");
+ thread_array[id_to_int(act->get_tid())],
+ act,
+ "label=\"sb\", color=blue, weight=400");
}
thread_array[act->get_tid()] = act;
* @return A Thread reference
*/
Thread * ModelExecution::get_pthread(pthread_t pid) {
- union {
- pthread_t p;
- uint32_t v;
- } x;
- x.p = pid;
- uint32_t thread_id = x.v;
- if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
- else return NULL;
+ union {
+ pthread_t p;
+ uint32_t v;
+ } x;
+ x.p = pid;
+ uint32_t thread_id = x.v;
+ if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
+ else return NULL;
}
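/*
 * Note on the union above: it type-puns pthread_t to recover the 32-bit
 * index that the PTHREAD_CREATE case stored into the pthread_t location,
 * assuming pthread_t is at least 32 bits wide on the host.
 */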
/**
if (curr->is_rmwr())
return get_thread(curr);
if (curr->is_write()) {
- std::memory_order order = curr->get_mo();
+ std::memory_order order = curr->get_mo();
switch(order) {
- case std::memory_order_relaxed:
- return get_thread(curr);
- case std::memory_order_release:
- return get_thread(curr);
- default:
- return NULL;
- }
+ case std::memory_order_relaxed:
+ return get_thread(curr);
+ case std::memory_order_release:
+ return get_thread(curr);
+ default:
+ return NULL;
+ }
}
/* Follow CREATE with the created thread */
/* which is not needed, because model.cc takes care of this */
if (curr->get_type() == THREAD_CREATE)
- return curr->get_thread_operand();
+ return curr->get_thread_operand();
if (curr->get_type() == PTHREAD_CREATE) {
return curr->get_thread_operand();
}
Thread *curr_thrd = get_thread(curr);
ASSERT(curr_thrd->get_state() == THREAD_READY);
- ASSERT(check_action_enabled(curr)); /* May have side effects? */
+ ASSERT(check_action_enabled(curr)); /* May have side effects? */
curr = check_current_action(curr);
ASSERT(curr);
}
Fuzzer * ModelExecution::getFuzzer() {
- return fuzzer;
+ return fuzzer;
}
class ModelExecution {
public:
ModelExecution(ModelChecker *m,
- Scheduler *scheduler,
- NodeStack *node_stack);
+ Scheduler *scheduler,
+ NodeStack *node_stack);
~ModelExecution();
struct model_params * get_params() const { return params; }
void setParams(struct model_params * _params) {params = _params;}
-
+
Thread * take_step(ModelAction *curr);
void print_summary() const;
bool is_deadlocked() const;
action_list_t * get_action_trace() { return &action_trace; }
- Fuzzer * getFuzzer();
+ Fuzzer * getFuzzer();
CycleGraph * const get_mo_graph() { return mo_graph; }
- HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> * getCondMap() {return &cond_map;}
- HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> * getMutexMap() {return &mutex_map;}
-
+ HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> * getCondMap() {return &cond_map;}
+ HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> * getMutexMap() {return &mutex_map;}
+
SNAPSHOTALLOC
private:
int get_execution_number() const;
ModelChecker *model;
- struct model_params * params;
+ struct model_params * params;
/** The scheduler to use: tracks the running/ready Threads */
Scheduler * const scheduler;
bool next_execution();
ModelAction * check_current_action(ModelAction *curr);
bool initialize_curr_action(ModelAction **curr);
- bool process_read(ModelAction *curr, ModelVector<ModelAction *> * rf_set);
+ bool process_read(ModelAction *curr, ModelVector<ModelAction *> * rf_set);
bool process_write(ModelAction *curr);
bool process_fence(ModelAction *curr);
bool process_mutex(ModelAction *curr);
ModelAction * get_last_seq_cst_write(ModelAction *curr) const;
ModelAction * get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const;
ModelAction * get_last_unlock(ModelAction *curr) const;
- ModelVector<ModelAction *> * build_may_read_from(ModelAction *curr);
+ ModelVector<ModelAction *> * build_may_read_from(ModelAction *curr);
ModelAction * process_rmw(ModelAction *curr);
template <typename rf_type>
bool w_modification_order(ModelAction *curr);
void get_release_seq_heads(ModelAction *acquire, ModelAction *read, rel_heads_list_t *release_heads);
- bool release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads) const;
+ bool release_seq_heads(const ModelAction *rf, rel_heads_list_t *release_heads) const;
ModelAction * get_uninitialized_action(const ModelAction *curr) const;
action_list_t action_trace;
SnapVector<Thread *> thread_map;
SnapVector<Thread *> pthread_map;
- uint32_t pthread_counter;
+ uint32_t pthread_counter;
/** Per-object list of actions. Maps an object (i.e., memory location)
* to a trace of all actions performed on the object. */
HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> obj_thrd_map;
- HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> mutex_map;
+ HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> mutex_map;
HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> cond_map;
/**
*/
CycleGraph * const mo_graph;
- Fuzzer * fuzzer;
-
+ Fuzzer * fuzzer;
+
Thread * action_select_next_thread(const ModelAction *curr) const;
};
-#endif /* __EXECUTION_H__ */
+#endif/* __EXECUTION_H__ */
// Copyright (C) 2015-2019 Free Software Foundation, Inc.
//
-// This is a reimplementation of libstdc++-v3/src/c++11/futex.cc.
+// This is a reimplementation of libstdc++-v3/src/c++11/futex.cc.
#include <bits/atomic_futex.h>
#ifdef _GLIBCXX_HAS_GTHREADS
namespace std _GLIBCXX_VISIBILITY(default)
{
-_GLIBCXX_BEGIN_NAMESPACE_VERSION
+ _GLIBCXX_BEGIN_NAMESPACE_VERSION
bool
__atomic_futex_unsigned_base::_M_futex_wait_until(unsigned *__addr,
- unsigned __val,
- bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns)
+ unsigned __val,
+ bool __has_timeout, chrono::seconds __s, chrono::nanoseconds __ns)
{
// do nothing if the two values are not equal
if ( *__addr != __val ) {
return true;
}
- // if a timeout is specified, return immedialy. Letting the scheduler decide how long this thread will wait.
+ // if a timeout is specified, return immediately, letting the scheduler decide how long this thread will wait.
if (__has_timeout) {
return true;
}
v->notify_all();
}
-_GLIBCXX_END_NAMESPACE_VERSION
+ _GLIBCXX_END_NAMESPACE_VERSION
}
-#endif // defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
-#endif // _GLIBCXX_HAS_GTHREADS
+#endif// defined(_GLIBCXX_HAVE_LINUX_FUTEX) && ATOMIC_INT_LOCK_FREE > 1
+#endif// _GLIBCXX_HAS_GTHREADS
#include "model.h"
int Fuzzer::selectWrite(ModelAction *read, ModelVector<ModelAction *> * rf_set) {
- int random_index = random() % rf_set->size();
- return random_index;
+ int random_index = random() % rf_set->size();
+ return random_index;
}
Thread * Fuzzer::selectThread(Node *n, int * threadlist, int numthreads) {
- int random_index = random() % numthreads;
- int thread = threadlist[random_index];
- thread_id_t curr_tid = int_to_id(thread);
- return model->get_thread(curr_tid);
+ int random_index = random() % numthreads;
+ int thread = threadlist[random_index];
+ thread_id_t curr_tid = int_to_id(thread);
+ return model->get_thread(curr_tid);
}
Thread * Fuzzer::selectNotify(action_list_t * waiters) {
- int numwaiters = waiters->size();
- int random_index = random() % numwaiters;
- action_list_t::iterator it = waiters->begin();
- advance(it, random_index);
- Thread *thread = model->get_thread(*it);
- waiters->erase(it);
- return thread;
+ int numwaiters = waiters->size();
+ int random_index = random() % numwaiters;
+ action_list_t::iterator it = waiters->begin();
+ advance(it, random_index);
+ Thread *thread = model->get_thread(*it);
+ waiters->erase(it);
+ return thread;
}
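/* Editor's note: all three selectors above share one pattern -- draw a
 * uniform random index with random() and return that element. A minimal
 * standalone sketch of the selectNotify() variant, using std::list<int>
 * in place of action_list_t (hypothetical helper, for illustration only): */
#include <cstdlib>
#include <iterator>
#include <list>

static int select_and_remove(std::list<int> *waiters)
{
	int random_index = random() % waiters->size();
	std::list<int>::iterator it = waiters->begin();
	std::advance(it, random_index);	// walk to the chosen waiter
	int chosen = *it;
	waiters->erase(it);	// selectNotify likewise removes the chosen waiter
	return chosen;
}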
class Fuzzer {
public:
- Fuzzer() {}
- int selectWrite(ModelAction *read, ModelVector<ModelAction *>* rf_set);
- Thread * selectThread(Node *n, int * threadlist, int numthreads);
- Thread * selectNotify(action_list_t * waiters);
- MEMALLOC
+ Fuzzer() {}
+ int selectWrite(ModelAction *read, ModelVector<ModelAction *>* rf_set);
+ Thread * selectThread(Node *n, int * threadlist, int numthreads);
+ Thread * selectNotify(action_list_t * waiters);
+ MEMALLOC
private:
};
#endif
*/
template<typename _Key, typename _Val, typename _KeyInt, int _Shift = 0, void * (* _malloc)(size_t) = snapshot_malloc, void * (* _calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free>
class HashTable {
- public:
+public:
/**
* @brief Hash table constructor
* @param initialcapacity Sets the initial capacity of the hash table.
capacitymask = initialcapacity - 1;
threshold = (unsigned int)(initialcapacity * loadfactor);
- size = 0; // Initial number of elements in the hash
+ size = 0; // Initial number of elements in the hash
}
/** @brief Hash table destructor */
exit(EXIT_FAILURE);
}
- table = newtable; // Update the global hashtable upon resize()
+ table = newtable; // Update the global hashtable upon resize()
capacity = newsize;
capacitymask = newsize - 1;
struct hashlistnode<_Key, _Val> *bin = &oldtable[0];
struct hashlistnode<_Key, _Val> *lastbin = &oldtable[oldcapacity];
- for (; bin < lastbin; bin++) {
+ for (;bin < lastbin;bin++) {
_Key key = bin->key;
struct hashlistnode<_Key, _Val> *search;
search->val = bin->val;
}
- _free(oldtable); // Free the memory of the old hash table
+ _free(oldtable); // Free the memory of the old hash table
}
- private:
+private:
struct hashlistnode<_Key, _Val> *table;
unsigned int capacity;
unsigned int size;
double loadfactor;
};
-#endif /* __HASHTABLE_H__ */
+#endif /* __HASHTABLE_H__ */
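/* Editor's sketch: typical use of this HashTable, assuming the put()/get()/
 * contains() members that the model-checker code exercises elsewhere (e.g.
 * the duplicateMap in the snapshot code). The _Shift parameter drops
 * low-order key bits before hashing; the in-tree pointer-keyed maps use 4. */
static void hashtable_example()
{
	HashTable<void *, int, uintptr_t, 4> table;
	int slot;
	table.put(&slot, 42);	// insert (or overwrite) the mapping for &slot
	int v = 0;
	if (table.contains(&slot))
		v = table.get(&slot);	// retrieves 42
	(void)v;
}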
{ return atomic_flag_test_and_set_explicit( __a__, memory_order_seq_cst ); }
void atomic_flag_clear_explicit
-( volatile atomic_flag* __a__, memory_order __x__ )
+ ( volatile atomic_flag* __a__, memory_order __x__ )
{
volatile bool * __p__ = &((__a__)->__f__);
model->switch_to_master(new ModelAction(ATOMIC_WRITE, __x__, (void *) __p__, false));
void atomic_flag_clear( volatile atomic_flag* __a__ )
{ atomic_flag_clear_explicit( __a__, memory_order_seq_cst ); }
-void __atomic_flag_wait__( volatile atomic_flag* __a__ ) {
+void __atomic_flag_wait__( volatile atomic_flag* __a__ ) {
while ( atomic_flag_test_and_set( __a__ ) )
- ;
+ ;
}
void __atomic_flag_wait_explicit__( volatile atomic_flag* __a__,
- memory_order __x__ ) {
+ memory_order __x__ ) {
while ( atomic_flag_test_and_set_explicit( __a__, __x__ ))
;
}
// helper functions used by CdsPass
// The CdsPass implementation does not replace normal load/stores with cds load/stores,
// but inserts cds load/stores to check data races. Thus, the cds load/stores do not
-// return anything.
+// return anything.
void cds_store8(void *addr)
{
- //DEBUG("addr = %p, val = %" PRIu8 "\n", addr, val);
+ //DEBUG("addr = %p, val = %" PRIu8 "\n", addr, val);
thread_id_t tid = thread_current()->get_id();
raceCheckWrite(tid, addr);
}
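/* Editor's sketch of the instrumentation described above, at the source
 * level (hypothetical; CdsPass rewrites LLVM IR, not C). The inserted call
 * only race-checks the address -- the original store still does the write: */
#include <stdint.h>

static void instrumented_store(uint8_t *p, uint8_t v)
{
	cds_store8(p);	// race detection for the 1-byte write at p
	*p = v;	// original store, left in place by the pass
}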
void cds_store16(void *addr)
{
- //DEBUG("addr = %p, val = %" PRIu16 "\n", addr, val);
+ //DEBUG("addr = %p, val = %" PRIu16 "\n", addr, val);
thread_id_t tid = thread_current()->get_id();
raceCheckWrite(tid, addr);
raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1));
void cds_store32(void *addr)
{
- //DEBUG("addr = %p, val = %" PRIu32 "\n", addr, val);
+ //DEBUG("addr = %p, val = %" PRIu32 "\n", addr, val);
thread_id_t tid = thread_current()->get_id();
raceCheckWrite(tid, addr);
raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1));
void cds_store64(void *addr)
{
- //DEBUG("addr = %p, val = %" PRIu64 "\n", addr, val);
+ //DEBUG("addr = %p, val = %" PRIu64 "\n", addr, val);
thread_id_t tid = thread_current()->get_id();
raceCheckWrite(tid, addr);
raceCheckWrite(tid, (void *)(((uintptr_t)addr) + 1));
param_defaults(params);
model_print(
-"Copyright (c) 2013 Regents of the University of California. All rights reserved.\n"
-"Distributed under the GPLv2\n"
-"Written by Brian Norris and Brian Demsky\n"
-"\n"
-"Usage: %s [MODEL-CHECKER OPTIONS] -- [PROGRAM ARGS]\n"
-"\n"
-"MODEL-CHECKER OPTIONS can be any of the model-checker options listed below. Arguments\n"
-"provided after the `--' (the PROGRAM ARGS) are passed to the user program.\n"
-"\n"
-"Model-checker options:\n"
-"-h, --help Display this help message and exit\n"
-"-v[NUM], --verbose[=NUM] Print verbose execution information. NUM is optional:\n"
-" 0 is quiet; 1 shows valid executions; 2 is noisy;\n"
-" 3 is noisier.\n"
-" Default: %d\n"
-"-u, --uninitialized=VALUE Return VALUE any load which may read from an\n"
-" uninitialized atomic.\n"
-" Default: %u\n"
-"-t, --analysis=NAME Use Analysis Plugin.\n"
-"-o, --options=NAME Option for previous analysis plugin. \n"
-"-x, --maxexec=NUM Maximum number of executions.\n"
-" Default: %u\n"
-" -o help for a list of options\n"
-" -- Program arguments follow.\n\n",
+ "Copyright (c) 2013 Regents of the University of California. All rights reserved.\n"
+ "Distributed under the GPLv2\n"
+ "Written by Brian Norris and Brian Demsky\n"
+ "\n"
+ "Usage: %s [MODEL-CHECKER OPTIONS] -- [PROGRAM ARGS]\n"
+ "\n"
+ "MODEL-CHECKER OPTIONS can be any of the model-checker options listed below. Arguments\n"
+ "provided after the `--' (the PROGRAM ARGS) are passed to the user program.\n"
+ "\n"
+ "Model-checker options:\n"
+ "-h, --help Display this help message and exit\n"
+ "-v[NUM], --verbose[=NUM] Print verbose execution information. NUM is optional:\n"
+ " 0 is quiet; 1 shows valid executions; 2 is noisy;\n"
+ " 3 is noisier.\n"
+ " Default: %d\n"
+ "-u, --uninitialized=VALUE Return VALUE any load which may read from an\n"
+ " uninitialized atomic.\n"
+ " Default: %u\n"
+ "-t, --analysis=NAME Use Analysis Plugin.\n"
+ "-o, --options=NAME Option for previous analysis plugin. \n"
+ "-x, --maxexec=NUM Maximum number of executions.\n"
+ " Default: %u\n"
+ " -o help for a list of options\n"
+ " -- Program arguments follow.\n\n",
program_name,
params->verbose,
params->uninitvalue,
{"analysis", required_argument, NULL, 't'},
{"options", required_argument, NULL, 'o'},
{"maxexecutions", required_argument, NULL, 'x'},
- {0, 0, 0, 0} /* Terminator */
+ {0, 0, 0, 0} /* Terminator */
};
int opt, longindex;
bool error = false;
error = true;
break;
case 'o':
- {
- ModelVector<TraceAnalysis *> * analyses = getInstalledTraceAnalysis();
- if ( analyses->size() == 0 || (*analyses)[analyses->size()-1]->option(optarg))
- error = true;
- }
- break;
- default: /* '?' */
+ {
+ ModelVector<TraceAnalysis *> * analyses = getInstalledTraceAnalysis();
+ if ( analyses->size() == 0 || (*analyses)[analyses->size()-1]->option(optarg))
+ error = true;
+ }
+ break;
+ default: /* '?' */
error = true;
break;
}
snapshot_stack_init();
if (!model)
- model = new ModelChecker();
+ model = new ModelChecker();
model->setParams(params);
install_trace_analyses(model->get_execution());
/** Method to set parameters */
void ModelChecker::setParams(struct model_params params) {
- this->params = params;
- execution->setParams(¶ms);
+ this->params = params;
+ execution->setParams(¶ms);
}
/**
* those pending actions which were NOT pending before the rollback
* point
*/
- for (unsigned int i = 0; i < get_num_threads(); i++)
+ for (unsigned int i = 0;i < get_num_threads();i++)
delete get_thread(int_to_id(i))->get_pending();
snapshot_backtrack_before(0);
SnapVector<bug_message *> *bugs = execution->get_bugs();
model_print("Bug report: %zu bug%s detected\n",
- bugs->size(),
- bugs->size() > 1 ? "s" : "");
- for (unsigned int i = 0; i < bugs->size(); i++)
+ bugs->size(),
+ bugs->size() > 1 ? "s" : "");
+ for (unsigned int i = 0;i < bugs->size();i++)
(*bugs)[i]->print();
}
void ModelChecker::print_execution(bool printbugs) const
{
model_print("Program output from execution %d:\n",
- get_execution_number());
+ get_execution_number());
print_program_output();
if (params.verbose >= 3) {
DBG();
/* Is this execution a feasible execution that's worth bug-checking? */
bool complete = execution->isfeasibleprefix() &&
- (execution->is_complete_execution() ||
- execution->have_bug_reports());
+ (execution->is_complete_execution() ||
+ execution->have_bug_reports());
/* End-of-execution bug checks */
if (complete) {
checkDataRaces();
run_trace_analyses();
- }
+ }
record_stats();
/* Output */
/** @brief Run trace analyses on complete trace */
void ModelChecker::run_trace_analyses() {
- for (unsigned int i = 0; i < trace_analyses.size(); i++)
+ for (unsigned int i = 0;i < trace_analyses.size();i++)
trace_analyses[i]->analyze(execution->get_action_trace());
}
scheduler->set_current_thread(NULL);
ASSERT(!old->get_pending());
/* W: No plugin
- if (inspect_plugin != NULL) {
- inspect_plugin->inspectModelAction(act);
- }*/
+ if (inspect_plugin != NULL) {
+ inspect_plugin->inspectModelAction(act);
+ }*/
old->set_pending(act);
if (Thread::swap(old, &system_context) < 0) {
perror("swap threads");
char random_state[256];
initstate(423121, random_state, sizeof(random_state));
- for(int exec = 0; exec < params.maxexecutions; exec++) {
+ for(int exec = 0;exec < params.maxexecutions;exec++) {
thrd_t user_thread;
- Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL); // L: user_main_wrapper passes the user program
+ Thread *t = new Thread(execution->get_next_id(), &user_thread, &user_main_wrapper, NULL, NULL); // L: user_main_wrapper runs the user program
execution->add_thread(t);
//Need to seed random number generator, otherwise its state gets reset
do {
* for any newly-created thread
*/
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
thread_id_t tid = int_to_id(i);
Thread *thr = get_thread(tid);
if (!thr->is_model_thread() && !thr->is_complete() && !thr->get_pending()) {
- switch_from_master(thr); // L: context swapped, and action type of thr changed.
+ switch_from_master(thr); // L: context swapped, and action type of thr changed.
if (thr->is_waiting_on(thr))
assert_bug("Deadlock detected (thread %u)", i);
}
}
/* Don't schedule threads which should be disabled */
- for (unsigned int i = 0; i < get_num_threads(); i++) {
+ for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *th = get_thread(int_to_id(i));
ModelAction *act = th->get_pending();
if (act && execution->is_enabled(th) && !execution->check_action_enabled(act)) {
}
}
- for (unsigned int i = 1; i < get_num_threads(); i++) {
+ for (unsigned int i = 1;i < get_num_threads();i++) {
Thread *th = get_thread(int_to_id(i));
ModelAction *act = th->get_pending();
- if (act && execution->is_enabled(th) && (th->get_state() != THREAD_BLOCKED) ){
- if (act->is_write()){
- std::memory_order order = act->get_mo();
- if (order == std::memory_order_relaxed || \
- order == std::memory_order_release) {
+ if (act && execution->is_enabled(th) && (th->get_state() != THREAD_BLOCKED) ) {
+ if (act->is_write()) {
+ std::memory_order order = act->get_mo();
+ if (order == std::memory_order_relaxed || \
+ order == std::memory_order_release) {
t = th;
break;
}
} else if (act->get_type() == THREAD_CREATE || \
- act->get_type() == PTHREAD_CREATE || \
- act->get_type() == THREAD_START || \
- act->get_type() == THREAD_FINISH) {
+ act->get_type() == PTHREAD_CREATE || \
+ act->get_type() == THREAD_START || \
+ act->get_type() == THREAD_FINISH) {
t = th;
break;
- }
+ }
}
}
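/* Editor's note: the loop above gives priority to a thread whose pending
 * action is a relaxed or release store, or a thread-lifecycle operation.
 * Distilled into a predicate (sketch, using only the ModelAction accessors
 * already exercised above):
 *
 *	static bool is_preferred(const ModelAction *act) {
 *		if (act->is_write()) {
 *			std::memory_order order = act->get_mo();
 *			return order == std::memory_order_relaxed ||
 *			       order == std::memory_order_release;
 *		}
 *		action_type_t t = act->get_type();
 *		return t == THREAD_CREATE || t == PTHREAD_CREATE ||
 *		       t == THREAD_START || t == THREAD_FINISH;
 *	}
 */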
/* Catch assertions from prior take_step or from
- * between-ModelAction bugs (e.g., data races) */
+ * between-ModelAction bugs (e.g., data races) */
if (execution->has_asserted())
break;
- if (!t)
+ if (!t)
t = get_next_thread();
if (!t || t->is_model_thread())
break;
print_stats();
/* Have the trace analyses dump their output. */
- for (unsigned int i = 0; i < trace_analyses.size(); i++)
+ for (unsigned int i = 0;i < trace_analyses.size();i++)
trace_analyses[i]->finish();
}
/** @brief Model checker execution stats */
struct execution_stats {
- int num_total; /**< @brief Total number of executions */
- int num_infeasible; /**< @brief Number of infeasible executions */
- int num_buggy_executions; /** @brief Number of buggy executions */
- int num_complete; /**< @brief Number of feasible, non-buggy, complete executions */
- int num_redundant; /**< @brief Number of redundant, aborted executions */
+ int num_total; /**< @brief Total number of executions */
+ int num_infeasible; /**< @brief Number of infeasible executions */
+ int num_buggy_executions; /**< @brief Number of buggy executions */
+ int num_complete; /**< @brief Number of feasible, non-buggy, complete executions */
+ int num_redundant; /**< @brief Number of redundant, aborted executions */
};
/** @brief The central structure for model-checking */
bool assert_bug(const char *msg, ...);
void assert_user_bug(const char *msg);
- model_params params;
- void add_trace_analysis(TraceAnalysis *a) { trace_analyses.push_back(a); }
- void set_inspect_plugin(TraceAnalysis *a) { inspect_plugin=a; }
+ model_params params;
+ void add_trace_analysis(TraceAnalysis *a) { trace_analyses.push_back(a); }
+ void set_inspect_plugin(TraceAnalysis *a) { inspect_plugin=a; }
MEMALLOC
private:
/** Flag indicates whether to restart the model checker. */
extern ModelChecker *model;
-#endif /* __MODEL_H__ */
+#endif /* __MODEL_H__ */
state.alloc_tid = tid;
state.alloc_clock = model->get_execution()->get_cv(tid)->getClock(tid);
}
-
+
void mutex::lock()
{
model->switch_to_master(new ModelAction(ATOMIC_LOCK, std::memory_order_seq_cst, this));
}
-
+
bool mutex::try_lock()
{
return model->switch_to_master(new ModelAction(ATOMIC_TRYLOCK, std::memory_order_seq_cst, this));
free(p);
}
-#else /* !USE_MPROTECT_SNAPSHOT */
+#else /* !USE_MPROTECT_SNAPSHOT */
/** @brief Snapshotting allocation function for use by the Thread class only */
void * Thread_malloc(size_t size)
free(ptr);
}
-#endif /* !USE_MPROTECT_SNAPSHOT */
+#endif /* !USE_MPROTECT_SNAPSHOT */
void operator delete[](void *p, size_t size) { \
model_free(p); \
} \
- void * operator new(size_t size, void *p) { /* placement new */ \
+ void * operator new(size_t size, void *p) { /* placement new */ \
return p; \
}
void operator delete[](void *p, size_t size) { \
snapshot_free(p); \
} \
- void * operator new(size_t size, void *p) { /* placement new */ \
+ void * operator new(size_t size, void *p) { /* placement new */ \
return p; \
}
*/
template <class T>
class ModelAlloc {
- public:
+public:
// type definitions
- typedef T value_type;
+ typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
- typedef size_t size_type;
- typedef size_t difference_type;
+ typedef size_t size_type;
+ typedef size_t difference_type;
// rebind allocator to type U
template <class U>
/** Return that all specializations of this allocator are interchangeable. */
template <class T1, class T2>
bool operator ==(const ModelAlloc<T1>&,
- const ModelAlloc<T2>&) throw() {
+ const ModelAlloc<T2>&) throw() {
return true;
}
/** Return that all specializations of this allocator are interchangeable. */
template <class T1, class T2>
bool operator!= (const ModelAlloc<T1>&,
- const ModelAlloc<T2>&) throw() {
+ const ModelAlloc<T2>&) throw() {
return false;
}
*/
template <class T>
class SnapshotAlloc {
- public:
+public:
// type definitions
- typedef T value_type;
+ typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
- typedef size_t size_type;
- typedef size_t difference_type;
+ typedef size_t size_type;
+ typedef size_t difference_type;
// rebind allocator to type U
template <class U>
/** Return that all specializations of this allocator are interchangeable. */
template <class T1, class T2>
bool operator ==(const SnapshotAlloc<T1>&,
- const SnapshotAlloc<T2>&) throw() {
+ const SnapshotAlloc<T2>&) throw() {
return true;
}
/** Return that all specializations of this allocator are interchangeable. */
template <class T1, class T2>
bool operator!= (const SnapshotAlloc<T1>&,
- const SnapshotAlloc<T2>&) throw() {
+ const SnapshotAlloc<T2>&) throw() {
return false;
}
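/* Editor's sketch: both allocators plug into standard containers unchanged;
 * the choice decides which heap owns the elements (assumption consistent
 * with their use here: ModelAlloc persists across rollback, SnapshotAlloc
 * is rolled back with snapshots). */
#include <vector>

static void allocator_example()
{
	std::vector<int, ModelAlloc<int> > persistent;	// model heap
	std::vector<int, SnapshotAlloc<int> > snapshotted;	// snapshot heap
	persistent.push_back(1);
	snapshotted.push_back(2);
}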
#ifdef __cplusplus
extern "C" {
#endif
- typedef void * mspace;
- extern void * mspace_malloc(mspace msp, size_t bytes);
- extern void mspace_free(mspace msp, void* mem);
- extern void * mspace_realloc(mspace msp, void* mem, size_t newsize);
- extern void * mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
- extern mspace create_mspace_with_base(void* base, size_t capacity, int locked);
- extern mspace create_mspace(size_t capacity, int locked);
+typedef void * mspace;
+extern void * mspace_malloc(mspace msp, size_t bytes);
+extern void mspace_free(mspace msp, void* mem);
+extern void * mspace_realloc(mspace msp, void* mem, size_t newsize);
+extern void * mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+extern mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+extern mspace create_mspace(size_t capacity, int locked);
#if USE_MPROTECT_SNAPSHOT
- extern mspace user_snapshot_space;
+extern mspace user_snapshot_space;
#endif
- extern mspace model_snapshot_space;
+extern mspace model_snapshot_space;
#ifdef __cplusplus
-}; /* end of extern "C" */
+}; /* end of extern "C" */
#endif
-#endif /* _MY_MEMORY_H */
+#endif /* _MY_MEMORY_H */
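/* Editor's sketch of the dlmalloc mspace API declared above: each mspace is
 * an independent heap, which is how snapshotted and non-snapshotted
 * allocations are kept apart. Standalone illustration (the in-tree spaces
 * are created over specific reserved regions instead): */
static void mspace_example()
{
	mspace msp = create_mspace(1024 * 1024, 0);	// 1 MB heap, no locking
	void *mem = mspace_malloc(msp, 128);	// served from msp only
	mspace_free(msp, mem);
}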
NodeStack::~NodeStack()
{
- for (unsigned int i = 0; i < node_list.size(); i++)
+ for (unsigned int i = 0;i < node_list.size();i++)
delete node_list[i];
}
{
model_print("............................................\n");
model_print("NodeStack printing node_list:\n");
- for (unsigned int it = 0; it < node_list.size(); it++) {
+ for (unsigned int it = 0;it < node_list.size();it++) {
if ((int)it == this->head_idx)
model_print("vvv following action is the current iterator vvv\n");
node_list[it]->print();
/** Reset the node stack. */
-void NodeStack::full_reset()
+void NodeStack::full_reset()
{
- for (unsigned int i = 0; i < node_list.size(); i++)
+ for (unsigned int i = 0;i < node_list.size();i++)
delete node_list[i];
node_list.clear();
reset_execution();
/** @file nodestack.h
* @brief Stack of operations for use in backtracking.
-*/
+ */
#ifndef __NODESTACK_H__
#define __NODESTACK_H__
int head_idx;
};
-#endif /* __NODESTACK_H__ */
+#endif /* __NODESTACK_H__ */
void redirect_output();
void clear_program_output();
void print_program_output();
-#endif /* ! CONFIG_DEBUG */
+#endif /* ! CONFIG_DEBUG */
-#endif /* __OUTPUT_H__ */
+#endif /* __OUTPUT_H__ */
char **argv;
};
-#endif /* __PARAMS_H__ */
+#endif /* __PARAMS_H__ */
#include "execution.h"
int pthread_create(pthread_t *t, const pthread_attr_t * attr,
- pthread_start_t start_routine, void * arg) {
+ pthread_start_t start_routine, void * arg) {
struct pthread_params params = { start_routine, arg };
ModelAction *act = new ModelAction(PTHREAD_CREATE, std::memory_order_seq_cst, t, (uint64_t)¶ms);
// store return value
void *rtval = th->get_pthread_return();
*value_ptr = rtval;
- }
+ }
return 0;
}
void pthread_exit(void *value_ptr) {
Thread * th = thread_current();
model->switch_to_master(new ModelAction(THREAD_FINISH, std::memory_order_seq_cst, th));
- while(1) ; //make warning goaway
+ while(1) ; // make the compiler warning go away
}
int pthread_mutex_init(pthread_mutex_t *p_mutex, const pthread_mutexattr_t *) {
if (!model) {
- model = new ModelChecker();
+ model = new ModelChecker();
}
cdsc::mutex *m = new cdsc::mutex();
int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
ModelExecution *execution = model->get_execution();
- /* to protect the case where PTHREAD_MUTEX_INITIALIZER is used
+ /* to handle the case where PTHREAD_MUTEX_INITIALIZER is used
instead of pthread_mutex_init, or where *p_mutex is not stored
in the execution->mutex_map for some reason. */
- if (!execution->getMutexMap()->contains(p_mutex)) {
+ if (!execution->getMutexMap()->contains(p_mutex)) {
pthread_mutex_init(p_mutex, NULL);
}
cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
return m->try_lock();
}
-int pthread_mutex_unlock(pthread_mutex_t *p_mutex) {
+int pthread_mutex_unlock(pthread_mutex_t *p_mutex) {
ModelExecution *execution = model->get_execution();
cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
}
int pthread_mutex_timedlock (pthread_mutex_t *__restrict p_mutex,
- const struct timespec *__restrict abstime) {
+ const struct timespec *__restrict abstime) {
// timedlock just gives the option of giving up on acquiring the lock, so return and let the scheduler decide which thread goes next
/*
- ModelExecution *execution = model->get_execution();
- if (!execution->mutex_map.contains(p_mutex)) {
- pthread_mutex_init(p_mutex, NULL);
- }
- cdsc::mutex *m = execution->mutex_map.get(p_mutex);
-
- if (m != NULL) {
- m->lock();
- } else {
- printf("something is wrong with pthread_mutex_timedlock\n");
- }
-
- printf("pthread_mutex_timedlock is called. It is currently implemented as a normal lock operation without no timeout\n");
-*/
+ ModelExecution *execution = model->get_execution();
+ if (!execution->mutex_map.contains(p_mutex)) {
+ pthread_mutex_init(p_mutex, NULL);
+ }
+ cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+
+ if (m != NULL) {
+ m->lock();
+ } else {
+ printf("something is wrong with pthread_mutex_timedlock\n");
+ }
+
+ printf("pthread_mutex_timedlock is called. It is currently implemented as a normal lock operation without no timeout\n");
+ */
return 0;
}
return 0;
}
-int pthread_cond_timedwait(pthread_cond_t *p_cond,
- pthread_mutex_t *p_mutex, const struct timespec *abstime) {
+int pthread_cond_timedwait(pthread_cond_t *p_cond,
+ pthread_mutex_t *p_mutex, const struct timespec *abstime) {
// implement cond_timedwait as a noop and let the scheduler decide which thread goes next
ModelExecution *execution = model->get_execution();
bool Scheduler::all_threads_sleeping() const
{
bool sleeping = false;
- for (int i = 0; i < enabled_len; i++)
+ for (int i = 0;i < enabled_len;i++)
if (enabled[i] == THREAD_ENABLED)
return false;
else if (enabled[i] == THREAD_SLEEP_SET)
{
int avail_threads = 0;
int thread_list[enabled_len];
- for (int i = 0; i< enabled_len; i++) {
- if (enabled[i] == THREAD_ENABLED)
- thread_list[avail_threads++] = i;
+ for (int i = 0;i< enabled_len;i++) {
+ if (enabled[i] == THREAD_ENABLED)
+ thread_list[avail_threads++] = i;
}
if (avail_threads == 0)
- return NULL; // No threads availablex
-
+ return NULL; // No threads available
+
Thread * thread = execution->getFuzzer()->selectThread(n, thread_list, avail_threads);
curr_thread_index = id_to_int(thread->get_id());
return thread;
int curr_id = current ? id_to_int(current->get_id()) : -1;
model_print("Scheduler: ");
- for (int i = 0; i < enabled_len; i++) {
+ for (int i = 0;i < enabled_len;i++) {
char str[20];
enabled_type_to_string(enabled[i], str);
model_print("[%i: %s%s]", i, i == curr_id ? "current, " : "", str);
Thread *current;
};
-#endif /* __SCHEDULE_H__ */
+#endif /* __SCHEDULE_H__ */
};
class SnapshotStack {
- public:
+public:
int backTrackBeforeStep(int seq_index);
void snapshotStep(int seq_index);
MEMALLOC
- private:
+private:
ModelVector<struct snapshot_entry> stack;
};
sscanf(buf, "%22s %p-%p", type, &begin, &end);
char * secondpart = strstr(buf, "]");
-
+
sscanf(&secondpart[2], "%c%c%c/%c%c%c SM=%3s %200s\n", &r, &w, &x, &mr, &mw, &mx, smstr, regionname);
if (w == 'w' && strstr(regionname, MYBINARYNAME)) {
size_t len = ((uintptr_t)end - (uintptr_t)begin) / PAGESIZE;
int SnapshotStack::backTrackBeforeStep(int seqindex)
{
int i;
- for (i = (int)stack.size() - 1; i >= 0; i++)
+ for (i = (int)stack.size() - 1;i >= 0;i--)
if (stack[i].index <= seqindex)
break;
else
typedef void (*VoidFuncPtr)();
void snapshot_system_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint);
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages, VoidFuncPtr entryPoint);
void snapshot_stack_init();
void snapshot_record(int seq_index);
/* Struct for each memory region */
struct MemoryRegion {
- void *basePtr; // base of memory region
- int sizeInPages; // size of memory region in pages
+ void *basePtr; // base of memory region
+ int sizeInPages; // size of memory region in pages
};
/** ReturnPageAlignedAddress returns a page aligned address for the
mprot_snapshotter(unsigned int numbackingpages, unsigned int numsnapshots, unsigned int nummemoryregions);
~mprot_snapshotter();
- struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
- snapshot_page_t *backingStore; //This pointer references an array of snapshotpage's that form the backing store
- void *backingStoreBasePtr; //This pointer references an array of snapshotpage's that form the backing store
- struct BackingPageRecord *backingRecords; //This pointer references an array of backingpagerecord's (same number of elements as backingstore
- struct SnapShotRecord *snapShots; //This pointer references the snapshot array
+ struct MemoryRegion *regionsToSnapShot; //This pointer references an array of memory regions to snapshot
+ snapshot_page_t *backingStore; //This pointer references an array of snapshot pages that form the backing store
+ void *backingStoreBasePtr; //This pointer references the base address of the backing store
+ struct BackingPageRecord *backingRecords; //This pointer references an array of backing page records (same number of elements as the backing store)
+ struct SnapShotRecord *snapShots; //This pointer references the snapshot array
- unsigned int lastSnapShot; //Stores the next snapshot record we should use
- unsigned int lastBackingPage; //Stores the next backingpage we should use
- unsigned int lastRegion; //Stores the next memory region to be used
+ unsigned int lastSnapShot; //Stores the next snapshot record we should use
+ unsigned int lastBackingPage; //Stores the next backingpage we should use
+ unsigned int lastRegion; //Stores the next memory region to be used
- unsigned int maxRegions; //Stores the max number of memory regions we support
- unsigned int maxBackingPages; //Stores the total number of backing pages
- unsigned int maxSnapShots; //Stores the total number of snapshots we allow
+ unsigned int maxRegions; //Stores the max number of memory regions we support
+ unsigned int maxBackingPages; //Stores the total number of backing pages
+ unsigned int maxSnapShots; //Stores the total number of snapshots we allow
MEMALLOC
};
if (si->si_code == SEGV_MAPERR) {
model_print("Segmentation fault at %p\n", si->si_addr);
model_print("For debugging, place breakpoint at: %s:%d\n",
- __FILE__, __LINE__);
+ __FILE__, __LINE__);
// print_trace(); // Trace printing may cause dynamic memory allocation
- exit(EXIT_FAILURE);
+ exit(EXIT_FAILURE);
}
void* addr = ReturnPageAlignedAddress(si->si_addr);
- unsigned int backingpage = mprot_snap->lastBackingPage++; //Could run out of pages...
+ unsigned int backingpage = mprot_snap->lastBackingPage++; //Could run out of pages...
if (backingpage == mprot_snap->maxBackingPages) {
model_print("Out of backing pages at %p\n", si->si_addr);
exit(EXIT_FAILURE);
}
static void mprot_snapshot_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint)
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages, VoidFuncPtr entryPoint)
{
/* Setup a stack for our signal handler.... */
stack_t ss;
memset(&si, 0, sizeof(si));
si.si_addr = ss.ss_sp;
mprot_handle_pf(SIGSEGV, &si, NULL);
- mprot_snap->lastBackingPage--; //remove the fake page we copied
+ mprot_snap->lastBackingPage--; //remove the fake page we copied
void *basemySpace = model_malloc((numheappages + 1) * PAGESIZE);
void *pagealignedbase = PageAlignAddressUpward(basemySpace);
}
DEBUG("snapshot region %p-%p (%u page%s)\n",
- addr, (char *)addr + numPages * PAGESIZE, numPages,
- numPages > 1 ? "s" : "");
+ addr, (char *)addr + numPages * PAGESIZE, numPages,
+ numPages > 1 ? "s" : "");
mprot_snap->regionsToSnapShot[memoryregion].basePtr = addr;
mprot_snap->regionsToSnapShot[memoryregion].sizeInPages = numPages;
}
static snapshot_id mprot_take_snapshot()
{
- for (unsigned int region = 0; region < mprot_snap->lastRegion; region++) {
+ for (unsigned int region = 0;region < mprot_snap->lastRegion;region++) {
if (mprotect(mprot_snap->regionsToSnapShot[region].basePtr, mprot_snap->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ) == -1) {
perror("mprotect");
model_print("Failed to mprotect inside of takeSnapShot\n");
{
#if USE_MPROTECT_SNAPSHOT == 2
if (mprot_snap->lastSnapShot == (theID + 1)) {
- for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage; page < mprot_snap->lastBackingPage; page++) {
+ for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage;page < mprot_snap->lastBackingPage;page++) {
memcpy(mprot_snap->backingRecords[page].basePtrOfPage, &mprot_snap->backingStore[page], sizeof(snapshot_page_t));
}
return;
#endif
HashTable< void *, bool, uintptr_t, 4, model_malloc, model_calloc, model_free> duplicateMap;
- for (unsigned int region = 0; region < mprot_snap->lastRegion; region++) {
+ for (unsigned int region = 0;region < mprot_snap->lastRegion;region++) {
if (mprotect(mprot_snap->regionsToSnapShot[region].basePtr, mprot_snap->regionsToSnapShot[region].sizeInPages * sizeof(snapshot_page_t), PROT_READ | PROT_WRITE) == -1) {
perror("mprotect");
model_print("Failed to mprotect inside of takeSnapShot\n");
exit(EXIT_FAILURE);
}
}
- for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage; page < mprot_snap->lastBackingPage; page++) {
+ for (unsigned int page = mprot_snap->snapShots[theID].firstBackingPage;page < mprot_snap->lastBackingPage;page++) {
if (!duplicateMap.contains(mprot_snap->backingRecords[page].basePtrOfPage)) {
duplicateMap.put(mprot_snap->backingRecords[page].basePtrOfPage, true);
memcpy(mprot_snap->backingRecords[page].basePtrOfPage, &mprot_snap->backingStore[page], sizeof(snapshot_page_t));
}
mprot_snap->lastSnapShot = theID;
mprot_snap->lastBackingPage = mprot_snap->snapShots[theID].firstBackingPage;
- mprot_take_snapshot(); //Make sure current snapshot is still good...All later ones are cleared
+ mprot_take_snapshot(); //Make sure current snapshot is still good...All later ones are cleared
}
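/* Editor's note on the mechanism above: snapshotted pages are mapped
 * read-only, the first write faults into mprot_handle_pf, which copies the
 * page to the backing store and re-enables writing; rollback copies the
 * backing pages back. Minimal sketch of the protect/unprotect step
 * (error handling elided): */
static void protect_pages(void *base, size_t npages)
{
	mprotect(base, npages * PAGESIZE, PROT_READ);	// further writes fault
}

static void unprotect_page(void *page)
{
	mprotect(page, PAGESIZE, PROT_READ | PROT_WRITE);	// handler re-enables
}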
-#else /* !USE_MPROTECT_SNAPSHOT */
+#else /* !USE_MPROTECT_SNAPSHOT */
-#define SHARED_MEMORY_DEFAULT (100 * ((size_t)1 << 20)) // 100mb for the shared memory
-#define STACK_SIZE_DEFAULT (((size_t)1 << 20) * 20) // 20 mb out of the above 100 mb for my stack
+#define SHARED_MEMORY_DEFAULT (100 * ((size_t)1 << 20)) // 100 MB for the shared memory
+#define STACK_SIZE_DEFAULT (((size_t)1 << 20) * 20) // 20 MB of the above 100 MB, for the shared stack
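/* Editor's sketch (an assumption about what createSharedMemory, used below,
 * does): a MAP_SHARED | MAP_ANONYMOUS mapping is preserved across fork(),
 * which is what lets the parent and the forked child share fork_snap and
 * the shared stack. */
#include <sys/mman.h>
#include <stddef.h>

static void *alloc_shared_region(size_t size)
{
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	return (mem == MAP_FAILED) ? NULL : mem;
}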
struct fork_snapshotter {
/** @brief Pointer to the shared (non-snapshot) memory heap base
static struct fork_snapshotter *fork_snap = NULL;
/** @statics
-* These variables are necessary because the stack is shared region and
-* there exists a race between all processes executing the same function.
-* To avoid the problem above, we require variables allocated in 'safe' regions.
-* The bug was actually observed with the forkID, these variables below are
-* used to indicate the various contexts to which to switch to.
-*
-* @private_ctxt: the context which is internal to the current process. Used
-* for running the internal snapshot/rollback loop.
-* @exit_ctxt: a special context used just for exiting from a process (so we
-* can use swapcontext() instead of setcontext() + hacks)
-* @snapshotid: it is a running counter for the various forked processes
-* snapshotid. it is incremented and set in a persistently shared record
-*/
+ * These variables are necessary because the stack is a shared region and
+ * there exists a race between all processes executing the same function.
+ * To avoid that problem, we require these variables to be allocated in 'safe' regions.
+ * The bug was actually observed with forkID; the variables below are
+ * used to indicate the various contexts to switch to.
+ *
+ * @private_ctxt: the context which is internal to the current process. Used
+ * for running the internal snapshot/rollback loop.
+ * @exit_ctxt: a special context used just for exiting from a process (so we
+ * can use swapcontext() instead of setcontext() + hacks)
+ * @snapshotid: a running counter for the snapshot IDs of the various forked
+ * processes; it is incremented and set in a persistently shared record
+ */
static ucontext_t private_ctxt;
static ucontext_t exit_ctxt;
static snapshot_id snapshotid = 0;
* @param func The entry point function for the context
*/
static void create_context(ucontext_t *ctxt, void *stack, size_t stacksize,
- void (*func)(void))
+ void (*func)(void))
{
getcontext(ctxt);
ctxt->uc_stack.ss_sp = stack;
}
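/* Editor's sketch of how a context prepared by create_context is entered
 * (standalone, hypothetical names; the snapshotter below does the same with
 * private_ctxt and fork_snap->shared_ctxt): */
#include <ucontext.h>

static ucontext_t caller_ctxt, callee_ctxt;
static char callee_stack[64 * 1024];

static void callee(void)
{
	/* ... runs on callee_stack ... */
	setcontext(&caller_ctxt);	// jump back to the caller
}

static void context_example(void)
{
	create_context(&callee_ctxt, callee_stack, sizeof(callee_stack), callee);
	swapcontext(&caller_ctxt, &callee_ctxt);	// save caller, run callee
}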
static void fork_snapshot_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint)
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages, VoidFuncPtr entryPoint)
{
if (!fork_snap)
createSharedMemory();
/* setup the shared-stack context */
create_context(&fork_snap->shared_ctxt, fork_snap->mStackBase,
- STACK_SIZE_DEFAULT, entryPoint);
+ STACK_SIZE_DEFAULT, entryPoint);
/* switch to a new entryPoint context, on a new stack */
model_swapcontext(&private_ctxt, &fork_snap->shared_ctxt);
setcontext(&fork_snap->shared_ctxt);
} else {
DEBUG("parent PID: %d, child PID: %d, snapshot ID: %d\n",
- getpid(), forkedID, snapshotid);
+ getpid(), forkedID, snapshotid);
while (waitpid(forkedID, NULL, 0) < 0) {
/* waitpid() may be interrupted */
fork_snap->mIDToRollback = -1;
}
-#endif /* !USE_MPROTECT_SNAPSHOT */
+#endif /* !USE_MPROTECT_SNAPSHOT */
/**
* @brief Initializes the snapshot system
* @param entryPoint the function that should run the program.
*/
void snapshot_system_init(unsigned int numbackingpages,
- unsigned int numsnapshots, unsigned int nummemoryregions,
- unsigned int numheappages, VoidFuncPtr entryPoint)
+ unsigned int numsnapshots, unsigned int nummemoryregions,
+ unsigned int numheappages, VoidFuncPtr entryPoint)
{
#if USE_MPROTECT_SNAPSHOT
mprot_snapshot_init(numbackingpages, numsnapshots, nummemoryregions, numheappages, entryPoint);
// Iterate over the returned symbol lines. Skip the first; it is the
// address of this function.
- for (int i = 1; i < addrlen; i++) {
+ for (int i = 1;i < addrlen;i++) {
char *begin_name = 0, *begin_offset = 0, *end_offset = 0;
// find parentheses and +address offset surrounding the mangled name:
// ./module(function+0x15c) [0x8048a6d]
- for (char *p = symbollist[i]; *p; ++p) {
+ for (char *p = symbollist[i];*p;++p) {
if (*p == '(')
begin_name = p;
else if (*p == '+')
int status;
char* ret = abi::__cxa_demangle(begin_name,
- funcname, &funcnamesize, &status);
+ funcname, &funcnamesize, &status);
if (status == 0) {
- funcname = ret; // use possibly realloc()-ed string
+ funcname = ret; // use possibly realloc()-ed string
dprintf(fd, " %s : %s+%s\n",
- symbollist[i], funcname, begin_offset);
+ symbollist[i], funcname, begin_offset);
} else {
// demangling failed. Output function name as a C function with
// no arguments.
dprintf(fd, " %s : %s()+%s\n",
- symbollist[i], begin_name, begin_offset);
+ symbollist[i], begin_name, begin_offset);
}
} else {
// couldn't parse the line? print the whole line.
print_stacktrace(fileno(out), max_frames);
}
-#endif // __STACKTRACE_H__
+#endif // __STACKTRACE_H__
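/* Editor's sketch: using the FILE* overload defined above to dump the
 * current call stack (63 frames is an illustrative cap): */
#include <cstdio>

static void report_failure()
{
	fprintf(stderr, "unexpected state; backtrace follows:\n");
	print_stacktrace(stderr, 63);	// demangled frames, one per line
}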
template<typename _Tp>
class ModelList : public std::list<_Tp, ModelAlloc<_Tp> >
{
- public:
+public:
typedef std::list< _Tp, ModelAlloc<_Tp> > list;
ModelList() :
template<typename _Tp>
class SnapList : public std::list<_Tp, SnapshotAlloc<_Tp> >
{
- public:
+public:
typedef std::list<_Tp, SnapshotAlloc<_Tp> > list;
SnapList() :
template<typename _Tp>
class ModelVector : public std::vector<_Tp, ModelAlloc<_Tp> >
{
- public:
+public:
typedef std::vector< _Tp, ModelAlloc<_Tp> > vector;
ModelVector() :
template<typename _Tp>
class SnapVector : public std::vector<_Tp, SnapshotAlloc<_Tp> >
{
- public:
+public:
typedef std::vector< _Tp, SnapshotAlloc<_Tp> > vector;
SnapVector() :
SNAPSHOTALLOC
};
-#endif /* __STL_MODEL_H__ */
+#endif /* __STL_MODEL_H__ */
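/* Editor's sketch: the wrappers above are drop-in std containers bound to
 * the two heaps: */
static void stl_model_example()
{
	SnapVector<int> rolled_back;	// snapshot heap: reverts on rollback
	ModelVector<int> persistent;	// model heap: survives rollback
	rolled_back.push_back(1);
	persistent.push_back(2);
}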
return id;
}
-#endif /* __THREADS_MODEL_H__ */
+#endif /* __THREADS_MODEL_H__ */
stack(NULL),
user_thread(NULL),
id(tid),
- state(THREAD_READY), /* Thread is always ready? */
+ state(THREAD_READY), /* Thread is always ready? */
last_action_val(0),
model_thread(true)
{
if (ret)
model_print("Error in create_context\n");
- user_thread->priv = this; // WL
+ user_thread->priv = this; // WL
}
/**
bool Thread::is_waiting_on(const Thread *t) const
{
Thread *wait;
- for (wait = waiting_on(); wait != NULL; wait = wait->waiting_on())
+ for (wait = waiting_on();wait != NULL;wait = wait->waiting_on())
if (wait == t)
return true;
return false;
class TraceAnalysis {
- public:
+public:
/** setExecution is called once after installation with a reference to
* the ModelExecution object. */
virtual void setExecution(ModelExecution * execution) = 0;
-
+
/** analyze is called once for each feasible trace with the complete
* action_list object. */