using std::memory_order_release;
using std::memory_order_acq_rel;
using std::memory_order_seq_cst;
+ using std::volatile_order;
/**
* @brief A recognizable don't-care value for use in the ModelAction::value
PTHREAD_CREATE, // < A pthread creation action
PTHREAD_JOIN, // < A pthread join action
ATOMIC_UNINIT, // < Represents an uninitialized atomic
- ATOMIC_READ, // < An atomic read action
+ NONATOMIC_WRITE, // < Represents a non-atomic store
+ ATOMIC_INIT, // < Initialization of an atomic object (e.g., atomic_init())
ATOMIC_WRITE, // < An atomic write action
+ ATOMIC_RMW, // < The write part of an atomic RMW action
+ ATOMIC_READ, // < An atomic read action
ATOMIC_RMWR, // < The read part of an atomic RMW action
ATOMIC_RMWRCAS, // < The read part of an atomic RMW action
- ATOMIC_RMW, // < The write part of an atomic RMW action
ATOMIC_RMWC, // < Convert an atomic RMW action into a READ
- ATOMIC_INIT, // < Initialization of an atomic object (e.g., atomic_init())
+
ATOMIC_FENCE, // < A fence action
ATOMIC_LOCK, // < A lock action
ATOMIC_TRYLOCK, // < A trylock action
ATOMIC_NOTIFY_ALL, // < A notify all action
ATOMIC_WAIT, // < A wait action
ATOMIC_ANNOTATION, // < An annotation action to pass information to a trace analysis
+ VOLATILE_READ,
+ VOLATILE_WRITE,
NOOP // no operation, which returns control to scheduler
} action_type_t;
#include "cmodelint.h"
#include "snapshot-interface.h"
#include "threads-model.h"
+#include "datarace.h"
- memory_order orders[6] = {
+ memory_order orders[8] = {
memory_order_relaxed, memory_order_consume, memory_order_acquire,
- memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+ memory_order_release, memory_order_acq_rel, memory_order_seq_cst,
+ volatile_order
};
static void ensureModel() {
model->switch_to_master(new ModelAction(ATOMIC_RMWC, position, orders[atomic_index], obj));
}
+ // cds volatile loads
+ uint8_t cds_volatile_load8(void * obj, int atomic_index, const char * position) {
+ ensureModel();
+ return (uint8_t) model->switch_to_master(
+ new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj));
+ }
+ uint16_t cds_volatile_load16(void * obj, int atomic_index, const char * position) {
+ ensureModel();
+ return (uint16_t) model->switch_to_master(
+ new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj));
+ }
+ uint32_t cds_volatile_load32(void * obj, int atomic_index, const char * position) {
+ ensureModel();
+ return (uint32_t) model->switch_to_master(
+ new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj)
+ );
+ }
+ uint64_t cds_volatile_load64(void * obj, int atomic_index, const char * position) {
+ ensureModel();
+ return model->switch_to_master(
+ new ModelAction(VOLATILE_READ, position, orders[atomic_index], obj));
+ }
+
+ // cds volatile stores
+ void cds_volatile_store8(void * obj, uint8_t val, int atomic_index, const char * position) {
+ ensureModel();
+ model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
+ }
+ void cds_volatile_store16(void * obj, uint16_t val, int atomic_index, const char * position) {
+ ensureModel();
+ model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
+ }
+ void cds_volatile_store32(void * obj, uint32_t val, int atomic_index, const char * position) {
+ ensureModel();
+ model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
+ }
+ void cds_volatile_store64(void * obj, uint64_t val, int atomic_index, const char * position) {
+ ensureModel();
+ model->switch_to_master(new ModelAction(VOLATILE_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
+ }
+
// cds atomic inits
-void cds_atomic_init8(void * obj, uint8_t val, const char * position) {
- ensureModel();
- model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val));
-}
-void cds_atomic_init16(void * obj, uint16_t val, const char * position) {
- ensureModel();
- model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val));
-}
-void cds_atomic_init32(void * obj, uint32_t val, const char * position) {
- ensureModel();
- model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val));
-}
-void cds_atomic_init64(void * obj, uint64_t val, const char * position) {
- ensureModel();
- model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, val));
-}
+/* CDSATOMICINT(size): defines cds_atomic_init<size>(obj, val, position).
+ * Reports an ATOMIC_INIT action to the model (relaxed, matching the
+ * non-synchronizing semantics of atomic_init), writes the value through
+ * to memory, and records a write on every byte of the object so the
+ * data-race detector sees the initialization. */
+#define CDSATOMICINT(size) \
+	void cds_atomic_init ## size (void * obj, uint ## size ## _t val, const char * position) { \
+		ensureModel(); \
+		model->switch_to_master(new ModelAction(ATOMIC_INIT, position, memory_order_relaxed, obj, (uint64_t) val)); \
+		*((uint ## size ## _t *)obj) = val; \
+		thread_id_t tid = thread_current()->get_id(); \
+		for(int i=0;i < size / 8;i++) { \
+			recordWrite(tid, (void *)(((char *)obj)+i)); \
+		} \
+	}
+CDSATOMICINT(8)
+CDSATOMICINT(16)
+CDSATOMICINT(32)
+CDSATOMICINT(64)
// cds atomic loads
-uint8_t cds_atomic_load8(void * obj, int atomic_index, const char * position) {
- ensureModel();
- return (uint8_t) model->switch_to_master(
- new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj));
-}
-uint16_t cds_atomic_load16(void * obj, int atomic_index, const char * position) {
- ensureModel();
- return (uint16_t) model->switch_to_master(
- new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj));
-}
-uint32_t cds_atomic_load32(void * obj, int atomic_index, const char * position) {
- ensureModel();
- return (uint32_t) model->switch_to_master(
- new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)
- );
-}
-uint64_t cds_atomic_load64(void * obj, int atomic_index, const char * position) {
- ensureModel();
- return model->switch_to_master(
- new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj));
-}
+/* CDSATOMICLOAD(size): defines cds_atomic_load<size>(obj, atomic_index, position).
+ * Forwards an ATOMIC_READ action to the model and returns the value the
+ * model selected for this read, truncated to the access width. Unlike the
+ * store/init macros, no shadow-memory write is recorded here. */
+#define CDSATOMICLOAD(size) \
+	uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
+		ensureModel(); \
+		return (uint ## size ## _t)model->switch_to_master( \
+			new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
+	}
+
+CDSATOMICLOAD(8)
+CDSATOMICLOAD(16)
+CDSATOMICLOAD(32)
+CDSATOMICLOAD(64)
// cds atomic stores
-void cds_atomic_store8(void * obj, uint8_t val, int atomic_index, const char * position) {
- ensureModel();
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
-}
-void cds_atomic_store16(void * obj, uint16_t val, int atomic_index, const char * position) {
- ensureModel();
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
-}
-void cds_atomic_store32(void * obj, uint32_t val, int atomic_index, const char * position) {
- ensureModel();
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
-}
-void cds_atomic_store64(void * obj, uint64_t val, int atomic_index, const char * position) {
- ensureModel();
- model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val));
-}
+/* CDSATOMICSTORE(size): defines cds_atomic_store<size>(obj, val, atomic_index, position).
+ * Reports an ATOMIC_WRITE action to the model, performs the actual store,
+ * and records a write on every byte of the object for the data-race
+ * detector (same bookkeeping pattern as CDSATOMICINT). */
+#define CDSATOMICSTORE(size) \
+	void cds_atomic_store ## size(void * obj, uint ## size ## _t val, int atomic_index, const char * position) { \
+		ensureModel(); \
+		model->switch_to_master(new ModelAction(ATOMIC_WRITE, position, orders[atomic_index], obj, (uint64_t) val)); \
+		*((uint ## size ## _t *)obj) = val; \
+		thread_id_t tid = thread_current()->get_id(); \
+		for(int i=0;i < size / 8;i++) { \
+			recordWrite(tid, (void *)(((char *)obj)+i)); \
+		} \
+	}
+
+CDSATOMICSTORE(8)
+CDSATOMICSTORE(16)
+CDSATOMICSTORE(32)
+CDSATOMICSTORE(64)
+
#define _ATOMIC_RMW_(__op__, size, addr, val, atomic_index, position) \
({ \
uint ## size ## _t _val = val; \
_copy __op__ _val; \
model_rmw_action_helper(addr, (uint64_t) _copy, atomic_index, position); \
+ *((uint ## size ## _t *)addr) = _copy; \
+ thread_id_t tid = thread_current()->get_id(); \
+ for(int i=0;i < size / 8;i++) { \
+ recordWrite(tid, (void *)(((char *)addr)+i)); \
+ } \
return _old; \
})
// cds atomic exchange
-uint8_t cds_atomic_exchange8(void* addr, uint8_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( =, 8, addr, val, atomic_index, position);
-}
-uint16_t cds_atomic_exchange16(void* addr, uint16_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( =, 16, addr, val, atomic_index, position);
-}
-uint32_t cds_atomic_exchange32(void* addr, uint32_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( =, 32, addr, val, atomic_index, position);
-}
-uint64_t cds_atomic_exchange64(void* addr, uint64_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( =, 64, addr, val, atomic_index, position);
-}
+/* CDSATOMICEXCHANGE(size): atomic exchange; returns the old value. All RMW
+ * bookkeeping (model action, shadow store, recordWrite) is in _ATOMIC_RMW_. */
+#define CDSATOMICEXCHANGE(size) \
+	uint ## size ## _t cds_atomic_exchange ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
+		_ATOMIC_RMW_( =, size, addr, val, atomic_index, position); \
+	}
+
+CDSATOMICEXCHANGE(8)
+CDSATOMICEXCHANGE(16)
+CDSATOMICEXCHANGE(32)
+CDSATOMICEXCHANGE(64)
// cds atomic fetch add
-uint8_t cds_atomic_fetch_add8(void* addr, uint8_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( +=, 8, addr, val, atomic_index, position);
-}
-uint16_t cds_atomic_fetch_add16(void* addr, uint16_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( +=, 16, addr, val, atomic_index, position);
-}
-uint32_t cds_atomic_fetch_add32(void* addr, uint32_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( +=, 32, addr, val, atomic_index, position);
-}
-uint64_t cds_atomic_fetch_add64(void* addr, uint64_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( +=, 64, addr, val, atomic_index, position);
-}
+/* CDSATOMICADD(size): atomic fetch-add; returns the value before the add. */
+#define CDSATOMICADD(size) \
+	uint ## size ## _t cds_atomic_fetch_add ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
+		_ATOMIC_RMW_( +=, size, addr, val, atomic_index, position); \
+	}
+
+CDSATOMICADD(8)
+CDSATOMICADD(16)
+CDSATOMICADD(32)
+CDSATOMICADD(64)
// cds atomic fetch sub
-uint8_t cds_atomic_fetch_sub8(void* addr, uint8_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( -=, 8, addr, val, atomic_index, position);
-}
-uint16_t cds_atomic_fetch_sub16(void* addr, uint16_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( -=, 16, addr, val, atomic_index, position);
-}
-uint32_t cds_atomic_fetch_sub32(void* addr, uint32_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( -=, 32, addr, val, atomic_index, position);
-}
-uint64_t cds_atomic_fetch_sub64(void* addr, uint64_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( -=, 64, addr, val, atomic_index, position);
-}
+/* CDSATOMICSUB(size): atomic fetch-sub; returns the value before the subtract. */
+#define CDSATOMICSUB(size) \
+	uint ## size ## _t cds_atomic_fetch_sub ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
+		_ATOMIC_RMW_( -=, size, addr, val, atomic_index, position); \
+	}
+
+CDSATOMICSUB(8)
+CDSATOMICSUB(16)
+CDSATOMICSUB(32)
+CDSATOMICSUB(64)
// cds atomic fetch and
-uint8_t cds_atomic_fetch_and8(void* addr, uint8_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( &=, 8, addr, val, atomic_index, position);
-}
-uint16_t cds_atomic_fetch_and16(void* addr, uint16_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( &=, 16, addr, val, atomic_index, position);
-}
-uint32_t cds_atomic_fetch_and32(void* addr, uint32_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( &=, 32, addr, val, atomic_index, position);
-}
-uint64_t cds_atomic_fetch_and64(void* addr, uint64_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( &=, 64, addr, val, atomic_index, position);
-}
+/* CDSATOMICAND(size): atomic fetch-and; returns the value before the AND. */
+#define CDSATOMICAND(size) \
+	uint ## size ## _t cds_atomic_fetch_and ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
+		_ATOMIC_RMW_( &=, size, addr, val, atomic_index, position); \
+	}
+
+CDSATOMICAND(8)
+CDSATOMICAND(16)
+CDSATOMICAND(32)
+CDSATOMICAND(64)
// cds atomic fetch or
-uint8_t cds_atomic_fetch_or8(void* addr, uint8_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( |=, 8, addr, val, atomic_index, position);
-}
-uint16_t cds_atomic_fetch_or16(void* addr, uint16_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( |=, 16, addr, val, atomic_index, position);
-}
-uint32_t cds_atomic_fetch_or32(void* addr, uint32_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( |=, 32, addr, val, atomic_index, position);
-}
-uint64_t cds_atomic_fetch_or64(void* addr, uint64_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( |=, 64, addr, val, atomic_index, position);
-}
+/* CDSATOMICOR(size): atomic fetch-or; returns the value before the OR. */
+#define CDSATOMICOR(size) \
+	uint ## size ## _t cds_atomic_fetch_or ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
+		_ATOMIC_RMW_( |=, size, addr, val, atomic_index, position); \
+	}
+
+CDSATOMICOR(8)
+CDSATOMICOR(16)
+CDSATOMICOR(32)
+CDSATOMICOR(64)
// cds atomic fetch xor
-uint8_t cds_atomic_fetch_xor8(void* addr, uint8_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( ^=, 8, addr, val, atomic_index, position);
-}
-uint16_t cds_atomic_fetch_xor16(void* addr, uint16_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( ^=, 16, addr, val, atomic_index, position);
-}
-uint32_t cds_atomic_fetch_xor32(void* addr, uint32_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( ^=, 32, addr, val, atomic_index, position);
-}
-uint64_t cds_atomic_fetch_xor64(void* addr, uint64_t val, int atomic_index, const char * position) {
- _ATOMIC_RMW_( ^=, 64, addr, val, atomic_index, position);
-}
+/* CDSATOMICXOR(size): atomic fetch-xor; returns the value before the XOR. */
+#define CDSATOMICXOR(size) \
+	uint ## size ## _t cds_atomic_fetch_xor ## size(void* addr, uint ## size ## _t val, int atomic_index, const char * position) { \
+		_ATOMIC_RMW_( ^=, size, addr, val, atomic_index, position); \
+	}
+
+CDSATOMICXOR(8)
+CDSATOMICXOR(16)
+CDSATOMICXOR(32)
+CDSATOMICXOR(64)
// cds atomic compare and exchange
// In order to accommodate the LLVM PASS, the return values are not true or false.
uint ## size ## _t _expected = expected; \
uint ## size ## _t _old = model_rmwrcas_action_helper(addr, atomic_index, _expected, sizeof(_expected), position); \
if (_old == _expected) { \
- model_rmw_action_helper(addr, (uint64_t) _desired, atomic_index, position); return _expected; } \
+ model_rmw_action_helper(addr, (uint64_t) _desired, atomic_index, position); \
+ *((uint ## size ## _t *)addr) = desired; \
+ thread_id_t tid = thread_current()->get_id(); \
+ for(int i=0;i < size / 8;i++) { \
+ recordWrite(tid, (void *)(((char *)addr)+i)); \
+ } \
+ return _expected; } \
else { \
model_rmwc_action_helper(addr, atomic_index, position); _expected = _old; return _old; } \
})
// atomic_compare_exchange version 1: the CmpOperand (corresponds to expected)
// extracted from LLVM IR is an integer type.
+/* CDSATOMICCASV1(size): compare-exchange returning the old value (not a
+ * boolean), as expected by the LLVM instrumentation pass.
+ * NOTE(review): atomic_index_fail is accepted but never used — only the
+ * success ordering is passed to _ATOMIC_CMPSWP_; confirm the failure
+ * ordering is intentionally ignored. */
+#define CDSATOMICCASV1(size) \
+	uint ## size ## _t cds_atomic_compare_exchange ## size ## _v1(void* addr, uint ## size ## _t expected, uint ## size ## _t desired, int atomic_index_succ, int atomic_index_fail, const char *position) { \
+		_ATOMIC_CMPSWP_(size, addr, expected, desired, atomic_index_succ, position); \
+	}
-uint8_t cds_atomic_compare_exchange8_v1(void* addr, uint8_t expected, uint8_t desired,
- int atomic_index_succ, int atomic_index_fail, const char *position )
-{
- _ATOMIC_CMPSWP_(8, addr, expected, desired,
- atomic_index_succ, position);
-}
-uint16_t cds_atomic_compare_exchange16_v1(void* addr, uint16_t expected, uint16_t desired,
- int atomic_index_succ, int atomic_index_fail, const char *position)
-{
- _ATOMIC_CMPSWP_(16, addr, expected, desired,
- atomic_index_succ, position);
-}
-uint32_t cds_atomic_compare_exchange32_v1(void* addr, uint32_t expected, uint32_t desired,
- int atomic_index_succ, int atomic_index_fail, const char *position)
-{
- _ATOMIC_CMPSWP_(32, addr, expected, desired,
- atomic_index_succ, position);
-}
-uint64_t cds_atomic_compare_exchange64_v1(void* addr, uint64_t expected, uint64_t desired,
- int atomic_index_succ, int atomic_index_fail, const char *position)
-{
- _ATOMIC_CMPSWP_(64, addr, expected, desired,
- atomic_index_succ, position);
-}
+CDSATOMICCASV1(8)
+CDSATOMICCASV1(16)
+CDSATOMICCASV1(32)
+CDSATOMICCASV1(64)
// atomic_compare_exchange version 2
-bool cds_atomic_compare_exchange8_v2(void* addr, uint8_t* expected, uint8_t desired,
- int atomic_index_succ, int atomic_index_fail, const char *position )
-{
- uint8_t ret = cds_atomic_compare_exchange8_v1(addr, *expected,
- desired, atomic_index_succ, atomic_index_fail, position);
- if (ret == *expected) return true;
- else return false;
-}
-bool cds_atomic_compare_exchange16_v2(void* addr, uint16_t* expected, uint16_t desired,
- int atomic_index_succ, int atomic_index_fail, const char *position)
-{
- uint16_t ret = cds_atomic_compare_exchange16_v1(addr, *expected,
- desired, atomic_index_succ, atomic_index_fail, position);
- if (ret == *expected) return true;
- else return false;
-}
-bool cds_atomic_compare_exchange32_v2(void* addr, uint32_t* expected, uint32_t desired,
- int atomic_index_succ, int atomic_index_fail, const char *position)
-{
- uint32_t ret = cds_atomic_compare_exchange32_v1(addr, *expected,
- desired, atomic_index_succ, atomic_index_fail, position);
- if (ret == *expected) return true;
- else return false;
-}
-bool cds_atomic_compare_exchange64_v2(void* addr, uint64_t* expected, uint64_t desired,
- int atomic_index_succ, int atomic_index_fail, const char *position)
-{
- uint64_t ret = cds_atomic_compare_exchange64_v1(addr, *expected,
- desired, atomic_index_succ, atomic_index_fail, position);
- if (ret == *expected) return true;
- else return false;
-}
+#define CDSATOMICCASV2(size) \
+ bool cds_atomic_compare_exchange ## size ## _v2(void* addr, uint ## size ## _t* expected, uint ## size ## _t desired, int atomic_index_succ, int atomic_index_fail, const char *position) { \
+ uint ## size ## _t ret = cds_atomic_compare_exchange ## size ## _v1(addr, *expected, desired, atomic_index_succ, atomic_index_fail, position); \
+ if (ret == *expected) {return true;} else {return false;} \
+ }
+CDSATOMICCASV2(8)
+CDSATOMICCASV2(16)
+CDSATOMICCASV2(32)
+CDSATOMICCASV2(64)
// cds atomic thread fence
// add func id to reverse func map
ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
- if ( func_map_rev->size() <= func_id )
- func_map_rev->resize( func_id + 1 );
+ if ( func_map_rev->size() <= func_id )
+ func_map_rev->resize( func_id + 1 );
func_map_rev->at(func_id) = funcName;
} else {
func_id = history->getFuncMap()->get(funcName);
func_id = history->getFuncMap()->get(funcName);
/* func_id not found; this could happen in the case where a function calls cds_func_entry
- * when the model has been defined yet, but then an atomic inside the function initializes
- * the model. And then cds_func_exit is called upon the function exiting.
- */
+	 * when the model has not yet been defined, but then an atomic inside the function initializes
+ * the model. And then cds_func_exit is called upon the function exiting.
+ */
if (func_id == 0)
return;
next_thread_id(INITIAL_THREAD_ID),
used_sequence_numbers(0),
bugs(),
+ dataraces(),
bad_synchronization(false),
asserted(false)
{ }
~model_snapshot_members() {
for (unsigned int i = 0;i < bugs.size();i++)
delete bugs[i];
+ for (unsigned int i = 0;i < dataraces.size(); i++)
+ delete dataraces[i];
bugs.clear();
+ dataraces.clear();
}
unsigned int next_thread_id;
modelclock_t used_sequence_numbers;
SnapVector<bug_message *> bugs;
+ SnapVector<bug_message *> dataraces;
/** @brief Incorrectly-ordered synchronization was made */
bool bad_synchronization;
bool asserted;
return false;
}
+	/* @brief Put data races in a different list from other bugs. The purpose
+	 * is to continue the program until the number of data races exceeds a
+	 * threshold (see have_fatal_bug_reports()).
+	 * @param msg the data-race description to record
+	 * @return true if the race was recorded on a feasible prefix */
+
+	/* TODO: check whether set_assert should be called */
+	bool ModelExecution::assert_race(const char *msg)
+	{
+		priv->dataraces.push_back(new bug_message(msg));
+
+		if (isfeasibleprefix()) {
+			set_assert();
+			return true;
+		}
+		return false;
+	}
+
+
/** @return True, if any (fatal) bugs have been reported for this execution;
 *  data races are tracked separately (see have_fatal_bug_reports()). */
bool ModelExecution::have_bug_reports() const
{
	/* empty() states the intent directly instead of size() != 0 */
	return !priv->bugs.empty();
}
+ /** @return True, if any fatal bugs have been reported for this execution.
+ * Any bug other than a data race is considered a fatal bug. Data races
+ * are not considered fatal unless the number of races is exceeds
+ * a threshold (temporarily set as 15).
+ */
+ bool ModelExecution::have_fatal_bug_reports() const
+ {
+ return priv->bugs.size() != 0 || priv->dataraces.size() >= 15;
+ }
+
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
return &priv->bugs;
return true;
}
+/* Lazily convert a plain (non-atomic) store at @a location into a
+ * NONATOMIC_WRITE ModelAction so that atomic reads of the same location
+ * may read-from it. The owning thread and clock of the original store are
+ * recovered from the race-detector shadow memory (getStoreThreadAndClock)
+ * and the action is back-dated to that clock before being spliced into
+ * the existing action lists.
+ * NOTE(review): the stored value is read as a full uint64_t regardless of
+ * the original access width — confirm narrower locations are padded/safe.
+ * @param location the memory location of the non-atomic store
+ * @return the newly created (and list-inserted) NONATOMIC_WRITE action */
+ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
+	uint64_t value = *((const uint64_t *) location);
+	modelclock_t storeclock;
+	thread_id_t storethread;
+	getStoreThreadAndClock(location, &storethread, &storeclock);
+	setAtomicStoreFlag(location);
+	ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
+	act->set_seq_number(storeclock);
+	add_normal_write_to_lists(act);
+	add_write_to_lists(act);
+	w_modification_order(act);
+	return act;
+}
+
+
/**
* Processes a read model action.
void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
{
SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
- while(true) {
+ bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
+ if (hasnonatomicstore) {
+ ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
+ rf_set->push_back(nonatomicstore);
+ }
+ while(true) {
int index = fuzzer->selectWrite(curr, rf_set);
ModelAction *rf = (*rf_set)[index];
}
}
+/* Insert @a act into @a list at the position matching its sequence number.
+ * Fast path: append when the list is empty or already ends at act's
+ * sequence number; otherwise scan backwards for an entry with the same
+ * sequence number and insert after it.
+ * NOTE(review): if no existing entry shares act's sequence number, the
+ * backwards scan finds no slot and act is silently dropped — confirm the
+ * lazy-insertion protocol guarantees a matching entry always exists. */
+void insertIntoActionList(action_list_t *list, ModelAction *act) {
+	action_list_t::reverse_iterator rit = list->rbegin();
+	modelclock_t next_seq = act->get_seq_number();
+	if (rit == list->rend() || (*rit)->get_seq_number() == next_seq)
+		list->push_back(act);
+	else {
+		for(;rit != list->rend();rit++) {
+			if ((*rit)->get_seq_number() == next_seq) {
+				action_list_t::iterator it = rit.base();
+				list->insert(it, act);
+				break;
+			}
+		}
+	}
+}
+
+/* Like insertIntoActionList(), but additionally builds act's clock vector
+ * from the action it is placed after (or from nothing when the list is
+ * empty).
+ * NOTE(review): in the empty-list branch the clock vector is created but
+ * act is never pushed onto the list — verify this omission is intended.
+ * NOTE(review): as in insertIntoActionList(), act is silently dropped
+ * when no entry shares its sequence number. */
+void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
+	action_list_t::reverse_iterator rit = list->rbegin();
+	modelclock_t next_seq = act->get_seq_number();
+	if (rit == list->rend()) {
+		act->create_cv(NULL);
+	} else if ((*rit)->get_seq_number() == next_seq) {
+		act->create_cv((*rit));
+		list->push_back(act);
+	} else {
+		for(;rit != list->rend();rit++) {
+			if ((*rit)->get_seq_number() == next_seq) {
+				act->create_cv((*rit));
+				action_list_t::iterator it = rit.base();
+				list->insert(it, act);
+				break;
+			}
+		}
+	}
+}
+
+
+/**
+ * Performs various bookkeeping operations for a normal write. The
+ * complication is that we are typically inserting a normal write
+ * lazily, so we need to insert it into the middle of lists.
+ *
+ * @param act is the ModelAction to add.
+ */
+
+void ModelExecution::add_normal_write_to_lists(ModelAction *act)
+{
+	int tid = id_to_int(act->get_tid());
+	insertIntoActionListAndSetCV(&action_trace, act);
+
+	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+	insertIntoActionList(list, act);
+
+	// Update obj_thrd_map, a per location, per thread, order of actions
+	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
+	if (tid >= (int)vec->size())
+		vec->resize(priv->next_thread_id);
+	insertIntoActionList(&(*vec)[tid],act);
+
+	// Update thrd_last_action, the last action taken by each thread.
+	// NOTE(review): dereferences thrd_last_action[tid] unconditionally —
+	// assumes the thread already has a recorded action; confirm.
+	if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
+		thrd_last_action[tid] = act;
+}
+
+
+
+
void ModelExecution::add_write_to_lists(ModelAction *write) {
// Update seq_cst map
if (write->is_seqcst())
curr = check_current_action(curr);
ASSERT(curr);
- // model_print("poitner loc: %p, thread: %d, type: %d, order: %d, position: %s\n", curr, curr->get_tid(), curr->get_type(), curr->get_mo(), curr->get_position() );
- model->get_history()->add_func_atomic( curr, curr_thrd->get_id() );
+ /* Process this action in ModelHistory for records*/
+ model->get_history()->process_action( curr, curr_thrd->get_id() );
if (curr_thrd->is_blocked() || curr_thrd->is_complete())
scheduler->remove_thread(curr_thrd);
bool check_action_enabled(ModelAction *curr);
bool assert_bug(const char *msg);
+ bool assert_race(const char *msg);
+
bool have_bug_reports() const;
+ bool have_fatal_bug_reports() const;
+
SnapVector<bug_message *> * get_bugs() const;
bool has_asserted() const;
bool synchronize(const ModelAction *first, ModelAction *second);
void add_action_to_lists(ModelAction *act);
+ void add_normal_write_to_lists(ModelAction *act);
void add_write_to_lists(ModelAction *act);
ModelAction * get_last_fence_release(thread_id_t tid) const;
ModelAction * get_last_seq_cst_write(ModelAction *curr) const;
void w_modification_order(ModelAction *curr);
ClockVector * get_hb_from_write(ModelAction *rf) const;
ModelAction * get_uninitialized_action(ModelAction *curr) const;
+ ModelAction * convertNonAtomicStore(void*);
action_list_t action_trace;
SnapVector<Thread *> thread_map;
Thread * action_select_next_thread(const ModelAction *curr) const;
- /* thrd_func_list stores a list of function ids for each thread.
+ /* thrd_func_list stores a list of function ids for each thread.
* Each element in thrd_func_list stores the functions that
- * thread i has entered and yet to exit from
+ * thread i has entered and yet to exit from
*
* This data structure is handled by ModelHistory
*/
SnapVector< func_id_list_t * > thrd_func_list;
/* Keeps track of atomic actions that thread i has performed in some
- * function. Index of SnapVector is thread id. SnapList simulates
- * the call stack.
+ * function. Index of SnapVector is thread id. SnapList simulates
+ * the call stack.
*
* This data structure is handled by ModelHistory
*/
FuncNode::FuncNode() :
func_inst_map(),
inst_list(),
- entry_insts()
+ entry_insts(),
+ thrd_read_map(),
+ read_locations()
{}
-/* Check whether FuncInst with the same type, position, and location
+/* Check whether FuncInst with the same type, position, and location
* as act has been added to func_inst_map or not. If so, return it;
* if not, add it and return it.
*
func_inst = new FuncInst(act, this);
inst->get_collisions()->push_back(func_inst);
- inst_list.push_back(func_inst); // delete?
- if (func_inst->is_read())
- group_reads_by_loc(func_inst);
+ inst_list.push_back(func_inst); // delete?
return func_inst;
}
}
FuncInst * func_inst = new FuncInst(act, this);
+
func_inst_map.put(position, func_inst);
inst_list.push_back(func_inst);
- if (func_inst->is_read())
- group_reads_by_loc(func_inst);
-
return func_inst;
}
return;
func_inst_list_mt::iterator it;
- for (it = entry_insts.begin(); it != entry_insts.end(); it++) {
+ for (it = entry_insts.begin();it != entry_insts.end();it++) {
if (inst == *it)
return;
}
entry_insts.push_back(inst);
}
- /* group atomic read actions by memory location */
- void FuncNode::group_reads_by_loc(FuncInst * inst)
+ /* @param inst_list a list of FuncInsts; this argument comes from ModelExecution
+ * Link FuncInsts in a list - add one FuncInst to another's predecessors and successors
+ */
+ void FuncNode::link_insts(func_inst_list_t * inst_list)
{
- ASSERT(inst);
- if ( !inst->is_read() )
+ if (inst_list == NULL)
return;
- void * location = inst->get_location();
+ func_inst_list_t::iterator it = inst_list->begin();
+ func_inst_list_t::iterator prev;
- func_inst_list_mt * reads;
- if ( !reads_by_loc.contains(location) ) {
- reads = new func_inst_list_mt();
- reads->push_back(inst);
- reads_by_loc.put(location, reads);
+ if (inst_list->size() == 0)
return;
+
+ /* add the first instruction to the list of entry insts */
+ FuncInst * entry_inst = *it;
+ add_entry_inst(entry_inst);
+
+ it++;
+ while (it != inst_list->end()) {
+ prev = it;
+ prev--;
+
+ FuncInst * prev_inst = *prev;
+ FuncInst * curr_inst = *it;
+
+ prev_inst->add_succ(curr_inst);
+ curr_inst->add_pred(prev_inst);
+
+ it++;
}
+ }
- reads = reads_by_loc.get(location);
- func_inst_list_mt::iterator it;
- for (it = reads->begin();it != reads->end();it++) {
- if (inst == *it)
- return;
+	/* @param act the atomic read action whose observed value is recorded
+	 * @param tid thread id
+	 * Store the value read by an atomic read action into this thread's
+	 * per-location read map, growing thrd_read_map on demand (new slots
+	 * are NULL until first use). Also remember the location itself in
+	 * read_locations, deduplicated by a linear scan. */
+	void FuncNode::store_read(ModelAction * act, uint32_t tid)
+	{
+		ASSERT(act);
+
+		void * location = act->get_location();
+		uint64_t read_from_val = act->get_reads_from_value();
+
+		if (thrd_read_map.size() <= tid)
+			thrd_read_map.resize(tid + 1);
+
+		read_map_t * read_map = thrd_read_map[tid];
+		if (read_map == NULL) {
+			read_map = new read_map_t();
+			thrd_read_map[tid] = read_map;
+		}
-		reads->push_back(inst);
+		read_map->put(location, read_from_val);
+
+		/* Store the memory locations where atomic reads happen */
+		bool push_loc = true;
+		ModelList<void *>::iterator it;
+		for (it = read_locations.begin(); it != read_locations.end(); it++) {
+			if (location == *it) {
+				push_loc = false;
+				break;
+			}
+		}
+
+		if (push_loc)
+			read_locations.push_back(location);
+	}
+
+
+ uint64_t FuncNode::query_last_read(ModelAction * act, uint32_t tid)
+ {
+ if (thrd_read_map.size() <= tid)
+ return 0xdeadbeef;
+
+ read_map_t * read_map = thrd_read_map[tid];
+ void * location = act->get_location();
+
+ /* last read value not found */
+ if ( !read_map->contains(location) )
+ return 0xdeadbeef;
+
+ uint64_t read_val = read_map->get(location);
+ return read_val;
+ }
+
+ /* @param tid thread id
+ * Reset read map for a thread. This function shall only be called
+ * when a thread exits a function
+ */
+ void FuncNode::clear_read_map(uint32_t tid)
+ {
+ ASSERT(thrd_read_map.size() > tid);
+ thrd_read_map[tid]->reset();
+ }
+
+ /* @param tid thread id
+ * Print the values read by the last read actions for each memory location
+ */
+ void FuncNode::print_last_read(uint32_t tid)
+ {
+ ASSERT(thrd_read_map.size() > tid);
+ read_map_t * read_map = thrd_read_map[tid];
+
+ ModelList<void *>::iterator it;
+ for (it = read_locations.begin(); it != read_locations.end(); it++) {
+ if ( !read_map->contains(*it) )
+ break;
+
+ uint64_t read_val = read_map->get(*it);
+ model_print("last read of thread %d at %p: 0x%x\n", tid, *it, read_val);
+ }
}
class ModelAction;
typedef ModelList<FuncInst *> func_inst_list_mt;
+ typedef HashTable<void *, uint64_t, uintptr_t, 4, model_malloc, model_calloc, model_free> read_map_t;
class FuncNode {
public:
func_inst_list_mt * get_inst_list() { return &inst_list; }
func_inst_list_mt * get_entry_insts() { return &entry_insts; }
void add_entry_inst(FuncInst * inst);
+ void link_insts(func_inst_list_t * inst_list);
- void group_reads_by_loc(FuncInst * inst);
+ void store_read(ModelAction * act, uint32_t tid);
+ uint64_t query_last_read(ModelAction * act, uint32_t tid);
+ void clear_read_map(uint32_t tid);
+
+ void print_last_read(uint32_t tid);
MEMALLOC
private:
uint32_t func_id;
const char * func_name;
- /* Use source line number as the key of hashtable, to check if
+ /* Use source line number as the key of hashtable, to check if
* atomic operation with this line number has been added or not
- *
- * To do: cds_atomic_compare_exchange contains three atomic operations
- * that are feeded with the same source line number by llvm pass
*/
HashTable<const char *, FuncInst *, uintptr_t, 4, model_malloc, model_calloc, model_free> func_inst_map;
/* possible entry atomic actions in this function */
func_inst_list_mt entry_insts;
- /* group atomic read actions by memory location */
- HashTable<void *, func_inst_list_mt *, uintptr_t, 4, model_malloc, model_calloc, model_free> reads_by_loc;
+ /* Store the values read by atomic read actions per memory location for each thread */
+ ModelVector<read_map_t *> thrd_read_map;
+ ModelList<void *> read_locations;
};
/** @brief Constructor */
ModelHistory::ModelHistory() :
- func_counter(0), /* function id starts with 0 */
+ func_counter(1), /* function id starts with 1 */
func_map(),
func_map_rev(),
- func_atomics()
+ func_nodes()
{}
void ModelHistory::enter_function(const uint32_t func_id, thread_id_t tid)
uint32_t id = id_to_int(tid);
SnapVector<func_id_list_t *> * thrd_func_list = model->get_execution()->get_thrd_func_list();
SnapVector< SnapList<func_inst_list_t *> *> *
- thrd_func_inst_lists = model->get_execution()->get_thrd_func_inst_lists();
+ thrd_func_inst_lists = model->get_execution()->get_thrd_func_inst_lists();
if ( thrd_func_list->size() <= id ) {
thrd_func_list->resize( id + 1 );
uint32_t id = id_to_int(tid);
SnapVector<func_id_list_t *> * thrd_func_list = model->get_execution()->get_thrd_func_list();
SnapVector< SnapList<func_inst_list_t *> *> *
- thrd_func_inst_lists = model->get_execution()->get_thrd_func_inst_lists();
+ thrd_func_inst_lists = model->get_execution()->get_thrd_func_inst_lists();
func_id_list_t * func_list = thrd_func_list->at(id);
SnapList<func_inst_list_t *> * func_inst_lists = thrd_func_inst_lists->at(id);
uint32_t last_func_id = func_list->back();
if (last_func_id == func_id) {
- func_list->pop_back();
+ /* clear read map upon exiting functions */
+ FuncNode * func_node = func_nodes[func_id];
+ func_node->clear_read_map(tid);
func_inst_list_t * curr_inst_list = func_inst_lists->back();
- link_insts(curr_inst_list);
+ func_node->link_insts(curr_inst_list);
+ func_list->pop_back();
func_inst_lists->pop_back();
} else {
model_print("trying to exit with a wrong function id\n");
//model_print("thread %d exiting func %d\n", tid, func_id);
}
- void ModelHistory::add_func_atomic(ModelAction *act, thread_id_t tid)
+ void ModelHistory::process_action(ModelAction *act, thread_id_t tid)
{
/* return if thread i has not entered any function or has exited
from all functions */
SnapVector<func_id_list_t *> * thrd_func_list = model->get_execution()->get_thrd_func_list();
SnapVector< SnapList<func_inst_list_t *> *> *
- thrd_func_inst_lists = model->get_execution()->get_thrd_func_inst_lists();
+ thrd_func_inst_lists = model->get_execution()->get_thrd_func_inst_lists();
uint32_t id = id_to_int(tid);
if ( thrd_func_list->size() <= id )
uint32_t func_id = func_list->back();
- if ( func_atomics.size() <= func_id )
- func_atomics.resize( func_id + 1 );
+ if ( func_nodes.size() <= func_id )
+ func_nodes.resize( func_id + 1 );
- FuncNode * func_node = func_atomics[func_id];
+ FuncNode * func_node = func_nodes[func_id];
if (func_node == NULL) {
const char * func_name = func_map_rev[func_id];
func_node = new FuncNode();
func_node->set_func_id(func_id);
func_node->set_func_name(func_name);
- func_atomics[func_id] = func_node;
+ func_nodes[func_id] = func_node;
}
+ /* add corresponding FuncInst to func_node */
FuncInst * inst = func_node->get_or_add_action(act);
- if (inst != NULL) {
- func_inst_list_t * curr_inst_list = func_inst_lists->back();
-
- ASSERT(curr_inst_list != NULL);
- curr_inst_list->push_back(inst);
- }
- }
-
- /* Link FuncInsts in a list - add one FuncInst to another's predecessors and successors */
- void ModelHistory::link_insts(func_inst_list_t * inst_list)
- {
- if (inst_list == NULL)
- return;
- func_inst_list_t::iterator it = inst_list->begin();
- func_inst_list_t::iterator prev;
-
- if (inst_list->size() == 0)
+ if (inst == NULL)
return;
- /* add the first instruction to the list of entry insts */
- FuncInst * entry_inst = *it;
- FuncNode * func_node = entry_inst->get_func_node();
- func_node->add_entry_inst(entry_inst);
-
- it++;
- while (it != inst_list->end()) {
- prev = it;
- prev--;
+ if (inst->is_read())
+ func_node->store_read(act, tid);
- FuncInst * prev_inst = *prev;
- FuncInst * curr_inst = *it;
+ /* add to curr_inst_list */
+ func_inst_list_t * curr_inst_list = func_inst_lists->back();
+ ASSERT(curr_inst_list != NULL);
+ curr_inst_list->push_back(inst);
+ }
- prev_inst->add_succ(curr_inst);
- curr_inst->add_pred(prev_inst);
+ /* return the FuncNode given its func_id */
+ FuncNode * ModelHistory::get_func_node(uint32_t func_id)
+ {
+ if (func_nodes.size() <= func_id) // this node has not been added
+ return NULL;
- it++;
- }
+ return func_nodes[func_id];
}
void ModelHistory::print()
{
- for (uint32_t i = 0;i < func_atomics.size();i++ ) {
- FuncNode * funcNode = func_atomics[i];
- if (funcNode == NULL)
- continue;
+ /* function id starts with 1 */
+ for (uint32_t i = 1; i < func_nodes.size(); i++) {
+ FuncNode * func_node = func_nodes[i];
+ if (func_node == NULL)
+ continue;
- func_inst_list_mt * entry_insts = funcNode->get_entry_insts();
+ func_inst_list_mt * entry_insts = func_node->get_entry_insts();
+ model_print("function %s has entry actions\n", func_node->get_func_name());
- model_print("function %s has entry actions\n", funcNode->get_func_name());
func_inst_list_mt::iterator it;
- for (it = entry_insts->begin(); it != entry_insts->end(); it++) {
+ for (it = entry_insts->begin();it != entry_insts->end();it++) {
FuncInst *inst = *it;
model_print("type: %d, at: %s\n", inst->get_type(), inst->get_position());
}
/*
- func_inst_list_mt * inst_list = funcNode->get_inst_list();
-
- model_print("function %s has following actions\n", funcNode->get_func_name());
- func_inst_list_mt::iterator it;
- for (it = inst_list->begin(); it != inst_list->end(); it++) {
- FuncInst *inst = *it;
- model_print("type: %d, at: %s\n", inst->get_type(), inst->get_position());
- }
-*/
+ func_inst_list_mt * inst_list = funcNode->get_inst_list();
+
+ model_print("function %s has following actions\n", funcNode->get_func_name());
+ func_inst_list_mt::iterator it;
+ for (it = inst_list->begin(); it != inst_list->end(); it++) {
+ FuncInst *inst = *it;
+ model_print("type: %d, at: %s\n", inst->get_type(), inst->get_position());
+ }
+ */
}
}
uint32_t get_func_counter() { return func_counter; }
void incr_func_counter() { func_counter++; }
- void add_func_atomic(ModelAction *act, thread_id_t tid);
+ void process_action(ModelAction *act, thread_id_t tid);
HashTable<const char *, uint32_t, uintptr_t, 4, model_malloc, model_calloc, model_free> * getFuncMap() { return &func_map; }
ModelVector<const char *> * getFuncMapRev() { return &func_map_rev; }
- ModelVector<FuncNode *> * getFuncAtomics() { return &func_atomics; }
+ ModelVector<FuncNode *> * getFuncNodes() { return &func_nodes; }
+ FuncNode * get_func_node(uint32_t func_id);
- void link_insts(func_inst_list_t * inst_list);
void print();
MEMALLOC
private:
uint32_t func_counter;
- /* map function names to integer ids */
+ /* map function names to integer ids */
HashTable<const char *, uint32_t, uintptr_t, 4, model_malloc, model_calloc, model_free> func_map;
- /* map integer ids to function names */
+ /* map integer ids to function names */
ModelVector<const char *> func_map_rev;
- ModelVector<FuncNode *> func_atomics;
+ ModelVector<FuncNode *> func_nodes;
};
return execution->assert_bug(str);
}
+ /**
+ * @brief Assert a data race in the executing program.
+ *
+ * Different from assert_bug, the program will not be aborted immediately
+ * upon calling this function, unless the number of data races exceeds
+ * a threshold.
+ *
+ * @param msg Descriptive message for the bug (do not include newline char)
+ * @return True if bug is immediately-feasible
+ */
+ bool ModelChecker::assert_race(const char *msg, ...)
+ {
+ char str[800];
+
+ va_list ap;
+ va_start(ap, msg);
+ vsnprintf(str, sizeof(str), msg, ap);
+ va_end(ap);
+
+ return execution->assert_race(str);
+ }
+
/**
* @brief Assert a bug in the executing program, asserted by a user thread
* @see ModelChecker::assert_bug
bugs->size(),
bugs->size() > 1 ? "s" : "");
for (unsigned int i = 0;i < bugs->size();i++)
- (*bugs)[i]->print();
+ (*bugs)[i] -> print();
}
/**
*/
void ModelChecker::record_stats()
{
- stats.num_total++;
+ stats.num_total ++;
if (!execution->isfeasibleprefix())
- stats.num_infeasible++;
+ stats.num_infeasible ++;
else if (execution->have_bug_reports())
- stats.num_buggy_executions++;
+ stats.num_buggy_executions ++;
else if (execution->is_complete_execution())
- stats.num_complete++;
+ stats.num_complete ++;
else {
- stats.num_redundant++;
+ stats.num_redundant ++;
/**
* @todo We can violate this ASSERT() when fairness/sleep sets
return true;
}
// test code
- execution_number++;
+ execution_number ++;
reset_to_initial_state();
return false;
}
/** @brief Run trace analyses on complete trace */
void ModelChecker::run_trace_analyses() {
- for (unsigned int i = 0;i < trace_analyses.size();i++)
- trace_analyses[i]->analyze(execution->get_action_trace());
+ for (unsigned int i = 0;i < trace_analyses.size();i ++)
+ trace_analyses[i] -> analyze(execution->get_action_trace());
}
/**
/* Infeasible -> don't take any more steps */
if (execution->is_infeasible())
return true;
- else if (execution->isfeasibleprefix() && execution->have_bug_reports()) {
+ else if (execution->isfeasibleprefix() && execution->have_fatal_bug_reports()) {
execution->set_assert();
return true;
}