bool ModelAction::is_write() const
{
- return type == ATOMIC_WRITE || type == ATOMIC_RMW || type == ATOMIC_INIT || type == ATOMIC_UNINIT;
+ return type == ATOMIC_WRITE || type == ATOMIC_RMW || type == ATOMIC_INIT || type == ATOMIC_UNINIT || type == NONATOMIC_WRITE;
}
bool ModelAction::could_be_write() const
if (act->is_uninitialized()) { // WL
uint64_t val = *((uint64_t *) location);
- ModelAction * act_initialized = (ModelAction *)act;
- act_initialized->set_value(val);
- reads_from = act_initialized;
+ ModelAction * act_uninitialized = (ModelAction *)act;
+ act_uninitialized->set_value(val);
+ reads_from = act_uninitialized;
// disabled by WL, because LLVM IR is unable to detect atomic init
/* model->assert_bug("May read from uninitialized atomic:\n"
case PTHREAD_JOIN: return "pthread join";
case ATOMIC_UNINIT: return "uninitialized";
+ case NONATOMIC_WRITE: return "nonatomic write";
case ATOMIC_READ: return "atomic read";
case ATOMIC_WRITE: return "atomic write";
case ATOMIC_RMW: return "atomic rmw";
PTHREAD_CREATE, // < A pthread creation action
PTHREAD_JOIN, // < A pthread join action
ATOMIC_UNINIT, // < Represents an uninitialized atomic
- ATOMIC_READ, // < An atomic read action
+ NONATOMIC_WRITE, // < Represents a non-atomic store
+ ATOMIC_INIT, // < Initialization of an atomic object (e.g., atomic_init())
ATOMIC_WRITE, // < An atomic write action
+ ATOMIC_RMW, // < The write part of an atomic RMW action
+ ATOMIC_READ, // < An atomic read action
ATOMIC_RMWR, // < The read part of an atomic RMW action
ATOMIC_RMWRCAS, // < The read part of an atomic RMW action
- ATOMIC_RMW, // < The write part of an atomic RMW action
ATOMIC_RMWC, // < Convert an atomic RMW action into a READ
- ATOMIC_INIT, // < Initialization of an atomic object (e.g., atomic_init())
+
ATOMIC_FENCE, // < A fence action
ATOMIC_LOCK, // < A lock action
ATOMIC_TRYLOCK, // < A trylock action
return &basetable->array[((uintptr_t)address) & MASK16BIT];
}
+
+bool hasNonAtomicStore(const void *address) {
+ uint64_t * shadow = lookupAddressEntry(address);
+ uint64_t shadowval = *shadow;
+ if (ISSHORTRECORD(shadowval)) {
+ // Is the last write non-atomic (ATOMICMASK clear) with a non-zero clock?
+ return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
+ } else {
+ struct RaceRecord *record = (struct RaceRecord *)shadowval;
+ return !record->isAtomic && record->writeClock != 0;
+ }
+}
+
+void setAtomicStoreFlag(const void *address) {
+ uint64_t * shadow = lookupAddressEntry(address);
+ uint64_t shadowval = *shadow;
+ if (ISSHORTRECORD(shadowval)) {
+ *shadow = shadowval | ATOMICMASK;
+ } else {
+ struct RaceRecord *record = (struct RaceRecord *)shadowval;
+ record->isAtomic = 1;
+ }
+}
+
+void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
+ uint64_t * shadow = lookupAddressEntry(address);
+ uint64_t shadowval = *shadow;
+ if (ISSHORTRECORD(shadowval)) {
+ // Decode the writer's thread ID and clock from the short record
+ *thread = WRTHREADID(shadowval);
+ *clock = WRITEVECTOR(shadowval);
+ } else {
+ struct RaceRecord *record = (struct RaceRecord *)shadowval;
+ *thread = record->writeThread;
+ *clock = record->writeClock;
+ }
+}
+
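Together, these three helpers form a small check/decode/mark protocol over the
detector's shadow memory. A minimal usage sketch (promoteIfNeeded is a
hypothetical caller; the helpers and typedefs are the ones declared in this patch):

void promoteIfNeeded(const void *addr) {
	if (hasNonAtomicStore(addr)) {
		thread_id_t tid;
		modelclock_t clock;
		/* Recover who performed the last plain store, and at what clock */
		getStoreThreadAndClock(addr, &tid, &clock);
		/* Mark the location atomic so the same store is not converted twice */
		setAtomicStoreFlag(addr);
		/* ... materialize a NONATOMIC_WRITE ModelAction from (tid, clock) ... */
	}
}
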
/**
 * Reports a detected data race: prints a backtrace for the racing access
 * and identifies the two conflicting accesses by type, thread, and clock.
 */
void assert_race(struct DataRace *race)
{
- model_print("At location: \n");
+ model_print("Race detected at location: \n");
backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
- model_print("Data race detected @ address %p:\n"
+ model_print("\nData race detected @ address %p:\n"
" Access 1: %5s in thread %2d @ clock %3u\n"
- " Access 2: %5s in thread %2d @ clock %3u",
+ " Access 2: %5s in thread %2d @ clock %3u\n\n",
race->address,
race->isoldwrite ? "write" : "read",
id_to_int(race->oldthread),
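
With the added newlines, a report renders along these lines (all values are
illustrative; the frame lines are written by backtrace_symbols_fd):

Race detected at location: 
<frames written by backtrace_symbols_fd>

Data race detected @ address 0x7f9e4c0010a0:
 Access 1: write in thread  2 @ clock  14
 Access 2:  read in thread  3 @ clock  17
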
void recordWrite(thread_id_t thread, void *location);
bool checkDataRaces();
void assert_race(struct DataRace *race);
+bool hasNonAtomicStore(const void *location);
+void setAtomicStoreFlag(const void *location);
+void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock);
/**
* @brief A record of information for detecting data races
return true;
}
+ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
+ uint64_t value = *((const uint64_t *) location);
+ modelclock_t storeclock;
+ thread_id_t storethread;
+ getStoreThreadAndClock(location, &storethread, &storeclock);
+ setAtomicStoreFlag(location); // never convert the same store twice
+ ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
+ act->set_seq_number(storeclock); // place the write at the clock of the original plain store,
+                                  // so the lazy list insertion below finds the right slot
+ add_normal_write_to_lists(act);
+ add_write_to_lists(act);
+ w_modification_order(act);
+ return act;
+}
+
/**
* Processes a read model action.
{
SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
while(true) {
+ bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
+ if (hasnonatomicstore) {
+ ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
+ rf_set->push_back(nonatomicstore);
+ }
int index = fuzzer->selectWrite(curr, rf_set);
ModelAction *rf = (*rf_set)[index];
}
}
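
This hook covers the case noted earlier in the reader: LLVM lowers atomic_init
to a plain store, so an initialization may survive only as a shadow-memory
record. A sketch of a C11 test program (hypothetical, standard atomics only)
that would exercise the conversion:

#include <stdatomic.h>

atomic_int x;

int main() {
	atomic_init(&x, 42);	/* lowered to a non-atomic store */
	/* Processing this load finds the shadow record, converts it into a
	   NONATOMIC_WRITE via convertNonAtomicStore(), and pushes it onto
	   rf_set, so the load may observe 42. */
	int v = atomic_load_explicit(&x, memory_order_relaxed);
	return v == 42 ? 0 : 1;
}
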
+void insertIntoActionList(action_list_t *list, ModelAction *act) {
+ action_list_t::reverse_iterator rit = list->rbegin();
+ modelclock_t next_seq = act->get_seq_number();
+ if (rit == list->rend() || (*rit)->get_seq_number() == next_seq)
+ list->push_back(act);
+ else {
+ // Scan backwards for the last action carrying the same sequence number
+ for(;rit != list->rend();rit++) {
+ if ((*rit)->get_seq_number() == next_seq) {
+ // rit.base() already points one past *rit, i.e., the insertion point
+ list->insert(rit.base(), act);
+ break;
+ }
+ }
+ }
+}
+
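Because the action lists are ordered by sequence number, the splice logic above
can be exercised in isolation. A standalone sketch, assuming action_list_t
behaves like a std::list kept sorted by seq_number (plain ints stand in for
actions and their sequence numbers):

#include <list>
#include <cstdio>

static void insertSorted(std::list<int> *list, int seq) {
	std::list<int>::reverse_iterator rit = list->rbegin();
	if (rit == list->rend() || *rit == seq)
		list->push_back(seq);
	else {
		for (;rit != list->rend();rit++)
			if (*rit == seq)
				break;
		list->insert(rit.base(), seq);	/* one past the matching element */
	}
}

int main() {
	std::list<int> l = {1, 2, 2, 4};
	insertSorted(&l, 2);	/* lands after the last 2 */
	for (int v : l)
		printf("%d ", v);	/* prints: 1 2 2 2 4 */
	printf("\n");
	return 0;
}
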
+/**
+ * Performs various bookkeeping operations for a normal write. The
+ * complication is that we are typically inserting a normal write
+ * lazily, so we need to insert it into the middle of lists.
+ *
+ * @param act is the ModelAction to add.
+ */
+
+void ModelExecution::add_normal_write_to_lists(ModelAction *act)
+{
+ int tid = id_to_int(act->get_tid());
+ action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
+ insertIntoActionList(list, act);
+ insertIntoActionList(&action_trace, act);
+
+ // Update obj_thrd_map, the per-location, per-thread ordering of actions
+ SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
+ if (tid >= (int)vec->size())
+ vec->resize(priv->next_thread_id);
+ insertIntoActionList(&(*vec)[tid],act);
+
+ // Update thrd_last_action, the last action taken by each thread
+ if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
+ thrd_last_action[tid] = act;
+}
+
+
void ModelExecution::add_write_to_lists(ModelAction *write) {
// Update seq_cst map
if (write->is_seqcst())
bool synchronize(const ModelAction *first, ModelAction *second);
void add_action_to_lists(ModelAction *act);
+ void add_normal_write_to_lists(ModelAction *act);
void add_write_to_lists(ModelAction *act);
ModelAction * get_last_fence_release(thread_id_t tid) const;
ModelAction * get_last_seq_cst_write(ModelAction *curr) const;
void w_modification_order(ModelAction *curr);
ClockVector * get_hb_from_write(ModelAction *rf) const;
ModelAction * get_uninitialized_action(ModelAction *curr) const;
+ ModelAction * convertNonAtomicStore(void*);
action_list_t action_trace;
SnapVector<Thread *> thread_map;