X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=execution.cc;h=9ef071209cbefc9020b662a3ded0d8409b958bf1;hb=HEAD;hp=a98567edb9ba7b723c474405cbb5d3d75433687a;hpb=9ecf77b2763e447363738b6453042982f787a184;p=c11tester.git diff --git a/execution.cc b/execution.cc index a98567ed..9ef07120 100644 --- a/execution.cc +++ b/execution.cc @@ -2,6 +2,7 @@ #include #include #include +#include #include "model.h" #include "execution.h" @@ -13,11 +14,21 @@ #include "datarace.h" #include "threads-model.h" #include "bugmessage.h" -#include "history.h" #include "fuzzer.h" -#include "newfuzzer.h" -#define INITIAL_THREAD_ID 0 +#ifdef COLLECT_STAT +static unsigned int atomic_load_count = 0; +static unsigned int atomic_store_count = 0; +static unsigned int atomic_rmw_count = 0; + +static unsigned int atomic_fence_count = 0; +static unsigned int atomic_lock_count = 0; +static unsigned int atomic_trylock_count = 0; +static unsigned int atomic_unlock_count = 0; +static unsigned int atomic_notify_count = 0; +static unsigned int atomic_wait_count = 0; +static unsigned int atomic_timedwait_count = 0; +#endif /** * Structure for holding small ModelChecker members that should be snapshotted @@ -53,7 +64,7 @@ ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) : scheduler(scheduler), thread_map(2), /* We'll always need at least 2 threads */ pthread_map(0), - pthread_counter(1), + pthread_counter(2), action_trace(), obj_map(), condvar_waiters_map(), @@ -66,11 +77,7 @@ ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) : thrd_last_fence_release(), priv(new struct model_snapshot_members ()), mo_graph(new CycleGraph()), -#ifdef NEWFUZZER - fuzzer(new NewFuzzer()), -#else fuzzer(new Fuzzer()), -#endif isfinished(false) { /* Initialize a model-checker thread, for special ModelActions */ @@ -86,7 +93,7 @@ ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) : /** @brief Destructor */ ModelExecution::~ModelExecution() { - for (unsigned int i = 0;i < get_num_threads();i++) + for (unsigned int i = INITIAL_THREAD_ID;i < get_num_threads();i++) delete get_thread(int_to_id(i)); delete mo_graph; @@ -98,26 +105,104 @@ int ModelExecution::get_execution_number() const return model->get_execution_number(); } -static action_list_t * get_safe_ptr_action(HashTable * hash, void * ptr) +static SnapVector * get_safe_ptr_vect_action(HashTable *, uintptr_t, 2> * hash, void * ptr) { - action_list_t *tmp = hash->get(ptr); + SnapVector *tmp = hash->get(ptr); if (tmp == NULL) { - tmp = new action_list_t(); + tmp = new SnapVector(); hash->put(ptr, tmp); } return tmp; } -static SnapVector * get_safe_ptr_vect_action(HashTable *, uintptr_t, 2> * hash, void * ptr) +static simple_action_list_t * get_safe_ptr_action(HashTable * hash, void * ptr) { - SnapVector *tmp = hash->get(ptr); + simple_action_list_t *tmp = hash->get(ptr); if (tmp == NULL) { - tmp = new SnapVector(); + tmp = new simple_action_list_t(); + hash->put(ptr, tmp); + } + return tmp; +} + +static SnapVector * get_safe_ptr_vect_action(HashTable *, uintptr_t, 2> * hash, void * ptr) +{ + SnapVector *tmp = hash->get(ptr); + if (tmp == NULL) { + tmp = new SnapVector(); hash->put(ptr, tmp); } return tmp; } +/** + * When vectors of action lists are reallocated due to resize, the root address of + * action lists may change. Hence we need to fix the parent pointer of the children + * of root. 
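+ * (Each action_list_t embeds its own root node, so when the enclosing
+ * SnapVector reallocates, the surviving list nodes still point at the root's
+ * old address; fixupParent() re-points those child pointers at the list's
+ * new location.)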
+ */ +static void fixup_action_list(SnapVector * vec) +{ + for (uint i = 0;i < vec->size();i++) { + action_list_t * list = &(*vec)[i]; + if (list != NULL) + list->fixupParent(); + } +} + +#ifdef COLLECT_STAT +static inline void record_atomic_stats(ModelAction * act) +{ + switch (act->get_type()) { + case ATOMIC_WRITE: + atomic_store_count++; + break; + case ATOMIC_RMW: + atomic_load_count++; + break; + case ATOMIC_READ: + atomic_rmw_count++; + break; + case ATOMIC_FENCE: + atomic_fence_count++; + break; + case ATOMIC_LOCK: + atomic_lock_count++; + break; + case ATOMIC_TRYLOCK: + atomic_trylock_count++; + break; + case ATOMIC_UNLOCK: + atomic_unlock_count++; + break; + case ATOMIC_NOTIFY_ONE: + case ATOMIC_NOTIFY_ALL: + atomic_notify_count++; + break; + case ATOMIC_WAIT: + atomic_wait_count++; + break; + case ATOMIC_TIMEDWAIT: + atomic_timedwait_count++; + default: + return; + } +} + +void print_atomic_accesses() +{ + model_print("atomic store count: %u\n", atomic_store_count); + model_print("atomic load count: %u\n", atomic_load_count); + model_print("atomic rmw count: %u\n", atomic_rmw_count); + + model_print("atomic fence count: %u\n", atomic_fence_count); + model_print("atomic lock count: %u\n", atomic_lock_count); + model_print("atomic trylock count: %u\n", atomic_trylock_count); + model_print("atomic unlock count: %u\n", atomic_unlock_count); + model_print("atomic notify count: %u\n", atomic_notify_count); + model_print("atomic wait count: %u\n", atomic_wait_count); + model_print("atomic timedwait count: %u\n", atomic_timedwait_count); +} +#endif /** @return a thread ID for a new Thread */ thread_id_t ModelExecution::get_next_id() { @@ -155,43 +240,54 @@ void ModelExecution::restore_last_seq_num() * @param thread The thread that we might wake up * @return True, if we should wake up the sleeping thread; false otherwise */ -bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const +bool ModelExecution::should_wake_up(const ModelAction * asleep) const { - const ModelAction *asleep = thread->get_pending(); - /* Don't allow partial RMW to wake anyone up */ - if (curr->is_rmwr()) - return false; - /* Synchronizing actions may have been backtracked */ - if (asleep->could_synchronize_with(curr)) - return true; - /* All acquire/release fences and fence-acquire/store-release */ - if (asleep->is_fence() && asleep->is_acquire() && curr->is_release()) - return true; - /* Fence-release + store can awake load-acquire on the same location */ - if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) { - ModelAction *fence_release = get_last_fence_release(curr->get_tid()); - if (fence_release && *(get_last_action(thread->get_id())) < *fence_release) - return true; - } /* The sleep is literally sleeping */ - if (asleep->is_sleep()) { - if (fuzzer->shouldWake(asleep)) - return true; + switch (asleep->get_type()) { + case THREAD_SLEEP: + if (fuzzer->shouldWake(asleep)) + return true; + break; + case ATOMIC_WAIT: + case ATOMIC_TIMEDWAIT: + if (fuzzer->waitShouldWakeUp(asleep)) + return true; + break; + default: + return false; } return false; } -void ModelExecution::wake_up_sleeping_actions(ModelAction *curr) +void ModelExecution::wake_up_sleeping_actions() { - for (unsigned int i = 0;i < get_num_threads();i++) { - Thread *thr = get_thread(int_to_id(i)); - if (scheduler->is_sleep_set(thr)) { - if (should_wake_up(curr, thr)) { + for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) { + thread_id_t tid = int_to_id(i); + if 
(scheduler->is_sleep_set(tid)) { + Thread *thr = get_thread(tid); + ModelAction * pending = thr->get_pending(); + if (should_wake_up(pending)) { /* Remove this thread from sleep set */ scheduler->remove_sleep(thr); - if (thr->get_pending()->is_sleep()) + + if (pending->is_sleep()) { + thr->set_wakeup_state(true); + } else if (pending->is_wait()) { thr->set_wakeup_state(true); + /* Remove this thread from list of waiters */ + simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, pending->get_location()); + for (sllnode * rit = waiters->begin();rit != NULL;rit=rit->getNext()) { + if (rit->getVal()->get_tid() == tid) { + waiters->erase(rit); + break; + } + } + + /* Set ETIMEDOUT error */ + if (pending->is_timedwait()) + thr->set_return_value(ETIMEDOUT); + } } } } @@ -245,7 +341,7 @@ void ModelExecution::set_assert() bool ModelExecution::is_deadlocked() const { bool blocking_threads = false; - for (unsigned int i = 0;i < get_num_threads();i++) { + for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) { thread_id_t tid = int_to_id(i); if (is_enabled(tid)) return false; @@ -265,7 +361,7 @@ bool ModelExecution::is_deadlocked() const */ bool ModelExecution::is_complete_execution() const { - for (unsigned int i = 0;i < get_num_threads();i++) + for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) if (is_enabled(int_to_id(i))) return false; return true; @@ -282,9 +378,6 @@ ModelAction * ModelExecution::convertNonAtomicStore(void * location) { add_normal_write_to_lists(act); add_write_to_lists(act); w_modification_order(act); -#ifdef NEWFUZZER - model->get_history()->process_action(act, act->get_tid()); -#endif return act; } @@ -327,8 +420,12 @@ bool ModelExecution::process_read(ModelAction *curr, SnapVector * mo_graph->addEdge((*priorset)[i], rf); } read_from(curr, rf); - get_thread(curr)->set_return_value(curr->get_return_value()); + get_thread(curr)->set_return_value(rf->get_write_value()); delete priorset; + //Update acquire fence clock vector + ClockVector * hbcv = get_hb_from_write(rf); + if (hbcv != NULL) + get_thread(curr)->get_acq_fence_cv()->merge(hbcv); return canprune && (curr->get_type() == ATOMIC_READ); } priorset->clear(); @@ -376,6 +473,8 @@ bool ModelExecution::process_mutex(ModelAction *curr) //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock) // assert_bug("Lock access before initialization"); + + // TODO: lock count for recursive mutexes state->locked = get_thread(curr); ModelAction *unlock = get_last_unlock(curr); //synchronize with the previous unlock statement @@ -386,41 +485,59 @@ bool ModelExecution::process_mutex(ModelAction *curr) break; } case ATOMIC_WAIT: { - //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY - if (fuzzer->shouldWait(curr)) { - /* wake up the other threads */ - for (unsigned int i = 0;i < get_num_threads();i++) { - Thread *t = get_thread(int_to_id(i)); - Thread *curr_thrd = get_thread(curr); - if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock()) - scheduler->wake(t); - } + Thread *curr_thrd = get_thread(curr); + /* wake up the other threads */ + for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) { + Thread *t = get_thread(int_to_id(i)); + if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock()) + scheduler->wake(t); + } - /* unlock the lock - after checking who was waiting on it */ - state->locked = NULL; + /* unlock the lock - after checking who was waiting on it */ + state->locked = NULL; + + 
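+		// Condition-variable wait semantics: the mutex is released before this
+		// thread blocks, which is why any thread blocked on the lock is woken above.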
/* disable this thread */ + simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location()); + waiters->push_back(curr); + curr_thrd->set_pending(curr); // Forbid this thread to stash a new action - /* disable this thread */ - get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr); - scheduler->sleep(get_thread(curr)); + if (fuzzer->waitShouldFail(curr)) // If wait should fail spuriously, + scheduler->add_sleep(curr_thrd); // place this thread into THREAD_SLEEP_SET + else + scheduler->sleep(curr_thrd); + + break; + } + case ATOMIC_TIMEDWAIT: { + Thread *curr_thrd = get_thread(curr); + if (!fuzzer->randomizeWaitTime(curr)) { + curr_thrd->set_return_value(ETIMEDOUT); + return false; + } + + /* wake up the other threads */ + for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) { + Thread *t = get_thread(int_to_id(i)); + if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock()) + scheduler->wake(t); } + /* unlock the lock - after checking who was waiting on it */ + state->locked = NULL; + + /* disable this thread */ + simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location()); + waiters->push_back(curr); + curr_thrd->set_pending(curr); // Forbid this thread to stash a new action + scheduler->add_sleep(curr_thrd); break; } - case ATOMIC_TIMEDWAIT: case ATOMIC_UNLOCK: { - //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY - //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME - //AS NORMAL WAITS...THINK ABOUT PROBABILITIES - //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE - //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL - //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY - //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS - //MUST EVENMTUALLY RELEASE... 
- + // TODO: lock count for recursive mutexes /* wake up the other threads */ - for (unsigned int i = 0;i < get_num_threads();i++) { + Thread *curr_thrd = get_thread(curr); + for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) { Thread *t = get_thread(int_to_id(i)); - Thread *curr_thrd = get_thread(curr); if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock()) scheduler->wake(t); } @@ -430,19 +547,24 @@ bool ModelExecution::process_mutex(ModelAction *curr) break; } case ATOMIC_NOTIFY_ALL: { - action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location()); + simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location()); //activate all the waiting threads for (sllnode * rit = waiters->begin();rit != NULL;rit=rit->getNext()) { - scheduler->wake(get_thread(rit->getVal())); + Thread * thread = get_thread(rit->getVal()); + if (thread->get_state() != THREAD_COMPLETED) + scheduler->wake(thread); + thread->set_wakeup_state(true); } waiters->clear(); break; } case ATOMIC_NOTIFY_ONE: { - action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location()); + simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location()); if (waiters->size() != 0) { Thread * thread = fuzzer->selectNotify(waiters); - scheduler->wake(thread); + if (thread->get_state() != THREAD_COMPLETED) + scheduler->wake(thread); + thread->set_wakeup_state(true); } break; } @@ -469,7 +591,7 @@ void ModelExecution::process_write(ModelAction *curr) * @param curr The ModelAction to process * @return True if synchronization was updated */ -bool ModelExecution::process_fence(ModelAction *curr) +void ModelExecution::process_fence(ModelAction *curr) { /* * fence-relaxed: no-op @@ -479,36 +601,9 @@ bool ModelExecution::process_fence(ModelAction *curr) * sequences * fence-seq-cst: MO constraints formed in {r,w}_modification_order */ - bool updated = false; if (curr->is_acquire()) { - action_list_t *list = &action_trace; - sllnode * rit; - /* Find X : is_read(X) && X --sb-> curr */ - for (rit = list->end();rit != NULL;rit=rit->getPrev()) { - ModelAction *act = rit->getVal(); - if (act == curr) - continue; - if (act->get_tid() != curr->get_tid()) - continue; - /* Stop at the beginning of the thread */ - if (act->is_thread_start()) - break; - /* Stop once we reach a prior fence-acquire */ - if (act->is_fence() && act->is_acquire()) - break; - if (!act->is_read()) - continue; - /* read-acquire will find its own release sequences */ - if (act->is_acquire()) - continue; - - /* Establish hypothetical release sequences */ - ClockVector *cv = get_hb_from_write(act->get_reads_from()); - if (cv != NULL && curr->get_cv()->merge(cv)) - updated = true; - } + curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv()); } - return updated; } /** @@ -559,7 +654,7 @@ void ModelExecution::process_thread_action(ModelAction *curr) Thread *blocking = curr->get_thread_operand(); ModelAction *act = get_last_action(blocking->get_id()); synchronize(act, curr); - break; // WL: to be add (modified) + break; } case THREADONLY_FINISH: @@ -573,7 +668,7 @@ void ModelExecution::process_thread_action(ModelAction *curr) } /* Wake up any joining threads */ - for (unsigned int i = 0;i < get_num_threads();i++) { + for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) { Thread *waiting = get_thread(int_to_id(i)); if (waiting->waiting_on() == th && waiting->get_pending()->is_thread_join()) @@ -688,19 +783,36 @@ bool 
ModelExecution::synchronize(const ModelAction *first, ModelAction *second) * @return a bool that indicates whether the action is enabled. */ bool ModelExecution::check_action_enabled(ModelAction *curr) { - if (curr->is_lock()) { + switch (curr->get_type()) { + case ATOMIC_LOCK: { cdsc::mutex *lock = curr->get_mutex(); struct cdsc::mutex_state *state = lock->get_state(); - if (state->locked) + if (state->locked) { + Thread *lock_owner = (Thread *)state->locked; + Thread *curr_thread = get_thread(curr); + if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) { + return true; + } + return false; - } else if (curr->is_thread_join()) { + } + break; + } + case THREAD_JOIN: + case PTHREAD_JOIN: { Thread *blocking = curr->get_thread_operand(); if (!blocking->is_complete()) { return false; } - } else if (curr->is_sleep()) { + break; + } + case THREAD_SLEEP: { if (!fuzzer->shouldSleep(curr)) return false; + break; + } + default: + return true; } return true; @@ -720,29 +832,29 @@ bool ModelExecution::check_action_enabled(ModelAction *curr) { ModelAction * ModelExecution::check_current_action(ModelAction *curr) { ASSERT(curr); - bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw(); bool newly_explored = initialize_curr_action(&curr); DBG(); - wake_up_sleeping_actions(curr); + wake_up_sleeping_actions(); SnapVector * rf_set = NULL; + bool canprune = false; /* Build may_read_from set for newly-created actions */ - if (newly_explored && curr->is_read()) + if (curr->is_read() && newly_explored) { rf_set = build_may_read_from(curr); - - bool canprune = false; - - if (curr->is_read() && !second_part_of_rmw) { canprune = process_read(curr, rf_set); delete rf_set; } else ASSERT(rf_set == NULL); - /* Add the action to lists */ - if (!second_part_of_rmw) + /* Add the action to lists if not the second part of a rmw */ + if (newly_explored) { +#ifdef COLLECT_STAT + record_atomic_stats(curr); +#endif add_action_to_lists(curr, canprune); + } if (curr->is_write()) add_write_to_lists(curr); @@ -792,7 +904,7 @@ ModelAction * ModelExecution::process_rmw(ModelAction *act) { */ bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, - SnapVector * priorset, bool * canprune, bool check_only) + SnapVector * priorset, bool * canprune) { SnapVector *thrd_lists = obj_thrd_map.get(curr->get_location()); ASSERT(curr->is_read()); @@ -808,6 +920,8 @@ bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction * thrd_lists->resize(priv->next_thread_id); for(uint i = oldsize;i < priv->next_thread_id;i++) new (&(*thrd_lists)[i]) action_list_t(); + + fixup_action_list(thrd_lists); } ModelAction *prev_same_thread = NULL; @@ -853,8 +967,7 @@ bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction * *act < *last_sc_fence_thread_local) { if (mo_graph->checkReachable(rf, act)) return false; - if (!check_only) - priorset->push_back(act); + priorset->push_back(act); break; } /* C++, Section 29.3 statement 4 */ @@ -862,8 +975,7 @@ bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction * *act < *last_sc_fence_local) { if (mo_graph->checkReachable(rf, act)) return false; - if (!check_only) - priorset->push_back(act); + priorset->push_back(act); break; } /* C++, Section 29.3 statement 6 */ @@ -871,8 +983,7 @@ bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction * *act < *last_sc_fence_thread_before) { if (mo_graph->checkReachable(rf, act)) return false; - if (!check_only) - priorset->push_back(act); 
+ priorset->push_back(act); break; } } @@ -891,20 +1002,17 @@ bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction * if (act->is_write()) { if (mo_graph->checkReachable(rf, act)) return false; - if (!check_only) - priorset->push_back(act); + priorset->push_back(act); } else { ModelAction *prevrf = act->get_reads_from(); if (!prevrf->equals(rf)) { if (mo_graph->checkReachable(rf, prevrf)) return false; - if (!check_only) - priorset->push_back(prevrf); + priorset->push_back(prevrf); } else { if (act->get_tid() == curr->get_tid()) { //Can prune curr from obj list - if (!check_only) - *canprune = true; + *canprune = true; } } } @@ -1026,43 +1134,6 @@ void ModelExecution::w_modification_order(ModelAction *curr) } -/** - * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places - * some constraints. This method checks one the following constraint (others - * require compiler support): - * - * If X --hb-> Y --mo-> Z, then X should not read from Z. - * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z. - */ -bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader) -{ - SnapVector *thrd_lists = obj_thrd_map.get(reader->get_location()); - unsigned int i; - /* Iterate over all threads */ - for (i = 0;i < thrd_lists->size();i++) { - const ModelAction *write_after_read = NULL; - - /* Iterate over actions in thread, starting from most recent */ - action_list_t *list = &(*thrd_lists)[i]; - sllnode* rit; - for (rit = list->end();rit != NULL;rit=rit->getPrev()) { - ModelAction *act = rit->getVal(); - - /* Don't disallow due to act == reader */ - if (!reader->happens_before(act) || reader == act) - break; - else if (act->is_write()) - write_after_read = act; - else if (act->is_read() && act->get_reads_from() != NULL) - write_after_read = act->get_reads_from(); - } - - if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer)) - return false; - } - return true; -} - /** * Computes the clock vector that happens before propagates from this write. 
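 * The result is cached back on the write via set_rfcv() so it can be reused.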
* @@ -1100,9 +1171,17 @@ ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const { //operation that isn't release if (rf->get_last_fence_release()) { if (vec == NULL) - vec = rf->get_last_fence_release()->get_cv(); + vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL); else (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv()); + } else { + if (vec == NULL) { + if (rf->is_rmw()) { + vec = new ClockVector(NULL, NULL); + } + } else { + vec = new ClockVector(vec, NULL); + } } rf->set_rfcv(vec); } @@ -1128,12 +1207,12 @@ void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune) { int tid = id_to_int(act->get_tid()); if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) { - action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location()); + simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location()); act->setActionRef(list->add_back(act)); } // Update action trace, a total order of all actions - act->setTraceRef(action_trace.add_back(act)); + action_trace.addAction(act); // Update obj_thrd_map, a per location, per thread, order of actions @@ -1143,9 +1222,11 @@ void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune) vec->resize(priv->next_thread_id); for(uint i = oldsize;i < priv->next_thread_id;i++) new (&(*vec)[i]) action_list_t(); + + fixup_action_list(vec); } if (!canprune && (act->is_read() || act->is_write())) - act->setThrdMapRef((*vec)[tid].add_back(act)); + (*vec)[tid].addAction(act); // Update thrd_last_action, the last action taken by each thread if ((int)thrd_last_action.size() <= tid) @@ -1165,39 +1246,13 @@ void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune) } } -sllnode* insertIntoActionList(action_list_t *list, ModelAction *act) { - sllnode * rit = list->end(); - modelclock_t next_seq = act->get_seq_number(); - if (rit == NULL || (rit->getVal()->get_seq_number() <= next_seq)) - return list->add_back(act); - else { - for(;rit != NULL;rit=rit->getPrev()) { - if (rit->getVal()->get_seq_number() <= next_seq) { - return list->insertAfter(rit, act); - } - } - return list->add_front(act); - } +void insertIntoActionList(action_list_t *list, ModelAction *act) { + list->addAction(act); } -sllnode* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) { - sllnode * rit = list->end(); - modelclock_t next_seq = act->get_seq_number(); - if (rit == NULL) { - act->create_cv(NULL); - return list->add_back(act); - } else if (rit->getVal()->get_seq_number() <= next_seq) { - act->create_cv(rit->getVal()); - return list->add_back(act); - } else { - for(;rit != NULL;rit=rit->getPrev()) { - if (rit->getVal()->get_seq_number() <= next_seq) { - act->create_cv(rit->getVal()); - return list->insertAfter(rit, act); - } - } - return list->add_front(act); - } +void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) { + act->create_cv(NULL); + list->addAction(act); } /** @@ -1211,7 +1266,7 @@ sllnode* insertIntoActionListAndSetCV(action_list_t *list, ModelA void ModelExecution::add_normal_write_to_lists(ModelAction *act) { int tid = id_to_int(act->get_tid()); - act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act)); + insertIntoActionListAndSetCV(&action_trace, act); // Update obj_thrd_map, a per location, per thread, order of actions SnapVector *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location()); @@ -1220,8 +1275,10 @@ void ModelExecution::add_normal_write_to_lists(ModelAction *act) 
vec->resize(priv->next_thread_id); for(uint i=oldsize;inext_thread_id;i++) new (&(*vec)[i]) action_list_t(); + + fixup_action_list(vec); } - act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act)); + insertIntoActionList(&(*vec)[tid],act); ModelAction * lastact = thrd_last_action[tid]; // Update thrd_last_action, the last action taken by each thrad @@ -1231,13 +1288,13 @@ void ModelExecution::add_normal_write_to_lists(ModelAction *act) void ModelExecution::add_write_to_lists(ModelAction *write) { - SnapVector *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location()); + SnapVector *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location()); int tid = id_to_int(write->get_tid()); if (tid >= (int)vec->size()) { uint oldsize =vec->size(); vec->resize(priv->next_thread_id); for(uint i=oldsize;inext_thread_id;i++) - new (&(*vec)[i]) action_list_t(); + new (&(*vec)[i]) simple_action_list_t(); } write->setActionRef((*vec)[tid].add_back(write)); } @@ -1295,7 +1352,7 @@ ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const { /* All fences should have location FENCE_LOCATION */ - action_list_t *list = obj_map.get(FENCE_LOCATION); + simple_action_list_t *list = obj_map.get(FENCE_LOCATION); if (!list) return NULL; @@ -1331,7 +1388,7 @@ ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const { void *location = curr->get_location(); - action_list_t *list = obj_map.get(location); + simple_action_list_t *list = obj_map.get(location); if (list == NULL) return NULL; @@ -1387,7 +1444,7 @@ bool valequals(uint64_t val1, uint64_t val2, int size) { */ SnapVector * ModelExecution::build_may_read_from(ModelAction *curr) { - SnapVector *thrd_lists = obj_wr_thrd_map.get(curr->get_location()); + SnapVector *thrd_lists = obj_wr_thrd_map.get(curr->get_location()); unsigned int i; ASSERT(curr->is_read()); @@ -1402,7 +1459,7 @@ SnapVector * ModelExecution::build_may_read_from(ModelAction *cu if (thrd_lists != NULL) for (i = 0;i < thrd_lists->size();i++) { /* Iterate over actions in thread, starting from most recent */ - action_list_t *list = &(*thrd_lists)[i]; + simple_action_list_t *list = &(*thrd_lists)[i]; sllnode * rit; for (rit = list->end();rit != NULL;rit=rit->getPrev()) { ModelAction *act = rit->getVal(); @@ -1527,6 +1584,40 @@ void ModelExecution::print_summary() } +void ModelExecution::print_tail() +{ + model_print("Execution trace %d:\n", get_execution_number()); + + sllnode *it; + + model_print("------------------------------------------------------------------------------------\n"); + model_print("# t Action type MO Location Value Rf CV\n"); + model_print("------------------------------------------------------------------------------------\n"); + + unsigned int hash = 0; + + int length = 25; + int counter = 0; + SnapList list; + for (it = action_trace.end();it != NULL;it = it->getPrev()) { + if (counter > length) + break; + + ModelAction * act = it->getVal(); + list.push_front(act); + counter++; + } + + for (it = list.begin();it != NULL;it=it->getNext()) { + const ModelAction *act = it->getVal(); + if (act->get_seq_number() > 0) + act->print(); + hash = hash^(hash<<3)^(it->getVal()->hash()); + } + model_print("HASH %u\n", hash); + model_print("------------------------------------------------------------------------------------\n"); +} + /** * Add a Thread to the system for the first time. Should only be called once * per thread. 
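/*
 * Note on print_tail() above: it walks action_trace from the end, keeps the
 * 25 most recent actions, and then prints them oldest-first together with a
 * rolling hash of those actions.
 */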
@@ -1571,14 +1662,21 @@ Thread * ModelExecution::get_thread(const ModelAction *act) const * @return A Thread reference */ Thread * ModelExecution::get_pthread(pthread_t pid) { + // pid 1 is reserved for the main thread, pthread ids should start from 2 + if (pid == 1) + return get_thread(pid); + union { pthread_t p; uint32_t v; } x; x.p = pid; uint32_t thread_id = x.v; - if (thread_id < pthread_counter + 1) return pthread_map[thread_id]; - else return NULL; + + if (thread_id < pthread_counter + 1) + return pthread_map[thread_id]; + else + return NULL; } /** @@ -1643,9 +1741,6 @@ Thread * ModelExecution::take_step(ModelAction *curr) ASSERT(curr); /* Process this action in ModelHistory for records */ -#ifdef NEWFUZZER - model->get_history()->process_action( curr, curr->get_tid() ); -#endif if (curr_thrd->is_blocked() || curr_thrd->is_complete()) scheduler->remove_thread(curr_thrd); @@ -1656,22 +1751,16 @@ Thread * ModelExecution::take_step(ModelAction *curr) void ModelExecution::removeAction(ModelAction *act) { { - sllnode * listref = act->getTraceRef(); - if (listref != NULL) { - action_trace.erase(listref); - } + action_trace.removeAction(act); } { - sllnode * listref = act->getThrdMapRef(); - if (listref != NULL) { - SnapVector *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location()); - (*vec)[act->get_tid()].erase(listref); - } + SnapVector *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location()); + (*vec)[act->get_tid()].removeAction(act); } if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) { sllnode * listref = act->getActionRef(); if (listref != NULL) { - action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location()); + simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location()); list->erase(listref); } } else if (act->is_wait()) { @@ -1683,9 +1772,10 @@ void ModelExecution::removeAction(ModelAction *act) { } else if (act->is_free()) { sllnode * listref = act->getActionRef(); if (listref != NULL) { - SnapVector *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location()); + SnapVector *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location()); (*vec)[act->get_tid()].erase(listref); } + //Clear it from last_sc_map if (obj_last_sc_map.get(act->get_location()) == act) { obj_last_sc_map.remove(act->get_location()); @@ -1703,7 +1793,7 @@ ClockVector * ModelExecution::computeMinimalCV() { //Thread 0 isn't a real thread, so skip it.. for(unsigned int i = 1;i < thread_map.size();i++) { Thread * t = thread_map[i]; - if (t->get_state() == THREAD_COMPLETED) + if (t->is_complete()) continue; thread_id_t tid = int_to_id(i); ClockVector * cv = get_cv(tid);