11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
18 #include "newfuzzer.h"
20 #define INITIAL_THREAD_ID 0
/* Per-execution bookkeeping that must live in snapshotted memory so it is
 * rolled back between executions.  NOTE(review): this listing is elided —
 * the embedded original line numbers are non-contiguous, so some members
 * and braces are not shown. */
23 * Structure for holding small ModelChecker members that should be snapshotted
25 struct model_snapshot_members {
26 model_snapshot_members() :
27 /* First thread created will have id INITIAL_THREAD_ID */
28 next_thread_id(INITIAL_THREAD_ID),
29 used_sequence_numbers(0),
/* Destructor releases the heap-allocated bug_message objects. */
34 ~model_snapshot_members() {
35 for (unsigned int i = 0;i < bugs.size();i++)
/* (loop body elided — presumably deletes bugs[i]; confirm in full source) */
/* next_thread_id doubles as the count of threads created (see get_num_threads) */
40 unsigned int next_thread_id;
/* last sequence number handed out via get_next_seq_num() */
41 modelclock_t used_sequence_numbers;
42 SnapVector<bug_message *> bugs;
43 /** @brief Incorrectly-ordered synchronization was made */
49 /** @brief Constructor */
50 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
/* (initializer list is partially elided in this listing) */
54 thread_map(2), /* We'll always need at least 2 threads */
59 condvar_waiters_map(),
63 thrd_last_fence_release(),
64 priv(new struct model_snapshot_members ()),
65 mo_graph(new CycleGraph()),
66 fuzzer(new NewFuzzer()),
69 /* Initialize a model-checker thread, for special ModelActions */
70 model_thread = new Thread(get_next_id());
71 add_thread(model_thread);
/* Wire the fuzzer and scheduler back to this execution instance */
72 fuzzer->register_engine(m->get_history(), this);
73 scheduler->register_engine(this);
/* TLS key so per-thread state is torn down via tlsdestructor on thread exit */
75 pthread_key_create(&pthreadkey, tlsdestructor);
79 /** @brief Destructor */
80 ModelExecution::~ModelExecution()
/* Free every Thread created during this execution (model thread included) */
82 for (unsigned int i = 0;i < get_num_threads();i++)
83 delete get_thread(int_to_id(i));
/** @return the current execution number, delegated to the global ModelChecker */
89 int ModelExecution::get_execution_number() const
91 return model->get_execution_number();
/* Look up the per-location action list in `hash`, lazily creating an empty
 * list on first use.  (NULL-check / put / return elided in this listing.) */
94 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 2> * hash, void * ptr)
96 action_list_t *tmp = hash->get(ptr);
98 tmp = new action_list_t();
/* Same lazy-create pattern as get_safe_ptr_action, but for the per-location,
 * per-thread vector of action lists (obj_thrd_map values). */
104 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
106 SnapVector<action_list_t> *tmp = hash->get(ptr);
108 tmp = new SnapVector<action_list_t>();
114 /** @return a thread ID for a new Thread */
115 thread_id_t ModelExecution::get_next_id()
/* Post-increment: IDs are issued sequentially starting at INITIAL_THREAD_ID */
117 return priv->next_thread_id++;
120 /** @return the number of user threads created during this execution */
121 unsigned int ModelExecution::get_num_threads() const
/* next_thread_id is the count of IDs handed out so far */
123 return priv->next_thread_id;
126 /** @return a sequence number for a new ModelAction */
127 modelclock_t ModelExecution::get_next_seq_num()
/* Pre-increment: first issued sequence number is 1 (0 is the initial state) */
129 return ++priv->used_sequence_numbers;
132 /** @return a sequence number for a new ModelAction */
133 modelclock_t ModelExecution::get_curr_seq_num()
/* Peek at the most recently issued sequence number without advancing it */
135 return priv->used_sequence_numbers;
138 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
139 void ModelExecution::restore_last_seq_num()
/* Undoes exactly one get_next_seq_num() call */
141 priv->used_sequence_numbers--;
145 * @brief Should the current action wake up a given thread?
147 * @param curr The current action
148 * @param thread The thread that we might wake up
149 * @return True, if we should wake up the sleeping thread; false otherwise
/* NOTE(review): the `return` statements for each case are elided in this
 * listing (original line numbers are non-contiguous). */
151 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
153 const ModelAction *asleep = thread->get_pending();
154 /* Don't allow partial RMW to wake anyone up */
157 /* Synchronizing actions may have been backtracked */
158 if (asleep->could_synchronize_with(curr))
160 /* All acquire/release fences and fence-acquire/store-release */
161 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
163 /* Fence-release + store can awake load-acquire on the same location */
164 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
/* Fence-release in the writer's thread may publish to the sleeping acquire */
165 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
166 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
169 /* The sleep is literally sleeping */
170 if (asleep->is_sleep()) {
/* Fuzzer decides whether a sleeping thread may spuriously wake */
171 if (fuzzer->shouldWake(asleep))
/* Scan every thread in the scheduler's sleep set and wake any whose pending
 * action should be re-enabled by `curr` (per should_wake_up). */
178 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
180 for (unsigned int i = 0;i < get_num_threads();i++) {
181 Thread *thr = get_thread(int_to_id(i));
182 if (scheduler->is_sleep_set(thr)) {
183 if (should_wake_up(curr, thr)) {
184 /* Remove this thread from sleep set */
185 scheduler->remove_sleep(thr);
/* A literal sleep() that is woken records that it woke early */
186 if (thr->get_pending()->is_sleep())
187 thr->set_wakeup_state(true);
/** @brief Record a bug report (message) against the current execution */
193 void ModelExecution::assert_bug(const char *msg)
195 priv->bugs.push_back(new bug_message(msg));
199 /** @return True, if any bugs have been reported for this execution */
200 bool ModelExecution::have_bug_reports() const
202 return priv->bugs.size() != 0;
/** @return the vector of accumulated bug reports (body elided in this listing) */
205 SnapVector<bug_message *> * ModelExecution::get_bugs() const
211 * Check whether the current trace has triggered an assertion which should halt
214 * @return True, if the execution should be aborted; false otherwise
216 bool ModelExecution::has_asserted() const
218 return priv->asserted;
222 * Trigger a trace assertion which should cause this execution to be halted.
223 * This can be due to a detected bug or due to an infeasibility that should
226 void ModelExecution::set_assert()
228 priv->asserted = true;
232 * Check if we are in a deadlock. Should only be called at the end of an
233 * execution, although it should not give false positives in the middle of an
234 * execution (there should be some ENABLED thread).
236 * @return True if program is in a deadlock; false otherwise
/* NOTE(review): an enabled-thread short-circuit appears to be elided here
 * (original lines 243-244 missing) — confirm against full source. */
238 bool ModelExecution::is_deadlocked() const
240 bool blocking_threads = false;
241 for (unsigned int i = 0;i < get_num_threads();i++) {
242 thread_id_t tid = int_to_id(i);
245 Thread *t = get_thread(tid);
/* A non-model thread with a pending action is blocked waiting on something */
246 if (!t->is_model_thread() && t->get_pending())
247 blocking_threads = true;
249 return blocking_threads;
253 * Check if this is a complete execution. That is, have all thread completed
254 * execution (rather than exiting because sleep sets have forced a redundant
257 * @return True if the execution is complete.
259 bool ModelExecution::is_complete_execution() const
/* Any still-enabled thread means the execution has not run to completion
 * (the `return false`/`return true` tails are elided in this listing) */
261 for (unsigned int i = 0;i < get_num_threads();i++)
262 if (is_enabled(int_to_id(i)))
/* Synthesize a NONATOMIC_WRITE ModelAction for a plain (non-atomic) store
 * previously observed at `location`, back-dating it to the recorded clock and
 * thread, and insert it into the write lists / mo_graph so atomic reads can
 * legitimately read from it.  (The `return act;` is elided in this listing.) */
267 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
268 uint64_t value = *((const uint64_t *) location);
269 modelclock_t storeclock;
270 thread_id_t storethread;
/* Recover which thread wrote the location, and when */
271 getStoreThreadAndClock(location, &storethread, &storeclock);
/* Mark the location as converted so we don't synthesize it twice */
272 setAtomicStoreFlag(location);
273 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
274 act->set_seq_number(storeclock);
/* Lazily splice the write into the (already-built) ordered lists */
275 add_normal_write_to_lists(act);
276 add_write_to_lists(act);
277 w_modification_order(act);
278 model->get_history()->process_action(act, act->get_tid());
283 * Processes a read model action.
284 * @param curr is the read model action to process.
285 * @param rf_set is the set of model actions we can possibly read from
286 * @return True if processing this read updates the mo_graph.
288 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
290 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
/* If the location was last written by a plain store, convert that store into
 * a ModelAction so it becomes a legal reads-from candidate */
291 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
292 if (hasnonatomicstore) {
293 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
294 rf_set->push_back(nonatomicstore);
297 // Remove writes that violate read modification order
/* check_only pass (last arg true): filter without mutating priorset.
 * Swap-with-back removal keeps the scan O(n) without preserving order. */
300 while (i < rf_set->size()) {
301 ModelAction * rf = (*rf_set)[i];
302 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
303 (*rf_set)[i] = rf_set->back();
/* Fuzzer picks which remaining write this read observes */
310 int index = fuzzer->selectWrite(curr, rf_set);
312 ModelAction *rf = (*rf_set)[index];
315 bool canprune = false;
/* Real pass: collect mo-prior writes and add mo edges prior --mo--> rf */
316 if (r_modification_order(curr, rf, priorset, &canprune)) {
317 for(unsigned int i=0;i<priorset->size();i++) {
318 mo_graph->addEdge((*priorset)[i], rf);
321 get_thread(curr)->set_return_value(curr->get_return_value());
/* Prune: drop curr from its per-location/per-thread list when provably
 * redundant for future mo reasoning (plain atomic reads only) */
323 if (canprune && curr->get_type() == ATOMIC_READ) {
324 int tid = id_to_int(curr->get_tid());
325 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
326 curr->setThrdMapRef(NULL);
/* (failure path elided) remove the rejected candidate and retry */
331 (*rf_set)[index] = rf_set->back();
337 * Processes a lock, trylock, or unlock model action. @param curr is
338 * the read model action to process.
340 * The try lock operation checks whether the lock is taken. If not,
341 * it falls to the normal lock operation case. If so, it returns
344 * The lock operation has already been checked that it is enabled, so
345 * it just grabs the lock and synchronizes with the previous unlock.
347 * The unlock operation has to re-enable all of the threads that are
348 * waiting on the lock.
350 * @return True if synchronization was updated; false otherwise
/* NOTE(review): listing is elided — several break/return statements and the
 * scheduler->wake() calls inside the waiter loops are not shown. */
352 bool ModelExecution::process_mutex(ModelAction *curr)
354 cdsc::mutex *mutex = curr->get_mutex();
355 struct cdsc::mutex_state *state = NULL;
358 state = mutex->get_state();
360 switch (curr->get_type()) {
361 case ATOMIC_TRYLOCK: {
/* trylock succeeds iff nobody currently holds the lock */
362 bool success = !state->locked;
363 curr->set_try_lock(success);
/* return value 0 = failed, 1 = acquired (pthread-style success codes differ;
 * these are the model's internal conventions as written) */
365 get_thread(curr)->set_return_value(0);
368 get_thread(curr)->set_return_value(1);
370 //otherwise fall into the lock case
372 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
373 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
374 // assert_bug("Lock access before initialization");
/* record the new owner thread */
375 state->locked = get_thread(curr);
376 ModelAction *unlock = get_last_unlock(curr);
377 //synchronize with the previous unlock statement
378 if (unlock != NULL) {
379 synchronize(unlock, curr);
385 //TODO: DOESN'T REALLY IMPLEMENT SPURIOUS WAKEUPS CORRECTLY
/* (ATOMIC_WAIT path) fuzzer decides whether this wait actually blocks */
386 if (fuzzer->shouldWait(curr)) {
387 /* wake up the other threads */
388 for (unsigned int i = 0;i < get_num_threads();i++) {
389 Thread *t = get_thread(int_to_id(i));
390 Thread *curr_thrd = get_thread(curr);
391 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
395 /* unlock the lock - after checking who was waiting on it */
396 state->locked = NULL;
398 /* disable this thread */
399 get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
400 scheduler->sleep(get_thread(curr));
405 case ATOMIC_TIMEDWAIT:
406 case ATOMIC_UNLOCK: {
407 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY
408 //FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME
409 //AS NORMAL WAITS...THINK ABOUT PROBABILITIES
410 //THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE
411 //PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL
412 //WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY
413 //FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS
414 //MUST EVENMTUALLY RELEASE...
416 /* wake up the other threads */
417 for (unsigned int i = 0;i < get_num_threads();i++) {
418 Thread *t = get_thread(int_to_id(i));
419 Thread *curr_thrd = get_thread(curr);
420 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
424 /* unlock the lock - after checking who was waiting on it */
425 state->locked = NULL;
428 case ATOMIC_NOTIFY_ALL: {
429 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
430 //activate all the waiting threads
431 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
432 scheduler->wake(get_thread(rit->getVal()));
437 case ATOMIC_NOTIFY_ONE: {
438 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
/* fuzzer picks which single waiter to wake */
439 if (waiters->size() != 0) {
440 Thread * thread = fuzzer->selectNotify(waiters);
441 scheduler->wake(thread);
453 * Process a write ModelAction
454 * @param curr The ModelAction to process
455 * @return True if the mo_graph was updated or promises were resolved
457 void ModelExecution::process_write(ModelAction *curr)
/* Establish modification-order edges for this write, then report success */
459 w_modification_order(curr);
460 get_thread(curr)->set_return_value(VALUE_NONE);
464 * Process a fence ModelAction
465 * @param curr The ModelAction to process
466 * @return True if synchronization was updated
468 bool ModelExecution::process_fence(ModelAction *curr)
471 * fence-relaxed: no-op
472 * fence-release: only log the occurence (not in this function), for
473 * use in later synchronization
474 * fence-acquire (this function): search for hypothetical release
476 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
478 bool updated = false;
479 if (curr->is_acquire()) {
480 action_list_t *list = &action_trace;
481 sllnode<ModelAction *> * rit;
482 /* Find X : is_read(X) && X --sb-> curr */
/* Walk the total trace backwards, considering only curr's own thread */
483 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
484 ModelAction *act = rit->getVal();
487 if (act->get_tid() != curr->get_tid())
489 /* Stop at the beginning of the thread */
490 if (act->is_thread_start())
492 /* Stop once we reach a prior fence-acquire */
493 if (act->is_fence() && act->is_acquire())
497 /* read-acquire will find its own release sequences */
498 if (act->is_acquire())
501 /* Establish hypothetical release sequences */
/* Merge the happens-before CV propagated from the write this read observed */
502 ClockVector *cv = get_hb_from_write(act->get_reads_from());
503 if (cv != NULL && curr->get_cv()->merge(cv))
511 * @brief Process the current action for thread-related activity
513 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
514 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
515 * synchronization, etc. This function is a no-op for non-THREAD actions
516 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
518 * @param curr The current action
519 * @return True if synchronization was updated or a thread completed
/* NOTE(review): listing is elided — add_thread calls, case labels for the
 * join cases, and break statements are not all shown. */
521 void ModelExecution::process_thread_action(ModelAction *curr)
523 switch (curr->get_type()) {
524 case THREAD_CREATE: {
/* C11 thrd_create: thread params travel in the action's value field */
525 thrd_t *thrd = (thrd_t *)curr->get_location();
526 struct thread_params *params = (struct thread_params *)curr->get_value();
527 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
528 curr->set_thread_operand(th);
530 th->set_creation(curr);
533 case PTHREAD_CREATE: {
/* pthread ids are dense integers; the location receives the new index */
534 (*(uint32_t *)curr->get_location()) = pthread_counter++;
536 struct pthread_params *params = (struct pthread_params *)curr->get_value();
537 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
538 curr->set_thread_operand(th);
540 th->set_creation(curr);
/* keep the pthread_counter -> Thread* map large enough, then record it */
542 if ( pthread_map.size() < pthread_counter )
543 pthread_map.resize( pthread_counter );
544 pthread_map[ pthread_counter-1 ] = th;
/* (join case) synchronize with the last action of the joined thread */
549 Thread *blocking = curr->get_thread_operand();
550 ModelAction *act = get_last_action(blocking->get_id());
551 synchronize(act, curr);
/* (pthread-join case) same synchronization, different case label (elided) */
555 Thread *blocking = curr->get_thread_operand();
556 ModelAction *act = get_last_action(blocking->get_id());
557 synchronize(act, curr);
558 break; // WL: to be add (modified)
561 case THREADONLY_FINISH:
562 case THREAD_FINISH: {
563 Thread *th = get_thread(curr);
/* main-thread finish is handled specially (body elided here) */
564 if (curr->get_type() == THREAD_FINISH &&
565 th == model->getInitThread()) {
571 /* Wake up any joining threads */
572 for (unsigned int i = 0;i < get_num_threads();i++) {
573 Thread *waiting = get_thread(int_to_id(i));
574 if (waiting->waiting_on() == th &&
575 waiting->get_pending()->is_thread_join())
576 scheduler->wake(waiting);
/* (sleep case) park the thread with its pending action recorded */
585 Thread *th = get_thread(curr);
586 th->set_pending(curr);
587 scheduler->add_sleep(th);
596 * Initialize the current action by performing one or more of the following
597 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
598 * manipulating backtracking sets, allocating and
599 * initializing clock vectors, and computing the promises to fulfill.
601 * @param curr The current action, as passed from the user context; may be
602 * freed/invalidated after the execution of this function, with a different
603 * action "returned" its place (pass-by-reference)
604 * @return True if curr is a newly-explored action; false otherwise
606 bool ModelExecution::initialize_curr_action(ModelAction **curr)
/* RMW/RMWC completes a previously seen RMWR: merge into that earlier action
 * (the *curr reassignment/return for this branch is elided in this listing) */
608 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
609 ModelAction *newcurr = process_rmw(*curr);
615 ModelAction *newcurr = *curr;
/* Fresh action: stamp sequence number and build its clock vector */
617 newcurr->set_seq_number(get_next_seq_num());
618 /* Always compute new clock vector */
619 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
621 /* Assign most recent release fence */
622 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
624 return true; /* This was a new ModelAction */
629 * @brief Establish reads-from relation between two actions
631 * Perform basic operations involved with establishing a concrete rf relation,
632 * including setting the ModelAction data and checking for release sequences.
634 * @param act The action that is reading (must be a read)
635 * @param rf The action from which we are reading (must be a write)
637 * @return True if this read established synchronization
640 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
643 ASSERT(rf->is_write());
645 act->set_read_from(rf);
/* acquire loads synchronize with the release sequence headed by rf */
646 if (act->is_acquire()) {
647 ClockVector *cv = get_hb_from_write(rf);
/* (NULL check elided) fold the propagated happens-before into act's CV */
650 act->get_cv()->merge(cv);
655 * @brief Synchronizes two actions
657 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
658 * This function performs the synchronization as well as providing other hooks
659 * for other checks along with synchronization.
661 * @param first The left-hand side of the synchronizes-with relation
662 * @param second The right-hand side of the synchronizes-with relation
663 * @return True if the synchronization was successful (i.e., was consistent
664 * with the execution order); false otherwise
666 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
/* sw edges must agree with trace order: second cannot precede first */
668 if (*second < *first) {
669 ASSERT(0); //This should not happend
672 return second->synchronize_with(first);
676 * @brief Check whether a model action is enabled.
678 * Checks whether an operation would be successful (i.e., is a lock already
679 * locked, or is the joined thread already complete).
681 * For yield-blocking, yields are never enabled.
683 * @param curr is the ModelAction to check whether it is enabled.
684 * @return a bool that indicates whether the action is enabled.
/* NOTE(review): the blocking branches (set waiting_on / return false) and
 * the final return are elided in this listing. */
686 bool ModelExecution::check_action_enabled(ModelAction *curr) {
687 if (curr->is_lock()) {
/* lock is enabled only if nobody holds it (check elided) */
688 cdsc::mutex *lock = curr->get_mutex();
689 struct cdsc::mutex_state *state = lock->get_state();
692 } else if (curr->is_thread_join()) {
/* join blocks until the target thread completes */
693 Thread *blocking = curr->get_thread_operand();
694 if (!blocking->is_complete()) {
697 } else if (curr->is_sleep()) {
/* fuzzer may let a sleep proceed immediately */
698 if (!fuzzer->shouldSleep(curr))
706 * This is the heart of the model checker routine. It performs model-checking
707 * actions corresponding to a given "current action." Among other processes, it
708 * calculates reads-from relationships, updates synchronization clock vectors,
709 * forms a memory_order constraints graph, and handles replay/backtrack
710 * execution when running permutations of previously-observed executions.
712 * @param curr The current action to process
713 * @return The ModelAction that is actually executed; may be different than
716 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
/* RMW/RMWC is the second half of an RMWR: it merges into the earlier action
 * and must not be double-inserted into the lists below */
719 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
720 bool newly_explored = initialize_curr_action(&curr);
724 wake_up_sleeping_actions(curr);
726 SnapVector<ModelAction *> * rf_set = NULL;
727 /* Build may_read_from set for newly-created actions */
728 if (newly_explored && curr->is_read())
729 rf_set = build_may_read_from(curr);
731 if (curr->is_read() && !second_part_of_rmw) {
732 process_read(curr, rf_set);
/* non-read path must not have built an rf_set */
735 ASSERT(rf_set == NULL);
737 /* Add the action to lists */
738 if (!second_part_of_rmw)
739 add_action_to_lists(curr);
741 if (curr->is_write())
742 add_write_to_lists(curr);
/* Dispatch to the per-category processors (thread / write / fence / mutex);
 * the process_write/process_fence/process_mutex call lines are elided */
744 process_thread_action(curr);
746 if (curr->is_write())
749 if (curr->is_fence())
752 if (curr->is_mutex_op())
758 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
759 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
/* Merge `act` into the pending RMWR (the thread's last action), then record
 * the atomic read-write mo edge; returns the merged action (return elided) */
760 ModelAction *lastread = get_last_action(act->get_tid());
761 lastread->process_rmw(act);
763 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
769 * @brief Updates the mo_graph with the constraints imposed from the current
772 * Basic idea is the following: Go through each other thread and find
773 * the last action that happened before our read. Two cases:
775 * -# The action is a write: that write must either occur before
776 * the write we read from or be the write we read from.
777 * -# The action is a read: the write that that action read from
778 * must occur before the write we read from or be the same write.
780 * @param curr The current action. Must be a read.
781 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
782 * @param check_only If true, then only check whether the current action satisfies
783 * read modification order or not, without modifiying priorset and canprune.
785 * @return True if modification order edges were added; false otherwise
/* NOTE(review): listing is elided — the check_only guards around each
 * priorset->push_back, the `return false` infeasibility paths after each
 * checkReachable, and several break statements are not shown. */
788 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
789 SnapVector<ModelAction *> * priorset, bool * canprune, bool check_only)
791 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
792 ASSERT(curr->is_read());
794 /* Last SC fence in the current thread */
795 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
797 int tid = curr->get_tid();
799 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
800 if ((int)thrd_lists->size() <= tid) {
801 uint oldsize = thrd_lists->size();
802 thrd_lists->resize(priv->next_thread_id);
/* placement-new each freshly exposed slot so it is a valid empty list */
803 for(uint i = oldsize;i < priv->next_thread_id;i++)
804 new (&(*thrd_lists)[i]) action_list_t();
807 ModelAction *prev_same_thread = NULL;
/* Iterate over all threads, starting with curr's own thread (tid rotates) */
808 /* Iterate over all threads */
809 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
810 /* Last SC fence in thread tid */
811 ModelAction *last_sc_fence_thread_local = NULL;
813 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
815 /* Last SC fence in thread tid, before last SC fence in current thread */
816 ModelAction *last_sc_fence_thread_before = NULL;
817 if (last_sc_fence_local)
818 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
820 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
821 if (prev_same_thread != NULL &&
822 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
823 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
/* (continue elided) nothing new to learn from this thread this time */
827 /* Iterate over actions in thread, starting from most recent */
828 action_list_t *list = &(*thrd_lists)[tid];
829 sllnode<ModelAction *> * rit;
830 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
831 ModelAction *act = rit->getVal();
836 /* Don't want to add reflexive edges on 'rf' */
837 if (act->equals(rf)) {
838 if (act->happens_before(curr))
844 if (act->is_write()) {
845 /* C++, Section 29.3 statement 5 */
846 if (curr->is_seqcst() && last_sc_fence_thread_local &&
847 *act < *last_sc_fence_thread_local) {
/* cycle check: rf already ordered before act would be infeasible */
848 if (mo_graph->checkReachable(rf, act))
851 priorset->push_back(act);
854 /* C++, Section 29.3 statement 4 */
855 else if (act->is_seqcst() && last_sc_fence_local &&
856 *act < *last_sc_fence_local) {
857 if (mo_graph->checkReachable(rf, act))
860 priorset->push_back(act);
863 /* C++, Section 29.3 statement 6 */
864 else if (last_sc_fence_thread_before &&
865 *act < *last_sc_fence_thread_before) {
866 if (mo_graph->checkReachable(rf, act))
869 priorset->push_back(act);
875 * Include at most one act per-thread that "happens
878 if (act->happens_before(curr)) {
880 if (last_sc_fence_local == NULL ||
881 (*last_sc_fence_local < *act)) {
882 prev_same_thread = act;
885 if (act->is_write()) {
886 if (mo_graph->checkReachable(rf, act))
889 priorset->push_back(act);
/* act is a read: order the write it observed before rf instead */
891 ModelAction *prevrf = act->get_reads_from();
892 if (!prevrf->equals(rf)) {
893 if (mo_graph->checkReachable(rf, prevrf))
896 priorset->push_back(prevrf);
898 if (act->get_tid() == curr->get_tid()) {
899 //Can prune curr from obj list
913 * Updates the mo_graph with the constraints imposed from the current write.
915 * Basic idea is the following: Go through each other thread and find
916 * the lastest action that happened before our write. Two cases:
918 * (1) The action is a write => that write must occur before
921 * (2) The action is a read => the write that that action read from
922 * must occur before the current write.
924 * This method also handles two other issues:
926 * (I) Sequential Consistency: Making sure that if the current write is
927 * seq_cst, that it occurs after the previous seq_cst write.
929 * (II) Sending the write back to non-synchronizing reads.
931 * @param curr The current action. Must be a write.
932 * @param send_fv A vector for stashing reads to which we may pass our future
933 * value. If NULL, then don't record any future values.
934 * @return True if modification order edges were added; false otherwise
/* NOTE(review): listing is elided — the declaration of `i`, several
 * continue/break statements, and RMW edge handling lines are not shown. */
936 void ModelExecution::w_modification_order(ModelAction *curr)
938 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
940 ASSERT(curr->is_write());
/* collect all mo-predecessors, then add edges in one batch at the end */
942 SnapList<ModelAction *> edgeset;
944 if (curr->is_seqcst()) {
945 /* We have to at least see the last sequentially consistent write,
946 so we are initialized. */
947 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
948 if (last_seq_cst != NULL) {
949 edgeset.push_back(last_seq_cst);
951 //update map for next query
952 obj_last_sc_map.put(curr->get_location(), curr);
955 /* Last SC fence in the current thread */
956 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
958 /* Iterate over all threads */
959 for (i = 0;i < thrd_lists->size();i++) {
960 /* Last SC fence in thread i, before last SC fence in current thread */
961 ModelAction *last_sc_fence_thread_before = NULL;
962 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
963 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
965 /* Iterate over actions in thread, starting from most recent */
966 action_list_t *list = &(*thrd_lists)[i];
967 sllnode<ModelAction*>* rit;
968 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
969 ModelAction *act = rit->getVal();
972 * 1) If RMW and it actually read from something, then we
973 * already have all relevant edges, so just skip to next
976 * 2) If RMW and it didn't read from anything, we should
977 * whatever edge we can get to speed up convergence.
979 * 3) If normal write, we need to look at earlier actions, so
980 * continue processing list.
982 if (curr->is_rmw()) {
983 if (curr->get_reads_from() != NULL)
991 /* C++, Section 29.3 statement 7 */
992 if (last_sc_fence_thread_before && act->is_write() &&
993 *act < *last_sc_fence_thread_before) {
994 edgeset.push_back(act);
999 * Include at most one act per-thread that "happens
1002 if (act->happens_before(curr)) {
1004 * Note: if act is RMW, just add edge:
1006 * The following edge should be handled elsewhere:
1007 * readfrom(act) --mo--> act
1009 if (act->is_write())
1010 edgeset.push_back(act);
1011 else if (act->is_read()) {
1012 //if previous read accessed a null, just keep going
1013 edgeset.push_back(act->get_reads_from());
/* batch-insert: every collected predecessor gets an edge --mo--> curr */
1019 mo_graph->addEdges(&edgeset, curr);
1024 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1025 * some constraints. This method checks one the following constraint (others
1026 * require compiler support):
1028 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1029 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
/* NOTE(review): the declaration of `i`, loop break, and final returns are
 * elided in this listing. */
1031 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1033 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1035 /* Iterate over all threads */
1036 for (i = 0;i < thrd_lists->size();i++) {
/* earliest write in this thread that still happens-after the reader */
1037 const ModelAction *write_after_read = NULL;
1039 /* Iterate over actions in thread, starting from most recent */
1040 action_list_t *list = &(*thrd_lists)[i];
1041 sllnode<ModelAction *>* rit;
1042 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1043 ModelAction *act = rit->getVal();
1045 /* Don't disallow due to act == reader */
1046 if (!reader->happens_before(act) || reader == act)
1048 else if (act->is_write())
1049 write_after_read = act;
1050 else if (act->is_read() && act->get_reads_from() != NULL)
1051 write_after_read = act->get_reads_from();
/* writer mo-after something hb-after reader => reading writer is illegal */
1054 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1061 * Computes the clock vector that happens before propagates from this write.
1063 * @param rf The action that might be part of a release sequence. Must be a
1065 * @return ClockVector of happens before relation.
/* NOTE(review): listing is elided — several assignments inside the branch
 * chain, the set_rfcv caching, and the final return are not shown. */
1068 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
/* Phase 1: walk the reads-from chain of RMWs backwards, stacking the RMWs
 * whose propagated CV has not been computed/cached yet (rfcv == NULL). */
1069 SnapVector<ModelAction *> * processset = NULL;
1070 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1071 ASSERT(rf->is_write());
1072 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1074 if (processset == NULL)
1075 processset = new SnapVector<ModelAction *>();
1076 processset->push_back(rf);
/* Phase 2: unwind the stack, building/merging CVs from oldest to newest */
1079 int i = (processset == NULL) ? 0 : processset->size();
1081 ClockVector * vec = NULL;
1083 if (rf->get_rfcv() != NULL) {
/* cached result: reuse the previously computed propagated CV */
1084 vec = rf->get_rfcv();
1085 } else if (rf->is_acquire() && rf->is_release()) {
1087 } else if (rf->is_release() && !rf->is_rmw()) {
1089 } else if (rf->is_release()) {
1090 //have rmw that is release and doesn't have a rfcv
1091 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1094 //operation that isn't release
1095 if (rf->get_last_fence_release()) {
/* relaxed write after a release fence: the fence's CV propagates */
1097 vec = rf->get_last_fence_release()->get_cv();
1099 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1105 rf = (*processset)[i];
1109 if (processset != NULL)
1115 * Performs various bookkeeping operations for the current ModelAction. For
1116 * instance, adds action to the per-object, per-thread action vector and to the
1117 * action trace list of all thread actions.
1119 * @param act is the ModelAction to add.
1121 void ModelExecution::add_action_to_lists(ModelAction *act)
1123 int tid = id_to_int(act->get_tid());
/* SC fences and unlocks are tracked in the per-location obj_map */
1124 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1125 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1126 act->setActionRef(list->add_back(act));
1129 // Update action trace, a total order of all actions
1130 act->setTraceRef(action_trace.add_back(act));
1133 // Update obj_thrd_map, a per location, per thread, order of actions
1134 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
/* grow per-thread vector on demand; placement-new the new empty slots */
1135 if ((int)vec->size() <= tid) {
1136 uint oldsize = vec->size();
1137 vec->resize(priv->next_thread_id);
1138 for(uint i = oldsize;i < priv->next_thread_id;i++)
1139 new (&(*vec)[i]) action_list_t();
1141 act->setThrdMapRef((*vec)[tid].add_back(act));
1143 // Update thrd_last_action, the last action taken by each thread
1144 if ((int)thrd_last_action.size() <= tid)
1145 thrd_last_action.resize(get_num_threads());
1146 thrd_last_action[tid] = act;
1148 // Update thrd_last_fence_release, the last release fence taken by each thread
1149 if (act->is_fence() && act->is_release()) {
1150 if ((int)thrd_last_fence_release.size() <= tid)
1151 thrd_last_fence_release.resize(get_num_threads());
1152 thrd_last_fence_release[tid] = act;
/* waits are additionally indexed under the mutex they guard (stored in the
 * action's value field) so notify can find them */
1155 if (act->is_wait()) {
1156 void *mutex_loc = (void *) act->get_value();
1157 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
1159 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1160 if ((int)vec->size() <= tid) {
1161 uint oldsize = vec->size();
1162 vec->resize(priv->next_thread_id);
1163 for(uint i = oldsize;i < priv->next_thread_id;i++)
1164 new (&(*vec)[i]) action_list_t();
1166 act->setThrdMapRef((*vec)[tid].add_back(act));
/* Insert `act` into `list` keeping sequence-number order.  Fast path: append
 * when the list is empty or already ends at act's sequence number; otherwise
 * scan backwards for the matching sequence number and insert after it.
 * (NOTE(review): the comparison on the fast path reads `==`; a `<=` ordering
 * check may be elided in this listing — confirm against full source.) */
1170 sllnode<ModelAction *>* insertIntoActionList(action_list_t *list, ModelAction *act) {
1171 sllnode<ModelAction*> * rit = list->end();
1172 modelclock_t next_seq = act->get_seq_number();
1173 if (rit == NULL || (rit->getVal()->get_seq_number() == next_seq))
1174 return list->add_back(act);
1176 for(;rit != NULL;rit=rit->getPrev()) {
1177 if (rit->getVal()->get_seq_number() == next_seq) {
1178 return list->insertAfter(rit, act);
/* Like insertIntoActionList, but additionally initializes the inserted
 * action's clock vector from the action it is placed next to (or from
 * scratch when the list has no suitable predecessor). */
sllnode<ModelAction *>* insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
	sllnode<ModelAction*> * rit = list->end();
	modelclock_t next_seq = act->get_seq_number();
	// No predecessor: build the clock vector from scratch
	act->create_cv(NULL);
	} else if (rit->getVal()->get_seq_number() == next_seq) {
	// Tail shares the sequence number: derive the CV from it and append
	act->create_cv(rit->getVal());
	return list->add_back(act);
	// Otherwise scan backward for the node with the matching sequence number
	for(;rit != NULL;rit=rit->getPrev()) {
		if (rit->getVal()->get_seq_number() == next_seq) {
			act->create_cv(rit->getVal());
			return list->insertAfter(rit, act);
/**
 * Performs various bookkeeping operations for a normal write.  The
 * complication is that we are typically inserting a normal write
 * lazily, so we need to insert it into the middle of lists.
 *
 * @param act is the ModelAction to add.
 */
void ModelExecution::add_normal_write_to_lists(ModelAction *act)
	int tid = id_to_int(act->get_tid());
	// Insert into the global trace in sequence-number order, setting the CV
	act->setTraceRef(insertIntoActionListAndSetCV(&action_trace, act));
	// Update obj_thrd_map, a per location, per thread, order of actions
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size()) {
		// Grow to cover all thread ids; placement-new each new slot
		uint oldsize =vec->size();
		vec->resize(priv->next_thread_id);
		for(uint i=oldsize;i<priv->next_thread_id;i++)
			new (&(*vec)[i]) action_list_t();
	act->setThrdMapRef(insertIntoActionList(&(*vec)[tid],act));
	ModelAction * lastact = thrd_last_action[tid];
	// Update thrd_last_action, the last action taken by each thread
	if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
		thrd_last_action[tid] = act;
/* Index a write action by location and thread in obj_wr_thrd_map, storing
 * a back-reference on the write for later removal. */
void ModelExecution::add_write_to_lists(ModelAction *write) {
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
	int tid = id_to_int(write->get_tid());
	if (tid >= (int)vec->size()) {
		// Grow to cover all thread ids; placement-new each new slot
		uint oldsize =vec->size();
		vec->resize(priv->next_thread_id);
		for(uint i=oldsize;i<priv->next_thread_id;i++)
			new (&(*vec)[i]) action_list_t();
	write->setActionRef((*vec)[tid].add_back(write));
1248 * @brief Get the last action performed by a particular Thread
1249 * @param tid The thread ID of the Thread in question
1250 * @return The last action in the thread
1252 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1254 int threadid = id_to_int(tid);
1255 if (threadid < (int)thrd_last_action.size())
1256 return thrd_last_action[id_to_int(tid)];
1262 * @brief Get the last fence release performed by a particular Thread
1263 * @param tid The thread ID of the Thread in question
1264 * @return The last fence release in the thread, if one exists; NULL otherwise
1266 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1268 int threadid = id_to_int(tid);
1269 if (threadid < (int)thrd_last_fence_release.size())
1270 return thrd_last_fence_release[id_to_int(tid)];
1276 * Gets the last memory_order_seq_cst write (in the total global sequence)
1277 * performed on a particular object (i.e., memory location), not including the
1279 * @param curr The current ModelAction; also denotes the object location to
1281 * @return The last seq_cst write
1283 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1285 void *location = curr->get_location();
1286 return obj_last_sc_map.get(location);
/**
 * Gets the last memory_order_seq_cst fence (in the total global sequence)
 * performed in a particular thread, prior to a particular fence.
 * @param tid The ID of the thread to check
 * @param before_fence The fence from which to begin the search; if NULL, then
 * search for the most recent fence in the thread.
 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
 */
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
	/* All fences should have location FENCE_LOCATION */
	action_list_t *list = obj_map.get(FENCE_LOCATION);
	sllnode<ModelAction*>* rit = list->end();
	// First rewind the iterator to before_fence itself (when one was given)
	for (;rit != NULL;rit=rit->getPrev())
		if (rit->getVal() == before_fence)
	ASSERT(rit->getVal() == before_fence);
	// Then keep walking backward, looking for an SC fence from this thread
	for (;rit != NULL;rit=rit->getPrev()) {
		ModelAction *act = rit->getVal();
		if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
/**
 * Gets the last unlock operation performed on a particular mutex (i.e., memory
 * location). This function identifies the mutex according to the current
 * action, which is presumed to perform on the same mutex.
 * @param curr The current ModelAction; also denotes the object location to
 * look up
 * @return The last unlock operation
 */
ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
	void *location = curr->get_location();
	action_list_t *list = obj_map.get(location);
	/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
	// Walk backward from the tail; waits are treated like unlocks here
	sllnode<ModelAction*>* rit;
	for (rit = list->end();rit != NULL;rit=rit->getPrev())
		if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
			return rit->getVal();
/* Get the most recent action by this thread; when the thread has no actions
 * yet, fall back to the action that created it (Thread::get_creation). */
ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
	ModelAction *parent = get_last_action(tid);
	// No action yet: use the thread's creation action instead
	parent = get_thread(tid)->get_creation();
1357 * Returns the clock vector for a given thread.
1358 * @param tid The thread whose clock vector we want
1359 * @return Desired clock vector
1361 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1363 ModelAction *firstaction=get_parent_action(tid);
1364 return firstaction != NULL ? firstaction->get_cv() : NULL;
/* Compare two values for equality, considering only the low `size` bytes
 * of each (size is the access width of the atomic operation). */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	// 1-byte access
	return ((uint8_t)val1) == ((uint8_t)val2);
	// 2-byte access
	return ((uint16_t)val1) == ((uint16_t)val2);
	// 4-byte access
	return ((uint32_t)val1) == ((uint32_t)val2);
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from, as well as any previously-observed future values that must still be valid.
 *
 * @param curr is the current ModelAction that we are exploring; it must be a
 * read operation.
 * @return A newly-allocated vector of candidate writes (rf_set)
 */
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
	SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
	ASSERT(curr->is_read());
	ModelAction *last_sc_write = NULL;
	// For SC reads, remember the most recent SC write to this location
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);
	SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
	/* Iterate over all threads */
	if (thrd_lists != NULL)
		for (i = 0;i < thrd_lists->size();i++) {
			/* Iterate over actions in thread, starting from most recent */
			action_list_t *list = &(*thrd_lists)[i];
			sllnode<ModelAction *> * rit;
			for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
				ModelAction *act = rit->getVal();
				/* Don't consider more than one seq_cst write if we are a seq_cst read. */
				bool allow_read = true;
				if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				/* Need to check whether we will have two RMW reading from the same value */
				if (curr->is_rmwr()) {
					/* It is okay if we have a failing CAS */
					if (!curr->is_rmwrcas() ||
							valequals(curr->get_value(), act->get_value(), curr->getSize())) {
						//Need to make sure we aren't the second RMW
						CycleNode * node = mo_graph->getNode_noCreate(act);
						if (node != NULL && node->getRMW() != NULL) {
							//we are the second RMW
				/* Only add feasible reads */
				rf_set->push_back(act);
				/* Include at most one act per-thread that "happens before" curr */
				if (act->happens_before(curr))
	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		model_print("End printing read_from_past\n");
/* Print a formatted table of the actions in a list, followed by a hash
 * summarizing the whole trace (useful for comparing executions). */
static void print_list(action_list_t *list)
	sllnode<ModelAction*> *it;
	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	model_print("------------------------------------------------------------------------------------\n");
	unsigned int hash = 0;
	for (it = list->begin();it != NULL;it=it->getNext()) {
		const ModelAction *act = it->getVal();
		// Only actions with a real (nonzero) sequence number are considered
		if (act->get_seq_number() > 0)
		// Fold each action's hash into a running trace hash
		hash = hash^(hash<<3)^(it->getVal()->hash());
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
#if SUPPORT_MOD_ORDER_DUMP
/* Dump the modification-order graph, plus reads-from (rf) and
 * sequenced-before (sb) edges, as a Graphviz file "<filename>.dot". */
void ModelExecution::dumpGraph(char *filename)
	// NOTE(review): sprintf into a fixed buffer (declaration not visible in
	// this span) — assumes filename is short; snprintf would be safer
	sprintf(buffer, "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	// One slot per thread remembering its previous action, for sb edges
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
	for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
		ModelAction *act = it->getVal();
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			mo_graph->dot_print_edge(file,
				act->get_reads_from(),
				"label=\"rf\", color=red, weight=2");
		if (thread_array[act->get_tid()]) {
			// NOTE(review): indexing mixes a raw act->get_tid() here with
			// id_to_int(act->get_tid()) below — presumably the same value;
			// worth unifying on id_to_int
			mo_graph->dot_print_edge(file,
				thread_array[id_to_int(act->get_tid())],
				"label=\"sb\", color=blue, weight=400");
		thread_array[act->get_tid()] = act;
	fprintf(file, "}\n");
	model_free(thread_array);
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary()
#if SUPPORT_MOD_ORDER_DUMP
	// Dump the mo-graph and a .dot rendering, tagged with the execution number
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
	// Header line, with flags for sleep-set redundancy and detected bugs
	model_print("Execution trace %d:", get_execution_number());
	if (scheduler->all_threads_sleeping())
		model_print(" SLEEP-SET REDUNDANT");
	if (have_bug_reports())
		model_print(" DETECTED BUG(S)");
	print_list(&action_trace);
/**
 * Add a Thread to the system for the first time. Should only be called once
 * per thread.
 * @param t The Thread to add
 */
void ModelExecution::add_thread(Thread *t)
	unsigned int i = id_to_int(t->get_id());
	if (i >= thread_map.size())
		thread_map.resize(i + 1);
	// The special model-checker thread is never scheduled
	if (!t->is_model_thread())
		scheduler->add_thread(t);
/**
 * @brief Get a Thread reference by its ID
 * @param tid The Thread's ID
 * @return A Thread reference
 */
Thread * ModelExecution::get_thread(thread_id_t tid) const
	unsigned int i = id_to_int(tid);
	// Bounds-check against the number of threads registered so far
	if (i < thread_map.size())
		return thread_map[i];
1561 * @brief Get a reference to the Thread in which a ModelAction was executed
1562 * @param act The ModelAction
1563 * @return A Thread reference
1565 Thread * ModelExecution::get_thread(const ModelAction *act) const
1567 return get_thread(act->get_tid());
/**
 * @brief Get a Thread reference by its pthread ID
 * @param pid The pthread's ID
 * @return A Thread reference
 */
Thread * ModelExecution::get_pthread(pthread_t pid) {
	// NOTE(review): `x` is declared in lines not visible here — presumably a
	// union/struct view of pid exposing the numeric id as x.v; confirm
	uint32_t thread_id = x.v;
	// Only ids already registered (<= pthread_counter) map to a Thread
	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1587 * @brief Check if a Thread is currently enabled
1588 * @param t The Thread to check
1589 * @return True if the Thread is currently enabled
1591 bool ModelExecution::is_enabled(Thread *t) const
1593 return scheduler->is_enabled(t);
1597 * @brief Check if a Thread is currently enabled
1598 * @param tid The ID of the Thread to check
1599 * @return True if the Thread is currently enabled
1601 bool ModelExecution::is_enabled(thread_id_t tid) const
1603 return scheduler->is_enabled(tid);
/**
 * @brief Select the next thread to execute based on the current action
 *
 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
 * actions should be followed by the execution of their child thread. In either
 * case, the current action should determine the next thread schedule.
 *
 * @param curr The current action
 * @return The next thread to run, if the current action will determine this
 * selection; otherwise NULL
 */
Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
	/* Do not split atomic RMW */
	if (curr->is_rmwr() && !paused_by_fuzzer(curr))
		return get_thread(curr);
	/* Follow CREATE with the created thread */
	/* which is not needed, because model.cc takes care of this */
	if (curr->get_type() == THREAD_CREATE)
		return curr->get_thread_operand();
	if (curr->get_type() == PTHREAD_CREATE) {
		return curr->get_thread_operand();
1632 /** @param act A read atomic action */
1633 bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
1635 ASSERT(act->is_read());
1637 // Actions paused by fuzzer have their sequence number reset to 0
1638 return act->get_seq_number() == 0;
/**
 * Takes the next step in the execution, if possible.
 * @param curr The current step to take
 * @return Returns the next Thread to run, if any; NULL if this execution
 * should not determine the selection
 */
Thread * ModelExecution::take_step(ModelAction *curr)
	Thread *curr_thrd = get_thread(curr);
	ASSERT(curr_thrd->get_state() == THREAD_READY);
	ASSERT(check_action_enabled(curr));	/* May have side effects? */
	// check_current_action() may substitute a different ModelAction;
	// use its return value from here on
	curr = check_current_action(curr);
	/* Process this action in ModelHistory for records */
	model->get_history()->process_action( curr, curr->get_tid() );
	// Blocked or finished threads leave the scheduler's runnable set
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);
	return action_select_next_thread(curr);
/* Unlink an action from every bookkeeping list that references it, using the
 * list-node back-references stored on the action by add_action_to_lists /
 * add_write_to_lists, then drop it from the modification-order graph. */
void ModelExecution::removeAction(ModelAction *act) {
	// Remove from the global action trace
	sllnode<ModelAction *> * listref = act->getTraceRef();
	if (listref != NULL) {
		action_trace.erase(listref);
	// Remove from the per-location, per-thread list (obj_thrd_map)
	sllnode<ModelAction *> * listref = act->getThrdMapRef();
	if (listref != NULL) {
		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
		(*vec)[act->get_tid()].erase(listref);
	// SC fences and unlocks were indexed by location in obj_map
	if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
		sllnode<ModelAction *> * listref = act->getActionRef();
		if (listref != NULL) {
			action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
			list->erase(listref);
	} else if (act->is_wait()) {
		// Waits were additionally indexed under their mutex's location
		sllnode<ModelAction *> * listref = act->getActionRef();
		if (listref != NULL) {
			void *mutex_loc = (void *) act->get_value();
			get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
	} else if (act->is_write()) {
		// Writes were indexed in obj_wr_thrd_map
		sllnode<ModelAction *> * listref = act->getActionRef();
		if (listref != NULL) {
			SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
			(*vec)[act->get_tid()].erase(listref);
		//Remove from Cyclegraph
		mo_graph->freeAction(act);
/* Compute the component-wise minimum clock vector over all live (not yet
 * completed) user threads.  Used by collectActions to decide which actions
 * are old enough to garbage-collect.
 * NOTE(review): if every thread has completed, this returns NULL — callers
 * dereference the result, so confirm at least one live thread is guaranteed. */
ClockVector * ModelExecution::computeMinimalCV() {
	ClockVector *cvmin = NULL;
	//Thread 0 isn't a real thread, so skip it..
	for(unsigned int i = 1;i < thread_map.size();i++) {
		Thread * t = thread_map[i];
		// Completed threads do not constrain the minimum
		if (t->get_state() == THREAD_COMPLETED)
		thread_id_t tid = int_to_id(i);
		ClockVector * cv = get_cv(tid);
		// First contributing thread seeds the minimum; later ones min-merge
		cvmin = new ClockVector(cv, NULL);
		cvmin->minmerge(cv);
//Tuning knobs for trace garbage collection:
//How often to check for memory
//How much of the trace to always keep
//Whether to sacrifice completeness...i.e., remove visible writes

/* Garbage-collect actions that can no longer influence the execution:
 * actions older than every live thread's minimum clock (and, optionally,
 * visible writes when params->removevisible is set). */
void ModelExecution::collectActions() {
	//Compute minimal clock vector for all live threads
	ClockVector *cvmin = computeMinimalCV();
	SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
	// Never free actions newer than (used sequence numbers - traceminsize)
	modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
	//Next walk action trace... When we hit an action, see if it is
	//invisible (e.g., earlier than the first before the minimum
	//clock for the thread... if so erase it and all previous
	//actions in cyclegraph
	sllnode<ModelAction*> * it;
	for (it = action_trace.begin();it != NULL;it=it->getNext()) {
		ModelAction *act = it->getVal();
		modelclock_t actseq = act->get_seq_number();
		//See if we are done
		if (actseq > maxtofree)
		thread_id_t act_tid = act->get_tid();
		modelclock_t tid_clock = cvmin->getClock(act_tid);
		// Free if below the thread's minimum clock, or unconditionally
		// when removevisible sacrifices completeness
		if (actseq <= tid_clock || params->removevisible) {
			ModelAction * write;
			if (act->is_write()) {
			} else if (act->is_read()) {
				write = act->get_reads_from();
			//Mark everything earlier in MO graph to be freed
			CycleNode * cn = mo_graph->getNode_noCreate(write);
			queue->push_back(cn);
			// Backward traversal over incoming mo edges, marking READY_FREE
			while(!queue->empty()) {
				CycleNode * node = queue->back();
				for(unsigned int i=0;i<node->getNumInEdges();i++) {
					CycleNode * prevnode = node->getInEdge(i);
					ModelAction * prevact = prevnode->getAction();
					if (prevact->get_type() != READY_FREE) {
						prevact->set_free();
						queue->push_back(prevnode);
	// Continue from where the previous walk stopped, removing/cleaning
	// actions that are now marked free
	for (;it != NULL;) {
		ModelAction *act = it->getVal();
		//Do iteration early since we may delete node...
		if (act->is_read()) {
			if (act->get_reads_from()->is_free()) {
			const ModelAction *rel_fence =act->get_last_fence_release();
			if (rel_fence != NULL) {
				modelclock_t relfenceseq = rel_fence->get_seq_number();
				thread_id_t relfence_tid = rel_fence->get_tid();
				modelclock_t tid_clock = cvmin->getClock(relfence_tid);
				//Remove references to irrelevant release fences
				if (relfenceseq <= tid_clock)
					act->set_last_fence_release(NULL);
		} else if (act->is_free()) {
		} else if (act->is_write()) {
			//Do nothing with write that hasn't been marked to be freed
		} else if (act->is_fence()) {
			//Note that acquire fences can always be safely
			//removed, but could incur extra overheads in
			//traversals. Removing them before the cvmin seems
			//like a good compromise.
			//Release fences before the cvmin don't do anything
			//because everyone has already synchronized.
			//Sequentially fences before cvmin are redundant
			//because happens-before will enforce same
			//ordering.
			modelclock_t actseq = act->get_seq_number();
			thread_id_t act_tid = act->get_tid();
			modelclock_t tid_clock = cvmin->getClock(act_tid);
			if (actseq <= tid_clock) {
		//need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish
		//lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
		//need to keep most recent unlock/wait for each lock
		if(act->is_unlock() || act->is_wait()) {
			ModelAction * lastlock = get_last_unlock(act);
			if (lastlock != act) {
1841 Fuzzer * ModelExecution::getFuzzer() {