12 #include "clockvector.h"
13 #include "cyclegraph.h"
15 #include "threads-model.h"
16 #include "bugmessage.h"
19 #include "newfuzzer.h"
// File-scope statistics counters, one per category of atomic/synchronization
// operation; incremented in record_atomic_stats() and reported by
// print_atomic_accesses() below.
// NOTE(review): this extract elides some lines; additional counters may exist
// between these in the full file.
22 static unsigned int atomic_load_count = 0;
23 static unsigned int atomic_store_count = 0;
24 static unsigned int atomic_rmw_count = 0;
26 static unsigned int atomic_fence_count = 0;
27 static unsigned int atomic_lock_count = 0;
28 static unsigned int atomic_trylock_count = 0;
29 static unsigned int atomic_unlock_count = 0;
30 static unsigned int atomic_notify_count = 0;
31 static unsigned int atomic_wait_count = 0;
32 static unsigned int atomic_timedwait_count = 0;
36 * Structure for holding small ModelChecker members that should be snapshotted
38 struct model_snapshot_members {
39 model_snapshot_members() :
40 /* First thread created will have id INITIAL_THREAD_ID */
41 next_thread_id(INITIAL_THREAD_ID),
42 used_sequence_numbers(0),
// Destructor releases the heap-allocated bug_message objects collected in
// `bugs` (pushed by ModelExecution::assert_bug).
47 ~model_snapshot_members() {
48 for (unsigned int i = 0;i < bugs.size();i++)
// next_thread_id doubles as the count of threads created so far
// (see ModelExecution::get_num_threads).
53 unsigned int next_thread_id;
// Highest sequence number handed out via get_next_seq_num().
54 modelclock_t used_sequence_numbers;
55 SnapVector<bug_message *> bugs;
56 /** @brief Incorrectly-ordered synchronization was made */
62 /** @brief Constructor */
// Builds one execution: wires the fuzzer and scheduler back to this
// ModelExecution, and creates the TLS-destructor pthread key.
63 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
67 thread_map(2), /* We'll always need at least 2 threads */
72 condvar_waiters_map(),
79 thrd_last_fence_release(),
// priv holds the small snapshotted state (thread ids, seq numbers, bugs)
80 priv(new struct model_snapshot_members ()),
81 mo_graph(new CycleGraph()),
83 fuzzer(new NewFuzzer()),
89 /* Initialize a model-checker thread, for special ModelActions */
90 fuzzer->register_engine(m, this);
91 scheduler->register_engine(this);
93 pthread_key_create(&pthreadkey, tlsdestructor);
97 /** @brief Destructor */
// Frees every Thread object this execution created.
98 ModelExecution::~ModelExecution()
100 for (unsigned int i = 0;i < get_num_threads();i++)
101 delete get_thread(int_to_id(i));
/** @return the global execution number, forwarded from the ModelChecker */
107 int ModelExecution::get_execution_number() const
109 return model->get_execution_number();
/*
 * get_safe_ptr_* helpers: look up the per-location list in the hash table,
 * lazily allocating (and, presumably, inserting) an empty list on first use
 * so callers always receive a non-NULL pointer.
 * NOTE(review): the insert-into-hash lines are elided in this extract.
 */
112 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
114 SnapVector<action_list_t> *tmp = hash->get(ptr);
116 tmp = new SnapVector<action_list_t>();
122 static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
124 simple_action_list_t *tmp = hash->get(ptr);
126 tmp = new simple_action_list_t();
132 static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
134 SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
136 tmp = new SnapVector<simple_action_list_t>();
143 * When vectors of action lists are reallocated due to resize, the root address of
144 * action lists may change. Hence we need to fix the parent pointer of the children
147 static void fixup_action_list(SnapVector<action_list_t> * vec)
149 for (uint i = 0;i < vec->size();i++) {
150 action_list_t * list = &(*vec)[i];
// Bump the file-scope counter matching this action's type.
// NOTE(review): several cases (loads, rmw, lock, wait, default) are elided
// in this extract of the switch.
157 static inline void record_atomic_stats(ModelAction * act)
159 switch (act->get_type()) {
161 atomic_store_count++;
170 atomic_fence_count++;
176 atomic_trylock_count++;
179 atomic_unlock_count++;
181 case ATOMIC_NOTIFY_ONE:
182 case ATOMIC_NOTIFY_ALL:
183 atomic_notify_count++;
188 case ATOMIC_TIMEDWAIT:
189 atomic_timedwait_count++;
// Dump all atomic-operation statistics counters via model_print.
195 void print_atomic_accesses()
197 model_print("atomic store count: %u\n", atomic_store_count);
198 model_print("atomic load count: %u\n", atomic_load_count);
199 model_print("atomic rmw count: %u\n", atomic_rmw_count);
201 model_print("atomic fence count: %u\n", atomic_fence_count);
202 model_print("atomic lock count: %u\n", atomic_lock_count);
203 model_print("atomic trylock count: %u\n", atomic_trylock_count);
204 model_print("atomic unlock count: %u\n", atomic_unlock_count);
205 model_print("atomic notify count: %u\n", atomic_notify_count);
206 model_print("atomic wait count: %u\n", atomic_wait_count);
207 model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
210 /** @return a thread ID for a new Thread */
211 thread_id_t ModelExecution::get_next_id()
213 return priv->next_thread_id++;
216 /** @return the number of user threads created during this execution */
// next_thread_id is post-incremented on each allocation, so it equals the
// count of threads created so far.
217 unsigned int ModelExecution::get_num_threads() const
219 return priv->next_thread_id;
222 /** @return a sequence number for a new ModelAction */
223 modelclock_t ModelExecution::get_next_seq_num()
225 return ++priv->used_sequence_numbers;
228 /** @return the most recently assigned sequence number (does not advance it) */
229 modelclock_t ModelExecution::get_curr_seq_num()
231 return priv->used_sequence_numbers;
234 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
235 void ModelExecution::restore_last_seq_num()
237 priv->used_sequence_numbers--;
241 * @brief Should the current action wake up a given thread?
243 * @param asleep The pending action of the sleeping thread being considered
245 * @return True, if we should wake up the sleeping thread; false otherwise
// Delegates the wake-up decision to the fuzzer, based on what kind of
// action the sleeping thread is blocked on.
247 bool ModelExecution::should_wake_up(const ModelAction * asleep) const
249 /* The sleep is literally sleeping */
250 switch (asleep->get_type()) {
252 if (fuzzer->shouldWake(asleep))
256 case ATOMIC_TIMEDWAIT:
257 if (fuzzer->waitShouldWakeUp(asleep))
// Scan every thread in the scheduler's sleep set and wake those whose
// pending action the fuzzer says should proceed (see should_wake_up).
267 void ModelExecution::wake_up_sleeping_actions()
269 for (unsigned int i = 0;i < get_num_threads();i++) {
270 thread_id_t tid = int_to_id(i);
271 if (scheduler->is_sleep_set(tid)) {
272 Thread *thr = get_thread(tid);
273 ModelAction * pending = thr->get_pending();
274 if (should_wake_up(pending)) {
275 /* Remove this thread from sleep set */
276 scheduler->remove_sleep(thr);
278 if (pending->is_sleep()) {
279 thr->set_wakeup_state(true);
280 } else if (pending->is_wait()) {
281 thr->set_wakeup_state(true);
282 /* Remove this thread from list of waiters */
283 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, pending->get_location());
// Linear scan for this thread's entry in the condvar waiter list.
284 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
285 if (rit->getVal()->get_tid() == tid) {
291 /* Set ETIMEDOUT error */
// A timed wait woken this way reports timeout to the user program.
292 if (pending->is_timedwait())
293 thr->set_return_value(ETIMEDOUT);
/** Record a bug report (heap-allocated; freed by ~model_snapshot_members). */
300 void ModelExecution::assert_bug(const char *msg)
302 priv->bugs.push_back(new bug_message(msg));
306 /** @return True, if any bugs have been reported for this execution */
307 bool ModelExecution::have_bug_reports() const
309 return priv->bugs.size() != 0;
/** @return the list of bug reports collected so far */
312 SnapVector<bug_message *> * ModelExecution::get_bugs() const
318 * Check whether the current trace has triggered an assertion which should halt
321 * @return True, if the execution should be aborted; false otherwise
323 bool ModelExecution::has_asserted() const
325 return priv->asserted;
329 * Trigger a trace assertion which should cause this execution to be halted.
330 * This can be due to a detected bug or due to an infeasibility that should
333 void ModelExecution::set_assert()
335 priv->asserted = true;
339 * Check if we are in a deadlock. Should only be called at the end of an
340 * execution, although it should not give false positives in the middle of an
341 * execution (there should be some ENABLED thread).
343 * @return True if program is in a deadlock; false otherwise
345 bool ModelExecution::is_deadlocked() const
347 bool blocking_threads = false;
// NOTE(review): this extract elides lines here; the full version presumably
// also checks that no thread is enabled before reporting deadlock -- confirm.
348 for (unsigned int i = 0;i < get_num_threads();i++) {
349 thread_id_t tid = int_to_id(i);
352 Thread *t = get_thread(tid);
// A non-model thread with a stashed pending action is blocked on something.
353 if (!t->is_model_thread() && t->get_pending())
354 blocking_threads = true;
356 return blocking_threads;
360 * Check if this is a complete execution. That is, have all thread completed
361 * execution (rather than exiting because sleep sets have forced a redundant
364 * @return True if the execution is complete.
// Complete iff no thread remains enabled.
366 bool ModelExecution::is_complete_execution() const
368 for (unsigned int i = 0;i < get_num_threads();i++)
369 if (is_enabled(int_to_id(i)))
// Materialize a plain (non-atomic) store at `location` as a NONATOMIC_WRITE
// ModelAction so atomic reads can legally read from it. The store's thread
// and clock are recovered from the shadow memory (getStoreThreadAndClock),
// and the action is back-inserted into the write lists with that clock.
374 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
375 uint64_t value = *((const uint64_t *) location);
376 modelclock_t storeclock;
377 thread_id_t storethread;
378 getStoreThreadAndClock(location, &storethread, &storeclock);
// Mark the location as atomic from now on so this conversion happens once.
379 setAtomicStoreFlag(location);
380 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
381 act->set_seq_number(storeclock);
382 add_normal_write_to_lists(act);
383 add_write_to_lists(act);
384 w_modification_order(act);
386 model->get_history()->process_action(act, act->get_tid());
392 * Processes a read model action.
393 * @param curr is the read model action to process.
394 * @param rf_set is the set of model actions we can possibly read from
395 * @return True if the read can be pruned from the thread map list.
397 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
399 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
// If the location has a pending plain store, convert it into a ModelAction
// and make it a read-from candidate.
400 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
401 if (hasnonatomicstore) {
402 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
403 rf_set->push_back(nonatomicstore);
406 // Remove writes that violate read modification order
// Swap-with-back removal keeps the scan O(n) without preserving order.
409 while (i < rf_set->size()) {
410 ModelAction * rf = (*rf_set)[i];
411 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
412 (*rf_set)[i] = rf_set->back();
// Let the fuzzer pick which remaining write this read observes.
419 int index = fuzzer->selectWrite(curr, rf_set);
421 ModelAction *rf = (*rf_set)[index];
424 bool canprune = false;
425 if (r_modification_order(curr, rf, priorset, &canprune)) {
// Commit the mo edges implied by the chosen rf.
426 for(unsigned int i=0;i<priorset->size();i++) {
427 mo_graph->addEdge((*priorset)[i], rf);
430 get_thread(curr)->set_return_value(rf->get_write_value());
432 //Update acquire fence clock vector
433 ClockVector * hbcv = get_hb_from_write(rf);
435 get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
436 return canprune && (curr->get_type() == ATOMIC_READ);
// Chosen rf was inconsistent: drop it and (presumably) retry another.
439 (*rf_set)[index] = rf_set->back();
445 * Processes a lock, trylock, or unlock model action. @param curr is
446 * the read model action to process.
448 * The try lock operation checks whether the lock is taken. If not,
449 * it falls to the normal lock operation case. If so, it returns
452 * The lock operation has already been checked that it is enabled, so
453 * it just grabs the lock and synchronizes with the previous unlock.
455 * The unlock operation has to re-enable all of the threads that are
456 * waiting on the lock.
458 * @return True if synchronization was updated; false otherwise
460 bool ModelExecution::process_mutex(ModelAction *curr)
462 cdsc::mutex *mutex = curr->get_mutex();
463 struct cdsc::mutex_state *state = NULL;
466 state = mutex->get_state();
468 switch (curr->get_type()) {
469 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff the mutex is currently unowned; return value 1/0
// is handed back to the user program.
470 bool success = !state->locked;
471 curr->set_try_lock(success);
473 get_thread(curr)->set_return_value(0);
476 get_thread(curr)->set_return_value(1);
478 //otherwise fall into the lock case
480 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
481 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
482 // assert_bug("Lock access before initialization");
484 // TODO: lock count for recursive mutexes
// Owner is recorded as the acquiring Thread pointer.
485 state->locked = get_thread(curr);
486 ModelAction *unlock = get_last_unlock(curr);
487 //synchronize with the previous unlock statement
488 if (unlock != NULL) {
489 synchronize(unlock, curr);
// (wait case) Release the mutex, wake lock-waiters, then put this thread
// on the condvar waiter list and to sleep.
495 Thread *curr_thrd = get_thread(curr);
496 /* wake up the other threads */
497 for (unsigned int i = 0;i < get_num_threads();i++) {
498 Thread *t = get_thread(int_to_id(i));
499 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
503 /* unlock the lock - after checking who was waiting on it */
504 state->locked = NULL;
506 /* disable this thread */
507 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
508 waiters->push_back(curr);
509 curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
511 if (fuzzer->waitShouldFail(curr)) // If wait should fail spuriously,
512 scheduler->add_sleep(curr_thrd); // place this thread into THREAD_SLEEP_SET
514 scheduler->sleep(curr_thrd);
518 case ATOMIC_TIMEDWAIT: {
519 Thread *curr_thrd = get_thread(curr);
// Fuzzer may decide the timed wait times out immediately.
520 if (!fuzzer->randomizeWaitTime(curr)) {
521 curr_thrd->set_return_value(ETIMEDOUT);
525 /* wake up the other threads */
526 for (unsigned int i = 0;i < get_num_threads();i++) {
527 Thread *t = get_thread(int_to_id(i));
528 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
532 /* unlock the lock - after checking who was waiting on it */
533 state->locked = NULL;
535 /* disable this thread */
536 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
537 waiters->push_back(curr);
538 curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
539 scheduler->add_sleep(curr_thrd);
542 case ATOMIC_UNLOCK: {
543 // TODO: lock count for recursive mutexes
544 /* wake up the other threads */
545 Thread *curr_thrd = get_thread(curr);
546 for (unsigned int i = 0;i < get_num_threads();i++) {
547 Thread *t = get_thread(int_to_id(i));
548 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
552 /* unlock the lock - after checking who was waiting on it */
553 state->locked = NULL;
556 case ATOMIC_NOTIFY_ALL: {
557 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
558 //activate all the waiting threads
559 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
560 Thread * thread = get_thread(rit->getVal());
561 if (thread->get_state() != THREAD_COMPLETED)
562 scheduler->wake(thread);
563 thread->set_wakeup_state(true);
568 case ATOMIC_NOTIFY_ONE: {
569 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
// Fuzzer chooses which single waiter to notify.
570 if (waiters->size() != 0) {
571 Thread * thread = fuzzer->selectNotify(waiters);
572 if (thread->get_state() != THREAD_COMPLETED)
573 scheduler->wake(thread);
574 thread->set_wakeup_state(true);
586 * Process a write ModelAction
587 * @param curr The ModelAction to process
588 * Updates the modification order graph for this write; returns void here
588 * (an earlier revision returned whether the mo_graph was updated).
590 void ModelExecution::process_write(ModelAction *curr)
592 w_modification_order(curr);
593 get_thread(curr)->set_return_value(VALUE_NONE);
597 * Process a fence ModelAction
598 * @param curr The ModelAction to process
599 * Returns void here; only the acquire case does work in this function.
601 void ModelExecution::process_fence(ModelAction *curr)
604 * fence-relaxed: no-op
605 * fence-release: only log the occurence (not in this function), for
606 * use in later synchronization
607 * fence-acquire (this function): search for hypothetical release
609 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
// Acquire fence inherits the happens-before accumulated from prior
// acquire-reads (the per-thread acq_fence_cv).
611 if (curr->is_acquire()) {
612 curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
617 * @brief Process the current action for thread-related activity
619 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
620 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
621 * synchronization, etc. This function is a no-op for non-THREAD actions
622 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
624 * @param curr The current action
625 * Returns void in this version.
627 void ModelExecution::process_thread_action(ModelAction *curr)
629 switch (curr->get_type()) {
630 case THREAD_CREATE: {
// New Thread is allocated here; id comes from get_next_id().
631 thrd_t *thrd = (thrd_t *)curr->get_location();
632 struct thread_params *params = (struct thread_params *)curr->get_value();
633 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
634 curr->set_thread_operand(th);
636 th->set_creation(curr);
639 case PTHREAD_CREATE: {
// pthread ids are dense integers; the user-visible pthread_t cell gets
// the counter value, and pthread_map maps counter-1 -> Thread*.
640 (*(uint32_t *)curr->get_location()) = pthread_counter++;
642 struct pthread_params *params = (struct pthread_params *)curr->get_value();
643 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
644 curr->set_thread_operand(th);
646 th->set_creation(curr);
648 if ( pthread_map.size() < pthread_counter )
649 pthread_map.resize( pthread_counter );
650 pthread_map[ pthread_counter-1 ] = th;
// (join cases) Joiner synchronizes with the finished thread's last action.
655 Thread *blocking = curr->get_thread_operand();
656 ModelAction *act = get_last_action(blocking->get_id());
657 synchronize(act, curr);
661 Thread *blocking = curr->get_thread_operand();
662 ModelAction *act = get_last_action(blocking->get_id());
663 synchronize(act, curr);
667 case THREADONLY_FINISH:
668 case THREAD_FINISH: {
669 Thread *th = get_thread(curr);
670 if (curr->get_type() == THREAD_FINISH &&
671 th == model->getInitThread()) {
677 /* Wake up any joining threads */
678 for (unsigned int i = 0;i < get_num_threads();i++) {
679 Thread *waiting = get_thread(int_to_id(i));
680 if (waiting->waiting_on() == th &&
681 waiting->get_pending()->is_thread_join())
682 scheduler->wake(waiting);
// (sleep case) Stash the action and park the thread in the sleep set.
691 Thread *th = get_thread(curr);
692 th->set_pending(curr);
693 scheduler->add_sleep(th);
702 * Initialize the current action by performing one or more of the following
703 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
704 * manipulating backtracking sets, allocating and
705 * initializing clock vectors, and computing the promises to fulfill.
707 * @param curr The current action, as passed from the user context; may be
708 * freed/invalidated after the execution of this function, with a different
709 * action "returned" its place (pass-by-reference)
710 * @return True if curr is a newly-explored action; false otherwise
712 bool ModelExecution::initialize_curr_action(ModelAction **curr)
// RMW completion/cancellation merges into the pending RMWR (process_rmw)
// rather than creating a fresh action.
714 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
715 ModelAction *newcurr = process_rmw(*curr);
721 ModelAction *newcurr = *curr;
723 newcurr->set_seq_number(get_next_seq_num());
724 /* Always compute new clock vector */
725 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
727 /* Assign most recent release fence */
728 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
730 return true; /* This was a new ModelAction */
735 * @brief Establish reads-from relation between two actions
737 * Perform basic operations involved with establishing a concrete rf relation,
738 * including setting the ModelAction data and checking for release sequences.
740 * @param act The action that is reading (must be a read)
741 * @param rf The action from which we are reading (must be a write)
743 * Returns void in this version (synchronization is applied in place).
746 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
749 ASSERT(rf->is_write());
751 act->set_read_from(rf);
// Acquire read synchronizes: merge the write's release-sequence clock
// vector (get_hb_from_write) into the reader's cv.
752 if (act->is_acquire()) {
753 ClockVector *cv = get_hb_from_write(rf);
756 act->get_cv()->merge(cv);
761 * @brief Synchronizes two actions
763 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
764 * This function performs the synchronization as well as providing other hooks
765 * for other checks along with synchronization.
767 * @param first The left-hand side of the synchronizes-with relation
768 * @param second The right-hand side of the synchronizes-with relation
769 * @return True if the synchronization was successful (i.e., was consistent
770 * with the execution order); false otherwise
772 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// second must not precede first in the execution order (seq-number order).
774 if (*second < *first) {
775 ASSERT(0); //This should not happen
778 return second->synchronize_with(first);
782 * @brief Check whether a model action is enabled.
784 * Checks whether an operation would be successful (i.e., is a lock already
785 * locked, or is the joined thread already complete).
787 * For yield-blocking, yields are never enabled.
789 * @param curr is the ModelAction to check whether it is enabled.
790 * @return a bool that indicates whether the action is enabled.
792 bool ModelExecution::check_action_enabled(ModelAction *curr) {
793 switch (curr->get_type()) {
// (lock case) Recursive re-lock by the current owner is permitted.
795 cdsc::mutex *lock = curr->get_mutex();
796 struct cdsc::mutex_state *state = lock->get_state();
798 Thread *lock_owner = (Thread *)state->locked;
799 Thread *curr_thread = get_thread(curr);
800 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
// (join case) Join blocks until the target thread completes.
810 Thread *blocking = curr->get_thread_operand();
811 if (!blocking->is_complete()) {
// (sleep case) Fuzzer may let a sleep proceed without sleeping.
817 if (!fuzzer->shouldSleep(curr))
829 * This is the heart of the model checker routine. It performs model-checking
830 * actions corresponding to a given "current action." Among other processes, it
831 * calculates reads-from relationships, updates synchronization clock vectors,
832 * forms a memory_order constraints graph, and handles replay/backtrack
833 * execution when running permutations of previously-observed executions.
835 * @param curr The current action to process
836 * @return The ModelAction that is actually executed; may be different than
839 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
842 bool newly_explored = initialize_curr_action(&curr);
846 wake_up_sleeping_actions();
848 SnapVector<ModelAction *> * rf_set = NULL;
849 bool canprune = false;
850 /* Build may_read_from set for newly-created actions */
851 if (curr->is_read() && newly_explored) {
852 rf_set = build_may_read_from(curr);
853 canprune = process_read(curr, rf_set);
856 ASSERT(rf_set == NULL);
858 /* Add the action to lists if not the second part of a rmw */
859 if (newly_explored) {
861 record_atomic_stats(curr);
863 add_action_to_lists(curr, canprune);
866 if (curr->is_write())
867 add_write_to_lists(curr);
// Dispatch to the per-kind processing routines.
869 process_thread_action(curr);
871 if (curr->is_write())
874 if (curr->is_fence())
877 if (curr->is_mutex_op())
883 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
// Merges `act` into the thread's pending read half and adds the RMW
// atomicity edge (reads_from --mo--> rmw) to the cycle graph.
884 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
885 ModelAction *lastread = get_last_action(act->get_tid());
886 lastread->process_rmw(act);
888 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
894 * @brief Updates the mo_graph with the constraints imposed from the current
897 * Basic idea is the following: Go through each other thread and find
898 * the last action that happened before our read. Two cases:
900 * -# The action is a write: that write must either occur before
901 * the write we read from or be the write we read from.
902 * -# The action is a read: the write that that action read from
903 * must occur before the write we read from or be the same write.
905 * @param curr The current action. Must be a read.
906 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
907 * @param check_only If true, then only check whether the current action satisfies
908 * read modification order or not, without modifiying priorset and canprune.
910 * @return True if modification order edges were added; false otherwise
913 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
914 SnapVector<ModelAction *> * priorset, bool * canprune)
916 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
917 ASSERT(curr->is_read());
919 /* Last SC fence in the current thread */
920 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
922 int tid = curr->get_tid();
924 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
925 if ((int)thrd_lists->size() <= tid) {
926 uint oldsize = thrd_lists->size();
927 thrd_lists->resize(priv->next_thread_id);
// Placement-new each new slot, then repair child parent pointers that the
// resize may have invalidated (see fixup_action_list).
928 for(uint i = oldsize;i < priv->next_thread_id;i++)
929 new (&(*thrd_lists)[i]) action_list_t();
931 fixup_action_list(thrd_lists);
934 ModelAction *prev_same_thread = NULL;
935 /* Iterate over all threads */
// Iteration starts at curr's own thread and wraps around all tids.
936 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
937 /* Last SC fence in thread tid */
938 ModelAction *last_sc_fence_thread_local = NULL;
940 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
942 /* Last SC fence in thread tid, before last SC fence in current thread */
943 ModelAction *last_sc_fence_thread_before = NULL;
944 if (last_sc_fence_local)
945 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
947 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
948 if (prev_same_thread != NULL &&
949 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
950 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
954 /* Iterate over actions in thread, starting from most recent */
955 action_list_t *list = &(*thrd_lists)[tid];
956 sllnode<ModelAction *> * rit;
957 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
958 ModelAction *act = rit->getVal();
963 /* Don't want to add reflexive edges on 'rf' */
964 if (act->equals(rf)) {
965 if (act->happens_before(curr))
971 if (act->is_write()) {
972 /* C++, Section 29.3 statement 5 */
// In each SC-fence case: rf must not be mo-reachable from act, else the
// candidate rf is inconsistent; otherwise act is mo-prior to rf.
973 if (curr->is_seqcst() && last_sc_fence_thread_local &&
974 *act < *last_sc_fence_thread_local) {
975 if (mo_graph->checkReachable(rf, act))
977 priorset->push_back(act);
980 /* C++, Section 29.3 statement 4 */
981 else if (act->is_seqcst() && last_sc_fence_local &&
982 *act < *last_sc_fence_local) {
983 if (mo_graph->checkReachable(rf, act))
985 priorset->push_back(act);
988 /* C++, Section 29.3 statement 6 */
989 else if (last_sc_fence_thread_before &&
990 *act < *last_sc_fence_thread_before) {
991 if (mo_graph->checkReachable(rf, act))
993 priorset->push_back(act);
999 * Include at most one act per-thread that "happens
1002 if (act->happens_before(curr)) {
1004 if (last_sc_fence_local == NULL ||
1005 (*last_sc_fence_local < *act)) {
1006 prev_same_thread = act;
1009 if (act->is_write()) {
1010 if (mo_graph->checkReachable(rf, act))
1012 priorset->push_back(act);
// A prior read constrains mo via the write it read from.
1014 ModelAction *prevrf = act->get_reads_from();
1015 if (!prevrf->equals(rf)) {
1016 if (mo_graph->checkReachable(rf, prevrf))
1018 priorset->push_back(prevrf);
1020 if (act->get_tid() == curr->get_tid()) {
1021 //Can prune curr from obj list
1034 * Updates the mo_graph with the constraints imposed from the current write.
1036 * Basic idea is the following: Go through each other thread and find
1037 * the latest action that happened before our write. Two cases:
1039 * (1) The action is a write => that write must occur before
1042 * (2) The action is a read => the write that that action read from
1043 * must occur before the current write.
1045 * This method also handles two other issues:
1047 * (I) Sequential Consistency: Making sure that if the current write is
1048 * seq_cst, that it occurs after the previous seq_cst write.
1050 * (II) Sending the write back to non-synchronizing reads.
1052 * @param curr The current action. Must be a write.
1053 * @param send_fv A vector for stashing reads to which we may pass our future
1054 * value. If NULL, then don't record any future values.
1055 * @return True if modification order edges were added; false otherwise
1057 void ModelExecution::w_modification_order(ModelAction *curr)
1059 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1061 ASSERT(curr->is_write());
// Edges are accumulated here and committed in one addEdges() call at the end.
1063 SnapList<ModelAction *> edgeset;
1065 if (curr->is_seqcst()) {
1066 /* We have to at least see the last sequentially consistent write,
1067    so we are initialized. */
1068 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1069 if (last_seq_cst != NULL) {
1070 edgeset.push_back(last_seq_cst);
1072 //update map for next query
1073 obj_last_sc_map.put(curr->get_location(), curr);
1076 /* Last SC fence in the current thread */
1077 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1079 /* Iterate over all threads */
1080 for (i = 0;i < thrd_lists->size();i++) {
1081 /* Last SC fence in thread i, before last SC fence in current thread */
1082 ModelAction *last_sc_fence_thread_before = NULL;
1083 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1084 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1086 /* Iterate over actions in thread, starting from most recent */
1087 action_list_t *list = &(*thrd_lists)[i];
1088 sllnode<ModelAction*>* rit;
1089 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1090 ModelAction *act = rit->getVal();
1093 * 1) If RMW and it actually read from something, then we
1094 * already have all relevant edges, so just skip to next
1097 * 2) If RMW and it didn't read from anything, we should
1098 * whatever edge we can get to speed up convergence.
1100 * 3) If normal write, we need to look at earlier actions, so
1101 * continue processing list.
1103 if (curr->is_rmw()) {
1104 if (curr->get_reads_from() != NULL)
1112 /* C++, Section 29.3 statement 7 */
1113 if (last_sc_fence_thread_before && act->is_write() &&
1114 *act < *last_sc_fence_thread_before) {
1115 edgeset.push_back(act);
1120 * Include at most one act per-thread that "happens
1123 if (act->happens_before(curr)) {
1125 * Note: if act is RMW, just add edge:
1127 * The following edge should be handled elsewhere:
1128 * readfrom(act) --mo--> act
1130 if (act->is_write())
1131 edgeset.push_back(act);
1132 else if (act->is_read()) {
1133 //if previous read accessed a null, just keep going
1134 edgeset.push_back(act->get_reads_from());
1140 mo_graph->addEdges(&edgeset, curr);
1145 * Computes the clock vector that happens before propagates from this write.
1147 * @param rf The action that might be part of a release sequence. Must be a
1149 * @return ClockVector of happens before relation.
1152 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
// Phase 1: walk back through the chain of RMWs that lack a cached release
// clock vector (rfcv), collecting them in processset; stop at a write that
// terminates the release sequence or already has an rfcv.
1153 SnapVector<ModelAction *> * processset = NULL;
1154 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1155 ASSERT(rf->is_write());
1156 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1158 if (processset == NULL)
1159 processset = new SnapVector<ModelAction *>();
1160 processset->push_back(rf);
// Phase 2: compute the base clock vector for the terminating write, then
// (presumably) replay it forward through processset, caching rfcv values.
1163 int i = (processset == NULL) ? 0 : processset->size();
1165 ClockVector * vec = NULL;
1167 if (rf->get_rfcv() != NULL) {
1168 vec = rf->get_rfcv();
1169 } else if (rf->is_acquire() && rf->is_release()) {
1171 } else if (rf->is_release() && !rf->is_rmw()) {
1173 } else if (rf->is_release()) {
1174 //have rmw that is release and doesn't have a rfcv
1175 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1178 //operation that isn't release
1179 if (rf->get_last_fence_release()) {
1181 vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
1183 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1187 vec = new ClockVector(NULL, NULL);
1190 vec = new ClockVector(vec, NULL);
1197 rf = (*processset)[i];
1201 if (processset != NULL)
1207 * Performs various bookkeeping operations for the current ModelAction. For
1208 * instance, adds action to the per-object, per-thread action vector and to the
1209 * action trace list of all thread actions.
1211 * @param act is the ModelAction to add.
1213 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1215 int tid = id_to_int(act->get_tid());
// SC fences and unlocks are tracked in the flat per-location obj_map.
1216 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1217 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1218 act->setActionRef(list->add_back(act));
1221 // Update action trace, a total order of all actions
1222 action_trace.addAction(act);
1225 // Update obj_thrd_map, a per location, per thread, order of actions
1226 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1227 if ((int)vec->size() <= tid) {
1228 uint oldsize = vec->size();
1229 vec->resize(priv->next_thread_id);
1230 for(uint i = oldsize;i < priv->next_thread_id;i++)
1231 new (&(*vec)[i]) action_list_t();
// Repair child parent pointers invalidated by the resize.
1233 fixup_action_list(vec);
// Pruned reads/writes (canprune) are deliberately left out of the
// per-thread object list.
1235 if (!canprune && (act->is_read() || act->is_write()))
1236 (*vec)[tid].addAction(act);
1238 // Update thrd_last_action, the last action taken by each thread
1239 if ((int)thrd_last_action.size() <= tid)
1240 thrd_last_action.resize(get_num_threads());
1241 thrd_last_action[tid] = act;
1243 // Update thrd_last_fence_release, the last release fence taken by each thread
1244 if (act->is_fence() && act->is_release()) {
1245 if ((int)thrd_last_fence_release.size() <= tid)
1246 thrd_last_fence_release.resize(get_num_threads());
1247 thrd_last_fence_release[tid] = act;
// Waits are also indexed under their mutex location (stored in the
// action's value field) so unlock/notify can find them.
1250 if (act->is_wait()) {
1251 void *mutex_loc = (void *) act->get_value();
1252 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
// Append `act` to `list`; file-local helper used when (lazily) inserting
// actions into existing lists.
1256 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1257 list->addAction(act);
// Append `act` to `list`, first (re)creating the action's clock vector.
1260 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1261 act->create_cv(NULL);
1262 list->addAction(act);
1266 * Performs various bookkeeping operations for a normal write. The
1267 * complication is that we are typically inserting a normal write
1268 * lazily, so we need to insert it into the middle of lists.
1270 * @param act is the ModelAction to add.
1273 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1275 int tid = id_to_int(act->get_tid());
1276 insertIntoActionListAndSetCV(&action_trace, act);
1278 // Update obj_thrd_map, a per location, per thread, order of actions
1279 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
// Grow the per-thread vector on demand (same pattern as add_action_to_lists).
1280 if (tid >= (int)vec->size()) {
1281 uint oldsize =vec->size();
1282 vec->resize(priv->next_thread_id);
1283 for(uint i=oldsize;i<priv->next_thread_id;i++)
1284 new (&(*vec)[i]) action_list_t();
1286 fixup_action_list(vec);
1288 insertIntoActionList(&(*vec)[tid],act);
1290 ModelAction * lastact = thrd_last_action[tid];
1291 // Update thrd_last_action, the last action taken by each thread
// Only take over as the thread's last action when this lazily-inserted
// write shares the current last action's sequence number — presumably it
// then represents the thread's most recent step — TODO confirm.
1292 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1293 thrd_last_action[tid] = act;
// Record an atomic write in obj_wr_thrd_map, the per-location, per-thread
// index of writes consulted by build_may_read_from().
1297 void ModelExecution::add_write_to_lists(ModelAction *write) {
1298 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1299 int tid = id_to_int(write->get_tid());
// Grow the per-thread vector on demand; placement-new constructs new slots.
1300 if (tid >= (int)vec->size()) {
1301 uint oldsize =vec->size();
1302 vec->resize(priv->next_thread_id);
1303 for(uint i=oldsize;i<priv->next_thread_id;i++)
1304 new (&(*vec)[i]) simple_action_list_t();
// Keep the list node so removeAction() can erase this write in O(1).
1306 write->setActionRef((*vec)[tid].add_back(write));
1310 * @brief Get the last action performed by a particular Thread
1311 * @param tid The thread ID of the Thread in question
1312 * @return The last action in the thread
1314 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1316 int threadid = id_to_int(tid);
// An out-of-range tid means the thread has no recorded action yet.
1317 if (threadid < (int)thrd_last_action.size())
1318 return thrd_last_action[id_to_int(tid)];
1324 * @brief Get the last fence release performed by a particular Thread
1325 * @param tid The thread ID of the Thread in question
1326 * @return The last fence release in the thread, if one exists; NULL otherwise
1328 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1330 int threadid = id_to_int(tid);
// An out-of-range tid means the thread has issued no release fence yet.
1331 if (threadid < (int)thrd_last_fence_release.size())
1332 return thrd_last_fence_release[id_to_int(tid)];
1338 * Gets the last memory_order_seq_cst write (in the total global sequence)
1339 * performed on a particular object (i.e., memory location), not including the
1341 * @param curr The current ModelAction; also denotes the object location to
1343 * @return The last seq_cst write
1345 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1347 void *location = curr->get_location();
// obj_last_sc_map is maintained elsewhere; removeAction() clears stale entries.
1348 return obj_last_sc_map.get(location);
1352 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1353 * performed in a particular thread, prior to a particular fence.
1354 * @param tid The ID of the thread to check
1355 * @param before_fence The fence from which to begin the search; if NULL, then
1356 * search for the most recent fence in the thread.
1357 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1359 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1361 /* All fences should have location FENCE_LOCATION */
1362 simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
// Walk the global fence list backwards from its tail.
1367 sllnode<ModelAction*>* rit = list->end();
// When before_fence is given, first rewind to it so the search below only
// considers strictly earlier fences.
1370 for (;rit != NULL;rit=rit->getPrev())
1371 if (rit->getVal() == before_fence)
1374 ASSERT(rit->getVal() == before_fence);
// Scan backwards for the nearest seq_cst fence belonging to this thread.
1378 for (;rit != NULL;rit=rit->getPrev()) {
1379 ModelAction *act = rit->getVal();
1380 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1387 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1388 * location). This function identifies the mutex according to the current
1389 * action, which is presumed to perform on the same mutex.
1390 * @param curr The current ModelAction; also denotes the object location to
1392 * @return The last unlock operation
1394 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1396 void *location = curr->get_location();
1398 simple_action_list_t *list = obj_map.get(location);
1402 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1403 sllnode<ModelAction*>* rit;
// Despite the name, wait actions are matched too (a wait releases the
// mutex, and add_action_to_lists indexes waits under the mutex location).
1404 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1405 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1406 return rit->getVal();
// Return the thread's most recent action, falling back to the action that
// created the thread when no last action is available.
1410 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1412 ModelAction *parent = get_last_action(tid);
1414 parent = get_thread(tid)->get_creation();
1419 * Returns the clock vector for a given thread.
1420 * @param tid The thread whose clock vector we want
1421 * @return Desired clock vector
1423 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
// The thread's clock vector is taken from its last action (or the creating
// action, per get_parent_action); NULL if neither exists.
1425 ModelAction *firstaction=get_parent_action(tid);
1426 return firstaction != NULL ? firstaction->get_cv() : NULL;
// Compare two values considering only their low `size` bytes (1, 2, or 4 are
// handled explicitly); used by build_may_read_from() to decide whether a CAS
// would succeed against a candidate write's value.
1429 bool valequals(uint64_t val1, uint64_t val2, int size) {
1432 return ((uint8_t)val1) == ((uint8_t)val2);
1434 return ((uint16_t)val1) == ((uint16_t)val2);
1436 return ((uint32_t)val1) == ((uint32_t)val2);
1446 * Build up an initial set of all past writes that this 'read' action may read
1447 * from, as well as any previously-observed future values that must still be valid.
1449 * @param curr is the current ModelAction that we are exploring; it must be a
* read.
* @return a newly-allocated vector of candidate writes (caller takes ownership).
1452 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1454 SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1456 ASSERT(curr->is_read());
1458 ModelAction *last_sc_write = NULL;
// A seq_cst read is constrained by the last seq_cst write to this location.
1460 if (curr->is_seqcst())
1461 last_sc_write = get_last_seq_cst_write(curr);
1463 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1465 /* Iterate over all threads */
1466 if (thrd_lists != NULL)
1467 for (i = 0;i < thrd_lists->size();i++) {
1468 /* Iterate over actions in thread, starting from most recent */
1469 simple_action_list_t *list = &(*thrd_lists)[i];
1470 sllnode<ModelAction *> * rit;
1471 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1472 ModelAction *act = rit->getVal();
1477 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1478 bool allow_read = true;
1480 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1483 /* Need to check whether we will have two RMW reading from the same value */
1484 if (curr->is_rmwr()) {
1485 /* It is okay if we have a failing CAS */
// A CAS that would fail (value mismatch) does not claim the RMW slot,
// so it is exempt from the one-RMW-per-write rule below.
1486 if (!curr->is_rmwrcas() ||
1487 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1488 //Need to make sure we aren't the second RMW
// mo_graph records which RMW already reads from this write, if any.
1489 CycleNode * node = mo_graph->getNode_noCreate(act);
1490 if (node != NULL && node->getRMW() != NULL) {
1491 //we are the second RMW
1498 /* Only add feasible reads */
1499 rf_set->push_back(act);
1502 /* Include at most one act per-thread that "happens before" curr */
1503 if (act->happens_before(curr))
1508 if (DBG_ENABLED()) {
1509 model_print("Reached read action:\n");
1511 model_print("End printing read_from_past\n");
// Print a human-readable table of the actions in `list` and a hash
// accumulated over the executed actions (used when dumping traces).
1516 static void print_list(action_list_t *list)
1518 sllnode<ModelAction*> *it;
1520 model_print("------------------------------------------------------------------------------------\n");
1521 model_print("# t Action type MO Location Value Rf CV\n");
1522 model_print("------------------------------------------------------------------------------------\n");
1524 unsigned int hash = 0;
1526 for (it = list->begin();it != NULL;it=it->getNext()) {
1527 const ModelAction *act = it->getVal();
// Sequence number 0 marks actions that were never executed — skip those.
1528 if (act->get_seq_number() > 0)
1530 hash = hash^(hash<<3)^(it->getVal()->hash());
1532 model_print("HASH %u\n", hash);
1533 model_print("------------------------------------------------------------------------------------\n");
1536 #if SUPPORT_MOD_ORDER_DUMP
// Write the modification-order graph to "<filename>.dot" (Graphviz format),
// adding reads-from edges (red) and per-thread sequenced-before edges (blue).
// NOTE(review): sprintf into a fixed-size buffer — consider snprintf if
// filename length is not otherwise bounded.
1537 void ModelExecution::dumpGraph(char *filename)
1540 sprintf(buffer, "%s.dot", filename);
1541 FILE *file = fopen(buffer, "w");
1542 fprintf(file, "digraph %s {\n", filename);
1543 mo_graph->dumpNodes(file);
// One slot per thread: remembers the previous action seen for that thread
// so we can draw the sequenced-before chain.
1544 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1546 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1547 ModelAction *act = it->getVal();
1548 if (act->is_read()) {
1549 mo_graph->dot_print_node(file, act);
1550 mo_graph->dot_print_edge(file,
1551 act->get_reads_from(),
1553 "label=\"rf\", color=red, weight=2");
1555 if (thread_array[act->get_tid()]) {
// NOTE(review): indexing mixes act->get_tid() and id_to_int(act->get_tid());
// harmless if they coincide, but worth making consistent.
1556 mo_graph->dot_print_edge(file,
1557 thread_array[id_to_int(act->get_tid())],
1559 "label=\"sb\", color=blue, weight=400");
1562 thread_array[act->get_tid()] = act;
1564 fprintf(file, "}\n");
1565 model_free(thread_array);
1570 /** @brief Prints an execution trace summary. */
1571 void ModelExecution::print_summary()
// Optionally dump the modification-order graph for this execution first.
1573 #if SUPPORT_MOD_ORDER_DUMP
1574 char buffername[100];
1575 sprintf(buffername, "exec%04u", get_execution_number());
1576 mo_graph->dumpGraphToFile(buffername);
1577 sprintf(buffername, "graph%04u", get_execution_number());
1578 dumpGraph(buffername);
// Annotate the header with noteworthy execution states before the trace.
1581 model_print("Execution trace %d:", get_execution_number());
1582 if (scheduler->all_threads_sleeping())
1583 model_print(" SLEEP-SET REDUNDANT");
1584 if (have_bug_reports())
1585 model_print(" DETECTED BUG(S)");
1589 print_list(&action_trace);
// Print only the tail of the execution trace (the most recent actions, up to
// a bound taken from elided local state — presumably a `length` limit; the
// counter/length declarations are not visible here — TODO confirm).
1594 void ModelExecution::print_tail()
1596 model_print("Execution trace %d:\n", get_execution_number());
1598 sllnode<ModelAction*> *it;
1600 model_print("------------------------------------------------------------------------------------\n");
1601 model_print("# t Action type MO Location Value Rf CV\n");
1602 model_print("------------------------------------------------------------------------------------\n");
1604 unsigned int hash = 0;
// Collect the last actions by walking backwards, pushing to the front so
// the final printout is in forward (program) order.
1608 SnapList<ModelAction *> list;
1609 for (it = action_trace.end();it != NULL;it = it->getPrev()) {
1610 if (counter > length)
1613 ModelAction * act = it->getVal();
1614 list.push_front(act);
1618 for (it = list.begin();it != NULL;it=it->getNext()) {
1619 const ModelAction *act = it->getVal();
// Sequence number 0 marks actions that were never executed — skip those.
1620 if (act->get_seq_number() > 0)
1622 hash = hash^(hash<<3)^(it->getVal()->hash());
1624 model_print("HASH %u\n", hash);
1625 model_print("------------------------------------------------------------------------------------\n");
1629 * Add a Thread to the system for the first time. Should only be called once
1631 * @param t The Thread to add
1633 void ModelExecution::add_thread(Thread *t)
1635 unsigned int i = id_to_int(t->get_id());
1636 if (i >= thread_map.size())
1637 thread_map.resize(i + 1);
// Model-checker internal threads are tracked in thread_map but are never
// handed to the scheduler.
1639 if (!t->is_model_thread())
1640 scheduler->add_thread(t);
1644 * @brief Get a Thread reference by its ID
1645 * @param tid The Thread's ID
1646 * @return A Thread reference
1648 Thread * ModelExecution::get_thread(thread_id_t tid) const
1650 unsigned int i = id_to_int(tid);
// Bounds-checked lookup; unknown ids fall through past the visible return.
1651 if (i < thread_map.size())
1652 return thread_map[i];
1657 * @brief Get a reference to the Thread in which a ModelAction was executed
1658 * @param act The ModelAction
1659 * @return A Thread reference
1661 Thread * ModelExecution::get_thread(const ModelAction *act) const
// Delegate to the by-id overload using the action's recorded thread id.
1663 return get_thread(act->get_tid());
1667 * @brief Get a Thread reference by its pthread ID
1668 * @param pid The pthread's ID
1669 * @return A Thread reference
1671 Thread * ModelExecution::get_pthread(pthread_t pid) {
1672 // pid 1 is reserved for the main thread, pthread ids should start from 2
// Fast path: presumably small pids map directly onto model thread ids —
// the guarding condition is on an elided line; TODO confirm.
1674 return get_thread(pid);
// Otherwise decode the pthread_t payload and consult pthread_map, guarded
// by the number of pthreads created so far.
1681 uint32_t thread_id = x.v;
1683 if (thread_id < pthread_counter + 1)
1684 return pthread_map[thread_id];
1690 * @brief Check if a Thread is currently enabled
1691 * @param t The Thread to check
1692 * @return True if the Thread is currently enabled
1694 bool ModelExecution::is_enabled(Thread *t) const
// The scheduler owns the enabled set; just delegate.
1696 return scheduler->is_enabled(t);
1700 * @brief Check if a Thread is currently enabled
1701 * @param tid The ID of the Thread to check
1702 * @return True if the Thread is currently enabled
1704 bool ModelExecution::is_enabled(thread_id_t tid) const
// The scheduler owns the enabled set; just delegate.
1706 return scheduler->is_enabled(tid);
1710 * @brief Select the next thread to execute based on the current action
1712 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1713 * actions should be followed by the execution of their child thread. In either
1714 * case, the current action should determine the next thread schedule.
1716 * @param curr The current action
1717 * @return The next thread to run, if the current action will determine this
1718 * selection; otherwise NULL
1720 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1722 /* Do not split atomic RMW */
// The read half of an RMW must be followed immediately by its write half,
// so keep scheduling the same thread.
1723 if (curr->is_rmwr())
1724 return get_thread(curr);
1725 /* Follow CREATE with the created thread */
1726 /* which is not needed, because model.cc takes care of this */
1727 if (curr->get_type() == THREAD_CREATE)
1728 return curr->get_thread_operand();
1729 if (curr->get_type() == PTHREAD_CREATE) {
1730 return curr->get_thread_operand();
1736 * Takes the next step in the execution, if possible.
1737 * @param curr The current step to take
1738 * @return Returns the next Thread to run, if any; NULL if this execution
1741 Thread * ModelExecution::take_step(ModelAction *curr)
1743 Thread *curr_thrd = get_thread(curr);
1744 ASSERT(curr_thrd->get_state() == THREAD_READY);
1746 ASSERT(check_action_enabled(curr)); /* May have side effects? */
// check_current_action() may return a different ModelAction than the one
// passed in; use its result from here on.
1747 curr = check_current_action(curr);
1750 /* Process this action in ModelHistory for records */
1752 model->get_history()->process_action( curr, curr->get_tid() );
// Blocked or finished threads are removed from the scheduler's pool.
1754 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1755 scheduler->remove_thread(curr_thrd);
1757 return action_select_next_thread(curr);
1760 /** This method removes references to an Action before we delete it. */
1762 void ModelExecution::removeAction(ModelAction *act) {
// Each removal below mirrors an insertion made in add_action_to_lists /
// add_write_to_lists; the stored ActionRef gives O(1) erasure.
1764 action_trace.removeAction(act);
1767 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1768 (*vec)[act->get_tid()].removeAction(act);
// seq_cst fences and unlocks were indexed in obj_map under their location.
1770 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1771 sllnode<ModelAction *> * listref = act->getActionRef();
1772 if (listref != NULL) {
1773 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1774 list->erase(listref);
// Waits were indexed in obj_map under their mutex location (act's value).
1776 } else if (act->is_wait()) {
1777 sllnode<ModelAction *> * listref = act->getActionRef();
1778 if (listref != NULL) {
1779 void *mutex_loc = (void *) act->get_value();
1780 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
// Freed writes were indexed per-thread in obj_wr_thrd_map.
1782 } else if (act->is_free()) {
1783 sllnode<ModelAction *> * listref = act->getActionRef();
1784 if (listref != NULL) {
1785 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1786 (*vec)[act->get_tid()].erase(listref);
1789 //Clear it from last_sc_map
1790 if (obj_last_sc_map.get(act->get_location()) == act) {
1791 obj_last_sc_map.remove(act->get_location());
1794 //Remove from Cyclegraph
1795 mo_graph->freeAction(act);
1799 /** Computes clock vector that all running threads have already synchronized to. */
1801 ClockVector * ModelExecution::computeMinimalCV() {
1802 ClockVector *cvmin = NULL;
1803 //Thread 0 isn't a real thread, so skip it..
1804 for(unsigned int i = 1;i < thread_map.size();i++) {
1805 Thread * t = thread_map[i];
// Completed threads no longer constrain the minimum.
1806 if (t->is_complete())
1808 thread_id_t tid = int_to_id(i);
1809 ClockVector * cv = get_cv(tid);
// First live thread seeds the result; subsequent threads are folded in
// via component-wise minimum.
1811 cvmin = new ClockVector(cv, NULL);
1813 cvmin->minmerge(cv);
1819 /** Sometimes we need to remove an action that is the most recent in the thread. This happens if it is mo before action in other threads. In that case we need to create a replacement latest ModelAction */
1821 void ModelExecution::fixupLastAct(ModelAction *act) {
// The replacement is a seq_cst NOP that inherits the removed action's clock
// vector and release-fence reference, then is indexed like a normal action.
1822 ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
1823 newact->set_seq_number(get_next_seq_num());
1824 newact->create_cv(act);
1825 newact->set_last_fence_release(act->get_last_fence_release());
1826 add_action_to_lists(newact, false);
1829 /** Compute which actions to free. */
1831 void ModelExecution::collectActions() {
// Don't bother collecting until the trace exceeds the configured minimum.
1832 if (priv->used_sequence_numbers < params->traceminsize)
1835 //Compute minimal clock vector for all live threads
1836 ClockVector *cvmin = computeMinimalCV();
1837 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Actions with sequence numbers above this bound are kept unconditionally.
1838 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1840 //Next walk action trace... When we hit an action, see if it is
1841 //invisible (e.g., earlier than the first before the minimum
1842 //clock for the thread... if so erase it and all previous
1843 //actions in cyclegraph
1844 sllnode<ModelAction*> * it;
// Pass 1: mark freeable writes (and everything mo-before them) as free.
1845 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1846 ModelAction *act = it->getVal();
1847 modelclock_t actseq = act->get_seq_number();
1849 //See if we are done
1850 if (actseq > maxtofree)
1853 thread_id_t act_tid = act->get_tid();
1854 modelclock_t tid_clock = cvmin->getClock(act_tid);
1856 //Free if it is invisible or we have set a flag to remove visible actions.
1857 if (actseq <= tid_clock || params->removevisible) {
1858 ModelAction * write;
1859 if (act->is_write()) {
1861 } else if (act->is_read()) {
1862 write = act->get_reads_from();
1866 //Mark everything earlier in MO graph to be freed
// BFS over incoming mo edges from this write, marking each not-yet-free
// predecessor and enqueueing it.
1867 CycleNode * cn = mo_graph->getNode_noCreate(write);
1869 queue->push_back(cn);
1870 while(!queue->empty()) {
1871 CycleNode * node = queue->back();
1873 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1874 CycleNode * prevnode = node->getInEdge(i);
1875 ModelAction * prevact = prevnode->getAction();
1876 if (prevact->get_type() != READY_FREE) {
1877 prevact->set_free();
1878 queue->push_back(prevnode);
1886 //We may need to remove read actions in the window we don't delete to preserve correctness.
// Pass 2: walk the kept (recent) window backwards, weakening reads whose
// source write was freed.
1888 for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
1889 ModelAction *act = it2->getVal();
1890 //Do iteration early in case we delete the act
// A thread's live last action must not be deleted; others may be.
1892 bool islastact = false;
1893 ModelAction *lastact = get_last_action(act->get_tid());
1894 if (act == lastact) {
1895 Thread * th = get_thread(act);
1896 islastact = !th->is_complete();
1899 if (act->is_read()) {
1900 if (act->get_reads_from()->is_free()) {
1901 if (act->is_rmw()) {
1902 //Weaken a RMW from a freed store to a write
1903 act->set_type(ATOMIC_WRITE);
1915 //If we don't delete the action, we should remove references to release fences
1917 const ModelAction *rel_fence =act->get_last_fence_release();
1918 if (rel_fence != NULL) {
1919 modelclock_t relfenceseq = rel_fence->get_seq_number();
1920 thread_id_t relfence_tid = rel_fence->get_tid();
1921 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1922 //Remove references to irrelevant release fences
// A release fence everyone has already synchronized past is dead weight.
1923 if (relfenceseq <= tid_clock)
1924 act->set_last_fence_release(NULL);
1927 //Now we are in the window of old actions that we remove if possible
// Pass 3: continue forward over the old window, deleting what is safe.
1928 for (;it != NULL;) {
1929 ModelAction *act = it->getVal();
1930 //Do iteration early since we may delete node...
1932 bool islastact = false;
1933 ModelAction *lastact = get_last_action(act->get_tid());
1934 if (act == lastact) {
1935 Thread * th = get_thread(act);
1936 islastact = !th->is_complete();
1939 if (act->is_read()) {
1940 if (act->get_reads_from()->is_free()) {
1941 if (act->is_rmw()) {
1942 act->set_type(ATOMIC_WRITE);
1952 } else if (act->is_free()) {
1959 } else if (act->is_write()) {
1960 //Do nothing with write that hasn't been marked to be freed
1961 } else if (islastact) {
1962 //Keep the last action for non-read/write actions
1963 } else if (act->is_fence()) {
1964 //Note that acquire fences can always be safely
1965 //removed, but could incur extra overheads in
1966 //traversals. Removing them before the cvmin seems
1967 //like a good compromise.
1969 //Release fences before the cvmin don't do anything
1970 //because everyone has already synchronized.
1972 //Sequentially consistent fences before cvmin are redundant
1973 //because happens-before will enforce same
1976 modelclock_t actseq = act->get_seq_number();
1977 thread_id_t act_tid = act->get_tid();
1978 modelclock_t tid_clock = cvmin->getClock(act_tid);
1979 if (actseq <= tid_clock) {
1981 // Remove reference to act from thrd_last_fence_release
1982 int thread_id = id_to_int( act->get_tid() );
1983 if (thrd_last_fence_release[thread_id] == act) {
1984 thrd_last_fence_release[thread_id] = NULL;
1990 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
1991 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
1992 //need to keep most recent unlock/wait for each lock
1993 if(act->is_unlock() || act->is_wait()) {
// Only the newest unlock/wait per mutex matters (see get_last_unlock).
1994 ModelAction * lastlock = get_last_unlock(act);
1995 if (lastlock != act) {
2000 } else if (act->is_create()) {
// A create whose child thread has finished is no longer needed.
2001 if (act->get_thread_operand()->is_complete()) {
2013 //If we don't delete the action, we should remove references to release fences
2014 const ModelAction *rel_fence =act->get_last_fence_release();
2015 if (rel_fence != NULL) {
2016 modelclock_t relfenceseq = rel_fence->get_seq_number();
2017 thread_id_t relfence_tid = rel_fence->get_tid();
2018 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
2019 //Remove references to irrelevant release fences
2020 if (relfenceseq <= tid_clock)
2021 act->set_last_fence_release(NULL);
2029 Fuzzer * ModelExecution::getFuzzer() {