12 #include "clockvector.h"
13 #include "cyclegraph.h"
15 #include "threads-model.h"
16 #include "bugmessage.h"
/* File-local, execution-wide statistics: one counter per kind of atomic
 * operation. Incremented in record_atomic_stats() and reported by
 * print_atomic_accesses(). NOTE(review): sampled listing — counters for
 * some kinds (e.g. the gap at original line 23) are not visible here. */
20 static unsigned int atomic_load_count = 0;
21 static unsigned int atomic_store_count = 0;
22 static unsigned int atomic_rmw_count = 0;
24 static unsigned int atomic_fence_count = 0;
25 static unsigned int atomic_lock_count = 0;
26 static unsigned int atomic_trylock_count = 0;
27 static unsigned int atomic_unlock_count = 0;
28 static unsigned int atomic_notify_count = 0;
29 static unsigned int atomic_wait_count = 0;
30 static unsigned int atomic_timedwait_count = 0;
34 * Structure for holding small ModelChecker members that should be snapshotted
36 struct model_snapshot_members {
37 model_snapshot_members() :
38 /* First thread created will have id INITIAL_THREAD_ID */
39 next_thread_id(INITIAL_THREAD_ID),
40 used_sequence_numbers(0),
/* Destructor: release every bug_message accumulated during the execution. */
45 ~model_snapshot_members() {
46 for (unsigned int i = 0;i < bugs.size();i++)
/* Next thread id to hand out; doubles as the thread count (see get_num_threads). */
51 unsigned int next_thread_id;
/* Last sequence number assigned to a ModelAction in this execution. */
52 modelclock_t used_sequence_numbers;
/* Bug reports recorded via ModelExecution::assert_bug(). */
53 SnapVector<bug_message *> bugs;
54 /** @brief Incorrectly-ordered synchronization was made */
60 /** @brief Constructor */
61 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
65 thread_map(2), /* We'll always need at least 2 threads */
70 condvar_waiters_map(),
77 thrd_last_fence_release(),
/* Snapshotted per-execution state lives behind `priv` so it is restored on rollback. */
78 priv(new struct model_snapshot_members ()),
79 mo_graph(new CycleGraph()),
83 /* Initialize a model-checker thread, for special ModelActions */
84 model_thread = new Thread(get_next_id());
85 add_thread(model_thread);
/* Hook the fuzzer and scheduler back to this execution so they can query it. */
86 fuzzer->register_engine(m, this);
87 scheduler->register_engine(this);
/* TLS key used to run per-thread destructors on thread exit. */
89 pthread_key_create(&pthreadkey, tlsdestructor);
93 /** @brief Destructor */
94 ModelExecution::~ModelExecution()
/* Delete every Thread created for this execution, starting from the first id. */
96 for (unsigned int i = INITIAL_THREAD_ID;i < get_num_threads();i++)
97 delete get_thread(int_to_id(i));
/** @return the global execution number, delegated to the ModelChecker. */
103 int ModelExecution::get_execution_number() const
105 return model->get_execution_number();
/* Lazily create (and cache in the hash) the per-location vector of per-thread
 * action lists for `ptr`. Never returns NULL. */
108 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 2> * hash, void * ptr)
110 SnapVector<action_list_t> *tmp = hash->get(ptr);
/* Entry missing: allocate a fresh vector (stored back into the hash below,
 * not visible in this sampled view). */
112 tmp = new SnapVector<action_list_t>();
/* Lazily create (and cache in the hash) the simple action list for `ptr`.
 * Never returns NULL. */
118 static simple_action_list_t * get_safe_ptr_action(HashTable<const void *, simple_action_list_t *, uintptr_t, 2> * hash, void * ptr)
120 simple_action_list_t *tmp = hash->get(ptr);
122 tmp = new simple_action_list_t();
/* Overload of the above for vectors of simple action lists. */
128 static SnapVector<simple_action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<simple_action_list_t> *, uintptr_t, 2> * hash, void * ptr)
130 SnapVector<simple_action_list_t> *tmp = hash->get(ptr);
132 tmp = new SnapVector<simple_action_list_t>();
139 * When vectors of action lists are reallocated due to resize, the root address of
140 * action lists may change. Hence we need to fix the parent pointer of the children
143 static void fixup_action_list(SnapVector<action_list_t> * vec)
145 for (uint i = 0;i < vec->size();i++) {
/* Re-anchor each list so its nodes point at the list's new address. */
146 action_list_t * list = &(*vec)[i];
/* Bump the statistics counter matching this action's type.
 * NOTE(review): sampled listing — several case labels (load/rmw/lock/wait)
 * fall in the gaps and are not visible here. */
153 static inline void record_atomic_stats(ModelAction * act)
155 switch (act->get_type()) {
157 atomic_store_count++;
166 atomic_fence_count++;
172 atomic_trylock_count++;
175 atomic_unlock_count++;
177 case ATOMIC_NOTIFY_ONE:
178 case ATOMIC_NOTIFY_ALL:
179 atomic_notify_count++;
184 case ATOMIC_TIMEDWAIT:
185 atomic_timedwait_count++;
/** Print the per-kind atomic operation counters accumulated this run. */
191 void print_atomic_accesses()
193 model_print("atomic store count: %u\n", atomic_store_count);
194 model_print("atomic load count: %u\n", atomic_load_count);
195 model_print("atomic rmw count: %u\n", atomic_rmw_count);
197 model_print("atomic fence count: %u\n", atomic_fence_count);
198 model_print("atomic lock count: %u\n", atomic_lock_count);
199 model_print("atomic trylock count: %u\n", atomic_trylock_count);
200 model_print("atomic unlock count: %u\n", atomic_unlock_count);
201 model_print("atomic notify count: %u\n", atomic_notify_count);
202 model_print("atomic wait count: %u\n", atomic_wait_count);
203 model_print("atomic timedwait count: %u\n", atomic_timedwait_count);
206 /** @return a thread ID for a new Thread */
207 thread_id_t ModelExecution::get_next_id()
/* Post-increment: hand out the current id, then advance the counter. */
209 return priv->next_thread_id++;
212 /** @return the number of user threads created during this execution */
213 unsigned int ModelExecution::get_num_threads() const
/* next_thread_id doubles as the count of ids handed out so far. */
215 return priv->next_thread_id;
218 /** @return a sequence number for a new ModelAction */
219 modelclock_t ModelExecution::get_next_seq_num()
/* Pre-increment: sequence numbers start after the snapshotted base value. */
221 return ++priv->used_sequence_numbers;
224 /** @return the most recently assigned sequence number (no allocation) */
225 modelclock_t ModelExecution::get_curr_seq_num()
227 return priv->used_sequence_numbers;
230 /** Restore the last used sequence number when actions of a thread are postponed by Fuzzer */
231 void ModelExecution::restore_last_seq_num()
/* Undo the ++ performed by get_next_seq_num() for the postponed action. */
233 priv->used_sequence_numbers--;
237 * @brief Should the current action wake up a given thread?
239 * @param asleep The pending ModelAction of the sleeping thread
240 * (the fuzzer decides per action-type whether it should wake)
241 * @return True, if we should wake up the sleeping thread; false otherwise
243 bool ModelExecution::should_wake_up(const ModelAction * asleep) const
245 /* The sleep is literally sleeping */
246 switch (asleep->get_type()) {
248 if (fuzzer->shouldWake(asleep))
252 case ATOMIC_TIMEDWAIT:
/* Waits/timed-waits wake only if the fuzzer decides they should. */
253 if (fuzzer->waitShouldWakeUp(asleep))
/* Scan every user thread in the scheduler's sleep set and wake those whose
 * pending action should now proceed (per should_wake_up). Waking a waiter
 * also removes it from the condvar waiter list and, for timed waits, sets
 * the ETIMEDOUT return value. */
263 void ModelExecution::wake_up_sleeping_actions()
265 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
266 thread_id_t tid = int_to_id(i);
267 if (scheduler->is_sleep_set(tid)) {
268 Thread *thr = get_thread(tid);
269 ModelAction * pending = thr->get_pending();
270 if (should_wake_up(pending)) {
271 /* Remove this thread from sleep set */
272 scheduler->remove_sleep(thr);
274 if (pending->is_sleep()) {
275 thr->set_wakeup_state(true);
276 } else if (pending->is_wait()) {
277 thr->set_wakeup_state(true);
278 /* Remove this thread from list of waiters */
279 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, pending->get_location());
280 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
281 if (rit->getVal()->get_tid() == tid) {
287 /* Set ETIMEDOUT error */
288 if (pending->is_timedwait())
289 thr->set_return_value(ETIMEDOUT);
/** Record a bug report for this execution. @param msg human-readable message */
296 void ModelExecution::assert_bug(const char *msg)
298 priv->bugs.push_back(new bug_message(msg));
302 /** @return True, if any bugs have been reported for this execution */
303 bool ModelExecution::have_bug_reports() const
305 return priv->bugs.size() != 0;
/** @return the list of bug reports recorded for this execution */
308 SnapVector<bug_message *> * ModelExecution::get_bugs() const
314 * Check whether the current trace has triggered an assertion which should halt
317 * @return True, if the execution should be aborted; false otherwise
319 bool ModelExecution::has_asserted() const
321 return priv->asserted;
325 * Trigger a trace assertion which should cause this execution to be halted.
326 * This can be due to a detected bug or due to an infeasibility that should
329 void ModelExecution::set_assert()
331 priv->asserted = true;
335 * Check if we are in a deadlock. Should only be called at the end of an
336 * execution, although it should not give false positives in the middle of an
337 * execution (there should be some ENABLED thread).
339 * @return True if program is in a deadlock; false otherwise
341 bool ModelExecution::is_deadlocked() const
343 bool blocking_threads = false;
344 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
345 thread_id_t tid = int_to_id(i);
348 Thread *t = get_thread(tid);
/* A user thread with a stashed pending action is blocked on something. */
349 if (!t->is_model_thread() && t->get_pending())
350 blocking_threads = true;
352 return blocking_threads;
356 * Check if this is a complete execution. That is, have all thread completed
357 * execution (rather than exiting because sleep sets have forced a redundant
360 * @return True if the execution is complete.
362 bool ModelExecution::is_complete_execution() const
364 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++)
/* Any still-enabled thread means the execution has not run to completion. */
365 if (is_enabled(int_to_id(i)))
/* Materialize a plain (non-atomic) store at `location` as a NONATOMIC_WRITE
 * ModelAction so atomic reads may read from it. The store's thread and clock
 * are recovered from the shadow memory (getStoreThreadAndClock); the action
 * is then inserted lazily into the write lists and the mo graph. */
370 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
371 uint64_t value = *((const uint64_t *) location);
372 modelclock_t storeclock;
373 thread_id_t storethread;
374 getStoreThreadAndClock(location, &storethread, &storeclock);
/* Mark the location so the store is only converted once. */
375 setAtomicStoreFlag(location);
376 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
377 act->set_seq_number(storeclock);
378 add_normal_write_to_lists(act);
379 add_write_to_lists(act);
380 w_modification_order(act);
385 * Processes a read model action.
386 * @param curr is the read model action to process.
387 * @param rf_set is the set of model actions we can possibly read from
388 * @return True if the read can be pruned from the thread map list.
390 bool ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
392 SnapVector<ModelAction *> * priorset = new SnapVector<ModelAction *>();
393 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
394 if (hasnonatomicstore) {
/* Make the pending plain store a candidate write for this read. */
395 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
396 rf_set->push_back(nonatomicstore);
399 // Remove writes that violate read modification order
402 while (i < rf_set->size()) {
403 ModelAction * rf = (*rf_set)[i];
/* check_only pass: filter candidates without mutating priorset. */
404 if (!r_modification_order(curr, rf, NULL, NULL, true)) {
/* Swap-with-back removal keeps the loop O(n) without preserving order. */
405 (*rf_set)[i] = rf_set->back();
/* Let the fuzzer pick which remaining write this read observes. */
412 int index = fuzzer->selectWrite(curr, rf_set);
414 ModelAction *rf = (*rf_set)[index];
417 bool canprune = false;
418 if (r_modification_order(curr, rf, priorset, &canprune)) {
/* Commit the mo edges implied by the chosen reads-from. */
419 for(unsigned int i=0;i<priorset->size();i++) {
420 mo_graph->addEdge((*priorset)[i], rf);
423 get_thread(curr)->set_return_value(rf->get_write_value());
425 //Update acquire fence clock vector
426 ClockVector * hbcv = get_hb_from_write(rf);
428 get_thread(curr)->get_acq_fence_cv()->merge(hbcv);
429 return canprune && (curr->get_type() == ATOMIC_READ);
/* Chosen write was inconsistent: drop it and retry with the remainder. */
432 (*rf_set)[index] = rf_set->back();
438 * Processes a lock, trylock, or unlock model action. @param curr is
439 * the read model action to process.
441 * The try lock operation checks whether the lock is taken. If not,
442 * it falls to the normal lock operation case. If so, it returns
445 * The lock operation has already been checked that it is enabled, so
446 * it just grabs the lock and synchronizes with the previous unlock.
448 * The unlock operation has to re-enable all of the threads that are
449 * waiting on the lock.
451 * @return True if synchronization was updated; false otherwise
453 bool ModelExecution::process_mutex(ModelAction *curr)
455 cdsc::mutex *mutex = curr->get_mutex();
456 struct cdsc::mutex_state *state = NULL;
459 state = mutex->get_state();
461 switch (curr->get_type()) {
462 case ATOMIC_TRYLOCK: {
/* Trylock succeeds iff nobody holds the lock right now. */
463 bool success = !state->locked;
464 curr->set_try_lock(success);
466 get_thread(curr)->set_return_value(0);
469 get_thread(curr)->set_return_value(1);
471 //otherwise fall into the lock case
473 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
474 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
475 // assert_bug("Lock access before initialization");
477 // TODO: lock count for recursive mutexes
/* Take ownership of the mutex for the acting thread. */
478 state->locked = get_thread(curr);
479 ModelAction *unlock = get_last_unlock(curr);
480 //synchronize with the previous unlock statement
481 if (unlock != NULL) {
482 synchronize(unlock, curr);
488 Thread *curr_thrd = get_thread(curr);
489 /* wake up the other threads */
490 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
491 Thread *t = get_thread(int_to_id(i));
492 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
496 /* unlock the lock - after checking who was waiting on it */
497 state->locked = NULL;
499 /* disable this thread */
500 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
501 waiters->push_back(curr);
502 curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
504 if (fuzzer->waitShouldFail(curr)) // If wait should fail spuriously,
505 scheduler->add_sleep(curr_thrd); // place this thread into THREAD_SLEEP_SET
507 scheduler->sleep(curr_thrd);
511 case ATOMIC_TIMEDWAIT: {
512 Thread *curr_thrd = get_thread(curr);
/* Fuzzer may decide the timed wait times out immediately. */
513 if (!fuzzer->randomizeWaitTime(curr)) {
514 curr_thrd->set_return_value(ETIMEDOUT);
518 /* wake up the other threads */
519 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
520 Thread *t = get_thread(int_to_id(i));
521 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
525 /* unlock the lock - after checking who was waiting on it */
526 state->locked = NULL;
528 /* disable this thread */
529 simple_action_list_t * waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
530 waiters->push_back(curr);
531 curr_thrd->set_pending(curr); // Forbid this thread to stash a new action
532 scheduler->add_sleep(curr_thrd);
535 case ATOMIC_UNLOCK: {
536 // TODO: lock count for recursive mutexes
537 /* wake up the other threads */
538 Thread *curr_thrd = get_thread(curr);
539 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
540 Thread *t = get_thread(int_to_id(i));
541 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
545 /* unlock the lock - after checking who was waiting on it */
546 state->locked = NULL;
549 case ATOMIC_NOTIFY_ALL: {
550 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
551 //activate all the waiting threads
552 for (sllnode<ModelAction *> * rit = waiters->begin();rit != NULL;rit=rit->getNext()) {
553 Thread * thread = get_thread(rit->getVal());
554 if (thread->get_state() != THREAD_COMPLETED)
555 scheduler->wake(thread);
556 thread->set_wakeup_state(true);
561 case ATOMIC_NOTIFY_ONE: {
562 simple_action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
563 if (waiters->size() != 0) {
/* Fuzzer picks which single waiter gets the notification. */
564 Thread * thread = fuzzer->selectNotify(waiters);
565 if (thread->get_state() != THREAD_COMPLETED)
566 scheduler->wake(thread);
567 thread->set_wakeup_state(true);
579 * Process a write ModelAction
580 * @param curr The ModelAction to process
581 * @return True if the mo_graph was updated or promises were resolved
583 void ModelExecution::process_write(ModelAction *curr)
585 w_modification_order(curr);
/* Writes report VALUE_NONE as their return value. */
586 get_thread(curr)->set_return_value(VALUE_NONE);
590 * Process a fence ModelAction
591 * @param curr The ModelAction to process
592 * @return True if synchronization was updated
594 void ModelExecution::process_fence(ModelAction *curr)
597 * fence-relaxed: no-op
598 * fence-release: only log the occurence (not in this function), for
599 * use in later synchronization
600 * fence-acquire (this function): search for hypothetical release
602 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
604 if (curr->is_acquire()) {
/* Acquire fence absorbs hb knowledge accumulated by prior acquire reads. */
605 curr->get_cv()->merge(get_thread(curr)->get_acq_fence_cv());
610 * @brief Process the current action for thread-related activity
612 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
613 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
614 * synchronization, etc. This function is a no-op for non-THREAD actions
615 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
617 * @param curr The current action
618 * @return True if synchronization was updated or a thread completed
620 void ModelExecution::process_thread_action(ModelAction *curr)
622 switch (curr->get_type()) {
623 case THREAD_CREATE: {
624 thrd_t *thrd = (thrd_t *)curr->get_location();
625 struct thread_params *params = (struct thread_params *)curr->get_value();
626 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
627 curr->set_thread_operand(th);
629 th->set_creation(curr);
632 case PTHREAD_CREATE: {
/* pthread ids are dense integers; stash the new id at the user's pthread_t. */
633 (*(uint32_t *)curr->get_location()) = pthread_counter++;
635 struct pthread_params *params = (struct pthread_params *)curr->get_value();
636 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
637 curr->set_thread_operand(th);
639 th->set_creation(curr);
/* Keep the id -> Thread* map large enough, then record the new thread. */
641 if ( pthread_map.size() < pthread_counter )
642 pthread_map.resize( pthread_counter );
643 pthread_map[ pthread_counter-1 ] = th;
/* Join: synchronize with the last action of the joined (blocking) thread. */
648 Thread *blocking = curr->get_thread_operand();
649 ModelAction *act = get_last_action(blocking->get_id());
650 synchronize(act, curr);
654 Thread *blocking = curr->get_thread_operand();
655 ModelAction *act = get_last_action(blocking->get_id());
656 synchronize(act, curr);
660 case THREADONLY_FINISH:
661 case THREAD_FINISH: {
662 Thread *th = get_thread(curr);
663 if (curr->get_type() == THREAD_FINISH &&
664 th == model->getInitThread()) {
670 /* Wake up any joining threads */
671 for (unsigned int i = MAIN_THREAD_ID;i < get_num_threads();i++) {
672 Thread *waiting = get_thread(int_to_id(i));
673 if (waiting->waiting_on() == th &&
674 waiting->get_pending()->is_thread_join())
675 scheduler->wake(waiting);
/* THREAD_SLEEP (presumably — label in gap): park the thread in the sleep set. */
684 Thread *th = get_thread(curr);
685 th->set_pending(curr);
686 scheduler->add_sleep(th);
695 * Initialize the current action by performing one or more of the following
696 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
697 * manipulating backtracking sets, allocating and
698 * initializing clock vectors, and computing the promises to fulfill.
700 * @param curr The current action, as passed from the user context; may be
701 * freed/invalidated after the execution of this function, with a different
702 * action "returned" its place (pass-by-reference)
703 * @return True if curr is a newly-explored action; false otherwise
705 bool ModelExecution::initialize_curr_action(ModelAction **curr)
707 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
/* Second half of an RMW: merge it into the earlier RMWR action. */
708 ModelAction *newcurr = process_rmw(*curr);
714 ModelAction *newcurr = *curr;
716 newcurr->set_seq_number(get_next_seq_num());
717 /* Always compute new clock vector */
718 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
720 /* Assign most recent release fence */
721 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
723 return true; /* This was a new ModelAction */
728 * @brief Establish reads-from relation between two actions
730 * Perform basic operations involved with establishing a concrete rf relation,
731 * including setting the ModelAction data and checking for release sequences.
733 * @param act The action that is reading (must be a read)
734 * @param rf The action from which we are reading (must be a write)
736 * @return True if this read established synchronization
739 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
742 ASSERT(rf->is_write());
744 act->set_read_from(rf);
745 if (act->is_acquire()) {
/* Acquire read: merge the hb clock propagated by the write (release seq). */
746 ClockVector *cv = get_hb_from_write(rf);
749 act->get_cv()->merge(cv);
754 * @brief Synchronizes two actions
756 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
757 * This function performs the synchronization as well as providing other hooks
758 * for other checks along with synchronization.
760 * @param first The left-hand side of the synchronizes-with relation
761 * @param second The right-hand side of the synchronizes-with relation
762 * @return True if the synchronization was successful (i.e., was consistent
763 * with the execution order); false otherwise
765 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
/* Sanity: sw edges must agree with the trace order (first before second). */
767 if (*second < *first) {
768 ASSERT(0); //This should not happend
771 return second->synchronize_with(first);
775 * @brief Check whether a model action is enabled.
777 * Checks whether an operation would be successful (i.e., is a lock already
778 * locked, or is the joined thread already complete).
780 * For yield-blocking, yields are never enabled.
782 * @param curr is the ModelAction to check whether it is enabled.
783 * @return a bool that indicates whether the action is enabled.
785 bool ModelExecution::check_action_enabled(ModelAction *curr) {
786 switch (curr->get_type()) {
788 cdsc::mutex *lock = curr->get_mutex();
789 struct cdsc::mutex_state *state = lock->get_state();
791 Thread *lock_owner = (Thread *)state->locked;
792 Thread *curr_thread = get_thread(curr);
/* Recursive mutexes may be re-acquired by their current owner. */
793 if (lock_owner == curr_thread && state->type == PTHREAD_MUTEX_RECURSIVE) {
/* Join is enabled only once the target thread has completed. */
803 Thread *blocking = curr->get_thread_operand();
804 if (!blocking->is_complete()) {
/* Sleep (presumably — label in gap): fuzzer may skip the sleep entirely. */
810 if (!fuzzer->shouldSleep(curr))
822 * This is the heart of the model checker routine. It performs model-checking
823 * actions corresponding to a given "current action." Among other processes, it
824 * calculates reads-from relationships, updates synchronization clock vectors,
825 * forms a memory_order constraints graph, and handles replay/backtrack
826 * execution when running permutations of previously-observed executions.
828 * @param curr The current action to process
829 * @return The ModelAction that is actually executed; may be different than
832 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
835 bool newly_explored = initialize_curr_action(&curr);
839 wake_up_sleeping_actions();
841 SnapVector<ModelAction *> * rf_set = NULL;
842 bool canprune = false;
843 /* Build may_read_from set for newly-created actions */
844 if (curr->is_read() && newly_explored) {
845 rf_set = build_may_read_from(curr);
846 canprune = process_read(curr, rf_set);
/* Non-read (or replayed) actions must not have a leftover rf_set. */
849 ASSERT(rf_set == NULL);
851 /* Add the action to lists if not the second part of a rmw */
852 if (newly_explored) {
854 record_atomic_stats(curr);
856 add_action_to_lists(curr, canprune);
859 if (curr->is_write())
860 add_write_to_lists(curr);
/* Dispatch type-specific processing: thread ops, writes, fences, mutex ops. */
862 process_thread_action(curr);
864 if (curr->is_write())
867 if (curr->is_fence())
870 if (curr->is_mutex_op())
876 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
877 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
878 ModelAction *lastread = get_last_action(act->get_tid());
/* Merge this action's write half into the thread's preceding RMWR. */
879 lastread->process_rmw(act);
/* RMW atomicity: the RMW is mo-adjacent to the write it read from. */
881 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
887 * @brief Updates the mo_graph with the constraints imposed from the current
890 * Basic idea is the following: Go through each other thread and find
891 * the last action that happened before our read. Two cases:
893 * -# The action is a write: that write must either occur before
894 * the write we read from or be the write we read from.
895 * -# The action is a read: the write that that action read from
896 * must occur before the write we read from or be the same write.
898 * @param curr The current action. Must be a read.
899 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
900 * @param check_only If true, then only check whether the current action satisfies
901 * read modification order or not, without modifiying priorset and canprune.
903 * @return True if modification order edges were added; false otherwise
906 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf,
907 SnapVector<ModelAction *> * priorset, bool * canprune)
909 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
910 ASSERT(curr->is_read());
912 /* Last SC fence in the current thread */
913 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
915 int tid = curr->get_tid();
917 /* Need to ensure thrd_lists is big enough because we have not added the curr actions yet. */
918 if ((int)thrd_lists->size() <= tid) {
919 uint oldsize = thrd_lists->size();
920 thrd_lists->resize(priv->next_thread_id);
/* Placement-new the freshly exposed slots; resize() alone does not run ctors here. */
921 for(uint i = oldsize;i < priv->next_thread_id;i++)
922 new (&(*thrd_lists)[i]) action_list_t();
/* Resizing may have moved the lists; repair child back-pointers. */
924 fixup_action_list(thrd_lists);
927 ModelAction *prev_same_thread = NULL;
928 /* Iterate over all threads */
929 for (unsigned int i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
930 /* Last SC fence in thread tid */
931 ModelAction *last_sc_fence_thread_local = NULL;
933 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
935 /* Last SC fence in thread tid, before last SC fence in current thread */
936 ModelAction *last_sc_fence_thread_before = NULL;
937 if (last_sc_fence_local)
938 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
940 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
941 if (prev_same_thread != NULL &&
942 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
943 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
947 /* Iterate over actions in thread, starting from most recent */
948 action_list_t *list = &(*thrd_lists)[tid];
949 sllnode<ModelAction *> * rit;
950 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
951 ModelAction *act = rit->getVal();
956 /* Don't want to add reflexive edges on 'rf' */
957 if (act->equals(rf)) {
958 if (act->happens_before(curr))
964 if (act->is_write()) {
965 /* C++, Section 29.3 statement 5 */
966 if (curr->is_seqcst() && last_sc_fence_thread_local &&
967 *act < *last_sc_fence_thread_local) {
/* rf reachable from act would make the required edge act->rf a cycle. */
968 if (mo_graph->checkReachable(rf, act))
970 priorset->push_back(act);
973 /* C++, Section 29.3 statement 4 */
974 else if (act->is_seqcst() && last_sc_fence_local &&
975 *act < *last_sc_fence_local) {
976 if (mo_graph->checkReachable(rf, act))
978 priorset->push_back(act);
981 /* C++, Section 29.3 statement 6 */
982 else if (last_sc_fence_thread_before &&
983 *act < *last_sc_fence_thread_before) {
984 if (mo_graph->checkReachable(rf, act))
986 priorset->push_back(act);
992 * Include at most one act per-thread that "happens
995 if (act->happens_before(curr)) {
997 if (last_sc_fence_local == NULL ||
998 (*last_sc_fence_local < *act)) {
999 prev_same_thread = act;
1002 if (act->is_write()) {
1003 if (mo_graph->checkReachable(rf, act))
1005 priorset->push_back(act);
/* hb-prior read: its source write must be mo-before rf (unless identical). */
1007 ModelAction *prevrf = act->get_reads_from();
1008 if (!prevrf->equals(rf)) {
1009 if (mo_graph->checkReachable(rf, prevrf))
1011 priorset->push_back(prevrf);
1013 if (act->get_tid() == curr->get_tid()) {
1014 //Can prune curr from obj list
1027 * Updates the mo_graph with the constraints imposed from the current write.
1029 * Basic idea is the following: Go through each other thread and find
1030 * the lastest action that happened before our write. Two cases:
1032 * (1) The action is a write => that write must occur before
1035 * (2) The action is a read => the write that that action read from
1036 * must occur before the current write.
1038 * This method also handles two other issues:
1040 * (I) Sequential Consistency: Making sure that if the current write is
1041 * seq_cst, that it occurs after the previous seq_cst write.
1043 * (II) Sending the write back to non-synchronizing reads.
1045 * @param curr The current action. Must be a write.
1046 * @param send_fv A vector for stashing reads to which we may pass our future
1047 * value. If NULL, then don't record any future values.
1048 * @return True if modification order edges were added; false otherwise
1050 void ModelExecution::w_modification_order(ModelAction *curr)
1052 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1054 ASSERT(curr->is_write());
/* Sources of mo edges pointing at curr; committed in one addEdges call below. */
1056 SnapList<ModelAction *> edgeset;
1058 if (curr->is_seqcst()) {
1059 /* We have to at least see the last sequentially consistent write,
1060 so we are initialized. */
1061 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
1062 if (last_seq_cst != NULL) {
1063 edgeset.push_back(last_seq_cst);
1065 //update map for next query
1066 obj_last_sc_map.put(curr->get_location(), curr);
1069 /* Last SC fence in the current thread */
1070 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
1072 /* Iterate over all threads */
1073 for (i = 0;i < thrd_lists->size();i++) {
1074 /* Last SC fence in thread i, before last SC fence in current thread */
1075 ModelAction *last_sc_fence_thread_before = NULL;
1076 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
1077 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
1079 /* Iterate over actions in thread, starting from most recent */
1080 action_list_t *list = &(*thrd_lists)[i];
1081 sllnode<ModelAction*>* rit;
1082 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1083 ModelAction *act = rit->getVal();
1086 * 1) If RMW and it actually read from something, then we
1087 * already have all relevant edges, so just skip to next
1090 * 2) If RMW and it didn't read from anything, we should
1091 * whatever edge we can get to speed up convergence.
1093 * 3) If normal write, we need to look at earlier actions, so
1094 * continue processing list.
1096 if (curr->is_rmw()) {
1097 if (curr->get_reads_from() != NULL)
1105 /* C++, Section 29.3 statement 7 */
1106 if (last_sc_fence_thread_before && act->is_write() &&
1107 *act < *last_sc_fence_thread_before) {
1108 edgeset.push_back(act);
1113 * Include at most one act per-thread that "happens
1116 if (act->happens_before(curr)) {
1118 * Note: if act is RMW, just add edge:
1120 * The following edge should be handled elsewhere:
1121 * readfrom(act) --mo--> act
1123 if (act->is_write())
1124 edgeset.push_back(act);
1125 else if (act->is_read()) {
1126 //if previous read accessed a null, just keep going
1127 edgeset.push_back(act->get_reads_from());
/* Commit all collected mo sources -> curr in a single batch. */
1133 mo_graph->addEdges(&edgeset, curr);
1138 * Computes the clock vector that happens before propagates from this write.
1140 * @param rf The action that might be part of a release sequence. Must be a
1142 * @return ClockVector of happens before relation.
1145 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
1146 SnapVector<ModelAction *> * processset = NULL;
/* Phase 1: walk backwards through the rmw chain until we hit a write whose
 * rfcv is already cached, or a non-rmw / acq+rel write that terminates it. */
1147 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1148 ASSERT(rf->is_write());
1149 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1151 if (processset == NULL)
1152 processset = new SnapVector<ModelAction *>();
1153 processset->push_back(rf);
/* Phase 2: replay the chain forward, building/caching rfcv for each link. */
1156 int i = (processset == NULL) ? 0 : processset->size();
1158 ClockVector * vec = NULL;
1160 if (rf->get_rfcv() != NULL) {
1161 vec = rf->get_rfcv();
1162 } else if (rf->is_acquire() && rf->is_release()) {
1164 } else if (rf->is_release() && !rf->is_rmw()) {
1166 } else if (rf->is_release()) {
1167 //have rmw that is release and doesn't have a rfcv
1168 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1171 //operation that isn't release
1172 if (rf->get_last_fence_release()) {
1174 vec = new ClockVector(rf->get_last_fence_release()->get_cv(), NULL);
1176 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1180 vec = new ClockVector(NULL, NULL);
1183 vec = new ClockVector(vec, NULL);
/* Advance to the next (more recent) element of the stashed chain. */
1190 rf = (*processset)[i];
1194 if (processset != NULL)
1200 * Performs various bookkeeping operations for the current ModelAction. For
1201 * instance, adds action to the per-object, per-thread action vector and to the
1202 * action trace list of all thread actions.
1204 * @param act is the ModelAction to add.
1206 void ModelExecution::add_action_to_lists(ModelAction *act, bool canprune)
1208 int tid = id_to_int(act->get_tid());
/* SC fences and unlocks are indexed per-location for later queries. */
1209 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1210 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1211 act->setActionRef(list->add_back(act));
1214 // Update action trace, a total order of all actions
1215 action_trace.addAction(act);
1218 // Update obj_thrd_map, a per location, per thread, order of actions
1219 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1220 if ((int)vec->size() <= tid) {
1221 uint oldsize = vec->size();
1222 vec->resize(priv->next_thread_id);
/* Placement-new newly exposed slots, then repair parent pointers after move. */
1223 for(uint i = oldsize;i < priv->next_thread_id;i++)
1224 new (&(*vec)[i]) action_list_t();
1226 fixup_action_list(vec);
/* Pruned reads/writes are deliberately kept out of the per-thread list. */
1228 if (!canprune && (act->is_read() || act->is_write()))
1229 (*vec)[tid].addAction(act);
1231 // Update thrd_last_action, the last action taken by each thread
1232 if ((int)thrd_last_action.size() <= tid)
1233 thrd_last_action.resize(get_num_threads());
1234 thrd_last_action[tid] = act;
1236 // Update thrd_last_fence_release, the last release fence taken by each thread
1237 if (act->is_fence() && act->is_release()) {
1238 if ((int)thrd_last_fence_release.size() <= tid)
1239 thrd_last_fence_release.resize(get_num_threads());
1240 thrd_last_fence_release[tid] = act;
1243 if (act->is_wait()) {
/* For waits, the mutex address is carried in the action's value field. */
1244 void *mutex_loc = (void *) act->get_value();
1245 act->setActionRef(get_safe_ptr_action(&obj_map, mutex_loc)->add_back(act));
// Helper: append an action to a list.
// NOTE(review): the original (elided) implementation may insert in
// sequence-number order rather than append — confirm against full file.
1249 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1250 list->addAction(act);
// Helper: give the action a fresh clock vector (no parent), then add it
// to the list. Used when lazily inserting writes that were not processed
// through the normal take_step path.
1253 void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
1254 act->create_cv(NULL);
1255 list->addAction(act);
1259 * Performs various bookkeeping operations for a normal write. The
1260 * complication is that we are typically inserting a normal write
1261 * lazily, so we need to insert it into the middle of lists.
1263 * @param act is the ModelAction to add.
1266 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1268 int tid = id_to_int(act->get_tid());
1269 insertIntoActionListAndSetCV(&action_trace, act);
1271 // Update obj_thrd_map, a per location, per thread, order of actions
1272 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
// Grow and placement-initialize per-thread slots, same pattern as
// add_action_to_lists().
1273 if (tid >= (int)vec->size()) {
1274 uint oldsize =vec->size();
1275 vec->resize(priv->next_thread_id);
1276 for(uint i=oldsize;i<priv->next_thread_id;i++)
1277 new (&(*vec)[i]) action_list_t();
1279 fixup_action_list(vec);
1281 insertIntoActionList(&(*vec)[tid],act);
1283 ModelAction * lastact = thrd_last_action[tid];
1284 // Update thrd_last_action, the last action taken by each thread
// Only replace the cached last action when this lazy write shares its
// sequence number (i.e. belongs at the same logical step).
1285 if (lastact == NULL || lastact->get_seq_number() == act->get_seq_number())
1286 thrd_last_action[tid] = act;
// Record a write in obj_wr_thrd_map (per-location, per-thread write lists),
// used by build_may_read_from() to enumerate candidate stores for a read.
// The list node is cached on the action for O(1) erasure in removeAction().
1290 void ModelExecution::add_write_to_lists(ModelAction *write) {
1291 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1292 int tid = id_to_int(write->get_tid());
// Grow and placement-initialize per-thread slots on demand.
1293 if (tid >= (int)vec->size()) {
1294 uint oldsize =vec->size();
1295 vec->resize(priv->next_thread_id);
1296 for(uint i=oldsize;i<priv->next_thread_id;i++)
1297 new (&(*vec)[i]) simple_action_list_t();
1299 write->setActionRef((*vec)[tid].add_back(write));
1303 * @brief Get the last action performed by a particular Thread
1304 * @param tid The thread ID of the Thread in question
1305 * @return The last action in the thread
1307 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1309 int threadid = id_to_int(tid);
// Bounds check: the vector is grown lazily, so a thread that has not yet
// acted may be out of range (the elided fall-through presumably returns NULL).
1310 if (threadid < (int)thrd_last_action.size())
1311 return thrd_last_action[id_to_int(tid)];
1317 * @brief Get the last fence release performed by a particular Thread
1318 * @param tid The thread ID of the Thread in question
1319 * @return The last fence release in the thread, if one exists; NULL otherwise
1321 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1323 int threadid = id_to_int(tid);
// Lazily-grown vector; out-of-range means no release fence recorded yet.
1324 if (threadid < (int)thrd_last_fence_release.size())
1325 return thrd_last_fence_release[id_to_int(tid)];
1331 * Gets the last memory_order_seq_cst write (in the total global sequence)
1332 * performed on a particular object (i.e., memory location), not including the
1334 * @param curr The current ModelAction; also denotes the object location to
1336 * @return The last seq_cst write
1338 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1340 void *location = curr->get_location();
// obj_last_sc_map caches the most recent SC store per location; may be NULL.
1341 return obj_last_sc_map.get(location);
1345 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1346 * performed in a particular thread, prior to a particular fence.
1347 * @param tid The ID of the thread to check
1348 * @param before_fence The fence from which to begin the search; if NULL, then
1349 * search for the most recent fence in the thread.
1350 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1352 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1354 /* All fences should have location FENCE_LOCATION */
1355 simple_action_list_t *list = obj_map.get(FENCE_LOCATION);
// Walk backwards from the end of the fence list.
1360 sllnode<ModelAction*>* rit = list->end();
// First position the iterator at before_fence (branch guard elided:
// presumably only taken when before_fence != NULL).
1363 for (;rit != NULL;rit=rit->getPrev())
1364 if (rit->getVal() == before_fence)
1367 ASSERT(rit->getVal() == before_fence);
// Then continue backwards looking for this thread's most recent SC fence.
1371 for (;rit != NULL;rit=rit->getPrev()) {
1372 ModelAction *act = rit->getVal();
1373 if (act->is_fence() && (tid == act->get_tid()) && act->is_seqcst())
1380 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1381 * location). This function identifies the mutex according to the current
1382 * action, which is presumed to perform on the same mutex.
1383 * @param curr The current ModelAction; also denotes the object location to
1385 * @return The last unlock operation
1387 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1389 void *location = curr->get_location();
1391 simple_action_list_t *list = obj_map.get(location);
1395 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
// Reverse scan: the first unlock (or wait — waits release the mutex too,
// and are indexed under the mutex location by add_action_to_lists) wins.
1396 sllnode<ModelAction*>* rit;
1397 for (rit = list->end();rit != NULL;rit=rit->getPrev())
1398 if (rit->getVal()->is_unlock() || rit->getVal()->is_wait())
1399 return rit->getVal();
// Return a thread's most recent action, falling back to the THREAD_CREATE
// action that spawned it when the thread has not acted yet (the guard
// between these two lines is elided in this listing).
1403 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1405 ModelAction *parent = get_last_action(tid);
1407 parent = get_thread(tid)->get_creation();
1412 * Returns the clock vector for a given thread.
1413 * @param tid The thread whose clock vector we want
1414 * @return Desired clock vector
1416 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1418 ModelAction *firstaction=get_parent_action(tid);
// A brand-new thread with no parent action has no clock vector yet.
1419 return firstaction != NULL ? firstaction->get_cv() : NULL;
// Compare two values truncated to an access width of `size` bytes.
// Used to detect whether a CAS's expected value matches a store's value.
// (The switch-on-size scaffolding and the default case are elided here.)
1422 bool valequals(uint64_t val1, uint64_t val2, int size) {
1425 return ((uint8_t)val1) == ((uint8_t)val2);
1427 return ((uint16_t)val1) == ((uint16_t)val2);
1429 return ((uint32_t)val1) == ((uint32_t)val2);
1439 * Build up an initial set of all past writes that this 'read' action may read
1440 * from, as well as any previously-observed future values that must still be valid.
1442 * @param curr is the current ModelAction that we are exploring; it must be a
1445 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
1447 SnapVector<simple_action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1449 ASSERT(curr->is_read());
1451 ModelAction *last_sc_write = NULL;
// SC reads must not read a store that is hidden behind the last SC store.
1453 if (curr->is_seqcst())
1454 last_sc_write = get_last_seq_cst_write(curr);
// Caller owns the returned vector.
1456 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1458 /* Iterate over all threads */
1459 if (thrd_lists != NULL)
1460 for (i = 0;i < thrd_lists->size();i++) {
1461 /* Iterate over actions in thread, starting from most recent */
1462 simple_action_list_t *list = &(*thrd_lists)[i];
1463 sllnode<ModelAction *> * rit;
1464 for (rit = list->end();rit != NULL;rit=rit->getPrev()) {
1465 ModelAction *act = rit->getVal();
1470 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1471 bool allow_read = true;
1473 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1476 /* Need to check whether we will have two RMW reading from the same value */
1477 if (curr->is_rmwr()) {
1478 /* It is okay if we have a failing CAS */
1479 if (!curr->is_rmwrcas() ||
1480 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1481 //Need to make sure we aren't the second RMW
// A store already read by another RMW (node->getRMW() != NULL) cannot
// feed a second RMW — atomicity would be violated.
1482 CycleNode * node = mo_graph->getNode_noCreate(act);
1483 if (node != NULL && node->getRMW() != NULL) {
1484 //we are the second RMW
1491 /* Only add feasible reads */
1492 rf_set->push_back(act);
1495 /* Include at most one act per-thread that "happens before" curr */
// Once a happens-before store is found, earlier stores in this thread
// are hidden from curr (break elided).
1496 if (act->happens_before(curr))
1501 if (DBG_ENABLED()) {
1502 model_print("Reached read action:\n");
1504 model_print("End printing read_from_past\n");
// Print a formatted table of every action in the trace, followed by a
// simple rolling hash of the printed actions (useful for comparing runs).
1509 static void print_list(action_list_t *list)
1511 sllnode<ModelAction*> *it;
1513 model_print("------------------------------------------------------------------------------------\n");
1514 model_print("# t Action type MO Location Value Rf CV\n");
1515 model_print("------------------------------------------------------------------------------------\n");
1517 unsigned int hash = 0;
1519 for (it = list->begin();it != NULL;it=it->getNext()) {
1520 const ModelAction *act = it->getVal();
// Seq number 0 marks placeholder/uninitialized actions; skip them
// (print call for real actions elided in this listing).
1521 if (act->get_seq_number() > 0)
1523 hash = hash^(hash<<3)^(it->getVal()->hash());
1525 model_print("HASH %u\n", hash);
1526 model_print("------------------------------------------------------------------------------------\n");
1529 #if SUPPORT_MOD_ORDER_DUMP
// Emit the modification-order graph plus rf (red) and sb (blue) edges as a
// Graphviz .dot file named "<filename>.dot".
1530 void ModelExecution::dumpGraph(char *filename)
1533 sprintf(buffer, "%s.dot", filename);
1534 FILE *file = fopen(buffer, "w");
1535 fprintf(file, "digraph %s {\n", filename);
1536 mo_graph->dumpNodes(file);
// thread_array[tid] tracks the previous action per thread to draw
// sequenced-before edges; freed with model_free below.
1537 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1539 for (sllnode<ModelAction*>* it = action_trace.begin();it != NULL;it=it->getNext()) {
1540 ModelAction *act = it->getVal();
1541 if (act->is_read()) {
1542 mo_graph->dot_print_node(file, act);
1543 mo_graph->dot_print_edge(file,
1544 act->get_reads_from(),
1546 "label=\"rf\", color=red, weight=2");
1548 if (thread_array[act->get_tid()]) {
1549 mo_graph->dot_print_edge(file,
1550 thread_array[id_to_int(act->get_tid())],
1552 "label=\"sb\", color=blue, weight=400");
1555 thread_array[act->get_tid()] = act;
1557 fprintf(file, "}\n");
1558 model_free(thread_array);
1563 /** @brief Prints an execution trace summary. */
1564 void ModelExecution::print_summary()
// Optionally dump the mo-graph for this execution to numbered files.
1566 #if SUPPORT_MOD_ORDER_DUMP
1567 char buffername[100];
1568 sprintf(buffername, "exec%04u", get_execution_number());
1569 mo_graph->dumpGraphToFile(buffername);
1570 sprintf(buffername, "graph%04u", get_execution_number());
1571 dumpGraph(buffername);
// Annotate the header with notable execution properties before the table.
1574 model_print("Execution trace %d:", get_execution_number());
1575 if (scheduler->all_threads_sleeping())
1576 model_print(" SLEEP-SET REDUNDANT");
1577 if (have_bug_reports())
1578 model_print(" DETECTED BUG(S)");
1582 print_list(&action_trace);
// Print only the tail (last `length` entries — bound elided here) of the
// action trace, in forward order, with the same table format and rolling
// hash as print_list().
1587 void ModelExecution::print_tail()
1589 model_print("Execution trace %d:\n", get_execution_number());
1591 sllnode<ModelAction*> *it;
1593 model_print("------------------------------------------------------------------------------------\n");
1594 model_print("# t Action type MO Location Value Rf CV\n");
1595 model_print("------------------------------------------------------------------------------------\n");
1597 unsigned int hash = 0;
// Collect the last actions by walking backwards and pushing to the front,
// restoring forward order in `list`.
1601 SnapList<ModelAction *> list;
1602 for (it = action_trace.end();it != NULL;it = it->getPrev()) {
1603 if (counter > length)
1606 ModelAction * act = it->getVal();
1607 list.push_front(act);
1611 for (it = list.begin();it != NULL;it=it->getNext()) {
1612 const ModelAction *act = it->getVal();
1613 if (act->get_seq_number() > 0)
1615 hash = hash^(hash<<3)^(it->getVal()->hash());
1617 model_print("HASH %u\n", hash);
1618 model_print("------------------------------------------------------------------------------------\n");
1622 * Add a Thread to the system for the first time. Should only be called once
1624 * @param t The Thread to add
1626 void ModelExecution::add_thread(Thread *t)
1628 unsigned int i = id_to_int(t->get_id());
// Grow the id->Thread map on demand (assignment into slot i is elided).
1629 if (i >= thread_map.size())
1630 thread_map.resize(i + 1);
// The internal model-checker thread is never handed to the scheduler.
1632 if (!t->is_model_thread())
1633 scheduler->add_thread(t);
1637 * @brief Get a Thread reference by its ID
1638 * @param tid The Thread's ID
1639 * @return A Thread reference
1641 Thread * ModelExecution::get_thread(thread_id_t tid) const
1643 unsigned int i = id_to_int(tid);
// Out-of-range ids presumably fall through to a NULL return (elided).
1644 if (i < thread_map.size())
1645 return thread_map[i];
1650 * @brief Get a reference to the Thread in which a ModelAction was executed
1651 * @param act The ModelAction
1652 * @return A Thread reference
1654 Thread * ModelExecution::get_thread(const ModelAction *act) const
// Delegates to the tid-based overload above.
1656 return get_thread(act->get_tid());
1660 * @brief Get a Thread reference by its pthread ID
1661 * @param index The pthread's ID
1662 * @return A Thread reference
1664 Thread * ModelExecution::get_pthread(pthread_t pid) {
1665 // pid 1 is reserved for the main thread, pthread ids should start from 2
// Small pids map directly to model thread ids (guard elided).
1667 return get_thread(pid);
// NOTE(review): `x.v` suggests pthread_t is reinterpreted through a union/
// struct to extract a numeric id — confirm against the elided lines above.
1674 uint32_t thread_id = x.v;
1676 if (thread_id < pthread_counter + 1)
1677 return pthread_map[thread_id];
1683 * @brief Check if a Thread is currently enabled
1684 * @param t The Thread to check
1685 * @return True if the Thread is currently enabled
1687 bool ModelExecution::is_enabled(Thread *t) const
// Scheduler owns the enabled/disabled state; this is a thin forwarder.
1689 return scheduler->is_enabled(t);
1693 * @brief Check if a Thread is currently enabled
1694 * @param tid The ID of the Thread to check
1695 * @return True if the Thread is currently enabled
1697 bool ModelExecution::is_enabled(thread_id_t tid) const
// Thin forwarder to the scheduler's per-tid enabled check.
1699 return scheduler->is_enabled(tid);
1703 * @brief Select the next thread to execute based on the curren action
1705 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1706 * actions should be followed by the execution of their child thread. In either
1707 * case, the current action should determine the next thread schedule.
1709 * @param curr The current action
1710 * @return The next thread to run, if the current action will determine this
1711 * selection; otherwise NULL
1713 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1715 /* Do not split atomic RMW */
// The read half of an RMW must be immediately followed by its write half,
// so keep scheduling the same thread.
1716 if (curr->is_rmwr())
1717 return get_thread(curr);
1718 /* Follow CREATE with the created thread */
1719 /* which is not needed, because model.cc takes care of this */
1720 if (curr->get_type() == THREAD_CREATE)
1721 return curr->get_thread_operand();
1722 if (curr->get_type() == PTHREAD_CREATE) {
1723 return curr->get_thread_operand();
1729 * Takes the next step in the execution, if possible.
1730 * @param curr The current step to take
1731 * @return Returns the next Thread to run, if any; NULL if this execution
1734 Thread * ModelExecution::take_step(ModelAction *curr)
1736 Thread *curr_thrd = get_thread(curr);
1737 ASSERT(curr_thrd->get_state() == THREAD_READY);
1739 ASSERT(check_action_enabled(curr)); /* May have side effects? */
// check_current_action may substitute a different/canonical action object.
1740 curr = check_current_action(curr);
1743 /* Process this action in ModelHistory for records */
// A thread that blocked or finished during this step leaves the scheduler.
1744 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1745 scheduler->remove_thread(curr_thrd);
1747 return action_select_next_thread(curr);
1750 /** This method removes references to an Action before we delete it. */
1752 void ModelExecution::removeAction(ModelAction *act) {
// Undo every index insertion made by add_action_to_lists /
// add_write_to_lists, in the same categories:
1754 action_trace.removeAction(act);
1757 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1758 (*vec)[act->get_tid()].removeAction(act);
// SC fences / unlocks were indexed in obj_map under their own location.
1760 if ((act->is_fence() && act->is_seqcst()) || act->is_unlock()) {
1761 sllnode<ModelAction *> * listref = act->getActionRef();
1762 if (listref != NULL) {
1763 simple_action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1764 list->erase(listref);
// Waits were indexed in obj_map under their mutex (stored in value field).
1766 } else if (act->is_wait()) {
1767 sllnode<ModelAction *> * listref = act->getActionRef();
1768 if (listref != NULL) {
1769 void *mutex_loc = (void *) act->get_value();
1770 get_safe_ptr_action(&obj_map, mutex_loc)->erase(listref);
// Freed writes come out of the per-thread write index.
1772 } else if (act->is_free()) {
1773 sllnode<ModelAction *> * listref = act->getActionRef();
1774 if (listref != NULL) {
1775 SnapVector<simple_action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1776 (*vec)[act->get_tid()].erase(listref);
1779 //Clear it from last_sc_map
1780 if (obj_last_sc_map.get(act->get_location()) == act) {
1781 obj_last_sc_map.remove(act->get_location());
1784 //Remove from Cyclegraph
1785 mo_graph->freeAction(act);
1789 /** Computes clock vector that all running threads have already synchronized to. */
// Returns a newly allocated ClockVector (caller frees); NULL if no live
// threads contribute. Used by collectActions() to decide what is invisible.
1791 ClockVector * ModelExecution::computeMinimalCV() {
1792 ClockVector *cvmin = NULL;
1793 //Thread 0 isn't a real thread, so skip it..
1794 for(unsigned int i = 1;i < thread_map.size();i++) {
1795 Thread * t = thread_map[i];
// Completed threads can't observe anything anymore; skip (continue elided).
1796 if (t->is_complete())
1798 thread_id_t tid = int_to_id(i);
1799 ClockVector * cv = get_cv(tid);
// First live thread seeds cvmin; subsequent threads min-merge into it.
1801 cvmin = new ClockVector(cv, NULL);
1803 cvmin->minmerge(cv);
1809 /** Sometimes we need to remove an action that is the most recent in the thread. This happens if it is mo before action in other threads. In that case we need to create a replacement latest ModelAction */
1811 void ModelExecution::fixupLastAct(ModelAction *act) {
// Synthesize a NOP that inherits the removed action's clock vector and
// release-fence linkage so per-thread bookkeeping stays consistent.
1812 ModelAction *newact = new ModelAction(ATOMIC_NOP, std::memory_order_seq_cst, NULL, VALUE_NONE, get_thread(act->get_tid()));
1813 newact->set_seq_number(get_next_seq_num());
1814 newact->create_cv(act);
1815 newact->set_last_fence_release(act->get_last_fence_release());
// canprune=false: the NOP must actually be tracked in the lists.
1816 add_action_to_lists(newact, false);
1819 /** Compute which actions to free. */
// Garbage-collects old trace actions in three passes over action_trace:
//  (1) mark stores older than the minimal clock vector as freeable (and
//      everything mo-before them, via a worklist over the cycle graph);
//  (2) in the retained (recent) window, weaken reads whose store is being
//      freed and drop stale release-fence references;
//  (3) in the old window, actually remove/weaken actions case by case.
// Many branches (deletion calls, loop-advance statements) are elided in
// this listing.
1821 void ModelExecution::collectActions() {
// Keep at least traceminsize actions; bail out early on short traces.
1822 if (priv->used_sequence_numbers < params->traceminsize)
1825 //Compute minimal clock vector for all live threads
1826 ClockVector *cvmin = computeMinimalCV();
1827 SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
// Only actions with seq <= maxtofree are candidates for freeing.
1828 modelclock_t maxtofree = priv->used_sequence_numbers - params->traceminsize;
1830 //Next walk action trace... When we hit an action, see if it is
1831 //invisible (e.g., earlier than the first before the minimum
1832 //clock for the thread... if so erase it and all previous
1833 //actions in cyclegraph
1834 sllnode<ModelAction*> * it;
1835 for (it = action_trace.begin();it != NULL;it=it->getNext()) {
1836 ModelAction *act = it->getVal();
1837 modelclock_t actseq = act->get_seq_number();
1839 //See if we are done
1840 if (actseq > maxtofree)
1843 thread_id_t act_tid = act->get_tid();
1844 modelclock_t tid_clock = cvmin->getClock(act_tid);
1846 //Free if it is invisible or we have set a flag to remove visible actions.
1847 if (actseq <= tid_clock || params->removevisible) {
// Resolve the relevant store: the action itself if a write, else the
// store a read observed.
1848 ModelAction * write;
1849 if (act->is_write()) {
1851 } else if (act->is_read()) {
1852 write = act->get_reads_from();
1856 //Mark everything earlier in MO graph to be freed
// Worklist BFS over incoming mo edges: every predecessor store of a
// freeable store is also freeable.
1857 CycleNode * cn = mo_graph->getNode_noCreate(write);
1859 queue->push_back(cn);
1860 while(!queue->empty()) {
1861 CycleNode * node = queue->back();
1863 for(unsigned int i=0;i<node->getNumInEdges();i++) {
1864 CycleNode * prevnode = node->getInEdge(i);
1865 ModelAction * prevact = prevnode->getAction();
1866 if (prevact->get_type() != READY_FREE) {
1867 prevact->set_free();
1868 queue->push_back(prevnode);
1876 //We may need to remove read actions in the window we don't delete to preserve correctness.
// Pass 2: walk the retained window backwards (from the end to `it`).
1878 for (sllnode<ModelAction*> * it2 = action_trace.end();it2 != it;) {
1879 ModelAction *act = it2->getVal();
1880 //Do iteration early in case we delete the act
// A live thread's most recent action must be preserved (or replaced
// via fixupLastAct) so thrd_last_action stays valid.
1882 bool islastact = false;
1883 ModelAction *lastact = get_last_action(act->get_tid());
1884 if (act == lastact) {
1885 Thread * th = get_thread(act);
1886 islastact = !th->is_complete();
1889 if (act->is_read()) {
1890 if (act->get_reads_from()->is_free()) {
1891 if (act->is_rmw()) {
1892 //Weaken a RMW from a freed store to a write
1893 act->set_type(ATOMIC_WRITE);
1905 //If we don't delete the action, we should remove references to release fences
1907 const ModelAction *rel_fence =act->get_last_fence_release();
1908 if (rel_fence != NULL) {
1909 modelclock_t relfenceseq = rel_fence->get_seq_number();
1910 thread_id_t relfence_tid = rel_fence->get_tid();
1911 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
1912 //Remove references to irrelevant release fences
// A release fence everyone already synchronized past contributes nothing.
1913 if (relfenceseq <= tid_clock)
1914 act->set_last_fence_release(NULL);
1917 //Now we are in the window of old actions that we remove if possible
// Pass 3: continue forward from `it` through the old window.
1918 for (;it != NULL;) {
1919 ModelAction *act = it->getVal();
1920 //Do iteration early since we may delete node...
1922 bool islastact = false;
1923 ModelAction *lastact = get_last_action(act->get_tid());
1924 if (act == lastact) {
1925 Thread * th = get_thread(act);
1926 islastact = !th->is_complete();
1929 if (act->is_read()) {
1930 if (act->get_reads_from()->is_free()) {
1931 if (act->is_rmw()) {
1932 act->set_type(ATOMIC_WRITE);
1942 } else if (act->is_free()) {
1949 } else if (act->is_write()) {
1950 //Do nothing with write that hasn't been marked to be freed
1951 } else if (islastact) {
1952 //Keep the last action for non-read/write actions
1953 } else if (act->is_fence()) {
1954 //Note that acquire fences can always be safely
1955 //removed, but could incur extra overheads in
1956 //traversals. Removing them before the cvmin seems
1957 //like a good compromise.
1959 //Release fences before the cvmin don't do anything
1960 //because everyone has already synchronized.
1962 //Sequentially fences before cvmin are redundant
1963 //because happens-before will enforce same
1966 modelclock_t actseq = act->get_seq_number();
1967 thread_id_t act_tid = act->get_tid();
1968 modelclock_t tid_clock = cvmin->getClock(act_tid);
1969 if (actseq <= tid_clock) {
1971 // Remove reference to act from thrd_last_fence_release
1972 int thread_id = id_to_int( act->get_tid() );
1973 if (thrd_last_fence_release[thread_id] == act) {
1974 thrd_last_fence_release[thread_id] = NULL;
1980 //need to deal with lock, annotation, wait, notify, thread create, start, join, yield, finish, nops
1981 //lock, notify thread create, thread finish, yield, finish are dead as soon as they are in the trace
1982 //need to keep most recent unlock/wait for each lock
// Only the most recent unlock/wait per mutex is still needed (for
// get_last_unlock); older ones can go.
1983 if(act->is_unlock() || act->is_wait()) {
1984 ModelAction * lastlock = get_last_unlock(act);
1985 if (lastlock != act) {
1990 } else if (act->is_create()) {
// A create for a completed child thread is no longer needed.
1991 if (act->get_thread_operand()->is_complete()) {
2003 //If we don't delete the action, we should remove references to release fences
2004 const ModelAction *rel_fence =act->get_last_fence_release();
2005 if (rel_fence != NULL) {
2006 modelclock_t relfenceseq = rel_fence->get_seq_number();
2007 thread_id_t relfence_tid = rel_fence->get_tid();
2008 modelclock_t tid_clock = cvmin->getClock(relfence_tid);
2009 //Remove references to irrelevant release fences
2010 if (relfenceseq <= tid_clock)
2011 act->set_last_fence_release(NULL);
2019 Fuzzer * ModelExecution::getFuzzer() {