11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
19 #define INITIAL_THREAD_ID 0
* Structure for holding small ModelChecker members that should be snapshotted
struct model_snapshot_members {
model_snapshot_members() :
/* First thread created will have id INITIAL_THREAD_ID */
next_thread_id(INITIAL_THREAD_ID),
used_sequence_numbers(0),
bad_synchronization(false),
/* Destructor frees every bug_message recorded during this execution */
~model_snapshot_members() {
for (unsigned int i = 0;i < bugs.size();i++)
/** @brief ID that will be handed to the next thread created */
unsigned int next_thread_id;
/** @brief Most recent sequence number handed out to a ModelAction */
modelclock_t used_sequence_numbers;
/** @brief Bug reports collected during this execution */
SnapVector<bug_message *> bugs;
/** @brief Incorrectly-ordered synchronization was made */
bool bad_synchronization;
/** @brief Constructor */
ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
thread_map(2), /* We'll always need at least 2 threads */
condvar_waiters_map(),
thrd_last_fence_release(),
/* Snapshotted per-execution bookkeeping (thread ids, seq numbers, bugs) */
priv(new struct model_snapshot_members ()),
/* Modification-order constraint graph, built up as actions execute */
mo_graph(new CycleGraph()),
thrd_func_inst_lists()
/* Initialize a model-checker thread, for special ModelActions */
model_thread = new Thread(get_next_id());
add_thread(model_thread);
/* Let the scheduler call back into this execution object */
scheduler->register_engine(this);
/** @brief Destructor */
ModelExecution::~ModelExecution()
/* Reclaim every Thread created during this execution (incl. model thread) */
for (unsigned int i = 0;i < get_num_threads();i++)
delete get_thread(int_to_id(i));
/** @return the execution number assigned by the owning ModelChecker */
int ModelExecution::get_execution_number() const
return model->get_execution_number();
/* Look up the action list for 'ptr', lazily creating an empty list on first access */
static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
action_list_t *tmp = hash->get(ptr);
tmp = new action_list_t();
/* Same as get_safe_ptr_action, but for per-thread vectors of action lists */
static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
SnapVector<action_list_t> *tmp = hash->get(ptr);
tmp = new SnapVector<action_list_t>();
/** @return a thread ID for a new Thread */
thread_id_t ModelExecution::get_next_id()
return priv->next_thread_id++;
/** @return the number of user threads created during this execution */
unsigned int ModelExecution::get_num_threads() const
/* next_thread_id doubles as the thread count, since ids are dense from 0 */
return priv->next_thread_id;
/** @return a sequence number for a new ModelAction */
modelclock_t ModelExecution::get_next_seq_num()
/* Pre-increment: sequence numbers start at 1; 0 is never handed out */
return ++priv->used_sequence_numbers;
* @brief Should the current action wake up a given thread?
* @param curr The current action
* @param thread The thread that we might wake up
* @return True, if we should wake up the sleeping thread; false otherwise
bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
const ModelAction *asleep = thread->get_pending();
/* Don't allow partial RMW to wake anyone up */
/* Synchronizing actions may have been backtracked */
if (asleep->could_synchronize_with(curr))
/* All acquire/release fences and fence-acquire/store-release */
if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
/* Fence-release + store can awake load-acquire on the same location */
if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
/* Only wake if the writer's last release fence is ordered after the sleeper's last action */
ModelAction *fence_release = get_last_fence_release(curr->get_tid());
if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
/** @brief Remove from the sleep set any thread that 'curr' could wake up */
void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *thr = get_thread(int_to_id(i));
/* Only threads currently in the scheduler's sleep set are candidates */
if (scheduler->is_sleep_set(thr)) {
if (should_wake_up(curr, thr))
/* Remove this thread from sleep set */
scheduler->remove_sleep(thr);
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made */
void ModelExecution::set_bad_synchronization()
priv->bad_synchronization = true;
/** @brief Record a bug for this execution
 * @param msg Description of the bug (copied into a bug_message) */
bool ModelExecution::assert_bug(const char *msg)
priv->bugs.push_back(new bug_message(msg));
/* Only act on the bug if the current trace is a feasible prefix */
if (isfeasibleprefix()) {
/** @return True, if any bugs have been reported for this execution */
bool ModelExecution::have_bug_reports() const
return priv->bugs.size() != 0;
/** @return True, if any fatal bugs have been reported for this execution.
 * Any bug other than a data race is considered a fatal bug. Data races
 * are not considered fatal unless the number of races is exceeds
 * a threshold (temporarily set as 15).
 * NOTE(review): the visible body is identical to have_bug_reports() and
 * does not implement the threshold described above — confirm intent. */
bool ModelExecution::have_fatal_bug_reports() const
return priv->bugs.size() != 0;
/** @return the list of bug_message reports collected so far */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
* Check whether the current trace has triggered an assertion which should halt
* @return True, if the execution should be aborted; false otherwise
bool ModelExecution::has_asserted() const
return priv->asserted;
* Trigger a trace assertion which should cause this execution to be halted.
* This can be due to a detected bug or due to an infeasibility that should
void ModelExecution::set_assert()
priv->asserted = true;
* Check if we are in a deadlock. Should only be called at the end of an
* execution, although it should not give false positives in the middle of an
* execution (there should be some ENABLED thread).
* @return True if program is in a deadlock; false otherwise
bool ModelExecution::is_deadlocked() const
bool blocking_threads = false;
for (unsigned int i = 0;i < get_num_threads();i++) {
thread_id_t tid = int_to_id(i);
Thread *t = get_thread(tid);
/* A user thread with a pending action is blocked waiting to run */
if (!t->is_model_thread() && t->get_pending())
blocking_threads = true;
return blocking_threads;
* Check if this is a complete execution. That is, have all thread completed
* execution (rather than exiting because sleep sets have forced a redundant
* @return True if the execution is complete.
bool ModelExecution::is_complete_execution() const
/* Any still-enabled thread means the execution is not yet complete */
for (unsigned int i = 0;i < get_num_threads();i++)
if (is_enabled(int_to_id(i)))
/** @brief Convert a raw (non-atomic) store at 'location' into a NONATOMIC_WRITE
 * ModelAction so atomic reads may read from it; registers the action in the
 * write lists and the modification order. */
ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
uint64_t value = *((const uint64_t *) location);
modelclock_t storeclock;
thread_id_t storethread;
/* Recover which thread wrote the value, and when (shadow-memory query) */
getStoreThreadAndClock(location, &storethread, &storeclock);
/* Mark the location as atomic so the store is not converted twice */
setAtomicStoreFlag(location);
ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
/* Back-date the action to the clock of the original store */
act->set_seq_number(storeclock);
add_normal_write_to_lists(act);
add_write_to_lists(act);
w_modification_order(act);
* Processes a read model action.
* @param curr is the read model action to process.
* @param rf_set is the set of model actions we can possibly read from
* @return True if processing this read updates the mo_graph.
* NOTE(review): the visible signature returns void; the @return tag above
* appears stale — confirm.
void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
if (hasnonatomicstore) {
/* Lazily materialize the pending plain store as a readable write */
ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
rf_set->push_back(nonatomicstore);
/* Let the fuzzer pick which write this read observes */
int index = fuzzer->selectWrite(curr, rf_set);
ModelAction *rf = (*rf_set)[index];
bool canprune = false;
if (r_modification_order(curr, rf, priorset, &canprune)) {
/* Every write mo-prior to this read must be mo-before 'rf' */
for(unsigned int i=0;i<priorset->size();i++) {
mo_graph->addEdge((*priorset)[i], rf);
get_thread(curr)->set_return_value(curr->get_return_value());
if (canprune && curr->get_type() == ATOMIC_READ) {
/* Read is redundant for exploration; drop it from the per-thread list */
int tid = id_to_int(curr->get_tid());
(*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
/* Swap-remove the chosen write from the candidate set */
(*rf_set)[index] = rf_set->back();
* Processes a lock, trylock, or unlock model action. @param curr is
* the read model action to process.
* The try lock operation checks whether the lock is taken. If not,
* it falls to the normal lock operation case. If so, it returns
* The lock operation has already been checked that it is enabled, so
* it just grabs the lock and synchronizes with the previous unlock.
* The unlock operation has to re-enable all of the threads that are
* waiting on the lock.
* @return True if synchronization was updated; false otherwise
bool ModelExecution::process_mutex(ModelAction *curr)
cdsc::mutex *mutex = curr->get_mutex();
struct cdsc::mutex_state *state = NULL;
state = mutex->get_state();
switch (curr->get_type()) {
case ATOMIC_TRYLOCK: {
/* trylock succeeds iff the mutex is currently unheld */
bool success = !state->locked;
curr->set_try_lock(success);
/* presumably 0 = failed trylock, 1 = acquired — confirm against callers */
get_thread(curr)->set_return_value(0);
get_thread(curr)->set_return_value(1);
//otherwise fall into the lock case
//TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
//if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
//	assert_bug("Lock access before initialization");
/* Record the acquiring Thread as the lock holder */
state->locked = get_thread(curr);
ModelAction *unlock = get_last_unlock(curr);
//synchronize with the previous unlock statement
if (unlock != NULL) {
synchronize(unlock, curr);
case ATOMIC_UNLOCK: {
//TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
/* wake up the other threads */
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *t = get_thread(int_to_id(i));
Thread *curr_thrd = get_thread(curr);
if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
/* unlock the lock - after checking who was waiting on it */
state->locked = NULL;
if (!curr->is_wait())
break;/* The rest is only for ATOMIC_WAIT */
case ATOMIC_NOTIFY_ALL: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
//activate all the waiting threads
for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
scheduler->wake(get_thread(*rit));
case ATOMIC_NOTIFY_ONE: {
action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
if (waiters->size() != 0) {
/* Fuzzer chooses which single waiter to wake */
Thread * thread = fuzzer->selectNotify(waiters);
scheduler->wake(thread);
* Process a write ModelAction
* @param curr The ModelAction to process
* @return True if the mo_graph was updated or promises were resolved
* NOTE(review): visible signature returns void; @return tag appears stale.
void ModelExecution::process_write(ModelAction *curr)
/* Record modification-order constraints imposed by this write */
w_modification_order(curr);
get_thread(curr)->set_return_value(VALUE_NONE);
* Process a fence ModelAction
* @param curr The ModelAction to process
* @return True if synchronization was updated
bool ModelExecution::process_fence(ModelAction *curr)
* fence-relaxed: no-op
* fence-release: only log the occurence (not in this function), for
* use in later synchronization
* fence-acquire (this function): search for hypothetical release
* fence-seq-cst: MO constraints formed in {r,w}_modification_order
bool updated = false;
if (curr->is_acquire()) {
action_list_t *list = &action_trace;
action_list_t::reverse_iterator rit;
/* Find X : is_read(X) && X --sb-> curr */
for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Only actions sequenced-before curr (same thread) matter */
if (act->get_tid() != curr->get_tid())
/* Stop at the beginning of the thread */
if (act->is_thread_start())
/* Stop once we reach a prior fence-acquire */
if (act->is_fence() && act->is_acquire())
/* read-acquire will find its own release sequences */
if (act->is_acquire())
/* Establish hypothetical release sequences */
ClockVector *cv = get_hb_from_write(act->get_reads_from());
if (cv != NULL && curr->get_cv()->merge(cv))
* @brief Process the current action for thread-related activity
* Performs current-action processing for a THREAD_* ModelAction. Proccesses
* may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
* synchronization, etc. This function is a no-op for non-THREAD actions
* (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
* @param curr The current action
* @return True if synchronization was updated or a thread completed
bool ModelExecution::process_thread_action(ModelAction *curr)
bool updated = false;
switch (curr->get_type()) {
case THREAD_CREATE: {
thrd_t *thrd = (thrd_t *)curr->get_location();
struct thread_params *params = (struct thread_params *)curr->get_value();
Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
curr->set_thread_operand(th);
th->set_creation(curr);
case PTHREAD_CREATE: {
/* Hand out a fresh pthread id; the user-visible handle is this counter */
(*(uint32_t *)curr->get_location()) = pthread_counter++;
struct pthread_params *params = (struct pthread_params *)curr->get_value();
Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
curr->set_thread_operand(th);
th->set_creation(curr);
/* Grow the pthread handle -> Thread map as needed */
if ( pthread_map.size() < pthread_counter )
pthread_map.resize( pthread_counter );
pthread_map[ pthread_counter-1 ] = th;
/* Join: synchronize with the last action of the joined thread */
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
updated = true;	/* trigger rel-seq checks */
Thread *blocking = curr->get_thread_operand();
ModelAction *act = get_last_action(blocking->get_id());
synchronize(act, curr);
updated = true;	/* trigger rel-seq checks */
break;	// WL: to be add (modified)
case THREAD_FINISH: {
Thread *th = get_thread(curr);
/* Wake up any joining threads */
for (unsigned int i = 0;i < get_num_threads();i++) {
Thread *waiting = get_thread(int_to_id(i));
if (waiting->waiting_on() == th &&
waiting->get_pending()->is_thread_join())
scheduler->wake(waiting);
updated = true;	/* trigger rel-seq checks */
* Initialize the current action by performing one or more of the following
* actions, as appropriate: merging RMWR and RMWC/RMW actions,
* manipulating backtracking sets, allocating and
* initializing clock vectors, and computing the promises to fulfill.
* @param curr The current action, as passed from the user context; may be
* freed/invalidated after the execution of this function, with a different
* action "returned" its place (pass-by-reference)
* @return True if curr is a newly-explored action; false otherwise
bool ModelExecution::initialize_curr_action(ModelAction **curr)
if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
/* Second half of an RMW: merge into the earlier RMWR action */
ModelAction *newcurr = process_rmw(*curr);
ModelAction *newcurr = *curr;
newcurr->set_seq_number(get_next_seq_num());
/* Always compute new clock vector */
newcurr->create_cv(get_parent_action(newcurr->get_tid()));
/* Assign most recent release fence */
newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
return true;	/* This was a new ModelAction */
* @brief Establish reads-from relation between two actions
* Perform basic operations involved with establishing a concrete rf relation,
* including setting the ModelAction data and checking for release sequences.
* @param act The action that is reading (must be a read)
* @param rf The action from which we are reading (must be a write)
* @return True if this read established synchronization
void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
ASSERT(rf->is_write());
act->set_read_from(rf);
if (act->is_acquire()) {
/* Acquire read: inherit happens-before from the write's release sequence */
ClockVector *cv = get_hb_from_write(rf);
act->get_cv()->merge(cv);
* @brief Synchronizes two actions
* When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
* This function performs the synchronization as well as providing other hooks
* for other checks along with synchronization.
* @param first The left-hand side of the synchronizes-with relation
* @param second The right-hand side of the synchronizes-with relation
* @return True if the synchronization was successful (i.e., was consistent
* with the execution order); false otherwise
bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
/* sw edge pointing backwards in execution order is inconsistent */
if (*second < *first) {
set_bad_synchronization();
return second->synchronize_with(first);
* @brief Check whether a model action is enabled.
* Checks whether an operation would be successful (i.e., is a lock already
* locked, or is the joined thread already complete).
* For yield-blocking, yields are never enabled.
* @param curr is the ModelAction to check whether it is enabled.
* @return a bool that indicates whether the action is enabled.
bool ModelExecution::check_action_enabled(ModelAction *curr) {
if (curr->is_lock()) {
/* A lock is disabled while some thread holds the mutex */
cdsc::mutex *lock = curr->get_mutex();
struct cdsc::mutex_state *state = lock->get_state();
} else if (curr->is_thread_join()) {
/* A join is disabled until the target thread completes */
Thread *blocking = curr->get_thread_operand();
if (!blocking->is_complete()) {
* This is the heart of the model checker routine. It performs model-checking
* actions corresponding to a given "current action." Among other processes, it
* calculates reads-from relationships, updates synchronization clock vectors,
* forms a memory_order constraints graph, and handles replay/backtrack
* execution when running permutations of previously-observed executions.
* @param curr The current action to process
* @return The ModelAction that is actually executed; may be different than
ModelAction * ModelExecution::check_current_action(ModelAction *curr)
bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
/* May replace 'curr' with a merged/recycled action */
bool newly_explored = initialize_curr_action(&curr);
wake_up_sleeping_actions(curr);
/* Add the action to lists before any other model-checking tasks */
if (!second_part_of_rmw && curr->get_type() != NOOP)
add_action_to_lists(curr);
if (curr->is_write())
add_write_to_lists(curr);
SnapVector<ModelAction *> * rf_set = NULL;
/* Build may_read_from set for newly-created actions */
if (newly_explored && curr->is_read())
rf_set = build_may_read_from(curr);
process_thread_action(curr);
if (curr->is_read() && !second_part_of_rmw) {
process_read(curr, rf_set);
ASSERT(rf_set == NULL);
/* Dispatch the remaining action categories to their processors */
if (curr->is_write())
if (curr->is_fence())
if (curr->is_mutex_op())
* This is the strongest feasibility check available.
* @return whether the current trace (partial or complete) must be a prefix of
bool ModelExecution::isfeasibleprefix() const
return !is_infeasible();
* Print disagnostic information about an infeasible execution
* @param prefix A string to prefix the output with; if NULL, then a default
* message prefix will be provided
void ModelExecution::print_infeasibility(const char *prefix) const
if (priv->bad_synchronization)
ptr += sprintf(ptr, "[bad sw ordering]");
model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
* Check if the current partial trace is infeasible. Does not check any
* end-of-execution flags, which might rule out the execution. Thus, this is
* useful only for ruling an execution as infeasible.
* @return whether the current partial trace is infeasible.
bool ModelExecution::is_infeasible() const
/* bad_synchronization is currently the only infeasibility condition tracked */
return priv->bad_synchronization;
/** Close out a RMWR by converting previous RMWR into a RMW or READ. */
ModelAction * ModelExecution::process_rmw(ModelAction *act) {
/* The pending RMWR is the last action of this thread; merge 'act' into it */
ModelAction *lastread = get_last_action(act->get_tid());
lastread->process_rmw(act);
/* RMW must be mo-immediately-after the write it read from */
mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
* @brief Updates the mo_graph with the constraints imposed from the current
* Basic idea is the following: Go through each other thread and find
* the last action that happened before our read. Two cases:
* -# The action is a write: that write must either occur before
* the write we read from or be the write we read from.
* -# The action is a read: the write that that action read from
* must occur before the write we read from or be the same write.
* @param curr The current action. Must be a read.
* @param rf The ModelAction or Promise that curr reads from. Must be a write.
* @return True if modification order edges were added; false otherwise
bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
ASSERT(curr->is_read());
/* Last SC fence in the current thread */
ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
int tid = curr->get_tid();
ModelAction *prev_same_thread = NULL;
/* Iterate over all threads */
/* tid walks the threads circularly, starting from curr's own thread */
for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
/* Last SC fence in thread tid */
ModelAction *last_sc_fence_thread_local = NULL;
last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
/* Last SC fence in thread tid, before last SC fence in current thread */
ModelAction *last_sc_fence_thread_before = NULL;
if (last_sc_fence_local)
last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
//Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
if (prev_same_thread != NULL &&
(prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
(last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[tid];
action_list_t::reverse_iterator rit;
for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Don't want to add reflexive edges on 'rf' */
if (act->equals(rf)) {
if (act->happens_before(curr))
if (act->is_write()) {
/* C++, Section 29.3 statement 5 */
if (curr->is_seqcst() && last_sc_fence_thread_local &&
*act < *last_sc_fence_thread_local) {
/* rf already reaches act => would create a mo cycle */
if (mo_graph->checkReachable(rf, act))
priorset->push_back(act);
/* C++, Section 29.3 statement 4 */
else if (act->is_seqcst() && last_sc_fence_local &&
*act < *last_sc_fence_local) {
if (mo_graph->checkReachable(rf, act))
priorset->push_back(act);
/* C++, Section 29.3 statement 6 */
else if (last_sc_fence_thread_before &&
*act < *last_sc_fence_thread_before) {
if (mo_graph->checkReachable(rf, act))
priorset->push_back(act);
* Include at most one act per-thread that "happens
if (act->happens_before(curr)) {
if (last_sc_fence_local == NULL ||
(*last_sc_fence_local < *act)) {
prev_same_thread = act;
if (act->is_write()) {
if (mo_graph->checkReachable(rf, act))
priorset->push_back(act);
/* act is a read: order the write it observed before rf */
const ModelAction *prevrf = act->get_reads_from();
if (!prevrf->equals(rf)) {
if (mo_graph->checkReachable(rf, prevrf))
priorset->push_back(prevrf);
if (act->get_tid() == curr->get_tid()) {
//Can prune curr from obj list
* Updates the mo_graph with the constraints imposed from the current write.
* Basic idea is the following: Go through each other thread and find
* the lastest action that happened before our write. Two cases:
* (1) The action is a write => that write must occur before
* (2) The action is a read => the write that that action read from
* must occur before the current write.
* This method also handles two other issues:
* (I) Sequential Consistency: Making sure that if the current write is
* seq_cst, that it occurs after the previous seq_cst write.
* (II) Sending the write back to non-synchronizing reads.
* @param curr The current action. Must be a write.
* @param send_fv A vector for stashing reads to which we may pass our future
* value. If NULL, then don't record any future values.
* @return True if modification order edges were added; false otherwise
void ModelExecution::w_modification_order(ModelAction *curr)
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
ASSERT(curr->is_write());
if (curr->is_seqcst()) {
/* We have to at least see the last sequentially consistent write,
   so we are initialized. */
ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
if (last_seq_cst != NULL) {
mo_graph->addEdge(last_seq_cst, curr);
/* Last SC fence in the current thread */
ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
/* Iterate over all threads */
for (i = 0;i < thrd_lists->size();i++) {
/* Last SC fence in thread i, before last SC fence in current thread */
ModelAction *last_sc_fence_thread_before = NULL;
if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
bool force_edge = false;
for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
* 1) If RMW and it actually read from something, then we
* already have all relevant edges, so just skip to next
* 2) If RMW and it didn't read from anything, we should
* whatever edge we can get to speed up convergence.
* 3) If normal write, we need to look at earlier actions, so
* continue processing list.
if (curr->is_rmw()) {
if (curr->get_reads_from() != NULL)
/* C++, Section 29.3 statement 7 */
if (last_sc_fence_thread_before && act->is_write() &&
*act < *last_sc_fence_thread_before) {
mo_graph->addEdge(act, curr, force_edge);
* Include at most one act per-thread that "happens
if (act->happens_before(curr)) {
* Note: if act is RMW, just add edge:
* The following edge should be handled elsewhere:
* readfrom(act) --mo--> act
mo_graph->addEdge(act, curr, force_edge);
else if (act->is_read()) {
//if previous read accessed a null, just keep going
mo_graph->addEdge(act->get_reads_from(), curr, force_edge);
* Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
* some constraints. This method checks one the following constraint (others
* require compiler support):
* If X --hb-> Y --mo-> Z, then X should not read from Z.
* If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
/* Iterate over all threads */
for (i = 0;i < thrd_lists->size();i++) {
/* The earliest write (per thread) that 'reader' happens-before */
const ModelAction *write_after_read = NULL;
/* Iterate over actions in thread, starting from most recent */
action_list_t *list = &(*thrd_lists)[i];
action_list_t::reverse_iterator rit;
for (rit = list->rbegin();rit != list->rend();rit++) {
ModelAction *act = *rit;
/* Don't disallow due to act == reader */
if (!reader->happens_before(act) || reader == act)
else if (act->is_write())
write_after_read = act;
else if (act->is_read() && act->get_reads_from() != NULL)
write_after_read = act->get_reads_from();
/* Reading 'writer' is disallowed if it is mo-after a write we happen-before */
if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
* Computes the clock vector that happens before propagates from this write.
* @param rf The action that might be part of a release sequence. Must be a
* @return ClockVector of happens before relation.
ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
SnapVector<ModelAction *> * processset = NULL;
/* Walk backwards through the rf chain of RMWs until a write that already
 * carries a usable clock vector (or is not part of a pure RMW chain) */
for ( ;rf != NULL;rf = rf->get_reads_from()) {
ASSERT(rf->is_write());
if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
if (processset == NULL)
processset = new SnapVector<ModelAction *>();
processset->push_back(rf);
/* Now replay the chain forward, building/caching the rf clock vector */
int i = (processset == NULL) ? 0 : processset->size();
ClockVector * vec = NULL;
if (rf->get_rfcv() != NULL) {
/* Cached result available */
vec = rf->get_rfcv();
} else if (rf->is_acquire() && rf->is_release()) {
} else if (rf->is_release() && !rf->is_rmw()) {
} else if (rf->is_release()) {
//have rmw that is release and doesn't have a rfcv
(vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
//operation that isn't release
if (rf->get_last_fence_release()) {
vec = rf->get_last_fence_release()->get_cv();
(vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
rf = (*processset)[i];
if (processset != NULL)
* Performs various bookkeeping operations for the current ModelAction. For
* instance, adds action to the per-object, per-thread action vector and to the
* action trace list of all thread actions.
* @param act is the ModelAction to add.
void ModelExecution::add_action_to_lists(ModelAction *act)
int tid = id_to_int(act->get_tid());
ModelAction *uninit = NULL;
action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
/* First access to an atomic variable: seed its lists with a synthetic
 * "uninitialized" write so reads always have something to read from */
if (list->empty() && act->is_atomic_var()) {
uninit = get_uninitialized_action(act);
uninit_id = id_to_int(uninit->get_tid());
list->push_front(uninit);
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
if (uninit_id >= (int)vec->size())
vec->resize(uninit_id + 1);
(*vec)[uninit_id].push_front(uninit);
list->push_back(act);
// Update action trace, a total order of all actions
action_trace.push_back(act);
action_trace.push_front(uninit);
// Update obj_thrd_map, a per location, per thread, order of actions
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
if (tid >= (int)vec->size())
vec->resize(priv->next_thread_id);
(*vec)[tid].push_back(act);
(*vec)[uninit_id].push_front(uninit);
// Update thrd_last_action, the last action taken by each thrad
if ((int)thrd_last_action.size() <= tid)
thrd_last_action.resize(get_num_threads());
thrd_last_action[tid] = act;
thrd_last_action[uninit_id] = uninit;
// Update thrd_last_fence_release, the last release fence taken by each thread
if (act->is_fence() && act->is_release()) {
if ((int)thrd_last_fence_release.size() <= tid)
thrd_last_fence_release.resize(get_num_threads());
thrd_last_fence_release[tid] = act;
if (act->is_wait()) {
/* A wait also records itself against the associated mutex location;
 * act's value carries the mutex address — see get_value() usage */
void *mutex_loc = (void *) act->get_value();
get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
if (tid >= (int)vec->size())
vec->resize(priv->next_thread_id);
(*vec)[tid].push_back(act);
/* Insert 'act' into 'list' at the position matching its sequence number;
 * appends when the list is empty or already ends at that sequence number */
void insertIntoActionList(action_list_t *list, ModelAction *act) {
action_list_t::reverse_iterator rit = list->rbegin();
modelclock_t next_seq = act->get_seq_number();
if (rit == list->rend() || (*rit)->get_seq_number() == next_seq)
list->push_back(act);
/* Otherwise scan backwards for the matching sequence number */
for(;rit != list->rend();rit++) {
if ((*rit)->get_seq_number() == next_seq) {
action_list_t::iterator it = rit.base();
list->insert(it, act);
/* Same insertion as above, but also creates act's clock vector from the
 * action it is placed after (or from NULL when the list is empty) */
void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
action_list_t::reverse_iterator rit = list->rbegin();
modelclock_t next_seq = act->get_seq_number();
if (rit == list->rend()) {
act->create_cv(NULL);
} else if ((*rit)->get_seq_number() == next_seq) {
act->create_cv((*rit));
list->push_back(act);
for(;rit != list->rend();rit++) {
if ((*rit)->get_seq_number() == next_seq) {
act->create_cv((*rit));
action_list_t::iterator it = rit.base();
list->insert(it, act);
1180 * Performs various bookkeeping operations for a normal write. The
1181 * complication is that we are typically inserting a normal write
1182 * lazily, so we need to insert it into the middle of lists.
1184 * @param act is the ModelAction to add.
1187 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1189 int tid = id_to_int(act->get_tid());
// Splice the write into the global trace at its sequence number and give it
// a clock vector derived from its predecessor at that position.
1190 insertIntoActionListAndSetCV(&action_trace, act);
// Per-location list of all actions touching this object.
1192 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1193 insertIntoActionList(list, act);
1195 // Update obj_thrd_map, a per location, per thread, order of actions
1196 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
// Grow the per-thread vector to cover every thread id allocated so far.
1197 if (tid >= (int)vec->size())
1198 vec->resize(priv->next_thread_id);
1199 insertIntoActionList(&(*vec)[tid],act);
1201 // Update thrd_last_action, the last action taken by each thread
// Only take over the "last action" slot when this lazily-inserted write
// carries the same sequence number as the thread's current last action.
1202 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1203 thrd_last_action[tid] = act;
/**
 * Record bookkeeping for a (non-lazy) write action: track it as the last
 * seq_cst write on its location when applicable, and append it to the
 * per-location, per-thread write list used by build_may_read_from().
 * @param write The write ModelAction to record
 */
1207 void ModelExecution::add_write_to_lists(ModelAction *write) {
1208 // Update seq_cst map
1209 if (write->is_seqcst())
1210 obj_last_sc_map.put(write->get_location(), write);
// Per-location, per-thread ordering of writes only (obj_wr_thrd_map).
1212 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1213 int tid = id_to_int(write->get_tid());
// Grow the per-thread vector to cover every thread id allocated so far.
1214 if (tid >= (int)vec->size())
1215 vec->resize(priv->next_thread_id);
1216 (*vec)[tid].push_back(write);
1220 * @brief Get the last action performed by a particular Thread
1221 * @param tid The thread ID of the Thread in question
1222 * @return The last action in the thread
1224 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1226 int threadid = id_to_int(tid);
1227 if (threadid < (int)thrd_last_action.size())
1228 return thrd_last_action[id_to_int(tid)];
1234 * @brief Get the last fence release performed by a particular Thread
1235 * @param tid The thread ID of the Thread in question
1236 * @return The last fence release in the thread, if one exists; NULL otherwise
1238 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1240 int threadid = id_to_int(tid);
1241 if (threadid < (int)thrd_last_fence_release.size())
1242 return thrd_last_fence_release[id_to_int(tid)];
1248 * Gets the last memory_order_seq_cst write (in the total global sequence)
1249 * performed on a particular object (i.e., memory location), not including the
1251 * @param curr The current ModelAction; also denotes the object location to
1253 * @return The last seq_cst write
1255 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1257 void *location = curr->get_location();
1258 return obj_last_sc_map.get(location);
1262 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1263 * performed in a particular thread, prior to a particular fence.
1264 * @param tid The ID of the thread to check
1265 * @param before_fence The fence from which to begin the search; if NULL, then
1266 * search for the most recent fence in the thread.
1267 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
1269 ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
1271 /* All fences should have location FENCE_LOCATION */
1272 action_list_t *list = obj_map.get(FENCE_LOCATION);
// NOTE(review): several lines appear elided here (NULL-list guard, loop
// break, and final return are not visible) -- comments describe only the
// visible flow.
1277 action_list_t::reverse_iterator rit = list->rbegin();
// First, rewind the reverse iterator to before_fence so the search below
// only considers fences that precede it in the total order.
1280 for (;rit != list->rend();rit++)
1281 if (*rit == before_fence)
// Sanity check: the rewind loop must have stopped exactly on before_fence.
1284 ASSERT(*rit == before_fence);
// Then continue scanning backwards for the first seq_cst fence belonging
// to thread tid.
1288 for (;rit != list->rend();rit++)
1289 if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
1295 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1296 * location). This function identifies the mutex according to the current
1297 * action, which is presumed to perform on the same mutex.
1298 * @param curr The current ModelAction; also denotes the object location to
1300 * @return The last unlock operation
1302 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
1304 void *location = curr->get_location();
// All actions on this mutex's location, in execution order.
1306 action_list_t *list = obj_map.get(location);
1307 /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
1308 action_list_t::reverse_iterator rit;
// Scan backwards so the first match is the most recent unlock/wait.
// NOTE(review): the return statements for the match and no-match cases are
// not visible in this view -- presumably return *rit / NULL; confirm
// against the full source.
1309 for (rit = list->rbegin();rit != list->rend();rit++)
1310 if ((*rit)->is_unlock() || (*rit)->is_wait())
/**
 * @brief Get the "parent" action for a thread's clock-vector purposes.
 * Starts from the thread's last action; the visible fallback assigns the
 * thread's creation action instead.
 * NOTE(review): the condition guarding the fallback (presumably a NULL
 * check on parent) is elided in this view -- confirm against full source.
 * @param tid The thread in question
 * @return The last action, or the thread-creation action as fallback
 */
1315 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1317 ModelAction *parent = get_last_action(tid);
1319 parent = get_thread(tid)->get_creation();
1324 * Returns the clock vector for a given thread.
1325 * @param tid The thread whose clock vector we want
1326 * @return Desired clock vector
1328 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1330 ModelAction *firstaction=get_parent_action(tid);
1331 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * Compare two values for equality at a given operand width. Only the low
 * @a size bytes of each value participate in the comparison; higher bytes
 * are ignored. The visible original compared truncated widths without any
 * dispatch on size and had no 64-bit path; this restores an explicit
 * switch over the operand size.
 *
 * @param val1 The first value
 * @param val2 The second value
 * @param size Operand width in bytes (1, 2, 4, or 8)
 * @return true iff the low @a size bytes of val1 and val2 are equal;
 *         false for any unsupported size (hedged: the full source may
 *         assert here instead -- confirm)
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return (uint8_t)val1 == (uint8_t)val2;
	case 2:
		return (uint16_t)val1 == (uint16_t)val2;
	case 4:
		return (uint32_t)val1 == (uint32_t)val2;
	case 8:
		return val1 == val2;
	default:
		return false;
	}
}
1351 * Build up an initial set of all past writes that this 'read' action may read
1352 * from, as well as any previously-observed future values that must still be valid.
1354 * @param curr is the current ModelAction that we are exploring; it must be a
1357 SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
// Per-thread lists of writes to curr's location (maintained by
// add_write_to_lists).
1359 SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
1361 ASSERT(curr->is_read());
// For a seq_cst read, remember the last seq_cst write so that older
// seq_cst writes can be excluded below.
1363 ModelAction *last_sc_write = NULL;
1365 if (curr->is_seqcst())
1366 last_sc_write = get_last_seq_cst_write(curr);
// Result set: candidate writes curr may read from. Caller owns it.
1368 SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
1370 /* Iterate over all threads */
// NOTE(review): the declaration of loop index i is not visible in this
// view (elided line) -- it is declared earlier in the full source.
1371 for (i = 0;i < thrd_lists->size();i++) {
1372 /* Iterate over actions in thread, starting from most recent */
1373 action_list_t *list = &(*thrd_lists)[i];
1374 action_list_t::reverse_iterator rit;
1375 for (rit = list->rbegin();rit != list->rend();rit++) {
1376 ModelAction *act = *rit;
1381 /* Don't consider more than one seq_cst write if we are a seq_cst read. */
1382 bool allow_read = true;
// Exclude a seq_cst (or happens-before-the-last-SC) write other than the
// last seq_cst write itself: a seq_cst read must not read before it.
1384 if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
1387 /* Need to check whether we will have two RMW reading from the same value */
1388 if (curr->is_rmwr()) {
1389 /* It is okay if we have a failing CAS */
// A compare-and-swap only becomes an RMW when the expected value matches
// (width-sensitive comparison via valequals).
1390 if (!curr->is_rmwrcas() ||
1391 valequals(curr->get_value(), act->get_value(), curr->getSize())) {
1392 //Need to make sure we aren't the second RMW
1393 CycleNode * node = mo_graph->getNode_noCreate(act);
// act already feeds an RMW: reading from it again would make curr a
// second RMW on the same write, which atomicity forbids.
1394 if (node != NULL && node->getRMW() != NULL) {
1395 //we are the second RMW
1402 /* Only add feasible reads */
1403 rf_set->push_back(act);
1406 /* Include at most one act per-thread that "happens before" curr */
// Once a happens-before write is found in this thread, older writes in the
// same thread are hidden from curr.
1407 if (act->happens_before(curr))
// Debug dump of the candidate set.
1412 if (DBG_ENABLED()) {
1413 model_print("Reached read action:\n");
1415 model_print("End printing read_from_past\n");
1421 * @brief Get an action representing an uninitialized atomic
1423 * This function may create a new one.
1425 * @param curr The current action, which prompts the creation of an UNINIT action
1426 * @return A pointer to the UNINIT ModelAction
1428 ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
// Reuse a previously-created UNINIT action if curr already has one; the
// guard checking for NULL is elided in this view.
1430 ModelAction *act = curr->get_uninit_action();
// Fresh UNINIT action: relaxed ordering, same location as curr, the
// configured "uninitialized" value, attributed to the model thread.
1432 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
// Cache it on curr so subsequent calls return the same action.
1433 curr->set_uninit_action(act);
// UNINIT actions have no predecessors, so the clock vector starts empty.
1435 act->create_cv(NULL);
/**
 * @brief Print a formatted trace of an action list and a hash of its
 * contents (used to identify/compare executions).
 * @param list The action list to print
 */
1439 static void print_list(const action_list_t *list)
1441 action_list_t::const_iterator it;
1443 model_print("------------------------------------------------------------------------------------\n");
1444 model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
1445 model_print("------------------------------------------------------------------------------------\n");
// Running hash over all actions; printed after the table so equal traces
// produce equal HASH lines.
1447 unsigned int hash = 0;
1449 for (it = list->begin();it != list->end();it++) {
1450 const ModelAction *act = *it;
// Seq number 0 marks pseudo-actions that are skipped from the printout;
// the print call guarded by this condition is elided in this view.
1451 if (act->get_seq_number() > 0)
// Fold every action (printed or not) into the hash.
1453 hash = hash^(hash<<3)^((*it)->hash());
1455 model_print("HASH %u\n", hash);
1456 model_print("------------------------------------------------------------------------------------\n");
1459 #if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the modification-order graph plus rf/sb edges to
 * "<filename>.dot" in Graphviz format. Compiled only when
 * SUPPORT_MOD_ORDER_DUMP is enabled.
 * @param filename Base name for the output .dot file (also the digraph name)
 */
1460 void ModelExecution::dumpGraph(char *filename) const
// NOTE(review): the declaration of 'buffer' is elided in this view.
1463 sprintf(buffer, "%s.dot", filename);
1464 FILE *file = fopen(buffer, "w");
1465 fprintf(file, "digraph %s {\n", filename);
// Emit the mo-graph nodes/edges first.
1466 mo_graph->dumpNodes(file);
// thread_array[tid] tracks each thread's previous action so that
// program-order ("sb") edges can be drawn between consecutive actions.
1467 ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
1469 for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
1470 ModelAction *act = *it;
1471 if (act->is_read()) {
1472 mo_graph->dot_print_node(file, act);
// Reads-from edge: from the write this read observed.
1473 mo_graph->dot_print_edge(file,
1474 act->get_reads_from(),
1476 "label=\"rf\", color=red, weight=2");
// Sequenced-before edge from the thread's previous action, if any.
// NOTE(review): indexing mixes act->get_tid() and
// id_to_int(act->get_tid()) -- presumably equivalent via implicit
// conversion; confirm against thread_id_t's definition.
1478 if (thread_array[act->get_tid()]) {
1479 mo_graph->dot_print_edge(file,
1480 thread_array[id_to_int(act->get_tid())],
1482 "label=\"sb\", color=blue, weight=400");
1485 thread_array[act->get_tid()] = act;
1487 fprintf(file, "}\n");
1488 model_free(thread_array);
1493 /** @brief Prints an execution trace summary. */
1494 void ModelExecution::print_summary() const
// Optionally dump the mo-graph and the rf/sb graph for this execution,
// numbered so successive executions produce distinct files.
1496 #if SUPPORT_MOD_ORDER_DUMP
1497 char buffername[100];
1498 sprintf(buffername, "exec%04u", get_execution_number());
1499 mo_graph->dumpGraphToFile(buffername);
1500 sprintf(buffername, "graph%04u", get_execution_number());
1501 dumpGraph(buffername);
// Header line: execution number plus feasibility/bug annotations.
1504 model_print("Execution trace %d:", get_execution_number());
1505 if (isfeasibleprefix()) {
// All threads asleep means this trace is redundant under sleep sets.
1506 if (scheduler->all_threads_sleeping())
1507 model_print(" SLEEP-SET REDUNDANT");
1508 if (have_bug_reports())
1509 model_print(" DETECTED BUG(S)");
// Infeasible prefix: explain why (the else keyword is elided in this view).
1511 print_infeasibility(" INFEASIBLE");
// Finally print the full action table for the trace.
1514 print_list(&action_trace);
1520 * Add a Thread to the system for the first time. Should only be called once
1522 * @param t The Thread to add
1524 void ModelExecution::add_thread(Thread *t)
1526 unsigned int i = id_to_int(t->get_id());
// Grow the id-indexed thread map as needed.
1527 if (i >= thread_map.size())
1528 thread_map.resize(i + 1);
// NOTE(review): the store of t into thread_map[i] is not visible in this
// view (likely an elided line) -- confirm against the full source.
// The model-checker's own pseudo-thread is never handed to the scheduler.
1530 if (!t->is_model_thread())
1531 scheduler->add_thread(t);
1535 * @brief Get a Thread reference by its ID
1536 * @param tid The Thread's ID
1537 * @return A Thread reference
1539 Thread * ModelExecution::get_thread(thread_id_t tid) const
1541 unsigned int i = id_to_int(tid);
// In-range ids map directly into thread_map; the out-of-range return
// (presumably NULL) is elided in this view.
1542 if (i < thread_map.size())
1543 return thread_map[i];
1548 * @brief Get a reference to the Thread in which a ModelAction was executed
1549 * @param act The ModelAction
1550 * @return A Thread reference
1552 Thread * ModelExecution::get_thread(const ModelAction *act) const
1554 return get_thread(act->get_tid());
1558 * @brief Get a Thread reference by its pthread ID
1559 * @param pid The pthread's ID
1560 * @return A Thread reference
1562 Thread * ModelExecution::get_pthread(pthread_t pid) {
// NOTE(review): the declaration/assignment of 'x' is elided in this view --
// presumably a union that reinterprets the opaque pthread_t as a uint32_t
// id; confirm against the full source.
1568 uint32_t thread_id = x.v;
// Known id: return the mapped Thread. The fallback path for unknown ids
// is not visible here.
1569 if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1574 * @brief Check if a Thread is currently enabled
1575 * @param t The Thread to check
1576 * @return True if the Thread is currently enabled
1578 bool ModelExecution::is_enabled(Thread *t) const
// Thin delegate: the scheduler owns the enabled-set.
1580 return scheduler->is_enabled(t);
1584 * @brief Check if a Thread is currently enabled
1585 * @param tid The ID of the Thread to check
1586 * @return True if the Thread is currently enabled
1588 bool ModelExecution::is_enabled(thread_id_t tid) const
// Thin delegate: the scheduler owns the enabled-set.
1590 return scheduler->is_enabled(tid);
1594 * @brief Select the next thread to execute based on the current action
1596 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1597 * actions should be followed by the execution of their child thread. In either
1598 * case, the current action should determine the next thread schedule.
1600 * @param curr The current action
1601 * @return The next thread to run, if the current action will determine this
1602 * selection; otherwise NULL
1604 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1606 /* Do not split atomic RMW */
// The read half of an RMW must be immediately followed by its write half
// in the same thread.
1607 if (curr->is_rmwr())
1608 return get_thread(curr);
1609 /* Follow CREATE with the created thread */
1610 /* which is not needed, because model.cc takes care of this */
1611 if (curr->get_type() == THREAD_CREATE)
1612 return curr->get_thread_operand();
// pthread-API creation is scheduled the same way as THREAD_CREATE.
// The fallthrough "no forced selection" return is elided in this view.
1613 if (curr->get_type() == PTHREAD_CREATE) {
1614 return curr->get_thread_operand();
1620 * Takes the next step in the execution, if possible.
1621 * @param curr The current step to take
1622 * @return Returns the next Thread to run, if any; NULL if this execution
1625 Thread * ModelExecution::take_step(ModelAction *curr)
1627 Thread *curr_thrd = get_thread(curr);
// The scheduler must only hand us threads that are ready to run.
1628 ASSERT(curr_thrd->get_state() == THREAD_READY);
1630 ASSERT(check_action_enabled(curr)); /* May have side effects? */
// check_current_action may replace curr (e.g. with a recycled/equivalent
// action), so use its return value from here on.
1631 curr = check_current_action(curr);
1634 /* Process this action in ModelHistory for records*/
1635 model->get_history()->process_action( curr, curr_thrd->get_id() );
// A thread that blocked or finished during this step leaves the
// scheduler's runnable set.
1637 if (curr_thrd->is_blocked() || curr_thrd->is_complete())
1638 scheduler->remove_thread(curr_thrd);
// Let the action dictate the next thread (RMW/CREATE), or NULL to defer
// to the scheduler.
1640 return action_select_next_thread(curr);
1643 Fuzzer * ModelExecution::getFuzzer() {