11 #include "clockvector.h"
12 #include "cyclegraph.h"
14 #include "threads-model.h"
15 #include "bugmessage.h"
19 #define INITIAL_THREAD_ID 0
22 * Structure for holding small ModelChecker members that should be snapshotted
24 struct model_snapshot_members {
25 model_snapshot_members() :
26 /* First thread created will have id INITIAL_THREAD_ID */
27 next_thread_id(INITIAL_THREAD_ID),
28 used_sequence_numbers(0),
30 bad_synchronization(false),
// Destructor frees each heap-allocated bug_message collected via
// assert_bug(). NOTE(review): the loop body is not visible in this
// view — presumably `delete bugs[i]`; confirm against the full source.
34 ~model_snapshot_members() {
35 for (unsigned int i = 0;i < bugs.size();i++)
// Next id to hand out; since ids are dense from 0 this doubles as the
// thread count (see ModelExecution::get_num_threads()).
40 unsigned int next_thread_id;
// Monotonic counter backing ModelExecution::get_next_seq_num().
41 modelclock_t used_sequence_numbers;
// Bug reports raised during this execution; owned (deleted) by this struct.
42 SnapVector<bug_message *> bugs;
43 /** @brief Incorrectly-ordered synchronization was made */
44 bool bad_synchronization;
50 /** @brief Constructor */
51 ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
56 thread_map(2), /* We'll always need at least 2 threads */
60 condvar_waiters_map(),
64 thrd_last_fence_release(),
// priv holds the small snapshotted per-execution state (thread ids,
// sequence numbers, bug list, bad-synchronization flag).
65 priv(new struct model_snapshot_members ()),
// mo_graph records modification-order constraints between writes.
66 mo_graph(new CycleGraph()),
69 thrd_func_inst_lists(),
72 /* Initialize a model-checker thread, for special ModelActions */
73 model_thread = new Thread(get_next_id());
74 add_thread(model_thread);
// Give the scheduler a back-pointer so it can query this execution.
75 scheduler->register_engine(this);
78 /** @brief Destructor */
79 ModelExecution::~ModelExecution()
// Reclaim every Thread registered via add_thread(), including the
// special model-checker thread created in the constructor.
81 for (unsigned int i = 0;i < get_num_threads();i++)
82 delete get_thread(int_to_id(i));
// Thin forwarder: the global ModelChecker owns the execution counter.
88 int ModelExecution::get_execution_number() const
90 return model->get_execution_number();
// Look up the per-location action list in `hash`, lazily allocating an
// empty list on first access. NOTE(review): the `hash->put(...)` and
// `return` lines are not visible in this view — confirm the new list is
// inserted back into the table.
93 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
95 action_list_t *tmp = hash->get(ptr);
97 tmp = new action_list_t();
// Same lazy get-or-create idiom as get_safe_ptr_action(), but for the
// per-location, per-thread vector-of-lists tables (obj_thrd_map /
// obj_wr_thrd_map).
103 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<const void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
105 SnapVector<action_list_t> *tmp = hash->get(ptr);
107 tmp = new SnapVector<action_list_t>();
113 /** @return a thread ID for a new Thread */
114 thread_id_t ModelExecution::get_next_id()
// Post-increment: ids are handed out densely starting at INITIAL_THREAD_ID (0).
116 return priv->next_thread_id++;
119 /** @return the number of user threads created during this execution */
120 unsigned int ModelExecution::get_num_threads() const
// Ids are dense from 0, so the next unused id equals the thread count.
122 return priv->next_thread_id;
125 /** @return a sequence number for a new ModelAction */
126 modelclock_t ModelExecution::get_next_seq_num()
// Pre-increment: sequence numbers start at 1, so 0 can mean "no action".
128 return ++priv->used_sequence_numbers;
132 * @brief Should the current action wake up a given thread?
134 * @param curr The current action
135 * @param thread The thread that we might wake up
136 * @return True, if we should wake up the sleeping thread; false otherwise
138 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
140 const ModelAction *asleep = thread->get_pending();
141 /* Don't allow partial RMW to wake anyone up */
144 /* Synchronizing actions may have been backtracked */
145 if (asleep->could_synchronize_with(curr))
147 /* All acquire/release fences and fence-acquire/store-release */
148 if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
150 /* Fence-release + store can awake load-acquire on the same location */
151 if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
// A fence-release in curr's thread that is ordered after the sleeper's
// last action means the store may now form a release sequence the
// sleeping load-acquire could synchronize with.
152 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
153 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
// Scan every thread in the scheduler's sleep set and remove those that
// `curr` could plausibly interact with (per should_wake_up()), so they
// get rescheduled.
159 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
161 for (unsigned int i = 0;i < get_num_threads();i++) {
162 Thread *thr = get_thread(int_to_id(i));
163 if (scheduler->is_sleep_set(thr)) {
164 if (should_wake_up(curr, thr))
165 /* Remove this thread from sleep set */
166 scheduler->remove_sleep(thr);
171 /** @brief Alert the model-checker that an incorrectly-ordered
172 * synchronization was made */
173 void ModelExecution::set_bad_synchronization()
// Sticky flag; consumed by is_infeasible()/print_infeasibility().
175 priv->bad_synchronization = true;
// Record a bug report for this execution. The feasibility check below
// gates whatever follows (lines truncated in this view — presumably
// set_assert()/halting logic; confirm against the full source).
178 bool ModelExecution::assert_bug(const char *msg)
180 priv->bugs.push_back(new bug_message(msg));
182 if (isfeasibleprefix()) {
189 /** @return True, if any bugs have been reported for this execution */
190 bool ModelExecution::have_bug_reports() const
192 return priv->bugs.size() != 0;
195 /** @return True, if any fatal bugs have been reported for this execution.
196 * Any bug other than a data race is considered a fatal bug. Data races
197 * are not considered fatal unless the number of races is exceeds
198 * a threshold (temporarily set as 15).
200 bool ModelExecution::have_fatal_bug_reports() const
// NOTE(review): the visible body treats *any* recorded bug as fatal,
// which contradicts the data-race threshold described in the comment
// above — either the comment is stale or the threshold logic lives on
// lines not visible here. Confirm against the full source.
202 return priv->bugs.size() != 0;
// Accessor for the accumulated bug reports (list is owned by priv).
205 SnapVector<bug_message *> * ModelExecution::get_bugs() const
211 * Check whether the current trace has triggered an assertion which should halt
214 * @return True, if the execution should be aborted; false otherwise
216 bool ModelExecution::has_asserted() const
218 return priv->asserted;
222 * Trigger a trace assertion which should cause this execution to be halted.
223 * This can be due to a detected bug or due to an infeasibility that should
226 void ModelExecution::set_assert()
// Sticky flag polled via has_asserted() to abort the current trace.
228 priv->asserted = true;
232 * Check if we are in a deadlock. Should only be called at the end of an
233 * execution, although it should not give false positives in the middle of an
234 * execution (there should be some ENABLED thread).
236 * @return True if program is in a deadlock; false otherwise
238 bool ModelExecution::is_deadlocked() const
240 bool blocking_threads = false;
241 for (unsigned int i = 0;i < get_num_threads();i++) {
242 thread_id_t tid = int_to_id(i);
// NOTE(review): an enabled/complete-thread early-out appears to live on
// lines not visible in this view (inner numbering skips 243-244);
// confirm. The visible logic only records that some user thread still
// has a pending action.
245 Thread *t = get_thread(tid);
246 if (!t->is_model_thread() && t->get_pending())
247 blocking_threads = true;
249 return blocking_threads;
253 * Check if this is a complete execution. That is, have all thread completed
254 * execution (rather than exiting because sleep sets have forced a redundant
257 * @return True if the execution is complete.
259 bool ModelExecution::is_complete_execution() const
// Any still-enabled thread means the trace ended early (e.g. via sleep
// sets), so the execution is not complete.
261 for (unsigned int i = 0;i < get_num_threads();i++)
262 if (is_enabled(int_to_id(i)))
// Materialize a plain (non-atomic) store at `location` as a synthetic
// NONATOMIC_WRITE ModelAction so atomic loads can read from it: recover
// the storing thread and clock from the shadow memory, mark the location
// as atomic from now on, then retroactively insert the action into the
// per-location lists and the mo graph.
267 ModelAction * ModelExecution::convertNonAtomicStore(void * location) {
268 uint64_t value = *((const uint64_t *) location);
269 modelclock_t storeclock;
270 thread_id_t storethread;
271 getStoreThreadAndClock(location, &storethread, &storeclock);
272 setAtomicStoreFlag(location);
273 ModelAction * act = new ModelAction(NONATOMIC_WRITE, memory_order_relaxed, location, value, get_thread(storethread));
// Backdate the action to the clock of the original store.
274 act->set_seq_number(storeclock);
275 add_normal_write_to_lists(act);
276 add_write_to_lists(act);
277 w_modification_order(act);
282 * Processes a read model action.
283 * @param curr is the read model action to process.
284 * @param rf_set is the set of model actions we can possibly read from
285 * @return True if processing this read updates the mo_graph.
287 void ModelExecution::process_read(ModelAction *curr, SnapVector<ModelAction *> * rf_set)
289 SnapVector<const ModelAction *> * priorset = new SnapVector<const ModelAction *>();
290 bool hasnonatomicstore = hasNonAtomicStore(curr->get_location());
291 if (hasnonatomicstore) {
// A plain store to this location is made visible as a candidate write.
292 ModelAction * nonatomicstore = convertNonAtomicStore(curr->get_location());
293 rf_set->push_back(nonatomicstore);
// The fuzzer picks which write this read observes.
297 int index = fuzzer->selectWrite(curr, rf_set);
298 ModelAction *rf = (*rf_set)[index];
302 bool canprune = false;
// If the rf choice is mo-consistent, commit the collected mo edges.
303 if (r_modification_order(curr, rf, priorset, &canprune)) {
304 for(unsigned int i=0;i<priorset->size();i++) {
305 mo_graph->addEdge((*priorset)[i], rf);
308 get_thread(curr)->set_return_value(curr->get_return_value());
310 if (canprune && curr->get_type() == ATOMIC_READ) {
// This read is redundant for future exploration; drop it from the
// per-thread object list.
311 int tid = id_to_int(curr->get_tid());
312 (*obj_thrd_map.get(curr->get_location()))[tid].pop_back();
// Inconsistent choice: discard it via swap-with-last and (presumably,
// lines truncated here) retry with the reduced rf_set.
317 (*rf_set)[index] = rf_set->back();
323 * Processes a lock, trylock, or unlock model action. @param curr is
324 * the read model action to process.
326 * The try lock operation checks whether the lock is taken. If not,
327 * it falls to the normal lock operation case. If so, it returns
330 * The lock operation has already been checked that it is enabled, so
331 * it just grabs the lock and synchronizes with the previous unlock.
333 * The unlock operation has to re-enable all of the threads that are
334 * waiting on the lock.
336 * @return True if synchronization was updated; false otherwise
338 bool ModelExecution::process_mutex(ModelAction *curr)
340 cdsc::mutex *mutex = curr->get_mutex();
341 struct cdsc::mutex_state *state = NULL;
344 state = mutex->get_state();
346 switch (curr->get_type()) {
347 case ATOMIC_TRYLOCK: {
// Trylock succeeds iff no thread currently holds the lock.
348 bool success = !state->locked;
349 curr->set_try_lock(success);
351 get_thread(curr)->set_return_value(0);
354 get_thread(curr)->set_return_value(1);
356 //otherwise fall into the lock case
358 //TODO: FIND SOME BETTER WAY TO CHECK LOCK INITIALIZED OR NOT
359 //if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
360 // assert_bug("Lock access before initialization");
// Take the lock: record the owning Thread in the mutex state.
361 state->locked = get_thread(curr);
362 ModelAction *unlock = get_last_unlock(curr);
363 //synchronize with the previous unlock statement
364 if (unlock != NULL) {
365 synchronize(unlock, curr);
371 case ATOMIC_UNLOCK: {
372 //TODO: FIX WAIT SITUATION...WAITS CAN SPURIOUSLY FAIL...TIMED WAITS SHOULD PROBABLY JUST BE THE SAME AS NORMAL WAITS...THINK ABOUT PROBABILITIES THOUGH....AS IN TIMED WAIT MUST FAIL TO GUARANTEE PROGRESS...NORMAL WAIT MAY FAIL...SO NEED NORMAL WAIT TO WORK CORRECTLY IN THE CASE IT SPURIOUSLY FAILS AND IN THE CASE IT DOESN'T... TIMED WAITS MUST EVENMTUALLY RELEASE...
374 /* wake up the other threads */
375 for (unsigned int i = 0;i < get_num_threads();i++) {
376 Thread *t = get_thread(int_to_id(i));
377 Thread *curr_thrd = get_thread(curr);
// Only wake threads blocked on *this* mutex via a lock operation.
378 if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
382 /* unlock the lock - after checking who was waiting on it */
383 state->locked = NULL;
385 if (!curr->is_wait())
386 break;/* The rest is only for ATOMIC_WAIT */
390 case ATOMIC_NOTIFY_ALL: {
391 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
392 //activate all the waiting threads
393 for (action_list_t::iterator rit = waiters->begin();rit != waiters->end();rit++) {
394 scheduler->wake(get_thread(*rit));
399 case ATOMIC_NOTIFY_ONE: {
400 action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
401 if (waiters->size() != 0) {
// The fuzzer chooses which single waiter the notify releases.
402 Thread * thread = fuzzer->selectNotify(waiters);
403 scheduler->wake(thread);
415 * Process a write ModelAction
416 * @param curr The ModelAction to process
417 * @return True if the mo_graph was updated or promises were resolved
419 void ModelExecution::process_write(ModelAction *curr)
// Record modification-order constraints for this store, then report
// success to the executing thread.
421 w_modification_order(curr);
422 get_thread(curr)->set_return_value(VALUE_NONE);
426 * Process a fence ModelAction
427 * @param curr The ModelAction to process
428 * @return True if synchronization was updated
430 bool ModelExecution::process_fence(ModelAction *curr)
433 * fence-relaxed: no-op
434 * fence-release: only log the occurence (not in this function), for
435 * use in later synchronization
436 * fence-acquire (this function): search for hypothetical release
438 * fence-seq-cst: MO constraints formed in {r,w}_modification_order
440 bool updated = false;
441 if (curr->is_acquire()) {
442 action_list_t *list = &action_trace;
443 action_list_t::reverse_iterator rit;
444 /* Find X : is_read(X) && X --sb-> curr */
// Walk the global trace backwards, restricting to curr's own thread
// (sequenced-before), until the thread start or a prior acquire fence.
445 for (rit = list->rbegin();rit != list->rend();rit++) {
446 ModelAction *act = *rit;
449 if (act->get_tid() != curr->get_tid())
451 /* Stop at the beginning of the thread */
452 if (act->is_thread_start())
454 /* Stop once we reach a prior fence-acquire */
455 if (act->is_fence() && act->is_acquire())
459 /* read-acquire will find its own release sequences */
460 if (act->is_acquire())
463 /* Establish hypothetical release sequences */
// Merge the happens-before clock propagated from the write that the
// earlier relaxed read observed.
464 ClockVector *cv = get_hb_from_write(act->get_reads_from());
465 if (cv != NULL && curr->get_cv()->merge(cv))
473 * @brief Process the current action for thread-related activity
475 * Performs current-action processing for a THREAD_* ModelAction. Proccesses
476 * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
477 * synchronization, etc. This function is a no-op for non-THREAD actions
478 * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
480 * @param curr The current action
481 * @return True if synchronization was updated or a thread completed
483 void ModelExecution::process_thread_action(ModelAction *curr)
485 switch (curr->get_type()) {
486 case THREAD_CREATE: {
487 thrd_t *thrd = (thrd_t *)curr->get_location();
488 struct thread_params *params = (struct thread_params *)curr->get_value();
489 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
490 curr->set_thread_operand(th);
492 th->set_creation(curr);
495 case PTHREAD_CREATE: {
// pthreads have no thrd_t storage of ours; hand back a small integer
// handle and map it to the Thread via pthread_map.
496 (*(uint32_t *)curr->get_location()) = pthread_counter++;
498 struct pthread_params *params = (struct pthread_params *)curr->get_value();
499 Thread *th = new Thread(get_next_id(), NULL, params->func, params->arg, get_thread(curr));
500 curr->set_thread_operand(th);
502 th->set_creation(curr);
504 if ( pthread_map.size() < pthread_counter )
505 pthread_map.resize( pthread_counter );
506 pthread_map[ pthread_counter-1 ] = th;
// Join: synchronize with the last action of the completed thread so its
// effects happen-before the joiner. (The THREAD_JOIN/PTHREAD_JOIN case
// labels fall on lines not visible in this view.)
511 Thread *blocking = curr->get_thread_operand();
512 ModelAction *act = get_last_action(blocking->get_id());
513 synchronize(act, curr);
517 Thread *blocking = curr->get_thread_operand();
518 ModelAction *act = get_last_action(blocking->get_id());
519 synchronize(act, curr);
520 break; // WL: to be add (modified)
523 case THREADONLY_FINISH:
524 case THREAD_FINISH: {
525 Thread *th = get_thread(curr);
// Special-case: the initial thread finishing (truncated body follows).
526 if (curr->get_type() == THREAD_FINISH &&
527 th == model->getInitThread()) {
533 /* Wake up any joining threads */
534 for (unsigned int i = 0;i < get_num_threads();i++) {
535 Thread *waiting = get_thread(int_to_id(i));
536 if (waiting->waiting_on() == th &&
537 waiting->get_pending()->is_thread_join())
538 scheduler->wake(waiting);
552 * Initialize the current action by performing one or more of the following
553 * actions, as appropriate: merging RMWR and RMWC/RMW actions,
554 * manipulating backtracking sets, allocating and
555 * initializing clock vectors, and computing the promises to fulfill.
557 * @param curr The current action, as passed from the user context; may be
558 * freed/invalidated after the execution of this function, with a different
559 * action "returned" its place (pass-by-reference)
560 * @return True if curr is a newly-explored action; false otherwise
562 bool ModelExecution::initialize_curr_action(ModelAction **curr)
564 if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
// Second half of an RMW: merge into the pending RMWR action instead of
// creating a fresh one (truncated lines presumably rebind *curr).
565 ModelAction *newcurr = process_rmw(*curr);
571 ModelAction *newcurr = *curr;
573 newcurr->set_seq_number(get_next_seq_num());
574 /* Always compute new clock vector */
575 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
577 /* Assign most recent release fence */
578 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
580 return true; /* This was a new ModelAction */
585 * @brief Establish reads-from relation between two actions
587 * Perform basic operations involved with establishing a concrete rf relation,
588 * including setting the ModelAction data and checking for release sequences.
590 * @param act The action that is reading (must be a read)
591 * @param rf The action from which we are reading (must be a write)
593 * @return True if this read established synchronization
596 void ModelExecution::read_from(ModelAction *act, ModelAction *rf)
599 ASSERT(rf->is_write());
601 act->set_read_from(rf);
602 if (act->is_acquire()) {
// Load-acquire synchronizes with the release sequence headed by rf:
// fold the propagated happens-before clock into the reader's cv.
603 ClockVector *cv = get_hb_from_write(rf);
606 act->get_cv()->merge(cv);
611 * @brief Synchronizes two actions
613 * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
614 * This function performs the synchronization as well as providing other hooks
615 * for other checks along with synchronization.
617 * @param first The left-hand side of the synchronizes-with relation
618 * @param second The right-hand side of the synchronizes-with relation
619 * @return True if the synchronization was successful (i.e., was consistent
620 * with the execution order); false otherwise
622 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
// A sw edge pointing backwards in execution order is inconsistent;
// record it as bad synchronization rather than merging clocks.
624 if (*second < *first) {
625 set_bad_synchronization();
628 return second->synchronize_with(first);
632 * @brief Check whether a model action is enabled.
634 * Checks whether an operation would be successful (i.e., is a lock already
635 * locked, or is the joined thread already complete).
637 * For yield-blocking, yields are never enabled.
639 * @param curr is the ModelAction to check whether it is enabled.
640 * @return a bool that indicates whether the action is enabled.
642 bool ModelExecution::check_action_enabled(ModelAction *curr) {
643 if (curr->is_lock()) {
// Lock is disabled while another thread holds it (the state->locked
// test itself falls on lines not visible in this view).
644 cdsc::mutex *lock = curr->get_mutex();
645 struct cdsc::mutex_state *state = lock->get_state();
648 } else if (curr->is_thread_join()) {
// Join blocks until the target thread has completed.
649 Thread *blocking = curr->get_thread_operand();
650 if (!blocking->is_complete()) {
659 * This is the heart of the model checker routine. It performs model-checking
660 * actions corresponding to a given "current action." Among other processes, it
661 * calculates reads-from relationships, updates synchronization clock vectors,
662 * forms a memory_order constraints graph, and handles replay/backtrack
663 * execution when running permutations of previously-observed executions.
665 * @param curr The current action to process
666 * @return The ModelAction that is actually executed; may be different than
669 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
// An RMWC/RMW is the completion of an earlier RMWR; it must not be
// re-added to the per-object/trace lists below.
672 bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
673 bool newly_explored = initialize_curr_action(&curr);
677 wake_up_sleeping_actions(curr);
679 /* Add the action to lists before any other model-checking tasks */
680 if (!second_part_of_rmw && curr->get_type() != NOOP)
681 add_action_to_lists(curr);
683 if (curr->is_write())
684 add_write_to_lists(curr);
686 SnapVector<ModelAction *> * rf_set = NULL;
687 /* Build may_read_from set for newly-created actions */
688 if (newly_explored && curr->is_read())
689 rf_set = build_may_read_from(curr);
691 process_thread_action(curr);
693 if (curr->is_read() && !second_part_of_rmw) {
694 process_read(curr, rf_set);
// Non-read paths must not have built an rf_set.
697 ASSERT(rf_set == NULL);
700 if (curr->is_write())
703 if (curr->is_fence())
706 if (curr->is_mutex_op())
713 * This is the strongest feasibility check available.
714 * @return whether the current trace (partial or complete) must be a prefix of
717 bool ModelExecution::isfeasibleprefix() const
719 return !is_infeasible();
723 * Print disagnostic information about an infeasible execution
724 * @param prefix A string to prefix the output with; if NULL, then a default
725 * message prefix will be provided
727 void ModelExecution::print_infeasibility(const char *prefix) const
// NOTE(review): `buf`/`ptr` are declared on lines not visible in this
// view; `ptr` accumulates one tag per infeasibility cause into `buf`.
731 if (priv->bad_synchronization)
732 ptr += sprintf(ptr, "[bad sw ordering]");
734 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
738 * Check if the current partial trace is infeasible. Does not check any
739 * end-of-execution flags, which might rule out the execution. Thus, this is
740 * useful only for ruling an execution as infeasible.
741 * @return whether the current partial trace is infeasible.
743 bool ModelExecution::is_infeasible() const
// Currently the only infeasibility source is bad synchronization.
745 return priv->bad_synchronization;
748 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
749 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
// The pending RMWR is the last action of this thread; merge `act` into
// it and add the atomicity edge readfrom(rmw) --mo--> rmw.
750 ModelAction *lastread = get_last_action(act->get_tid());
751 lastread->process_rmw(act);
753 mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
759 * @brief Updates the mo_graph with the constraints imposed from the current
762 * Basic idea is the following: Go through each other thread and find
763 * the last action that happened before our read. Two cases:
765 * -# The action is a write: that write must either occur before
766 * the write we read from or be the write we read from.
767 * -# The action is a read: the write that that action read from
768 * must occur before the write we read from or be the same write.
770 * @param curr The current action. Must be a read.
771 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
772 * @return True if modification order edges were added; false otherwise
775 bool ModelExecution::r_modification_order(ModelAction *curr, const ModelAction *rf, SnapVector<const ModelAction *> * priorset, bool * canprune)
777 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
779 ASSERT(curr->is_read());
781 /* Last SC fence in the current thread */
782 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
784 int tid = curr->get_tid();
785 ModelAction *prev_same_thread = NULL;
786 /* Iterate over all threads */
// Start with curr's own thread, then wrap around all others.
787 for (i = 0;i < thrd_lists->size();i++, tid = (((unsigned int)(tid+1)) == thrd_lists->size()) ? 0 : tid + 1) {
788 /* Last SC fence in thread tid */
789 ModelAction *last_sc_fence_thread_local = NULL;
791 last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(tid), NULL);
793 /* Last SC fence in thread tid, before last SC fence in current thread */
794 ModelAction *last_sc_fence_thread_before = NULL;
795 if (last_sc_fence_local)
796 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(tid), last_sc_fence_local);
798 //Only need to iterate if either hb has changed for thread in question or SC fence after last operation...
799 if (prev_same_thread != NULL &&
800 (prev_same_thread->get_cv()->getClock(tid) == curr->get_cv()->getClock(tid)) &&
801 (last_sc_fence_thread_local == NULL || *last_sc_fence_thread_local < *prev_same_thread)) {
805 /* Iterate over actions in thread, starting from most recent */
806 action_list_t *list = &(*thrd_lists)[tid];
807 action_list_t::reverse_iterator rit;
808 for (rit = list->rbegin();rit != list->rend();rit++) {
809 ModelAction *act = *rit;
814 /* Don't want to add reflexive edges on 'rf' */
815 if (act->equals(rf)) {
816 if (act->happens_before(curr))
822 if (act->is_write()) {
823 /* C++, Section 29.3 statement 5 */
824 if (curr->is_seqcst() && last_sc_fence_thread_local &&
825 *act < *last_sc_fence_thread_local) {
// A cycle (rf already reaches act) means this rf choice is infeasible.
826 if (mo_graph->checkReachable(rf, act))
828 priorset->push_back(act);
831 /* C++, Section 29.3 statement 4 */
832 else if (act->is_seqcst() && last_sc_fence_local &&
833 *act < *last_sc_fence_local) {
834 if (mo_graph->checkReachable(rf, act))
836 priorset->push_back(act);
839 /* C++, Section 29.3 statement 6 */
840 else if (last_sc_fence_thread_before &&
841 *act < *last_sc_fence_thread_before) {
842 if (mo_graph->checkReachable(rf, act))
844 priorset->push_back(act);
850 * Include at most one act per-thread that "happens
853 if (act->happens_before(curr)) {
855 if (last_sc_fence_local == NULL ||
856 (*last_sc_fence_local < *act)) {
857 prev_same_thread = act;
860 if (act->is_write()) {
861 if (mo_graph->checkReachable(rf, act))
863 priorset->push_back(act);
// act is a read: constrain against the write it observed instead.
865 const ModelAction *prevrf = act->get_reads_from();
866 if (!prevrf->equals(rf)) {
867 if (mo_graph->checkReachable(rf, prevrf))
869 priorset->push_back(prevrf);
871 if (act->get_tid() == curr->get_tid()) {
872 //Can prune curr from obj list
885 * Updates the mo_graph with the constraints imposed from the current write.
887 * Basic idea is the following: Go through each other thread and find
888 * the lastest action that happened before our write. Two cases:
890 * (1) The action is a write => that write must occur before
893 * (2) The action is a read => the write that that action read from
894 * must occur before the current write.
896 * This method also handles two other issues:
898 * (I) Sequential Consistency: Making sure that if the current write is
899 * seq_cst, that it occurs after the previous seq_cst write.
901 * (II) Sending the write back to non-synchronizing reads.
903 * @param curr The current action. Must be a write.
904 * @param send_fv A vector for stashing reads to which we may pass our future
905 * value. If NULL, then don't record any future values.
906 * @return True if modification order edges were added; false otherwise
908 void ModelExecution::w_modification_order(ModelAction *curr)
910 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
912 ASSERT(curr->is_write());
// All mo predecessors of curr are collected here and committed in one
// addEdges() call at the end.
914 SnapList<ModelAction *> edgeset;
916 if (curr->is_seqcst()) {
917 /* We have to at least see the last sequentially consistent write,
918 so we are initialized. */
919 ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
920 if (last_seq_cst != NULL) {
921 edgeset.push_back(last_seq_cst);
923 //update map for next query
924 obj_last_sc_map.put(curr->get_location(), curr);
927 /* Last SC fence in the current thread */
928 ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
930 /* Iterate over all threads */
931 for (i = 0;i < thrd_lists->size();i++) {
932 /* Last SC fence in thread i, before last SC fence in current thread */
933 ModelAction *last_sc_fence_thread_before = NULL;
934 if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
935 last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);
937 /* Iterate over actions in thread, starting from most recent */
938 action_list_t *list = &(*thrd_lists)[i];
939 action_list_t::reverse_iterator rit;
940 for (rit = list->rbegin();rit != list->rend();rit++) {
941 ModelAction *act = *rit;
944 * 1) If RMW and it actually read from something, then we
945 * already have all relevant edges, so just skip to next
948 * 2) If RMW and it didn't read from anything, we should
949 * whatever edge we can get to speed up convergence.
951 * 3) If normal write, we need to look at earlier actions, so
952 * continue processing list.
954 if (curr->is_rmw()) {
955 if (curr->get_reads_from() != NULL)
963 /* C++, Section 29.3 statement 7 */
964 if (last_sc_fence_thread_before && act->is_write() &&
965 *act < *last_sc_fence_thread_before) {
966 edgeset.push_back(act);
971 * Include at most one act per-thread that "happens
974 if (act->happens_before(curr)) {
976 * Note: if act is RMW, just add edge:
978 * The following edge should be handled elsewhere:
979 * readfrom(act) --mo--> act
982 edgeset.push_back(act);
983 else if (act->is_read()) {
984 //if previous read accessed a null, just keep going
985 edgeset.push_back(act->get_reads_from());
991 mo_graph->addEdges(&edgeset, curr);
996 * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
997 * some constraints. This method checks one the following constraint (others
998 * require compiler support):
1000 * If X --hb-> Y --mo-> Z, then X should not read from Z.
1001 * If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1003 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1005 SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1007 /* Iterate over all threads */
1008 for (i = 0;i < thrd_lists->size();i++) {
// Earliest write (or observed write) in this thread that the reader
// happens-before; a "Y" (or "A --rf-> Y") candidate from the rule above.
1009 const ModelAction *write_after_read = NULL;
1011 /* Iterate over actions in thread, starting from most recent */
1012 action_list_t *list = &(*thrd_lists)[i];
1013 action_list_t::reverse_iterator rit;
1014 for (rit = list->rbegin();rit != list->rend();rit++) {
1015 ModelAction *act = *rit;
1017 /* Don't disallow due to act == reader */
1018 if (!reader->happens_before(act) || reader == act)
1020 else if (act->is_write())
1021 write_after_read = act;
1022 else if (act->is_read() && act->get_reads_from() != NULL)
1023 write_after_read = act->get_reads_from();
// Disallow if mo already orders that candidate before the writer.
1026 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1033 * Computes the clock vector that happens before propagates from this write.
1035 * @param rf The action that might be part of a release sequence. Must be a
1037 * @return ClockVector of happens before relation.
1040 ClockVector * ModelExecution::get_hb_from_write(ModelAction *rf) const {
// Phase 1: walk the rf chain backwards, collecting RMWs whose release-
// sequence clock (rfcv) has not been computed yet; stop at a write that
// terminates the chain (non-RMW, acq+rel RMW, or cached rfcv).
1041 SnapVector<ModelAction *> * processset = NULL;
1042 for ( ;rf != NULL;rf = rf->get_reads_from()) {
1043 ASSERT(rf->is_write());
1044 if (!rf->is_rmw() || (rf->is_acquire() && rf->is_release()) || rf->get_rfcv() != NULL)
1046 if (processset == NULL)
1047 processset = new SnapVector<ModelAction *>();
1048 processset->push_back(rf);
// Phase 2: replay the collected chain forward, building/merging clock
// vectors and (presumably, on truncated lines) caching them as rfcv.
1051 int i = (processset == NULL) ? 0 : processset->size();
1053 ClockVector * vec = NULL;
1055 if (rf->get_rfcv() != NULL) {
1056 vec = rf->get_rfcv();
1057 } else if (rf->is_acquire() && rf->is_release()) {
1059 } else if (rf->is_release() && !rf->is_rmw()) {
1061 } else if (rf->is_release()) {
1062 //have rmw that is release and doesn't have a rfcv
1063 (vec = new ClockVector(vec, NULL))->merge(rf->get_cv());
1066 //operation that isn't release
1067 if (rf->get_last_fence_release()) {
1069 vec = rf->get_last_fence_release()->get_cv();
1071 (vec=new ClockVector(vec, NULL))->merge(rf->get_last_fence_release()->get_cv());
1077 rf = (*processset)[i];
1081 if (processset != NULL)
1087 * Performs various bookkeeping operations for the current ModelAction. For
1088 * instance, adds action to the per-object, per-thread action vector and to the
1089 * action trace list of all thread actions.
1091 * @param act is the ModelAction to add.
1093 void ModelExecution::add_action_to_lists(ModelAction *act)
1095 int tid = id_to_int(act->get_tid());
1096 ModelAction *uninit = NULL;
1098 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
// First action on an atomic variable: synthesize an "uninitialized"
// write so reads always have something to read from.
1099 if (list->empty() && act->is_atomic_var()) {
1100 uninit = get_uninitialized_action(act);
1101 uninit_id = id_to_int(uninit->get_tid());
1102 list->push_front(uninit);
1103 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, act->get_location());
1104 if (uninit_id >= (int)vec->size()) {
1105 int oldsize = (int) vec->size();
1106 vec->resize(uninit_id + 1);
// NOTE(review): `&vec[i]` indexes the *pointer* `vec` as an array of
// SnapVector, not the vector's elements — this placement-new looks like
// it should be `&(*vec)[i]`. Same pattern recurs below and in
// add_normal_write_to_lists(); confirm against the full source.
1107 for(int i=oldsize; i<uninit_id+1; i++)
1108 new (&vec[i]) action_list_t();
1110 (*vec)[uninit_id].push_front(uninit);
1112 list->push_back(act);
1114 // Update action trace, a total order of all actions
1115 action_trace.push_back(act);
1117 action_trace.push_front(uninit);
1119 // Update obj_thrd_map, a per location, per thread, order of actions
1120 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1121 if (tid >= (int)vec->size()) {
1122 uint oldsize =vec->size();
1123 vec->resize(priv->next_thread_id);
1124 for(uint i=oldsize; i<priv->next_thread_id; i++)
1125 new (&vec[i]) action_list_t();
1127 (*vec)[tid].push_back(act);
1129 (*vec)[uninit_id].push_front(uninit);
1131 // Update thrd_last_action, the last action taken by each thrad
1132 if ((int)thrd_last_action.size() <= tid)
1133 thrd_last_action.resize(get_num_threads());
1134 thrd_last_action[tid] = act;
1136 thrd_last_action[uninit_id] = uninit;
1138 // Update thrd_last_fence_release, the last release fence taken by each thread
1139 if (act->is_fence() && act->is_release()) {
1140 if ((int)thrd_last_fence_release.size() <= tid)
1141 thrd_last_fence_release.resize(get_num_threads());
1142 thrd_last_fence_release[tid] = act;
// Waits are also recorded against the mutex location they release,
// stashed in the action's value field.
1145 if (act->is_wait()) {
1146 void *mutex_loc = (void *) act->get_value();
1147 get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);
1149 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
1150 if (tid >= (int)vec->size()) {
1151 uint oldsize = vec->size();
1152 vec->resize(priv->next_thread_id);
1153 for(uint i=oldsize; i<priv->next_thread_id; i++)
1154 new (&vec[i]) action_list_t();
1156 (*vec)[tid].push_back(act);
// Insert `act` into `list` keeping it sorted by sequence number. Fast
// path: empty list or matching seq number at the tail => append;
// otherwise scan backwards and insert before the first entry with the
// same seq number (the sibling branches are on lines not visible here).
1160 void insertIntoActionList(action_list_t *list, ModelAction *act) {
1161 action_list_t::reverse_iterator rit = list->rbegin();
1162 modelclock_t next_seq = act->get_seq_number();
1163 if (rit == list->rend() || (*rit)->get_seq_number() == next_seq)
1164 list->push_back(act);
1166 for(;rit != list->rend();rit++) {
1167 if ((*rit)->get_seq_number() == next_seq) {
// rit.base() points one past *rit in forward order.
1168 action_list_t::iterator it = rit.base();
1169 list->insert(it, act);
/**
 * @brief Insert a lazily-created action into a seq-number-ordered list and
 *        initialize its clock vector
 *
 * Same ordering policy as insertIntoActionList(), but additionally seeds
 * act's clock vector from the action it is placed after (or from NULL when
 * the list is empty).
 *
 * NOTE(review): an `else` branch, a `break`, and closing braces appear to
 * have been lost from this copy of the file — confirm against the original.
 */
void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
	action_list_t::reverse_iterator rit = list->rbegin();
	modelclock_t next_seq = act->get_seq_number();
	if (rit == list->rend()) {
		/* Empty list: no predecessor to inherit a clock vector from */
		act->create_cv(NULL);
	} else if ((*rit)->get_seq_number() == next_seq) {
		/* Tail shares act's seq number: inherit its CV and append */
		act->create_cv((*rit));
		list->push_back(act);
	/* Scan backwards for the action carrying act's sequence number */
	for(;rit != list->rend();rit++) {
		if ((*rit)->get_seq_number() == next_seq) {
			act->create_cv((*rit));
			/* rit.base() points one past *rit, so act lands right after it */
			action_list_t::iterator it = rit.base();
			list->insert(it, act);
1197 * Performs various bookkeeping operations for a normal write. The
1198 * complication is that we are typically inserting a normal write
1199 * lazily, so we need to insert it into the middle of lists.
1201 * @param act is the ModelAction to add.
1204 void ModelExecution::add_normal_write_to_lists(ModelAction *act)
1206 int tid = id_to_int(act->get_tid());
1207 insertIntoActionListAndSetCV(&action_trace, act);
1209 action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
1210 insertIntoActionList(list, act);
1212 // Update obj_thrd_map, a per location, per thread, order of actions
1213 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
1214 if (tid >= (int)vec->size()) {
1215 uint oldsize =vec->size();
1216 vec->resize(priv->next_thread_id);
1217 for(uint i=oldsize; i<priv->next_thread_id; i++)
1218 new (&vec[i]) action_list_t();
1220 insertIntoActionList(&(*vec)[tid],act);
1222 // Update thrd_last_action, the last action taken by each thrad
1223 if (thrd_last_action[tid]->get_seq_number() == act->get_seq_number())
1224 thrd_last_action[tid] = act;
1228 void ModelExecution::add_write_to_lists(ModelAction *write) {
1229 SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_wr_thrd_map, write->get_location());
1230 int tid = id_to_int(write->get_tid());
1231 if (tid >= (int)vec->size()) {
1232 uint oldsize =vec->size();
1233 vec->resize(priv->next_thread_id);
1234 for(uint i=oldsize; i<priv->next_thread_id; i++)
1235 new (&vec[i]) action_list_t();
1237 (*vec)[tid].push_back(write);
1241 * @brief Get the last action performed by a particular Thread
1242 * @param tid The thread ID of the Thread in question
1243 * @return The last action in the thread
1245 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
1247 int threadid = id_to_int(tid);
1248 if (threadid < (int)thrd_last_action.size())
1249 return thrd_last_action[id_to_int(tid)];
1255 * @brief Get the last fence release performed by a particular Thread
1256 * @param tid The thread ID of the Thread in question
1257 * @return The last fence release in the thread, if one exists; NULL otherwise
1259 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
1261 int threadid = id_to_int(tid);
1262 if (threadid < (int)thrd_last_fence_release.size())
1263 return thrd_last_fence_release[id_to_int(tid)];
1269 * Gets the last memory_order_seq_cst write (in the total global sequence)
1270 * performed on a particular object (i.e., memory location), not including the
1272 * @param curr The current ModelAction; also denotes the object location to
1274 * @return The last seq_cst write
1276 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
1278 void *location = curr->get_location();
1279 return obj_last_sc_map.get(location);
1283 * Gets the last memory_order_seq_cst fence (in the total global sequence)
1284 * performed in a particular thread, prior to a particular fence.
1285 * @param tid The ID of the thread to check
1286 * @param before_fence The fence from which to begin the search; if NULL, then
1287 * search for the most recent fence in the thread.
1288 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
	/* All fences should have location FENCE_LOCATION */
	action_list_t *list = obj_map.get(FENCE_LOCATION);
	/* NOTE(review): a NULL-list guard plus several brace/`break`/`return`
	 * lines appear to have been dropped from this copy of the file. */
	action_list_t::reverse_iterator rit = list->rbegin();
	/* When before_fence is given, first rewind the iterator to it so only
	 * strictly earlier actions are considered below */
	for (;rit != list->rend();rit++)
		if (*rit == before_fence)
	/* A non-NULL before_fence must be present in the fence list */
	ASSERT(*rit == before_fence);
	/* Search backwards for the most recent seq_cst fence in thread tid */
	for (;rit != list->rend();rit++)
		if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
1316 * Gets the last unlock operation performed on a particular mutex (i.e., memory
1317 * location). This function identifies the mutex according to the current
1318 * action, which is presumed to perform on the same mutex.
1319 * @param curr The current ModelAction; also denotes the object location to
1321 * @return The last unlock operation
ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
	void *location = curr->get_location();
	/* All actions on this mutex live in the per-location obj_map list */
	action_list_t *list = obj_map.get(location);
	/* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
	action_list_t::reverse_iterator rit;
	/* Walk backwards: the first unlock/wait found is the most recent one.
	 * NOTE(review): the `return *rit;` / `return NULL;` lines appear to
	 * have been dropped from this copy of the file. */
	for (rit = list->rbegin();rit != list->rend();rit++)
		if ((*rit)->is_unlock() || (*rit)->is_wait())
1336 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
1338 ModelAction *parent = get_last_action(tid);
1340 parent = get_thread(tid)->get_creation();
1345 * Returns the clock vector for a given thread.
1346 * @param tid The thread whose clock vector we want
1347 * @return Desired clock vector
1349 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
1351 ModelAction *firstaction=get_parent_action(tid);
1352 return firstaction != NULL ? firstaction->get_cv() : NULL;
/**
 * @brief Compare two values for equality at a given operand width
 *
 * Truncates both values to @a size bytes before comparing, so that e.g.
 * two values differing only in bits above the operand width still compare
 * equal. (The switch scaffolding was missing in this copy — as written the
 * second and third returns were unreachable; reconstructed here.)
 *
 * @param val1 First value
 * @param val2 Second value
 * @param size Operand width in bytes (1, 2, 4; anything else compares
 *             the full 64 bits)
 * @return true iff the low @a size bytes of the values are equal
 */
bool valequals(uint64_t val1, uint64_t val2, int size) {
	switch (size) {
	case 1:
		return ((uint8_t)val1) == ((uint8_t)val2);
	case 2:
		return ((uint16_t)val1) == ((uint16_t)val2);
	case 4:
		return ((uint32_t)val1) == ((uint32_t)val2);
	default:
		/* 8-byte (and any other) width: full comparison */
		return val1 == val2;
	}
}
1372 * Build up an initial set of all past writes that this 'read' action may read
1373 * from, as well as any previously-observed future values that must still be valid.
1375 * @param curr is the current ModelAction that we are exploring; it must be a
/**
 * Collect the candidate set of writes this read may read from.
 *
 * NOTE(review): the loop-index declaration (`unsigned int i;`), several
 * braces, `continue`/`break` statements, the debug `curr->print()` and the
 * final `return rf_set;` appear to have been dropped from this copy of the
 * file — confirm against the original.
 */
SnapVector<ModelAction *> * ModelExecution::build_may_read_from(ModelAction *curr)
	/* Per-thread lists of prior writes to curr's location */
	SnapVector<action_list_t> *thrd_lists = obj_wr_thrd_map.get(curr->get_location());
	ASSERT(curr->is_read());
	ModelAction *last_sc_write = NULL;
	/* A seq_cst read is constrained relative to the last SC write */
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);
	/* The resulting may-read-from set; ownership passes to the caller */
	SnapVector<ModelAction *> * rf_set = new SnapVector<ModelAction *>();
	/* Iterate over all threads */
	for (i = 0;i < thrd_lists->size();i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin();rit != list->rend();rit++) {
			ModelAction *act = *rit;
			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;
			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
			/* Need to check whether we will have two RMW reading from the same value */
			if (curr->is_rmwr()) {
				/* It is okay if we have a failing CAS */
				if (!curr->is_rmwrcas() ||
						valequals(curr->get_value(), act->get_value(), curr->getSize())) {
					//Need to make sure we aren't the second RMW
					CycleNode * node = mo_graph->getNode_noCreate(act);
					if (node != NULL && node->getRMW() != NULL) {
						//we are the second RMW
			/* Only add feasible reads */
			rf_set->push_back(act);
			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
	/* Debug dump of the computed set */
	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		model_print("End printing read_from_past\n");
1442 * @brief Get an action representing an uninitialized atomic
1444 * This function may create a new one.
1446 * @param curr The current action, which prompts the creation of an UNINIT action
1447 * @return A pointer to the UNINIT ModelAction
ModelAction * ModelExecution::get_uninitialized_action(ModelAction *curr) const
	/* Reuse the UNINIT action cached on curr, when present.
	 * NOTE(review): the surrounding `if (!act)` guard and `return act;`
	 * appear to have been dropped from this copy of the file. */
	ModelAction *act = curr->get_uninit_action();
	/* The UNINIT placeholder is attributed to the model-checker thread and
	 * carries the configured "uninitialized" value */
	act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
	curr->set_uninit_action(act);
	/* No predecessors: start from an empty clock vector */
	act->create_cv(NULL);
/** @brief Print a formatted dump of an action list, plus a running hash of
 *  the printed actions (lets two runs be compared cheaply). */
static void print_list(const action_list_t *list)
	action_list_t::const_iterator it;
	model_print("------------------------------------------------------------------------------------\n");
	model_print("# t Action type MO Location Value Rf CV\n");
	model_print("------------------------------------------------------------------------------------\n");
	unsigned int hash = 0;
	for (it = list->begin();it != list->end();it++) {
		const ModelAction *act = *it;
		/* Only actions with a real sequence number are printed/hashed.
		 * NOTE(review): an `act->print();` call appears to have been
		 * dropped here in this copy of the file. */
		if (act->get_seq_number() > 0)
		hash = hash^(hash<<3)^((*it)->hash());
	model_print("HASH %u\n", hash);
	model_print("------------------------------------------------------------------------------------\n");
1480 #if SUPPORT_MOD_ORDER_DUMP
/** @brief Dump the modification-order graph plus rf/sb edges for the current
 *  trace, in Graphviz dot format, to "<filename>.dot".
 *  NOTE(review): the `char buffer[...]` declaration, some call-argument
 *  lines, braces and a closing `fclose(file)` appear to have been dropped
 *  from this copy of the file — confirm against the original. */
void ModelExecution::dumpGraph(char *filename) const
	sprintf(buffer, "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Most recent action per thread, used to draw sequenced-before edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());
	for (action_list_t::const_iterator it = action_trace.begin();it != action_trace.end();it++) {
		ModelAction *act = *it;
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			/* reads-from edge: red */
			mo_graph->dot_print_edge(file,
				act->get_reads_from(),
				"label=\"rf\", color=red, weight=2");
		if (thread_array[act->get_tid()]) {
			/* sequenced-before edge from the thread's previous action: blue */
			mo_graph->dot_print_edge(file,
				thread_array[id_to_int(act->get_tid())],
				"label=\"sb\", color=blue, weight=400");
		thread_array[act->get_tid()] = act;
	fprintf(file, "}\n");
	model_free(thread_array);
1514 /** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump the mo-graph for this execution to numbered files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
	/* NOTE(review): the matching `#endif` (and an else branch/newline
	 * print) appear to have been dropped from this copy of the file. */
	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		/* Flag redundant explorations and detected bugs on the header line */
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	print_infeasibility(" INFEASIBLE");
	print_list(&action_trace);
1541 * Add a Thread to the system for the first time. Should only be called once
1543 * @param t The Thread to add
1545 void ModelExecution::add_thread(Thread *t)
1547 unsigned int i = id_to_int(t->get_id());
1548 if (i >= thread_map.size())
1549 thread_map.resize(i + 1);
1551 if (!t->is_model_thread())
1552 scheduler->add_thread(t);
1556 * @brief Get a Thread reference by its ID
1557 * @param tid The Thread's ID
1558 * @return A Thread reference
1560 Thread * ModelExecution::get_thread(thread_id_t tid) const
1562 unsigned int i = id_to_int(tid);
1563 if (i < thread_map.size())
1564 return thread_map[i];
1569 * @brief Get a reference to the Thread in which a ModelAction was executed
1570 * @param act The ModelAction
1571 * @return A Thread reference
1573 Thread * ModelExecution::get_thread(const ModelAction *act) const
1575 return get_thread(act->get_tid());
1579 * @brief Get a Thread reference by its pthread ID
1580 * @param index The pthread's ID
1581 * @return A Thread reference
/* Map a pthread_t to the internal Thread by reinterpreting (part of) the
 * pthread id as an index into pthread_map.
 * NOTE(review): the declaration of `x` (presumably a union over pthread_t /
 * uint32_t) and the not-found return path appear to have been dropped from
 * this copy of the file — confirm against the original. */
Thread * ModelExecution::get_pthread(pthread_t pid) {
	uint32_t thread_id = x.v;
	/* Known ids are 0..pthread_counter */
	if (thread_id < pthread_counter + 1) return pthread_map[thread_id];
1595 * @brief Check if a Thread is currently enabled
1596 * @param t The Thread to check
1597 * @return True if the Thread is currently enabled
bool ModelExecution::is_enabled(Thread *t) const
	/* The Scheduler owns the set of runnable threads; delegate to it */
	return scheduler->is_enabled(t);
1605 * @brief Check if a Thread is currently enabled
1606 * @param tid The ID of the Thread to check
1607 * @return True if the Thread is currently enabled
bool ModelExecution::is_enabled(thread_id_t tid) const
	/* The Scheduler owns the set of runnable threads; delegate to it */
	return scheduler->is_enabled(tid);
* @brief Select the next thread to execute based on the current action
1617 * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
1618 * actions should be followed by the execution of their child thread. In either
1619 * case, the current action should determine the next thread schedule.
1621 * @param curr The current action
1622 * @return The next thread to run, if the current action will determine this
1623 * selection; otherwise NULL
1625 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
1627 /* Do not split atomic RMW */
1628 if (curr->is_rmwr())
1629 return get_thread(curr);
1630 /* Follow CREATE with the created thread */
1631 /* which is not needed, because model.cc takes care of this */
1632 if (curr->get_type() == THREAD_CREATE)
1633 return curr->get_thread_operand();
1634 if (curr->get_type() == PTHREAD_CREATE) {
1635 return curr->get_thread_operand();
1641 * Takes the next step in the execution, if possible.
1642 * @param curr The current step to take
1643 * @return Returns the next Thread to run, if any; NULL if this execution
/* Advance the execution by one step: validate and process `curr`, record it
 * in the ModelHistory, retire the thread if it blocked or completed, and
 * return the thread (if any) that the current action forces to run next. */
Thread * ModelExecution::take_step(ModelAction *curr)
	Thread *curr_thrd = get_thread(curr);
	/* Only a scheduled, ready thread may take a step */
	ASSERT(curr_thrd->get_state() == THREAD_READY);
	ASSERT(check_action_enabled(curr)); /* May have side effects? */
	/* check_current_action() may substitute an equivalent existing action */
	curr = check_current_action(curr);
	/* Process this action in ModelHistory for records */
	model->get_history()->process_action( curr, curr->get_tid() );
	/* A thread that blocked or finished during this step leaves the scheduler */
	if (curr_thrd->is_blocked() || curr_thrd->is_complete())
		scheduler->remove_thread(curr_thrd);
	return action_select_next_thread(curr);
1664 Fuzzer * ModelExecution::getFuzzer() {