Bug fix for broken treatment of promises + coherence based pruning to regain pruning...
[model-checker.git] / execution.cc
1 #include <stdio.h>
2 #include <algorithm>
3 #include <mutex>
4 #include <new>
5 #include <stdarg.h>
6
7 #include "model.h"
8 #include "execution.h"
9 #include "action.h"
10 #include "nodestack.h"
11 #include "schedule.h"
12 #include "common.h"
13 #include "clockvector.h"
14 #include "cyclegraph.h"
15 #include "promise.h"
16 #include "datarace.h"
17 #include "threads-model.h"
18 #include "bugmessage.h"
19
20 #define INITIAL_THREAD_ID       0
21
22 /**
23  * Structure for holding small ModelChecker members that should be snapshotted
24  */
25 struct model_snapshot_members {
26         model_snapshot_members() :
27                 /* First thread created will have id INITIAL_THREAD_ID */
28                 next_thread_id(INITIAL_THREAD_ID),
29                 used_sequence_numbers(0),
30                 next_backtrack(NULL),
31                 bugs(),
32                 failed_promise(false),
33                 hard_failed_promise(false),
34                 too_many_reads(false),
35                 no_valid_reads(false),
36                 bad_synchronization(false),
37                 asserted(false)
38         { }
39
40         ~model_snapshot_members() {
41                 for (unsigned int i = 0; i < bugs.size(); i++)
42                         delete bugs[i];
43                 bugs.clear();
44         }
45
46         unsigned int next_thread_id;
47         modelclock_t used_sequence_numbers;
48         ModelAction *next_backtrack;
49         SnapVector<bug_message *> bugs;
50         bool failed_promise;
51         bool hard_failed_promise;
52         bool too_many_reads;
53         bool no_valid_reads;
54         /** @brief Incorrectly-ordered synchronization was made */
55         bool bad_synchronization;
56         bool asserted;
57
58         SNAPSHOTALLOC
59 };
60
/**
 * @brief Constructor
 * @param m The owning ModelChecker (not owned by this object)
 * @param params Model-checking parameters; stored by pointer
 * @param scheduler The thread scheduler to drive this execution
 * @param node_stack The shared NodeStack for backtracking state
 */
ModelExecution::ModelExecution(ModelChecker *m,
		const struct model_params *params,
		Scheduler *scheduler,
		NodeStack *node_stack) :
	model(m),
	params(params),
	scheduler(scheduler),
	action_trace(),
	thread_map(2), /* We'll always need at least 2 threads */
	obj_map(),
	condvar_waiters_map(),
	obj_thrd_map(),
	promises(),
	futurevalues(),
	pending_rel_seqs(),
	thrd_last_action(1),
	thrd_last_fence_release(),
	node_stack(node_stack),
	priv(new struct model_snapshot_members()),
	mo_graph(new CycleGraph())
{
	/* Initialize a model-checker thread, for special ModelActions */
	model_thread = new Thread(get_next_id());
	add_thread(model_thread);
	/* Let the scheduler and node stack call back into this execution */
	scheduler->register_engine(this);
	node_stack->register_engine(this);
}
89
90 /** @brief Destructor */
91 ModelExecution::~ModelExecution()
92 {
93         for (unsigned int i = 0; i < get_num_threads(); i++)
94                 delete get_thread(int_to_id(i));
95
96         for (unsigned int i = 0; i < promises.size(); i++)
97                 delete promises[i];
98
99         delete mo_graph;
100         delete priv;
101 }
102
/** @return the current execution number, forwarded from the owning ModelChecker */
int ModelExecution::get_execution_number() const
{
	return model->get_execution_number();
}
107
108 static action_list_t * get_safe_ptr_action(HashTable<const void *, action_list_t *, uintptr_t, 4> * hash, void * ptr)
109 {
110         action_list_t *tmp = hash->get(ptr);
111         if (tmp == NULL) {
112                 tmp = new action_list_t();
113                 hash->put(ptr, tmp);
114         }
115         return tmp;
116 }
117
118 static SnapVector<action_list_t> * get_safe_ptr_vect_action(HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> * hash, void * ptr)
119 {
120         SnapVector<action_list_t> *tmp = hash->get(ptr);
121         if (tmp == NULL) {
122                 tmp = new SnapVector<action_list_t>();
123                 hash->put(ptr, tmp);
124         }
125         return tmp;
126 }
127
128 action_list_t * ModelExecution::get_actions_on_obj(void * obj, thread_id_t tid) const
129 {
130         SnapVector<action_list_t> *wrv = obj_thrd_map.get(obj);
131         if (wrv==NULL)
132                 return NULL;
133         unsigned int thread=id_to_int(tid);
134         if (thread < wrv->size())
135                 return &(*wrv)[thread];
136         else
137                 return NULL;
138 }
139
/** @return a thread ID for a new Thread (post-increments the snapshotted counter) */
thread_id_t ModelExecution::get_next_id()
{
	return priv->next_thread_id++;
}
145
/** @return the number of user threads created during this execution
 *  (equal to the next unassigned thread ID) */
unsigned int ModelExecution::get_num_threads() const
{
	return priv->next_thread_id;
}
151
/** @return a sequence number for a new ModelAction (pre-increments, so the
 *  first number handed out is 1) */
modelclock_t ModelExecution::get_next_seq_num()
{
	return ++priv->used_sequence_numbers;
}
157
158 /**
159  * @brief Should the current action wake up a given thread?
160  *
161  * @param curr The current action
162  * @param thread The thread that we might wake up
163  * @return True, if we should wake up the sleeping thread; false otherwise
164  */
165 bool ModelExecution::should_wake_up(const ModelAction *curr, const Thread *thread) const
166 {
167         const ModelAction *asleep = thread->get_pending();
168         /* Don't allow partial RMW to wake anyone up */
169         if (curr->is_rmwr())
170                 return false;
171         /* Synchronizing actions may have been backtracked */
172         if (asleep->could_synchronize_with(curr))
173                 return true;
174         /* All acquire/release fences and fence-acquire/store-release */
175         if (asleep->is_fence() && asleep->is_acquire() && curr->is_release())
176                 return true;
177         /* Fence-release + store can awake load-acquire on the same location */
178         if (asleep->is_read() && asleep->is_acquire() && curr->same_var(asleep) && curr->is_write()) {
179                 ModelAction *fence_release = get_last_fence_release(curr->get_tid());
180                 if (fence_release && *(get_last_action(thread->get_id())) < *fence_release)
181                         return true;
182         }
183         return false;
184 }
185
186 void ModelExecution::wake_up_sleeping_actions(ModelAction *curr)
187 {
188         for (unsigned int i = 0; i < get_num_threads(); i++) {
189                 Thread *thr = get_thread(int_to_id(i));
190                 if (scheduler->is_sleep_set(thr)) {
191                         if (should_wake_up(curr, thr))
192                                 /* Remove this thread from sleep set */
193                                 scheduler->remove_sleep(thr);
194                 }
195         }
196 }
197
/** @brief Alert the model-checker that an incorrectly-ordered
 * synchronization was made (sets a snapshotted infeasibility flag) */
void ModelExecution::set_bad_synchronization()
{
	priv->bad_synchronization = true;
}
204
205 bool ModelExecution::assert_bug(const char *msg)
206 {
207         priv->bugs.push_back(new bug_message(msg));
208
209         if (isfeasibleprefix()) {
210                 set_assert();
211                 return true;
212         }
213         return false;
214 }
215
216 /** @return True, if any bugs have been reported for this execution */
217 bool ModelExecution::have_bug_reports() const
218 {
219         return priv->bugs.size() != 0;
220 }
221
/** @return the list of bug messages recorded so far (owned by this execution) */
SnapVector<bug_message *> * ModelExecution::get_bugs() const
{
	return &priv->bugs;
}
226
/**
 * Check whether the current trace has triggered an assertion which should halt
 * its execution.
 *
 * @return True, if the execution should be aborted; false otherwise
 */
bool ModelExecution::has_asserted() const
{
	return priv->asserted;
}
237
/**
 * Trigger a trace assertion which should cause this execution to be halted.
 * This can be due to a detected bug or due to an infeasibility that should
 * halt ASAP.
 */
void ModelExecution::set_assert()
{
	priv->asserted = true;
}
247
248 /**
249  * Check if we are in a deadlock. Should only be called at the end of an
250  * execution, although it should not give false positives in the middle of an
251  * execution (there should be some ENABLED thread).
252  *
253  * @return True if program is in a deadlock; false otherwise
254  */
255 bool ModelExecution::is_deadlocked() const
256 {
257         bool blocking_threads = false;
258         for (unsigned int i = 0; i < get_num_threads(); i++) {
259                 thread_id_t tid = int_to_id(i);
260                 if (is_enabled(tid))
261                         return false;
262                 Thread *t = get_thread(tid);
263                 if (!t->is_model_thread() && t->get_pending())
264                         blocking_threads = true;
265         }
266         return blocking_threads;
267 }
268
269 /**
270  * @brief Check if we are yield-blocked
271  *
272  * A program can be "yield-blocked" if all threads are ready to execute a
273  * yield.
274  *
275  * @return True if the program is yield-blocked; false otherwise
276  */
277 bool ModelExecution::is_yieldblocked() const
278 {
279         if (!params->yieldblock)
280                 return false;
281
282         for (unsigned int i = 0; i < get_num_threads(); i++) {
283                 thread_id_t tid = int_to_id(i);
284                 Thread *t = get_thread(tid);
285                 if (t->get_pending() && t->get_pending()->is_yield())
286                         return true;
287         }
288         return false;
289 }
290
291 /**
292  * Check if this is a complete execution. That is, have all thread completed
293  * execution (rather than exiting because sleep sets have forced a redundant
294  * execution).
295  *
296  * @return True if the execution is complete.
297  */
298 bool ModelExecution::is_complete_execution() const
299 {
300         if (is_yieldblocked())
301                 return false;
302         for (unsigned int i = 0; i < get_num_threads(); i++)
303                 if (is_enabled(int_to_id(i)))
304                         return false;
305         return true;
306 }
307
/**
 * @brief Find the last fence-related backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking, as affected by fence
 * operations. This includes pairs of potentially-synchronizing actions which
 * occur due to fence-acquire or fence-release, and hence should be explored in
 * the opposite execution order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act due to fences
 */
ModelAction * ModelExecution::get_last_fence_conflict(ModelAction *act) const
{
	/* Only perform release/acquire fence backtracking for stores */
	if (!act->is_write())
		return NULL;

	/* Find a fence-release (or, act is a release) */
	ModelAction *last_release;
	if (act->is_release())
		last_release = act;
	else
		last_release = get_last_fence_release(act->get_tid());
	if (!last_release)
		return NULL;

	/* Skip past the release */
	const action_list_t *list = &action_trace;
	action_list_t::const_reverse_iterator rit;
	for (rit = list->rbegin(); rit != list->rend(); rit++)
		if (*rit == last_release)
			break;
	/* last_release must appear in the trace */
	ASSERT(rit != list->rend());

	/* Find a prior:
	 *   load-acquire
	 * or
	 *   load --sb-> fence-acquire */
	/* Per-thread candidates, indexed by thread ID */
	ModelVector<ModelAction *> acquire_fences(get_num_threads(), NULL);
	ModelVector<ModelAction *> prior_loads(get_num_threads(), NULL);
	bool found_acquire_fences = false;
	/* NOTE: continues from the iterator position left by the loop above,
	 * i.e. scans backwards starting at last_release */
	for ( ; rit != list->rend(); rit++) {
		ModelAction *prev = *rit;
		/* Actions in act's own thread cannot be conflicts */
		if (act->same_thread(prev))
			continue;

		int tid = id_to_int(prev->get_tid());

		if (prev->is_read() && act->same_var(prev)) {
			if (prev->is_acquire()) {
				/* Found most recent load-acquire, don't need
				 * to search for more fences */
				if (!found_acquire_fences)
					return NULL;
			} else {
				prior_loads[tid] = prev;
			}
		}
		/* Record only the most recent acquire fence per thread */
		if (prev->is_acquire() && prev->is_fence() && !acquire_fences[tid]) {
			found_acquire_fences = true;
			acquire_fences[tid] = prev;
		}
	}

	/* Pick the latest fence that also has a prior load in its thread */
	ModelAction *latest_backtrack = NULL;
	for (unsigned int i = 0; i < acquire_fences.size(); i++)
		if (acquire_fences[i] && prior_loads[i])
			if (!latest_backtrack || *latest_backtrack < *acquire_fences[i])
				latest_backtrack = acquire_fences[i];
	return latest_backtrack;
}
380
/**
 * @brief Find the last backtracking conflict for a ModelAction
 *
 * This function performs the search for the most recent conflicting action
 * against which we should perform backtracking. This primary includes pairs of
 * synchronizing actions which should be explored in the opposite execution
 * order.
 *
 * @param act The current action
 * @return The most recent action which conflicts with act
 */
ModelAction * ModelExecution::get_last_conflict(ModelAction *act) const
{
	switch (act->get_type()) {
	case ATOMIC_FENCE:
		/* Only seq-cst fences can (directly) cause backtracking */
		if (!act->is_seqcst())
			break;
		/* Intentional fall-through: seq-cst fences are treated like
		 * the atomic memory accesses below */
	case ATOMIC_READ:
	case ATOMIC_WRITE:
	case ATOMIC_RMW: {
		ModelAction *ret = NULL;

		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (prev == act)
				continue;
			if (prev->could_synchronize_with(act)) {
				ret = prev;
				break;
			}
		}

		/* Also consider fence-induced conflicts; return whichever
		 * conflict is most recent */
		ModelAction *ret2 = get_last_fence_conflict(act);
		if (!ret2)
			return ret;
		if (!ret)
			return ret2;
		if (*ret < *ret2)
			return ret2;
		return ret;
	}
	case ATOMIC_LOCK:
	case ATOMIC_TRYLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			if (act->is_conflicting_lock(prev))
				return prev;
		}
		break;
	}
	case ATOMIC_UNLOCK: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* An unlock conflicts with another thread's failed trylock */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
		}
		break;
	}
	case ATOMIC_WAIT: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A wait conflicts with failed trylocks and notifications
			 * from other threads */
			if (!act->same_thread(prev) && prev->is_failed_trylock())
				return prev;
			if (!act->same_thread(prev) && prev->is_notify())
				return prev;
		}
		break;
	}

	case ATOMIC_NOTIFY_ALL:
	case ATOMIC_NOTIFY_ONE: {
		/* linear search: from most recent to oldest */
		action_list_t *list = obj_map.get(act->get_location());
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *prev = *rit;
			/* A notify conflicts with another thread's wait */
			if (!act->same_thread(prev) && prev->is_wait())
				return prev;
		}
		break;
	}
	default:
		break;
	}
	/* No conflict found */
	return NULL;
}
480
/** This method finds backtracking points where we should try to
 * reorder the parameter ModelAction against.
 *
 * @param act The ModelAction to find backtracking points for.
 */
void ModelExecution::set_backtracking(ModelAction *act)
{
	Thread *t = get_thread(act);
	ModelAction *prev = get_last_conflict(act);
	/* Nothing to do if act has no conflicting action */
	if (prev == NULL)
		return;

	/* The conflict's parent node is where we may pick a different thread */
	Node *node = prev->get_node()->get_parent();

	/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
	/* If act's thread was enabled at the conflict, only that one thread
	 * needs to be considered; otherwise consider all threads */
	int low_tid, high_tid;
	if (node->enabled_status(t->get_id()) == THREAD_ENABLED) {
		low_tid = id_to_int(act->get_tid());
		high_tid = low_tid + 1;
	} else {
		low_tid = 0;
		high_tid = get_num_threads();
	}

	for (int i = low_tid; i < high_tid; i++) {
		thread_id_t tid = int_to_id(i);

		/* Make sure this thread can be enabled here. */
		if (i >= node->get_num_threads())
			break;

		/* See Dynamic Partial Order Reduction (addendum), POPL '05 */
		/* Don't backtrack into a point where the thread is disabled or sleeping. */
		if (node->enabled_status(tid) != THREAD_ENABLED)
			continue;

		/* Check if this has been explored already */
		if (node->has_been_explored(tid))
			continue;

		/* See if fairness allows */
		if (params->fairwindow != 0 && !node->has_priority(tid)) {
			/* Skip tid if some other enabled thread has priority */
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority(tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* See if CHESS-like yield fairness allows */
		if (params->yieldon) {
			/* Skip tid if some enabled thread has yield-priority over it */
			bool unfair = false;
			for (int t = 0; t < node->get_num_threads(); t++) {
				thread_id_t tother = int_to_id(t);
				if (node->is_enabled(tother) && node->has_priority_over(tid, tother)) {
					unfair = true;
					break;
				}
			}
			if (unfair)
				continue;
		}

		/* Cache the latest backtracking point */
		set_latest_backtrack(prev);

		/* If this is a new backtracking point, mark the tree */
		if (!node->set_backtrack(tid))
			continue;
		DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
					id_to_int(prev->get_tid()),
					id_to_int(t->get_id()));
		if (DBG_ENABLED()) {
			prev->print();
			act->print();
		}
	}
}
564
565 /**
566  * @brief Cache the a backtracking point as the "most recent", if eligible
567  *
568  * Note that this does not prepare the NodeStack for this backtracking
569  * operation, it only caches the action on a per-execution basis
570  *
571  * @param act The operation at which we should explore a different next action
572  * (i.e., backtracking point)
573  * @return True, if this action is now the most recent backtracking point;
574  * false otherwise
575  */
576 bool ModelExecution::set_latest_backtrack(ModelAction *act)
577 {
578         if (!priv->next_backtrack || *act > *priv->next_backtrack) {
579                 priv->next_backtrack = act;
580                 return true;
581         }
582         return false;
583 }
584
585 /**
586  * Returns last backtracking point. The model checker will explore a different
587  * path for this point in the next execution.
588  * @return The ModelAction at which the next execution should diverge.
589  */
590 ModelAction * ModelExecution::get_next_backtrack()
591 {
592         ModelAction *next = priv->next_backtrack;
593         priv->next_backtrack = NULL;
594         return next;
595 }
596
/**
 * Processes a read model action.
 *
 * Depending on the Node's read-from status, the read is satisfied from a
 * past write, from an existing Promise, or from a speculative future value
 * (which creates a new Promise).
 *
 * @param curr is the read model action to process.
 * @return True if processing this read updates the mo_graph.
 */
bool ModelExecution::process_read(ModelAction *curr)
{
	Node *node = curr->get_node();
	/* Loops only via the 'continue' in READ_FROM_PAST, when a too-frequent
	 * read-from choice is rolled back and the next candidate is tried */
	while (true) {
		bool updated = false;
		switch (node->get_read_from_status()) {
		case READ_FROM_PAST: {
			const ModelAction *rf = node->get_read_from_past();
			ASSERT(rf);

			/* Record mo_graph edits so they can be rolled back */
			mo_graph->startChanges();

			ASSERT(!is_infeasible());
			if (!check_recency(curr, rf)) {
				if (node->increment_read_from()) {
					/* Another candidate exists: undo and retry */
					mo_graph->rollbackChanges();
					continue;
				} else {
					/* No candidates left; flag infeasibility */
					priv->too_many_reads = true;
				}
			}

			updated = r_modification_order(curr, rf);
			read_from(curr, rf);
			mo_graph->commitChanges();
			mo_check_promises(curr, true);
			break;
		}
		case READ_FROM_PROMISE: {
			Promise *promise = curr->get_node()->get_read_from_promise();
			/* NOTE(review): a nonzero add_reader() return marks the
			 * promise as failed here — confirm against promise.h */
			if (promise->add_reader(curr))
				priv->failed_promise = true;
			curr->set_read_from_promise(promise);
			mo_graph->startChanges();
			if (!check_recency(curr, promise))
				priv->too_many_reads = true;
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		case READ_FROM_FUTURE: {
			/* Read from future value */
			struct future_value fv = node->get_future_value();
			/* Create a Promise obligating a later write of this value */
			Promise *promise = new Promise(this, curr, fv);
			curr->set_read_from_promise(promise);
			promises.push_back(promise);
			mo_graph->startChanges();
			updated = r_modification_order(curr, promise);
			mo_graph->commitChanges();
			break;
		}
		default:
			ASSERT(false);
		}
		/* Deliver the read value to the executing thread */
		get_thread(curr)->set_return_value(curr->get_return_value());
		return updated;
	}
}
660
/**
 * Processes a lock, trylock, or unlock model action.  @param curr is
 * the read model action to process.
 *
 * The try lock operation checks whether the lock is taken.  If not,
 * it falls to the normal lock operation case.  If so, it returns
 * fail.
 *
 * The lock operation has already been checked that it is enabled, so
 * it just grabs the lock and synchronizes with the previous unlock.
 *
 * The unlock operation has to re-enable all of the threads that are
 * waiting on the lock.
 *
 * @return True if synchronization was updated; false otherwise
 */
bool ModelExecution::process_mutex(ModelAction *curr)
{
	std::mutex *mutex = curr->get_mutex();
	struct std::mutex_state *state = NULL;

	if (mutex)
		state = mutex->get_state();

	switch (curr->get_type()) {
	case ATOMIC_TRYLOCK: {
		/* trylock succeeds iff the mutex is currently free */
		bool success = !state->locked;
		curr->set_try_lock(success);
		if (!success) {
			get_thread(curr)->set_return_value(0);
			break;
		}
		get_thread(curr)->set_return_value(1);
	}
		//otherwise fall into the lock case
	case ATOMIC_LOCK: {
		/* Accessing the mutex before its construction is a bug */
		if (curr->get_cv()->getClock(state->alloc_tid) <= state->alloc_clock)
			assert_bug("Lock access before initialization");
		/* Record the owning thread */
		state->locked = get_thread(curr);
		ModelAction *unlock = get_last_unlock(curr);
		//synchronize with the previous unlock statement
		if (unlock != NULL) {
			synchronize(unlock, curr);
			return true;
		}
		break;
	}
	case ATOMIC_WAIT:
	/* Intentional fall-through: wait releases the mutex like unlock */
	case ATOMIC_UNLOCK: {
		/* wake up the other threads */
		for (unsigned int i = 0; i < get_num_threads(); i++) {
			Thread *t = get_thread(int_to_id(i));
			Thread *curr_thrd = get_thread(curr);
			if (t->waiting_on() == curr_thrd && t->get_pending()->is_lock())
				scheduler->wake(t);
		}

		/* unlock the lock - after checking who was waiting on it */
		state->locked = NULL;

		if (!curr->is_wait())
			break; /* The rest is only for ATOMIC_WAIT */

		/* Should we go to sleep? (simulate spurious failures) */
		if (curr->get_node()->get_misc() == 0) {
			/* Register as a waiter on this condition variable */
			get_safe_ptr_action(&condvar_waiters_map, curr->get_location())->push_back(curr);
			/* disable us */
			scheduler->sleep(get_thread(curr));
		}
		break;
	}
	case ATOMIC_NOTIFY_ALL: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		//activate all the waiting threads
		for (action_list_t::iterator rit = waiters->begin(); rit != waiters->end(); rit++) {
			scheduler->wake(get_thread(*rit));
		}
		waiters->clear();
		break;
	}
	case ATOMIC_NOTIFY_ONE: {
		action_list_t *waiters = get_safe_ptr_action(&condvar_waiters_map, curr->get_location());
		/* The Node's misc counter selects which waiter to wake, so all
		 * choices get explored across executions */
		int wakeupthread = curr->get_node()->get_misc();
		action_list_t::iterator it = waiters->begin();
		advance(it, wakeupthread);
		scheduler->wake(get_thread(*it));
		waiters->erase(it);
		break;
	}

	default:
		ASSERT(0);
	}
	return false;
}
756
757 /**
758  * @brief Check if the current pending promises allow a future value to be sent
759  *
760  * It is unsafe to pass a future value back if there exists a pending promise Pr
761  * such that:
762  *
763  *    reader --exec-> Pr --exec-> writer
764  *
765  * If such Pr exists, we must save the pending future value until Pr is
766  * resolved.
767  *
768  * @param writer The operation which sends the future value. Must be a write.
769  * @param reader The operation which will observe the value. Must be a read.
770  * @return True if the future value can be sent now; false if it must wait.
771  */
772 bool ModelExecution::promises_may_allow(const ModelAction *writer,
773                 const ModelAction *reader) const
774 {
775         for (int i = promises.size() - 1; i >= 0; i--) {
776                 ModelAction *pr = promises[i]->get_reader(0);
777                 //reader is after promise...doesn't cross any promise
778                 if (*reader > *pr)
779                         return true;
780                 //writer is after promise, reader before...bad...
781                 if (*writer > *pr)
782                         return false;
783         }
784         return true;
785 }
786
/**
 * @brief Add a future value to a reader
 *
 * This function performs a few additional checks to ensure that the future
 * value can be feasibly observed by the reader
 *
 * @param writer The operation whose value is sent. Must be a write.
 * @param reader The read operation which may read the future value. Must be a read.
 */
void ModelExecution::add_future_value(const ModelAction *writer, ModelAction *reader)
{
	/* Do more ambitious checks now that mo is more complete */
	if (!mo_may_allow(writer, reader))
		return;

	Node *node = reader->get_node();

	/* Find an ancestor thread which exists at the time of the reader */
	Thread *write_thread = get_thread(writer);
	while (id_to_int(write_thread->get_id()) >= node->get_num_threads())
		write_thread = write_thread->get_parent();

	/* Aggregate initialization: value, expiration clock, sending thread
	 * (order must match struct future_value's declaration) */
	struct future_value fv = {
		writer->get_write_value(),
		writer->get_seq_number() + params->maxfuturedelay,
		write_thread->get_id(),
	};
	/* A newly-recorded future value creates a backtracking point */
	if (node->add_future_value(fv))
		set_latest_backtrack(reader);
}
817
818 /**
819  * Process a write ModelAction
820  * @param curr The ModelAction to process
821  * @param work The work queue, for adding fixup work
822  * @return True if the mo_graph was updated or promises were resolved
823  */
824 bool ModelExecution::process_write(ModelAction *curr, work_queue_t *work)
825 {
826         /* Readers to which we may send our future value */
827         ModelVector<ModelAction *> send_fv;
828
829         const ModelAction *earliest_promise_reader;
830         bool updated_promises = false;
831
832         bool updated_mod_order = w_modification_order(curr, &send_fv);
833         Promise *promise = pop_promise_to_resolve(curr);
834
835         if (promise) {
836                 earliest_promise_reader = promise->get_reader(0);
837                 updated_promises = resolve_promise(curr, promise, work);
838         } else
839                 earliest_promise_reader = NULL;
840
841         for (unsigned int i = 0; i < send_fv.size(); i++) {
842                 ModelAction *read = send_fv[i];
843
844                 /* Don't send future values to reads after the Promise we resolve */
845                 if (!earliest_promise_reader || *read < *earliest_promise_reader) {
846                         /* Check if future value can be sent immediately */
847                         if (promises_may_allow(curr, read)) {
848                                 add_future_value(curr, read);
849                         } else {
850                                 futurevalues.push_back(PendingFutureValue(curr, read));
851                         }
852                 }
853         }
854
855         /* Check the pending future values */
856         for (int i = (int)futurevalues.size() - 1; i >= 0; i--) {
857                 struct PendingFutureValue pfv = futurevalues[i];
858                 if (promises_may_allow(pfv.writer, pfv.reader)) {
859                         add_future_value(pfv.writer, pfv.reader);
860                         futurevalues.erase(futurevalues.begin() + i);
861                 }
862         }
863
864         mo_graph->commitChanges();
865         mo_check_promises(curr, false);
866
867         get_thread(curr)->set_return_value(VALUE_NONE);
868         return updated_mod_order || updated_promises;
869 }
870
871 /**
872  * Process a fence ModelAction
873  * @param curr The ModelAction to process
874  * @return True if synchronization was updated
875  */
876 bool ModelExecution::process_fence(ModelAction *curr)
877 {
878         /*
879          * fence-relaxed: no-op
880          * fence-release: only log the occurence (not in this function), for
881          *   use in later synchronization
882          * fence-acquire (this function): search for hypothetical release
883          *   sequences
884          * fence-seq-cst: MO constraints formed in {r,w}_modification_order
885          */
886         bool updated = false;
887         if (curr->is_acquire()) {
888                 action_list_t *list = &action_trace;
889                 action_list_t::reverse_iterator rit;
890                 /* Find X : is_read(X) && X --sb-> curr */
891                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
892                         ModelAction *act = *rit;
893                         if (act == curr)
894                                 continue;
895                         if (act->get_tid() != curr->get_tid())
896                                 continue;
897                         /* Stop at the beginning of the thread */
898                         if (act->is_thread_start())
899                                 break;
900                         /* Stop once we reach a prior fence-acquire */
901                         if (act->is_fence() && act->is_acquire())
902                                 break;
903                         if (!act->is_read())
904                                 continue;
905                         /* read-acquire will find its own release sequences */
906                         if (act->is_acquire())
907                                 continue;
908
909                         /* Establish hypothetical release sequences */
910                         rel_heads_list_t release_heads;
911                         get_release_seq_heads(curr, act, &release_heads);
912                         for (unsigned int i = 0; i < release_heads.size(); i++)
913                                 synchronize(release_heads[i], curr);
914                         if (release_heads.size() != 0)
915                                 updated = true;
916                 }
917         }
918         return updated;
919 }
920
921 /**
922  * @brief Process the current action for thread-related activity
923  *
924  * Performs current-action processing for a THREAD_* ModelAction. Proccesses
925  * may include setting Thread status, completing THREAD_FINISH/THREAD_JOIN
926  * synchronization, etc.  This function is a no-op for non-THREAD actions
927  * (e.g., ATOMIC_{READ,WRITE,RMW,LOCK}, etc.)
928  *
929  * @param curr The current action
930  * @return True if synchronization was updated or a thread completed
931  */
932 bool ModelExecution::process_thread_action(ModelAction *curr)
933 {
934         bool updated = false;
935
936         switch (curr->get_type()) {
937         case THREAD_CREATE: {
938                 thrd_t *thrd = (thrd_t *)curr->get_location();
939                 struct thread_params *params = (struct thread_params *)curr->get_value();
940                 Thread *th = new Thread(get_next_id(), thrd, params->func, params->arg, get_thread(curr));
941                 add_thread(th);
942                 th->set_creation(curr);
943                 /* Promises can be satisfied by children */
944                 for (unsigned int i = 0; i < promises.size(); i++) {
945                         Promise *promise = promises[i];
946                         if (promise->thread_is_available(curr->get_tid()))
947                                 promise->add_thread(th->get_id());
948                 }
949                 break;
950         }
951         case THREAD_JOIN: {
952                 Thread *blocking = curr->get_thread_operand();
953                 ModelAction *act = get_last_action(blocking->get_id());
954                 synchronize(act, curr);
955                 updated = true; /* trigger rel-seq checks */
956                 break;
957         }
958         case THREAD_FINISH: {
959                 Thread *th = get_thread(curr);
960                 /* Wake up any joining threads */
961                 for (unsigned int i = 0; i < get_num_threads(); i++) {
962                         Thread *waiting = get_thread(int_to_id(i));
963                         if (waiting->waiting_on() == th &&
964                                         waiting->get_pending()->is_thread_join())
965                                 scheduler->wake(waiting);
966                 }
967                 th->complete();
968                 /* Completed thread can't satisfy promises */
969                 for (unsigned int i = 0; i < promises.size(); i++) {
970                         Promise *promise = promises[i];
971                         if (promise->thread_is_available(th->get_id()))
972                                 if (promise->eliminate_thread(th->get_id()))
973                                         priv->failed_promise = true;
974                 }
975                 updated = true; /* trigger rel-seq checks */
976                 break;
977         }
978         case THREAD_START: {
979                 check_promises(curr->get_tid(), NULL, curr->get_cv());
980                 break;
981         }
982         default:
983                 break;
984         }
985
986         return updated;
987 }
988
989 /**
990  * @brief Process the current action for release sequence fixup activity
991  *
992  * Performs model-checker release sequence fixups for the current action,
993  * forcing a single pending release sequence to break (with a given, potential
994  * "loose" write) or to complete (i.e., synchronize). If a pending release
995  * sequence forms a complete release sequence, then we must perform the fixup
996  * synchronization, mo_graph additions, etc.
997  *
998  * @param curr The current action; must be a release sequence fixup action
999  * @param work_queue The work queue to which to add work items as they are
1000  * generated
1001  */
1002 void ModelExecution::process_relseq_fixup(ModelAction *curr, work_queue_t *work_queue)
1003 {
1004         const ModelAction *write = curr->get_node()->get_relseq_break();
1005         struct release_seq *sequence = pending_rel_seqs.back();
1006         pending_rel_seqs.pop_back();
1007         ASSERT(sequence);
1008         ModelAction *acquire = sequence->acquire;
1009         const ModelAction *rf = sequence->rf;
1010         const ModelAction *release = sequence->release;
1011         ASSERT(acquire);
1012         ASSERT(release);
1013         ASSERT(rf);
1014         ASSERT(release->same_thread(rf));
1015
1016         if (write == NULL) {
1017                 /**
1018                  * @todo Forcing a synchronization requires that we set
1019                  * modification order constraints. For instance, we can't allow
1020                  * a fixup sequence in which two separate read-acquire
1021                  * operations read from the same sequence, where the first one
1022                  * synchronizes and the other doesn't. Essentially, we can't
1023                  * allow any writes to insert themselves between 'release' and
1024                  * 'rf'
1025                  */
1026
1027                 /* Must synchronize */
1028                 if (!synchronize(release, acquire))
1029                         return;
1030
1031                 /* Propagate the changed clock vector */
1032                 propagate_clockvector(acquire, work_queue);
1033         } else {
1034                 /* Break release sequence with new edges:
1035                  *   release --mo--> write --mo--> rf */
1036                 mo_graph->addEdge(release, write);
1037                 mo_graph->addEdge(write, rf);
1038         }
1039
1040         /* See if we have realized a data race */
1041         checkDataRaces();
1042 }
1043
1044 /**
1045  * Initialize the current action by performing one or more of the following
1046  * actions, as appropriate: merging RMWR and RMWC/RMW actions, stepping forward
1047  * in the NodeStack, manipulating backtracking sets, allocating and
1048  * initializing clock vectors, and computing the promises to fulfill.
1049  *
1050  * @param curr The current action, as passed from the user context; may be
1051  * freed/invalidated after the execution of this function, with a different
1052  * action "returned" its place (pass-by-reference)
1053  * @return True if curr is a newly-explored action; false otherwise
1054  */
1055 bool ModelExecution::initialize_curr_action(ModelAction **curr)
1056 {
1057         ModelAction *newcurr;
1058
1059         if ((*curr)->is_rmwc() || (*curr)->is_rmw()) {
1060                 newcurr = process_rmw(*curr);
1061                 delete *curr;
1062
1063                 if (newcurr->is_rmw())
1064                         compute_promises(newcurr);
1065
1066                 *curr = newcurr;
1067                 return false;
1068         }
1069
1070         (*curr)->set_seq_number(get_next_seq_num());
1071
1072         newcurr = node_stack->explore_action(*curr, scheduler->get_enabled_array());
1073         if (newcurr) {
1074                 /* First restore type and order in case of RMW operation */
1075                 if ((*curr)->is_rmwr())
1076                         newcurr->copy_typeandorder(*curr);
1077
1078                 ASSERT((*curr)->get_location() == newcurr->get_location());
1079                 newcurr->copy_from_new(*curr);
1080
1081                 /* Discard duplicate ModelAction; use action from NodeStack */
1082                 delete *curr;
1083
1084                 /* Always compute new clock vector */
1085                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1086
1087                 *curr = newcurr;
1088                 return false; /* Action was explored previously */
1089         } else {
1090                 newcurr = *curr;
1091
1092                 /* Always compute new clock vector */
1093                 newcurr->create_cv(get_parent_action(newcurr->get_tid()));
1094
1095                 /* Assign most recent release fence */
1096                 newcurr->set_last_fence_release(get_last_fence_release(newcurr->get_tid()));
1097
1098                 /*
1099                  * Perform one-time actions when pushing new ModelAction onto
1100                  * NodeStack
1101                  */
1102                 if (newcurr->is_write())
1103                         compute_promises(newcurr);
1104                 else if (newcurr->is_relseq_fixup())
1105                         compute_relseq_breakwrites(newcurr);
1106                 else if (newcurr->is_wait())
1107                         newcurr->get_node()->set_misc_max(2);
1108                 else if (newcurr->is_notify_one()) {
1109                         newcurr->get_node()->set_misc_max(get_safe_ptr_action(&condvar_waiters_map, newcurr->get_location())->size());
1110                 }
1111                 return true; /* This was a new ModelAction */
1112         }
1113 }
1114
1115 /**
1116  * @brief Establish reads-from relation between two actions
1117  *
1118  * Perform basic operations involved with establishing a concrete rf relation,
1119  * including setting the ModelAction data and checking for release sequences.
1120  *
1121  * @param act The action that is reading (must be a read)
1122  * @param rf The action from which we are reading (must be a write)
1123  *
1124  * @return True if this read established synchronization
1125  */
1126 bool ModelExecution::read_from(ModelAction *act, const ModelAction *rf)
1127 {
1128         ASSERT(rf);
1129         ASSERT(rf->is_write());
1130
1131         act->set_read_from(rf);
1132         if (act->is_acquire()) {
1133                 rel_heads_list_t release_heads;
1134                 get_release_seq_heads(act, act, &release_heads);
1135                 int num_heads = release_heads.size();
1136                 for (unsigned int i = 0; i < release_heads.size(); i++)
1137                         if (!synchronize(release_heads[i], act))
1138                                 num_heads--;
1139                 return num_heads > 0;
1140         }
1141         return false;
1142 }
1143
1144 /**
1145  * @brief Synchronizes two actions
1146  *
1147  * When A synchronizes with B (or A --sw-> B), B inherits A's clock vector.
1148  * This function performs the synchronization as well as providing other hooks
1149  * for other checks along with synchronization.
1150  *
1151  * @param first The left-hand side of the synchronizes-with relation
1152  * @param second The right-hand side of the synchronizes-with relation
1153  * @return True if the synchronization was successful (i.e., was consistent
1154  * with the execution order); false otherwise
1155  */
1156 bool ModelExecution::synchronize(const ModelAction *first, ModelAction *second)
1157 {
1158         if (*second < *first) {
1159                 set_bad_synchronization();
1160                 return false;
1161         }
1162         check_promises(first->get_tid(), second->get_cv(), first->get_cv());
1163         return second->synchronize_with(first);
1164 }
1165
1166 /**
1167  * Check promises and eliminate potentially-satisfying threads when a thread is
1168  * blocked (e.g., join, lock). A thread which is waiting on another thread can
1169  * no longer satisfy a promise generated from that thread.
1170  *
1171  * @param blocker The thread on which a thread is waiting
1172  * @param waiting The waiting thread
1173  */
1174 void ModelExecution::thread_blocking_check_promises(Thread *blocker, Thread *waiting)
1175 {
1176         for (unsigned int i = 0; i < promises.size(); i++) {
1177                 Promise *promise = promises[i];
1178                 if (!promise->thread_is_available(waiting->get_id()))
1179                         continue;
1180                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
1181                         ModelAction *reader = promise->get_reader(j);
1182                         if (reader->get_tid() != blocker->get_id())
1183                                 continue;
1184                         if (promise->eliminate_thread(waiting->get_id())) {
1185                                 /* Promise has failed */
1186                                 priv->failed_promise = true;
1187                         } else {
1188                                 /* Only eliminate the 'waiting' thread once */
1189                                 return;
1190                         }
1191                 }
1192         }
1193 }
1194
1195 /**
1196  * @brief Check whether a model action is enabled.
1197  *
1198  * Checks whether an operation would be successful (i.e., is a lock already
1199  * locked, or is the joined thread already complete).
1200  *
1201  * For yield-blocking, yields are never enabled.
1202  *
1203  * @param curr is the ModelAction to check whether it is enabled.
1204  * @return a bool that indicates whether the action is enabled.
1205  */
1206 bool ModelExecution::check_action_enabled(ModelAction *curr) {
1207         if (curr->is_lock()) {
1208                 std::mutex *lock = curr->get_mutex();
1209                 struct std::mutex_state *state = lock->get_state();
1210                 if (state->locked)
1211                         return false;
1212         } else if (curr->is_thread_join()) {
1213                 Thread *blocking = curr->get_thread_operand();
1214                 if (!blocking->is_complete()) {
1215                         thread_blocking_check_promises(blocking, get_thread(curr));
1216                         return false;
1217                 }
1218         } else if (params->yieldblock && curr->is_yield()) {
1219                 return false;
1220         }
1221
1222         return true;
1223 }
1224
1225 /**
1226  * This is the heart of the model checker routine. It performs model-checking
1227  * actions corresponding to a given "current action." Among other processes, it
1228  * calculates reads-from relationships, updates synchronization clock vectors,
1229  * forms a memory_order constraints graph, and handles replay/backtrack
1230  * execution when running permutations of previously-observed executions.
1231  *
1232  * @param curr The current action to process
1233  * @return The ModelAction that is actually executed; may be different than
1234  * curr
1235  */
1236 ModelAction * ModelExecution::check_current_action(ModelAction *curr)
1237 {
1238         ASSERT(curr);
1239         bool second_part_of_rmw = curr->is_rmwc() || curr->is_rmw();
1240         bool newly_explored = initialize_curr_action(&curr);
1241
1242         DBG();
1243
1244         wake_up_sleeping_actions(curr);
1245
1246         /* Compute fairness information for CHESS yield algorithm */
1247         if (params->yieldon) {
1248                 curr->get_node()->update_yield(scheduler);
1249         }
1250
1251         /* Add the action to lists before any other model-checking tasks */
1252         if (!second_part_of_rmw)
1253                 add_action_to_lists(curr);
1254
1255         /* Build may_read_from set for newly-created actions */
1256         if (newly_explored && curr->is_read())
1257                 build_may_read_from(curr);
1258
1259         /* Initialize work_queue with the "current action" work */
1260         work_queue_t work_queue(1, CheckCurrWorkEntry(curr));
1261         while (!work_queue.empty() && !has_asserted()) {
1262                 WorkQueueEntry work = work_queue.front();
1263                 work_queue.pop_front();
1264
1265                 switch (work.type) {
1266                 case WORK_CHECK_CURR_ACTION: {
1267                         ModelAction *act = work.action;
1268                         bool update = false; /* update this location's release seq's */
1269                         bool update_all = false; /* update all release seq's */
1270
1271                         if (process_thread_action(curr))
1272                                 update_all = true;
1273
1274                         if (act->is_read() && !second_part_of_rmw && process_read(act))
1275                                 update = true;
1276
1277                         if (act->is_write() && process_write(act, &work_queue))
1278                                 update = true;
1279
1280                         if (act->is_fence() && process_fence(act))
1281                                 update_all = true;
1282
1283                         if (act->is_mutex_op() && process_mutex(act))
1284                                 update_all = true;
1285
1286                         if (act->is_relseq_fixup())
1287                                 process_relseq_fixup(curr, &work_queue);
1288
1289                         if (update_all)
1290                                 work_queue.push_back(CheckRelSeqWorkEntry(NULL));
1291                         else if (update)
1292                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1293                         break;
1294                 }
1295                 case WORK_CHECK_RELEASE_SEQ:
1296                         resolve_release_sequences(work.location, &work_queue);
1297                         break;
1298                 case WORK_CHECK_MO_EDGES: {
1299                         /** @todo Complete verification of work_queue */
1300                         ModelAction *act = work.action;
1301                         bool updated = false;
1302
1303                         if (act->is_read()) {
1304                                 const ModelAction *rf = act->get_reads_from();
1305                                 const Promise *promise = act->get_reads_from_promise();
1306                                 if (rf) {
1307                                         if (r_modification_order(act, rf))
1308                                                 updated = true;
1309                                 } else if (promise) {
1310                                         if (r_modification_order(act, promise))
1311                                                 updated = true;
1312                                 }
1313                         }
1314                         if (act->is_write()) {
1315                                 if (w_modification_order(act, NULL))
1316                                         updated = true;
1317                         }
1318                         mo_graph->commitChanges();
1319
1320                         if (updated)
1321                                 work_queue.push_back(CheckRelSeqWorkEntry(act->get_location()));
1322                         break;
1323                 }
1324                 default:
1325                         ASSERT(false);
1326                         break;
1327                 }
1328         }
1329
1330         check_curr_backtracking(curr);
1331         set_backtracking(curr);
1332         return curr;
1333 }
1334
1335 void ModelExecution::check_curr_backtracking(ModelAction *curr)
1336 {
1337         Node *currnode = curr->get_node();
1338         Node *parnode = currnode->get_parent();
1339
1340         if ((parnode && !parnode->backtrack_empty()) ||
1341                          !currnode->misc_empty() ||
1342                          !currnode->read_from_empty() ||
1343                          !currnode->promise_empty() ||
1344                          !currnode->relseq_break_empty()) {
1345                 set_latest_backtrack(curr);
1346         }
1347 }
1348
1349 bool ModelExecution::promises_expired() const
1350 {
1351         for (unsigned int i = 0; i < promises.size(); i++) {
1352                 Promise *promise = promises[i];
1353                 if (promise->get_expiration() < priv->used_sequence_numbers)
1354                         return true;
1355         }
1356         return false;
1357 }
1358
1359 /**
1360  * This is the strongest feasibility check available.
1361  * @return whether the current trace (partial or complete) must be a prefix of
1362  * a feasible trace.
1363  */
1364 bool ModelExecution::isfeasibleprefix() const
1365 {
1366         return pending_rel_seqs.size() == 0 && is_feasible_prefix_ignore_relseq();
1367 }
1368
1369 /**
1370  * Print disagnostic information about an infeasible execution
1371  * @param prefix A string to prefix the output with; if NULL, then a default
1372  * message prefix will be provided
1373  */
1374 void ModelExecution::print_infeasibility(const char *prefix) const
1375 {
1376         char buf[100];
1377         char *ptr = buf;
1378         if (mo_graph->checkForCycles())
1379                 ptr += sprintf(ptr, "[mo cycle]");
1380         if (priv->failed_promise || priv->hard_failed_promise)
1381                 ptr += sprintf(ptr, "[failed promise]");
1382         if (priv->too_many_reads)
1383                 ptr += sprintf(ptr, "[too many reads]");
1384         if (priv->no_valid_reads)
1385                 ptr += sprintf(ptr, "[no valid reads-from]");
1386         if (priv->bad_synchronization)
1387                 ptr += sprintf(ptr, "[bad sw ordering]");
1388         if (promises_expired())
1389                 ptr += sprintf(ptr, "[promise expired]");
1390         if (promises.size() != 0)
1391                 ptr += sprintf(ptr, "[unresolved promise]");
1392         if (ptr != buf)
1393                 model_print("%s: %s", prefix ? prefix : "Infeasible", buf);
1394 }
1395
1396 /**
1397  * Returns whether the current completed trace is feasible, except for pending
1398  * release sequences.
1399  */
1400 bool ModelExecution::is_feasible_prefix_ignore_relseq() const
1401 {
1402         return !is_infeasible() && promises.size() == 0 && ! priv->failed_promise;
1403
1404 }
1405
1406 /**
1407  * Check if the current partial trace is infeasible. Does not check any
1408  * end-of-execution flags, which might rule out the execution. Thus, this is
1409  * useful only for ruling an execution as infeasible.
1410  * @return whether the current partial trace is infeasible.
1411  */
1412 bool ModelExecution::is_infeasible() const
1413 {
1414         return mo_graph->checkForCycles() ||
1415                 priv->no_valid_reads ||
1416                 priv->too_many_reads ||
1417                 priv->bad_synchronization ||
1418                 priv->hard_failed_promise ||
1419                 promises_expired();
1420 }
1421
1422 /** Close out a RMWR by converting previous RMWR into a RMW or READ. */
1423 ModelAction * ModelExecution::process_rmw(ModelAction *act) {
1424         ModelAction *lastread = get_last_action(act->get_tid());
1425         lastread->process_rmw(act);
1426         if (act->is_rmw()) {
1427                 if (lastread->get_reads_from())
1428                         mo_graph->addRMWEdge(lastread->get_reads_from(), lastread);
1429                 else
1430                         mo_graph->addRMWEdge(lastread->get_reads_from_promise(), lastread);
1431                 mo_graph->commitChanges();
1432         }
1433         return lastread;
1434 }
1435
1436 /**
1437  * A helper function for ModelExecution::check_recency, to check if the current
1438  * thread is able to read from a different write/promise for 'params.maxreads'
1439  * number of steps and if that write/promise should become visible (i.e., is
1440  * ordered later in the modification order). This helps model memory liveness.
1441  *
1442  * @param curr The current action. Must be a read.
1443  * @param rf The write/promise from which we plan to read
1444  * @param other_rf The write/promise from which we may read
1445  * @return True if we were able to read from other_rf for params.maxreads steps
1446  */
1447 template <typename T, typename U>
1448 bool ModelExecution::should_read_instead(const ModelAction *curr, const T *rf, const U *other_rf) const
1449 {
1450         /* Need a different write/promise */
1451         if (other_rf->equals(rf))
1452                 return false;
1453
1454         /* Only look for "newer" writes/promises */
1455         if (!mo_graph->checkReachable(rf, other_rf))
1456                 return false;
1457
1458         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1459         action_list_t *list = &(*thrd_lists)[id_to_int(curr->get_tid())];
1460         action_list_t::reverse_iterator rit = list->rbegin();
1461         ASSERT((*rit) == curr);
1462         /* Skip past curr */
1463         rit++;
1464
1465         /* Does this write/promise work for everyone? */
1466         for (int i = 0; i < params->maxreads; i++, rit++) {
1467                 ModelAction *act = *rit;
1468                 if (!act->may_read_from(other_rf))
1469                         return false;
1470         }
1471         return true;
1472 }
1473
1474 /**
1475  * Checks whether a thread has read from the same write or Promise for too many
1476  * times without seeing the effects of a later write/Promise.
1477  *
1478  * Basic idea:
1479  * 1) there must a different write/promise that we could read from,
1480  * 2) we must have read from the same write/promise in excess of maxreads times,
1481  * 3) that other write/promise must have been in the reads_from set for maxreads times, and
1482  * 4) that other write/promise must be mod-ordered after the write/promise we are reading.
1483  *
1484  * If so, we decide that the execution is no longer feasible.
1485  *
1486  * @param curr The current action. Must be a read.
1487  * @param rf The ModelAction/Promise from which we might read.
1488  * @return True if the read should succeed; false otherwise
1489  */
1490 template <typename T>
1491 bool ModelExecution::check_recency(ModelAction *curr, const T *rf) const
1492 {
1493         if (!params->maxreads)
1494                 return true;
1495
1496         //NOTE: Next check is just optimization, not really necessary....
1497         if (curr->get_node()->get_read_from_past_size() +
1498                         curr->get_node()->get_read_from_promise_size() <= 1)
1499                 return true;
1500
1501         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
1502         int tid = id_to_int(curr->get_tid());
1503         ASSERT(tid < (int)thrd_lists->size());
1504         action_list_t *list = &(*thrd_lists)[tid];
1505         action_list_t::reverse_iterator rit = list->rbegin();
1506         ASSERT((*rit) == curr);
1507         /* Skip past curr */
1508         rit++;
1509
1510         action_list_t::reverse_iterator ritcopy = rit;
1511         /* See if we have enough reads from the same value */
1512         for (int count = 0; count < params->maxreads; ritcopy++, count++) {
1513                 if (ritcopy == list->rend())
1514                         return true;
1515                 ModelAction *act = *ritcopy;
1516                 if (!act->is_read())
1517                         return true;
1518                 if (act->get_reads_from_promise() && !act->get_reads_from_promise()->equals(rf))
1519                         return true;
1520                 if (act->get_reads_from() && !act->get_reads_from()->equals(rf))
1521                         return true;
1522                 if (act->get_node()->get_read_from_past_size() +
1523                                 act->get_node()->get_read_from_promise_size() <= 1)
1524                         return true;
1525         }
1526         for (int i = 0; i < curr->get_node()->get_read_from_past_size(); i++) {
1527                 const ModelAction *write = curr->get_node()->get_read_from_past(i);
1528                 if (should_read_instead(curr, rf, write))
1529                         return false; /* liveness failure */
1530         }
1531         for (int i = 0; i < curr->get_node()->get_read_from_promise_size(); i++) {
1532                 const Promise *promise = curr->get_node()->get_read_from_promise(i);
1533                 if (should_read_instead(curr, rf, promise))
1534                         return false; /* liveness failure */
1535         }
1536         return true;
1537 }
1538
/**
 * @brief Updates the mo_graph with the constraints imposed from the current
 * read.
 *
 * Basic idea is the following: Go through each other thread and find
 * the last action that happened before our read.  Two cases:
 *
 * -# The action is a write: that write must either occur before
 * the write we read from or be the write we read from.
 * -# The action is a read: the write that that action read from
 * must occur before the write we read from or be the same write.
 *
 * @param curr The current action. Must be a read.
 * @param rf The ModelAction or Promise that curr reads from. Must be a write.
 * @return True if modification order edges were added; false otherwise
 */
template <typename rf_type>
bool ModelExecution::r_modification_order(ModelAction *curr, const rf_type *rf)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_read());

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);
	ModelAction *last_sc_write = NULL;
	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i */
		ModelAction *last_sc_fence_thread_local = NULL;
		if (int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_local = get_last_seq_cst_fence(int_to_id(i), NULL);

		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local)
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Skip curr */
			if (act == curr)
				continue;
			/* Don't want to add reflexive edges on 'rf' */
			if (act->equals(rf)) {
				if (act->happens_before(curr))
					break;
				else
					continue;
			}

			if (act->is_write()) {
				/* C++, Section 29.3 statement 5 */
				if (curr->is_seqcst() && last_sc_fence_thread_local &&
						*act < *last_sc_fence_thread_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 4 */
				else if (act->is_seqcst() && last_sc_fence_local &&
						*act < *last_sc_fence_local) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
				/* C++, Section 29.3 statement 6 */
				else if (last_sc_fence_thread_before &&
						*act < *last_sc_fence_thread_before) {
					added = mo_graph->addEdge(act, rf) || added;
					break;
				}
			}

			/* C++, Section 29.3 statement 3 (second subpoint) */
			if (curr->is_seqcst() && last_sc_write && act == last_sc_write) {
				added = mo_graph->addEdge(act, rf) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				if (act->is_write()) {
					added = mo_graph->addEdge(act, rf) || added;
				} else {
					/* act is a read; order its source before rf.
					 * Exactly one of prevrf / prevrf_promise is
					 * expected to be non-NULL here */
					const ModelAction *prevrf = act->get_reads_from();
					const Promise *prevrf_promise = act->get_reads_from_promise();
					if (prevrf) {
						if (!prevrf->equals(rf))
							added = mo_graph->addEdge(prevrf, rf) || added;
					} else if (!prevrf_promise->equals(rf)) {
						added = mo_graph->addEdge(prevrf_promise, rf) || added;
					}
				}
				break;
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete loads from the same thread
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(rf, promises[i]) || added;

	return added;
}
1657
/**
 * Updates the mo_graph with the constraints imposed from the current write.
 *
 * Basic idea is the following: Go through each other thread and find
 * the latest action that happened before our write.  Two cases:
 *
 * (1) The action is a write => that write must occur before
 * the current write
 *
 * (2) The action is a read => the write that that action read from
 * must occur before the current write.
 *
 * This method also handles two other issues:
 *
 * (I) Sequential Consistency: Making sure that if the current write is
 * seq_cst, that it occurs after the previous seq_cst write.
 *
 * (II) Sending the write back to non-synchronizing reads.
 *
 * @param curr The current action. Must be a write.
 * @param send_fv A vector for stashing reads to which we may pass our future
 * value. If NULL, then don't record any future values.
 * @return True if modification order edges were added; false otherwise
 */
bool ModelExecution::w_modification_order(ModelAction *curr, ModelVector<ModelAction *> *send_fv)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	bool added = false;
	ASSERT(curr->is_write());

	if (curr->is_seqcst()) {
		/* We have to at least see the last sequentially consistent
		 * write, so we are initialized. */
		ModelAction *last_seq_cst = get_last_seq_cst_write(curr);
		if (last_seq_cst != NULL) {
			added = mo_graph->addEdge(last_seq_cst, curr) || added;
		}
	}

	/* Last SC fence in the current thread */
	ModelAction *last_sc_fence_local = get_last_seq_cst_fence(curr->get_tid(), NULL);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Last SC fence in thread i, before last SC fence in current thread */
		ModelAction *last_sc_fence_thread_before = NULL;
		if (last_sc_fence_local && int_to_id((int)i) != curr->get_tid())
			last_sc_fence_thread_before = get_last_seq_cst_fence(int_to_id(i), last_sc_fence_local);

		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;
			if (act == curr) {
				/*
				 * 1) If RMW and it actually read from something, then we
				 * already have all relevant edges, so just skip to next
				 * thread.
				 *
				 * 2) If RMW and it didn't read from anything, we should
				 * add whatever edge we can get to speed up convergence.
				 *
				 * 3) If normal write, we need to look at earlier actions, so
				 * continue processing list.
				 */
				if (curr->is_rmw()) {
					if (curr->get_reads_from() != NULL)
						break;
					else
						continue;
				} else
					continue;
			}

			/* C++, Section 29.3 statement 7 */
			if (last_sc_fence_thread_before && act->is_write() &&
					*act < *last_sc_fence_thread_before) {
				added = mo_graph->addEdge(act, curr) || added;
				break;
			}

			/*
			 * Include at most one act per-thread that "happens
			 * before" curr
			 */
			if (act->happens_before(curr)) {
				/*
				 * Note: if act is RMW, just add edge:
				 *   act --mo--> curr
				 * The following edge should be handled elsewhere:
				 *   readfrom(act) --mo--> act
				 */
				if (act->is_write())
					added = mo_graph->addEdge(act, curr) || added;
				else if (act->is_read()) {
					//if previous read accessed a null, just keep going
					if (act->get_reads_from() == NULL)
						continue;
					added = mo_graph->addEdge(act->get_reads_from(), curr) || added;
				}
				break;
			} else if (act->is_read() && !act->could_synchronize_with(curr) &&
			                             !act->same_thread(curr)) {
				/* We have an action that:
				   (1) did not happen before us
				   (2) is a read and we are a write
				   (3) cannot synchronize with us
				   (4) is in a different thread
				   =>
				   that read could potentially read from our write.  Note that
				   these checks are overly conservative at this point, we'll
				   do more checks before actually removing the
				   pendingfuturevalue.

				 */

				/* Stash the read as a future-value candidate unless the
				 * thin-air or coherence-promise checks rule it out */
				if (send_fv && thin_air_constraint_may_allow(curr, act) && check_coherence_promise(curr, act)) {
					if (!is_infeasible())
						send_fv->push_back(act);
					else if (curr->is_rmw() && act->is_rmw() && curr->get_reads_from() && curr->get_reads_from() == act->get_reads_from())
						add_future_value(curr, act);
				}
			}
		}
	}

	/*
	 * All compatible, thread-exclusive promises must be ordered after any
	 * concrete stores to the same thread, or else they can be merged with
	 * this store later
	 */
	for (unsigned int i = 0; i < promises.size(); i++)
		if (promises[i]->is_compatible_exclusive(curr))
			added = mo_graph->addEdge(curr, promises[i]) || added;

	return added;
}
1797
//This procedure uses coherence to prune future values that are
//guaranteed to generate a coherence violation.
//
//We need to see if there is (1) a promise for the writing thread, (2)
//the promise is sb before the write, (3) the promise can only be
//resolved by the reading thread, and (4) the promise has the same
//location as the read/write.
//
//@param write The write whose future value we may send back
//@param read  The read that might receive that future value
//@return false if sending the future value is certain to produce a
//coherence violation (prune it); true otherwise

bool ModelExecution::check_coherence_promise(const ModelAction * write, const ModelAction *read) {
	thread_id_t write_tid=write->get_tid();
	/* Scan promises newest-first */
	for(unsigned int i = promises.size(); i>0; i--) {
		Promise *pr=promises[i-1];
		if (!pr->same_location(write))
			continue;
		//the reading thread is the only thread that can resolve the promise
		if (pr->get_num_was_available_threads()==1 && pr->thread_was_available(read->get_tid())) {
			for(unsigned int j=0;j<pr->get_num_readers();j++) {
				ModelAction *prreader=pr->get_reader(j);
				//the writing thread reads from the promise before the write
				if (prreader->get_tid()==write_tid &&
						(*prreader)<(*write)) {
					if ((*read)>(*prreader)) {
						//check that we don't have a read between the read and promise
						//from the same thread as read
						bool okay=false;
						/* Walk up the node stack from 'read' toward the
						 * promise reader; a read from the same thread in
						 * between means the promise could be resolved
						 * there instead, so don't prune (NOTE(review):
						 * semantics inferred from node-stack walk) */
						for(const ModelAction *tmp=read;tmp!=prreader;) {
							tmp=tmp->get_node()->get_parent()->get_action();
							if (tmp->is_read() && tmp->same_thread(read)) {
								okay=true;
								break;
							}
						}
						if (okay)
							continue;
					}
					return false;
				}
			}
		}
	}
	return true;
}
1840
1841
1842 /** Arbitrary reads from the future are not allowed.  Section 29.3
1843  * part 9 places some constraints.  This method checks one result of constraint
1844  * constraint.  Others require compiler support. */
1845 bool ModelExecution::thin_air_constraint_may_allow(const ModelAction *writer, const ModelAction *reader) const
1846 {
1847         if (!writer->is_rmw())
1848                 return true;
1849
1850         if (!reader->is_rmw())
1851                 return true;
1852
1853         for (const ModelAction *search = writer->get_reads_from(); search != NULL; search = search->get_reads_from()) {
1854                 if (search == reader)
1855                         return false;
1856                 if (search->get_tid() == reader->get_tid() &&
1857                                 search->happens_before(reader))
1858                         break;
1859         }
1860
1861         return true;
1862 }
1863
1864 /**
1865  * Arbitrary reads from the future are not allowed. Section 29.3 part 9 places
1866  * some constraints. This method checks one the following constraint (others
1867  * require compiler support):
1868  *
1869  *   If X --hb-> Y --mo-> Z, then X should not read from Z.
1870  *   If X --hb-> Y, A --rf-> Y, and A --mo-> Z, then X should not read from Z.
1871  */
1872 bool ModelExecution::mo_may_allow(const ModelAction *writer, const ModelAction *reader)
1873 {
1874         SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(reader->get_location());
1875         unsigned int i;
1876         /* Iterate over all threads */
1877         for (i = 0; i < thrd_lists->size(); i++) {
1878                 const ModelAction *write_after_read = NULL;
1879
1880                 /* Iterate over actions in thread, starting from most recent */
1881                 action_list_t *list = &(*thrd_lists)[i];
1882                 action_list_t::reverse_iterator rit;
1883                 for (rit = list->rbegin(); rit != list->rend(); rit++) {
1884                         ModelAction *act = *rit;
1885
1886                         /* Don't disallow due to act == reader */
1887                         if (!reader->happens_before(act) || reader == act)
1888                                 break;
1889                         else if (act->is_write())
1890                                 write_after_read = act;
1891                         else if (act->is_read() && act->get_reads_from() != NULL)
1892                                 write_after_read = act->get_reads_from();
1893                 }
1894
1895                 if (write_after_read && write_after_read != writer && mo_graph->checkReachable(write_after_read, writer))
1896                         return false;
1897         }
1898         return true;
1899 }
1900
/**
 * Finds the head(s) of the release sequence(s) containing a given ModelAction.
 * The ModelAction under consideration is expected to be taking part in
 * release/acquire synchronization as an object of the "reads from" relation.
 * Note that this can only provide release sequence support for RMW chains
 * which do not read from the future, as those actions cannot be traced until
 * their "promise" is fulfilled. Similarly, we may not even establish the
 * presence of a release sequence with certainty, as some modification order
 * constraints may be decided further in the future. Thus, this function
 * "returns" two pieces of data: a pass-by-reference vector of @a release_heads
 * and a boolean representing certainty.
 *
 * @param rf The action that might be part of a release sequence. Must be a
 * write.
 * @param release_heads A pass-by-reference style return parameter. After
 * execution of this function, release_heads will contain the heads of all the
 * relevant release sequences, if any exists with certainty
 * @param pending A pass-by-reference style return parameter which is only used
 * when returning false (i.e., uncertain). Returns most information regarding
 * an uncertain release sequence, including any write operations that might
 * break the sequence.
 * @return true, if the ModelExecution is certain that release_heads is complete;
 * false otherwise
 */
bool ModelExecution::release_seq_heads(const ModelAction *rf,
		rel_heads_list_t *release_heads,
		struct release_seq *pending) const
{
	/* Only check for release sequences if there are no cycles */
	if (mo_graph->checkForCycles())
		return false;

	/* Walk backwards through rf's RMW reads-from chain, collecting any
	 * release (or preceding fence-release) along the way */
	for ( ; rf != NULL; rf = rf->get_reads_from()) {
		ASSERT(rf->is_write());

		if (rf->is_release())
			release_heads->push_back(rf);
		else if (rf->get_last_fence_release())
			release_heads->push_back(rf->get_last_fence_release());
		if (!rf->is_rmw())
			break; /* End of RMW chain */

		/** @todo Need to be smarter here...  In the linux lock
		 * example, this will run to the beginning of the program for
		 * every acquire. */
		/** @todo The way to be smarter here is to keep going until 1
		 * thread has a release preceded by an acquire and you've seen
		 *	 both. */

		/* acq_rel RMW is a sufficient stopping condition */
		if (rf->is_acquire() && rf->is_release())
			return true; /* complete */
	};
	if (!rf) {
		/* read from future: need to settle this later */
		pending->rf = NULL;
		return false; /* incomplete */
	}

	if (rf->is_release())
		return true; /* complete */

	/* else relaxed write
	 * - check for fence-release in the same thread (29.8, stmt. 3)
	 * - check modification order for contiguous subsequence
	 *   -> rf must be same thread as release */

	const ModelAction *fence_release = rf->get_last_fence_release();
	/* Synchronize with a fence-release unconditionally; we don't need to
	 * find any more "contiguous subsequence..." for it */
	if (fence_release)
		release_heads->push_back(fence_release);

	int tid = id_to_int(rf->get_tid());
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(rf->get_location());
	action_list_t *list = &(*thrd_lists)[tid];
	action_list_t::const_reverse_iterator rit;

	/* Find rf in the thread list */
	rit = std::find(list->rbegin(), list->rend(), rf);
	ASSERT(rit != list->rend());

	/* Find the last {write,fence}-release */
	for (; rit != list->rend(); rit++) {
		if (fence_release && *(*rit) < *fence_release)
			break;
		if ((*rit)->is_release())
			break;
	}
	if (rit == list->rend()) {
		/* No write-release in this thread */
		return true; /* complete */
	} else if (fence_release && *(*rit) < *fence_release) {
		/* The fence-release is more recent (and so, "stronger") than
		 * the most recent write-release */
		return true; /* complete */
	} /* else, need to establish contiguous release sequence */
	ModelAction *release = *rit;

	ASSERT(rf->same_thread(release));

	pending->writes.clear();

	/* Check every other thread for writes that could break the
	 * [release, rf] modification-order subsequence */
	bool certain = true;
	for (unsigned int i = 0; i < thrd_lists->size(); i++) {
		if (id_to_int(rf->get_tid()) == (int)i)
			continue;
		list = &(*thrd_lists)[i];

		/* Can we ensure no future writes from this thread may break
		 * the release seq? */
		bool future_ordered = false;

		ModelAction *last = get_last_action(int_to_id(i));
		Thread *th = get_thread(int_to_id(i));
		if ((last && rf->happens_before(last)) ||
				!is_enabled(th) ||
				th->is_complete())
			future_ordered = true;

		ASSERT(!th->is_model_thread() || future_ordered);

		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			const ModelAction *act = *rit;
			/* Reach synchronization -> this thread is complete */
			if (act->happens_before(release))
				break;
			if (rf->happens_before(act)) {
				future_ordered = true;
				continue;
			}

			/* Only non-RMW writes can break release sequences */
			if (!act->is_write() || act->is_rmw())
				continue;

			/* Check modification order */
			if (mo_graph->checkReachable(rf, act)) {
				/* rf --mo--> act */
				future_ordered = true;
				continue;
			}
			if (mo_graph->checkReachable(act, release))
				/* act --mo--> release */
				break;
			if (mo_graph->checkReachable(release, act) &&
			              mo_graph->checkReachable(act, rf)) {
				/* release --mo-> act --mo--> rf */
				return true; /* complete */
			}
			/* act may break release sequence */
			pending->writes.push_back(act);
			certain = false;
		}
		if (!future_ordered)
			certain = false; /* This thread is uncertain */
	}

	if (certain) {
		release_heads->push_back(release);
		pending->writes.clear();
	} else {
		/* Stash what we know so the lazy check can finish later */
		pending->release = release;
		pending->rf = rf;
	}
	return certain;
}
2068
2069 /**
2070  * An interface for getting the release sequence head(s) with which a
2071  * given ModelAction must synchronize. This function only returns a non-empty
2072  * result when it can locate a release sequence head with certainty. Otherwise,
2073  * it may mark the internal state of the ModelExecution so that it will handle
2074  * the release sequence at a later time, causing @a acquire to update its
2075  * synchronization at some later point in execution.
2076  *
2077  * @param acquire The 'acquire' action that may synchronize with a release
2078  * sequence
2079  * @param read The read action that may read from a release sequence; this may
2080  * be the same as acquire, or else an earlier action in the same thread (i.e.,
2081  * when 'acquire' is a fence-acquire)
2082  * @param release_heads A pass-by-reference return parameter. Will be filled
2083  * with the head(s) of the release sequence(s), if they exists with certainty.
2084  * @see ModelExecution::release_seq_heads
2085  */
2086 void ModelExecution::get_release_seq_heads(ModelAction *acquire,
2087                 ModelAction *read, rel_heads_list_t *release_heads)
2088 {
2089         const ModelAction *rf = read->get_reads_from();
2090         struct release_seq *sequence = (struct release_seq *)snapshot_calloc(1, sizeof(struct release_seq));
2091         sequence->acquire = acquire;
2092         sequence->read = read;
2093
2094         if (!release_seq_heads(rf, release_heads, sequence)) {
2095                 /* add act to 'lazy checking' list */
2096                 pending_rel_seqs.push_back(sequence);
2097         } else {
2098                 snapshot_free(sequence);
2099         }
2100 }
2101
2102 /**
2103  * @brief Propagate a modified clock vector to actions later in the execution
2104  * order
2105  *
2106  * After an acquire operation lazily completes a release-sequence
2107  * synchronization, we must update all clock vectors for operations later than
2108  * the acquire in the execution order.
2109  *
2110  * @param acquire The ModelAction whose clock vector must be propagated
2111  * @param work The work queue to which we can add work items, if this
2112  * propagation triggers more updates (e.g., to the modification order)
2113  */
2114 void ModelExecution::propagate_clockvector(ModelAction *acquire, work_queue_t *work)
2115 {
2116         /* Re-check all pending release sequences */
2117         work->push_back(CheckRelSeqWorkEntry(NULL));
2118         /* Re-check read-acquire for mo_graph edges */
2119         work->push_back(MOEdgeWorkEntry(acquire));
2120
2121         /* propagate synchronization to later actions */
2122         action_list_t::reverse_iterator rit = action_trace.rbegin();
2123         for (; (*rit) != acquire; rit++) {
2124                 ModelAction *propagate = *rit;
2125                 if (acquire->happens_before(propagate)) {
2126                         synchronize(acquire, propagate);
2127                         /* Re-check 'propagate' for mo_graph edges */
2128                         work->push_back(MOEdgeWorkEntry(propagate));
2129                 }
2130         }
2131 }
2132
/**
 * Attempt to resolve all stashed operations that might synchronize with a
 * release sequence for a given location. This implements the "lazy" portion of
 * determining whether or not a release sequence was contiguous, since not all
 * modification order information is present at the time an action occurs.
 *
 * @param location The location/object that should be checked for release
 * sequence resolutions. A NULL value means to check all locations.
 * @param work_queue The work queue to which to add work items as they are
 * generated
 * @return True if any updates occurred (new synchronization, new mo_graph
 * edges)
 */
bool ModelExecution::resolve_release_sequences(void *location, work_queue_t *work_queue)
{
	bool updated = false;
	SnapVector<struct release_seq *>::iterator it = pending_rel_seqs.begin();
	while (it != pending_rel_seqs.end()) {
		struct release_seq *pending = *it;
		ModelAction *acquire = pending->acquire;
		const ModelAction *read = pending->read;

		/* Only resolve sequences on the given location, if provided */
		if (location && read->get_location() != location) {
			it++;
			continue;
		}

		const ModelAction *rf = read->get_reads_from();
		rel_heads_list_t release_heads;
		bool complete;
		/* 'complete' means this release sequence is now fully
		 * determined and no longer needs lazy re-checking */
		complete = release_seq_heads(rf, &release_heads, pending);
		/* Synchronize the acquire with any newly-discovered release
		 * heads it hasn't already synchronized with */
		for (unsigned int i = 0; i < release_heads.size(); i++)
			if (!acquire->has_synchronized_with(release_heads[i]))
				if (synchronize(release_heads[i], acquire))
					updated = true;

		if (updated) {
			/* Propagate the changed clock vector */
			propagate_clockvector(acquire, work_queue);
		}
		/* Completed sequences are removed (erase returns the next
		 * iterator); incomplete ones stay pending */
		if (complete) {
			it = pending_rel_seqs.erase(it);
			snapshot_free(pending);
		} else {
			it++;
		}
	}

	// If we resolved promises or data races, see if we have realized a data race.
	checkDataRaces();

	return updated;
}
2187
/**
 * Performs various bookkeeping operations for the current ModelAction. For
 * instance, adds action to the per-object, per-thread action vector and to the
 * action trace list of all thread actions.
 *
 * @param act is the ModelAction to add.
 */
void ModelExecution::add_action_to_lists(ModelAction *act)
{
	int tid = id_to_int(act->get_tid());
	ModelAction *uninit = NULL;
	int uninit_id = -1;
	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
	/* First action on an atomic object: insert a synthetic UNINIT action
	 * ahead of it, so there is always an initial store for this location */
	if (list->empty() && act->is_atomic_var()) {
		uninit = get_uninitialized_action(act);
		uninit_id = id_to_int(uninit->get_tid());
		list->push_front(uninit);
	}
	list->push_back(act);

	/* Global trace: UNINIT (if any) goes at the very front */
	action_trace.push_back(act);
	if (uninit)
		action_trace.push_front(uninit);

	/* Per-object, per-thread index */
	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
	if (tid >= (int)vec->size())
		vec->resize(priv->next_thread_id);
	(*vec)[tid].push_back(act);
	if (uninit)
		(*vec)[uninit_id].push_front(uninit);

	/* Track the most recent action in each thread */
	if ((int)thrd_last_action.size() <= tid)
		thrd_last_action.resize(get_num_threads());
	thrd_last_action[tid] = act;
	if (uninit)
		thrd_last_action[uninit_id] = uninit;

	/* Track the most recent release fence in each thread */
	if (act->is_fence() && act->is_release()) {
		if ((int)thrd_last_fence_release.size() <= tid)
			thrd_last_fence_release.resize(get_num_threads());
		thrd_last_fence_release[tid] = act;
	}

	/* A wait also acts on its mutex (whose address is stored in the
	 * action's value), so index it under the mutex location as well */
	if (act->is_wait()) {
		void *mutex_loc = (void *) act->get_value();
		get_safe_ptr_action(&obj_map, mutex_loc)->push_back(act);

		SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, mutex_loc);
		if (tid >= (int)vec->size())
			vec->resize(priv->next_thread_id);
		(*vec)[tid].push_back(act);
	}
}
2241
2242 /**
2243  * @brief Get the last action performed by a particular Thread
2244  * @param tid The thread ID of the Thread in question
2245  * @return The last action in the thread
2246  */
2247 ModelAction * ModelExecution::get_last_action(thread_id_t tid) const
2248 {
2249         int threadid = id_to_int(tid);
2250         if (threadid < (int)thrd_last_action.size())
2251                 return thrd_last_action[id_to_int(tid)];
2252         else
2253                 return NULL;
2254 }
2255
2256 /**
2257  * @brief Get the last fence release performed by a particular Thread
2258  * @param tid The thread ID of the Thread in question
2259  * @return The last fence release in the thread, if one exists; NULL otherwise
2260  */
2261 ModelAction * ModelExecution::get_last_fence_release(thread_id_t tid) const
2262 {
2263         int threadid = id_to_int(tid);
2264         if (threadid < (int)thrd_last_fence_release.size())
2265                 return thrd_last_fence_release[id_to_int(tid)];
2266         else
2267                 return NULL;
2268 }
2269
2270 /**
2271  * Gets the last memory_order_seq_cst write (in the total global sequence)
2272  * performed on a particular object (i.e., memory location), not including the
2273  * current action.
2274  * @param curr The current ModelAction; also denotes the object location to
2275  * check
2276  * @return The last seq_cst write
2277  */
2278 ModelAction * ModelExecution::get_last_seq_cst_write(ModelAction *curr) const
2279 {
2280         void *location = curr->get_location();
2281         action_list_t *list = obj_map.get(location);
2282         /* Find: max({i in dom(S) | seq_cst(t_i) && isWrite(t_i) && samevar(t_i, t)}) */
2283         action_list_t::reverse_iterator rit;
2284         for (rit = list->rbegin(); (*rit) != curr; rit++)
2285                 ;
2286         rit++; /* Skip past curr */
2287         for ( ; rit != list->rend(); rit++)
2288                 if ((*rit)->is_write() && (*rit)->is_seqcst())
2289                         return *rit;
2290         return NULL;
2291 }
2292
/**
 * Gets the last memory_order_seq_cst fence (in the total global sequence)
 * performed in a particular thread, prior to a particular fence.
 * @param tid The ID of the thread to check
 * @param before_fence The fence from which to begin the search; if NULL, then
 * search for the most recent fence in the thread.
 * @return The last prior seq_cst fence in the thread, if exists; otherwise, NULL
 */
ModelAction * ModelExecution::get_last_seq_cst_fence(thread_id_t tid, const ModelAction *before_fence) const
{
	/* All fences should have location FENCE_LOCATION */
	action_list_t *list = obj_map.get(FENCE_LOCATION);

	if (!list)
		return NULL;

	action_list_t::reverse_iterator rit = list->rbegin();

	if (before_fence) {
		/* Position the iterator just past 'before_fence'.
		 * NOTE(review): this assumes 'before_fence' is present in the
		 * list; if it were absent, *rit would dereference rend() before
		 * the ASSERT fires — confirm callers always pass a recorded
		 * fence. */
		for (; rit != list->rend(); rit++)
			if (*rit == before_fence)
				break;

		ASSERT(*rit == before_fence);
		rit++;
	}

	/* Scan earlier actions for a seq_cst fence in the requested thread */
	for (; rit != list->rend(); rit++)
		if ((*rit)->is_fence() && (tid == (*rit)->get_tid()) && (*rit)->is_seqcst())
			return *rit;
	return NULL;
}
2325
2326 /**
2327  * Gets the last unlock operation performed on a particular mutex (i.e., memory
2328  * location). This function identifies the mutex according to the current
2329  * action, which is presumed to perform on the same mutex.
2330  * @param curr The current ModelAction; also denotes the object location to
2331  * check
2332  * @return The last unlock operation
2333  */
2334 ModelAction * ModelExecution::get_last_unlock(ModelAction *curr) const
2335 {
2336         void *location = curr->get_location();
2337         action_list_t *list = obj_map.get(location);
2338         /* Find: max({i in dom(S) | isUnlock(t_i) && samevar(t_i, t)}) */
2339         action_list_t::reverse_iterator rit;
2340         for (rit = list->rbegin(); rit != list->rend(); rit++)
2341                 if ((*rit)->is_unlock() || (*rit)->is_wait())
2342                         return *rit;
2343         return NULL;
2344 }
2345
2346 ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
2347 {
2348         ModelAction *parent = get_last_action(tid);
2349         if (!parent)
2350                 parent = get_thread(tid)->get_creation();
2351         return parent;
2352 }
2353
2354 /**
2355  * Returns the clock vector for a given thread.
2356  * @param tid The thread whose clock vector we want
2357  * @return Desired clock vector
2358  */
2359 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
2360 {
2361         return get_parent_action(tid)->get_cv();
2362 }
2363
2364 /**
2365  * @brief Find the promise (if any) to resolve for the current action and
2366  * remove it from the pending promise vector
2367  * @param curr The current ModelAction. Should be a write.
2368  * @return The Promise to resolve, if any; otherwise NULL
2369  */
2370 Promise * ModelExecution::pop_promise_to_resolve(const ModelAction *curr)
2371 {
2372         for (unsigned int i = 0; i < promises.size(); i++)
2373                 if (curr->get_node()->get_promise(i)) {
2374                         Promise *ret = promises[i];
2375                         promises.erase(promises.begin() + i);
2376                         return ret;
2377                 }
2378         return NULL;
2379 }
2380
/**
 * Resolve a Promise with a current write.
 * @param write The ModelAction that is fulfilling Promises
 * @param promise The Promise to resolve
 * @param work The work queue, for adding new fixup work
 * @return True if the Promise was successfully resolved; false otherwise
 */
bool ModelExecution::resolve_promise(ModelAction *write, Promise *promise,
		work_queue_t *work)
{
	ModelVector<ModelAction *> actions_to_check;

	/* Point every promised reader at the fulfilling write */
	for (unsigned int i = 0; i < promise->get_num_readers(); i++) {
		ModelAction *read = promise->get_reader(i);
		if (read_from(read, write)) {
			/* Propagate the changed clock vector */
			propagate_clockvector(read, work);
		}
		actions_to_check.push_back(read);
	}
	/* Make sure the promise's value matches the write's value */
	ASSERT(promise->is_compatible(write) && promise->same_value(write));
	/* A failed graph resolution is an unrecoverable (hard) failure */
	if (!mo_graph->resolvePromise(promise, write))
		priv->hard_failed_promise = true;

	/**
	 * @todo  It is possible to end up in an inconsistent state, where a
	 * "resolved" promise may still be referenced if
	 * CycleGraph::resolvePromise() failed, so don't delete 'promise'.
	 *
	 * Note that the inconsistency only matters when dumping mo_graph to
	 * file.
	 *
	 * delete promise;
	 */

	//Check whether reading these writes has made threads unable to
	//resolve promises
	for (unsigned int i = 0; i < actions_to_check.size(); i++) {
		ModelAction *read = actions_to_check[i];
		mo_check_promises(read, true);
	}

	/* Note: returns true even when the mo_graph resolution failed; the
	 * failure is reported via priv->hard_failed_promise instead */
	return true;
}
2426
2427 /**
2428  * Compute the set of promises that could potentially be satisfied by this
2429  * action. Note that the set computation actually appears in the Node, not in
2430  * ModelExecution.
2431  * @param curr The ModelAction that may satisfy promises
2432  */
2433 void ModelExecution::compute_promises(ModelAction *curr)
2434 {
2435         for (unsigned int i = 0; i < promises.size(); i++) {
2436                 Promise *promise = promises[i];
2437                 if (!promise->is_compatible(curr) || !promise->same_value(curr))
2438                         continue;
2439
2440                 bool satisfy = true;
2441                 for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
2442                         const ModelAction *act = promise->get_reader(j);
2443                         if (act->happens_before(curr) ||
2444                                         act->could_synchronize_with(curr)) {
2445                                 satisfy = false;
2446                                 break;
2447                         }
2448                 }
2449                 if (satisfy)
2450                         curr->get_node()->set_promise(i);
2451         }
2452 }
2453
/** Checks promises in response to change in ClockVector Threads.
 *
 * If thread 'tid' has newly synchronized with a promised reader (covered by
 * merge_cv but not by old_cv), that thread is eliminated from the promise;
 * eliminating the last thread marks the promise as failed.
 *
 * @param tid The thread whose clock vector changed
 * @param old_cv The thread's clock vector before the change (may be NULL)
 * @param merge_cv The thread's clock vector after the change
 */
void ModelExecution::check_promises(thread_id_t tid, ClockVector *old_cv, ClockVector *merge_cv)
{
	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];
		if (!promise->thread_is_available(tid))
			continue;
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *act = promise->get_reader(j);
			/* Synchronized under the new clock vector but not the
			 * old one, i.e. newly synchronized */
			if ((!old_cv || !old_cv->synchronized_since(act)) &&
					merge_cv->synchronized_since(act)) {
				if (promise->eliminate_thread(tid)) {
					/* Promise has failed */
					priv->failed_promise = true;
					return;
				}
			}
		}
	}
}
2474
2475 void ModelExecution::check_promises_thread_disabled()
2476 {
2477         for (unsigned int i = 0; i < promises.size(); i++) {
2478                 Promise *promise = promises[i];
2479                 if (promise->has_failed()) {
2480                         priv->failed_promise = true;
2481                         return;
2482                 }
2483         }
2484 }
2485
/**
 * @brief Checks promises in response to addition to modification order for
 * threads.
 *
 * We test whether threads are still available for satisfying promises after an
 * addition to our modification order constraints. Those that are unavailable
 * are "eliminated". Once all threads are eliminated from satisfying a promise,
 * that promise has failed.
 *
 * @param act The ModelAction which updated the modification order
 * @param is_read_check Should be true if act is a read and we must check for
 * updates to the store from which it read (there is a distinction here for
 * RMW's, which are both a load and a store)
 */
void ModelExecution::mo_check_promises(const ModelAction *act, bool is_read_check)
{
	/* For a read check, the relevant write is the store it read from */
	const ModelAction *write = is_read_check ? act->get_reads_from() : act;

	for (unsigned int i = 0; i < promises.size(); i++) {
		Promise *promise = promises[i];

		// Is this promise on the same location?
		if (!promise->same_location(write))
			continue;

		/* If any promised reader happens-before 'act', check the
		 * promise against this write once, then stop scanning readers */
		for (unsigned int j = 0; j < promise->get_num_readers(); j++) {
			const ModelAction *pread = promise->get_reader(j);
			if (!pread->happens_before(act))
			       continue;
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
			break;
		}

		// Don't do any lookups twice for the same thread
		if (!promise->thread_is_available(act->get_tid()))
			continue;

		/* mo-reachability from the promise to this write may also
		 * invalidate the promise */
		if (mo_graph->checkReachable(promise, write)) {
			if (mo_graph->checkPromise(write, promise)) {
				priv->hard_failed_promise = true;
				return;
			}
		}
	}
}
2534
2535 /**
2536  * Compute the set of writes that may break the current pending release
2537  * sequence. This information is extracted from previou release sequence
2538  * calculations.
2539  *
2540  * @param curr The current ModelAction. Must be a release sequence fixup
2541  * action.
2542  */
2543 void ModelExecution::compute_relseq_breakwrites(ModelAction *curr)
2544 {
2545         if (pending_rel_seqs.empty())
2546                 return;
2547
2548         struct release_seq *pending = pending_rel_seqs.back();
2549         for (unsigned int i = 0; i < pending->writes.size(); i++) {
2550                 const ModelAction *write = pending->writes[i];
2551                 curr->get_node()->add_relseq_break(write);
2552         }
2553
2554         /* NULL means don't break the sequence; just synchronize */
2555         curr->get_node()->add_relseq_break(NULL);
2556 }
2557
/**
 * Build up an initial set of all past writes that this 'read' action may read
 * from, as well as any previously-observed future values that must still be valid.
 *
 * @param curr is the current ModelAction that we are exploring; it must be a
 * 'read' operation.
 */
void ModelExecution::build_may_read_from(ModelAction *curr)
{
	SnapVector<action_list_t> *thrd_lists = obj_thrd_map.get(curr->get_location());
	unsigned int i;
	ASSERT(curr->is_read());

	ModelAction *last_sc_write = NULL;

	if (curr->is_seqcst())
		last_sc_write = get_last_seq_cst_write(curr);

	/* Iterate over all threads */
	for (i = 0; i < thrd_lists->size(); i++) {
		/* Iterate over actions in thread, starting from most recent */
		action_list_t *list = &(*thrd_lists)[i];
		action_list_t::reverse_iterator rit;
		for (rit = list->rbegin(); rit != list->rend(); rit++) {
			ModelAction *act = *rit;

			/* Only consider 'write' actions */
			if (!act->is_write() || act == curr)
				continue;

			/* Don't consider more than one seq_cst write if we are a seq_cst read. */
			bool allow_read = true;

			if (curr->is_seqcst() && (act->is_seqcst() || (last_sc_write != NULL && act->happens_before(last_sc_write))) && act != last_sc_write)
				allow_read = false;
			else if (curr->get_sleep_flag() && !curr->is_seqcst() && !sleep_can_read_from(curr, act))
				allow_read = false;

			if (allow_read) {
				/* Only add feasible reads: tentatively apply
				 * the mo constraints, test feasibility, then
				 * roll the graph back */
				mo_graph->startChanges();
				r_modification_order(curr, act);
				if (!is_infeasible())
					curr->get_node()->add_read_from_past(act);
				mo_graph->rollbackChanges();
			}

			/* Include at most one act per-thread that "happens before" curr */
			if (act->happens_before(curr))
				break;
		}
	}

	/* Inherit existing, promised future values */
	for (i = 0; i < promises.size(); i++) {
		const Promise *promise = promises[i];
		const ModelAction *promise_read = promise->get_reader(0);
		if (promise_read->same_var(curr)) {
			/* Only add feasible future-values (same tentative
			 * mo-graph check as above) */
			mo_graph->startChanges();
			r_modification_order(curr, promise);
			if (!is_infeasible())
				curr->get_node()->add_read_from_promise(promise_read);
			mo_graph->rollbackChanges();
		}
	}

	/* We may find no valid may-read-from only if the execution is doomed */
	if (!curr->get_node()->read_from_size()) {
		priv->no_valid_reads = true;
		set_assert();
	}

	if (DBG_ENABLED()) {
		model_print("Reached read action:\n");
		curr->print();
		model_print("Printing read_from_past\n");
		curr->get_node()->print_read_from_past();
		model_print("End printing read_from_past\n");
	}
}
2639
2640 bool ModelExecution::sleep_can_read_from(ModelAction *curr, const ModelAction *write)
2641 {
2642         for ( ; write != NULL; write = write->get_reads_from()) {
2643                 /* UNINIT actions don't have a Node, and they never sleep */
2644                 if (write->is_uninitialized())
2645                         return true;
2646                 Node *prevnode = write->get_node()->get_parent();
2647
2648                 bool thread_sleep = prevnode->enabled_status(curr->get_tid()) == THREAD_SLEEP_SET;
2649                 if (write->is_release() && thread_sleep)
2650                         return true;
2651                 if (!write->is_rmw())
2652                         return false;
2653         }
2654         return true;
2655 }
2656
2657 /**
2658  * @brief Get an action representing an uninitialized atomic
2659  *
2660  * This function may create a new one or try to retrieve one from the NodeStack
2661  *
2662  * @param curr The current action, which prompts the creation of an UNINIT action
2663  * @return A pointer to the UNINIT ModelAction
2664  */
2665 ModelAction * ModelExecution::get_uninitialized_action(const ModelAction *curr) const
2666 {
2667         Node *node = curr->get_node();
2668         ModelAction *act = node->get_uninit_action();
2669         if (!act) {
2670                 act = new ModelAction(ATOMIC_UNINIT, std::memory_order_relaxed, curr->get_location(), params->uninitvalue, model_thread);
2671                 node->set_uninit_action(act);
2672         }
2673         act->create_cv(NULL);
2674         return act;
2675 }
2676
2677 static void print_list(const action_list_t *list)
2678 {
2679         action_list_t::const_iterator it;
2680
2681         model_print("------------------------------------------------------------------------------------\n");
2682         model_print("#    t    Action type     MO       Location         Value               Rf  CV\n");
2683         model_print("------------------------------------------------------------------------------------\n");
2684
2685         unsigned int hash = 0;
2686
2687         for (it = list->begin(); it != list->end(); it++) {
2688                 const ModelAction *act = *it;
2689                 if (act->get_seq_number() > 0)
2690                         act->print();
2691                 hash = hash^(hash<<3)^((*it)->hash());
2692         }
2693         model_print("HASH %u\n", hash);
2694         model_print("------------------------------------------------------------------------------------\n");
2695 }
2696
#if SUPPORT_MOD_ORDER_DUMP
/**
 * @brief Dump the current execution to a Graphviz dot file
 *
 * Emits the mo_graph's nodes/edges plus reads-from (rf) edges for each read
 * and sequenced-before (sb) edges linking consecutive actions of each thread.
 *
 * @param filename Base name for the output file; ".dot" is appended
 */
void ModelExecution::dumpGraph(char *filename) const
{
	char buffer[200];
	/* snprintf: guard against overflowing 'buffer' on a long filename */
	snprintf(buffer, sizeof(buffer), "%s.dot", filename);
	FILE *file = fopen(buffer, "w");
	fprintf(file, "digraph %s {\n", filename);
	mo_graph->dumpNodes(file);
	/* Last-seen action per thread, for drawing sb edges */
	ModelAction **thread_array = (ModelAction **)model_calloc(1, sizeof(ModelAction *) * get_num_threads());

	for (action_list_t::const_iterator it = action_trace.begin(); it != action_trace.end(); it++) {
		ModelAction *act = *it;
		/* Index threads consistently via id_to_int() (the original
		 * mixed raw thread_id_t and id_to_int() indexing) */
		int tid = id_to_int(act->get_tid());
		if (act->is_read()) {
			mo_graph->dot_print_node(file, act);
			if (act->get_reads_from())
				mo_graph->dot_print_edge(file,
						act->get_reads_from(),
						act,
						"label=\"rf\", color=red, weight=2");
			else
				mo_graph->dot_print_edge(file,
						act->get_reads_from_promise(),
						act,
						"label=\"rf\", color=red");
		}
		if (thread_array[tid]) {
			/* sb edge from the previous action in this thread */
			mo_graph->dot_print_edge(file,
					thread_array[tid],
					act,
					"label=\"sb\", color=blue, weight=400");
		}

		thread_array[tid] = act;
	}
	fprintf(file, "}\n");
	model_free(thread_array);
	fclose(file);
}
#endif
2736
/** @brief Prints an execution trace summary. */
void ModelExecution::print_summary() const
{
#if SUPPORT_MOD_ORDER_DUMP
	/* Dump the mo_graph and full execution graph to per-execution files */
	char buffername[100];
	sprintf(buffername, "exec%04u", get_execution_number());
	mo_graph->dumpGraphToFile(buffername);
	sprintf(buffername, "graph%04u", get_execution_number());
	dumpGraph(buffername);
#endif

	/* Header line: execution number plus status flags */
	model_print("Execution trace %d:", get_execution_number());
	if (isfeasibleprefix()) {
		if (is_yieldblocked())
			model_print(" YIELD BLOCKED");
		if (scheduler->all_threads_sleeping())
			model_print(" SLEEP-SET REDUNDANT");
		if (have_bug_reports())
			model_print(" DETECTED BUG(S)");
	} else
		print_infeasibility(" INFEASIBLE");
	model_print("\n");

	print_list(&action_trace);
	model_print("\n");

	/* List any promises still unresolved at the end of the trace */
	if (!promises.empty()) {
		model_print("Pending promises:\n");
		for (unsigned int i = 0; i < promises.size(); i++) {
			model_print(" [P%u] ", i);
			promises[i]->print();
		}
		model_print("\n");
	}
}
2772
2773 /**
2774  * Add a Thread to the system for the first time. Should only be called once
2775  * per thread.
2776  * @param t The Thread to add
2777  */
2778 void ModelExecution::add_thread(Thread *t)
2779 {
2780         unsigned int i = id_to_int(t->get_id());
2781         if (i >= thread_map.size())
2782                 thread_map.resize(i + 1);
2783         thread_map[i] = t;
2784         if (!t->is_model_thread())
2785                 scheduler->add_thread(t);
2786 }
2787
2788 /**
2789  * @brief Get a Thread reference by its ID
2790  * @param tid The Thread's ID
2791  * @return A Thread reference
2792  */
2793 Thread * ModelExecution::get_thread(thread_id_t tid) const
2794 {
2795         unsigned int i = id_to_int(tid);
2796         if (i < thread_map.size())
2797                 return thread_map[i];
2798         return NULL;
2799 }
2800
2801 /**
2802  * @brief Get a reference to the Thread in which a ModelAction was executed
2803  * @param act The ModelAction
2804  * @return A Thread reference
2805  */
2806 Thread * ModelExecution::get_thread(const ModelAction *act) const
2807 {
2808         return get_thread(act->get_tid());
2809 }
2810
2811 /**
2812  * @brief Get a Promise's "promise number"
2813  *
2814  * A "promise number" is an index number that is unique to a promise, valid
2815  * only for a specific snapshot of an execution trace. Promises may come and go
2816  * as they are generated an resolved, so an index only retains meaning for the
2817  * current snapshot.
2818  *
2819  * @param promise The Promise to check
2820  * @return The promise index, if the promise still is valid; otherwise -1
2821  */
2822 int ModelExecution::get_promise_number(const Promise *promise) const
2823 {
2824         for (unsigned int i = 0; i < promises.size(); i++)
2825                 if (promises[i] == promise)
2826                         return i;
2827         /* Not found */
2828         return -1;
2829 }
2830
/**
 * @brief Check if a Thread is currently enabled
 * @param t The Thread to check
 * @return True if the Thread is currently enabled
 */
bool ModelExecution::is_enabled(Thread *t) const
{
	/* The scheduler owns all enabled-thread state */
	return scheduler->is_enabled(t);
}
2840
/**
 * @brief Check if a Thread is currently enabled
 * @param tid The ID of the Thread to check
 * @return True if the Thread is currently enabled
 */
bool ModelExecution::is_enabled(thread_id_t tid) const
{
	/* The scheduler owns all enabled-thread state */
	return scheduler->is_enabled(tid);
}
2850
2851 /**
2852  * @brief Select the next thread to execute based on the curren action
2853  *
2854  * RMW actions occur in two parts, and we cannot split them. And THREAD_CREATE
2855  * actions should be followed by the execution of their child thread. In either
2856  * case, the current action should determine the next thread schedule.
2857  *
2858  * @param curr The current action
2859  * @return The next thread to run, if the current action will determine this
2860  * selection; otherwise NULL
2861  */
2862 Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
2863 {
2864         /* Do not split atomic RMW */
2865         if (curr->is_rmwr())
2866                 return get_thread(curr);
2867         /* Follow CREATE with the created thread */
2868         if (curr->get_type() == THREAD_CREATE)
2869                 return curr->get_thread_operand();
2870         return NULL;
2871 }
2872
2873 /** @return True if the execution has taken too many steps */
2874 bool ModelExecution::too_many_steps() const
2875 {
2876         return params->bound != 0 && priv->used_sequence_numbers > params->bound;
2877 }
2878
2879 /**
2880  * Takes the next step in the execution, if possible.
2881  * @param curr The current step to take
2882  * @return Returns the next Thread to run, if any; NULL if this execution
2883  * should terminate
2884  */
2885 Thread * ModelExecution::take_step(ModelAction *curr)
2886 {
2887         Thread *curr_thrd = get_thread(curr);
2888         ASSERT(curr_thrd->get_state() == THREAD_READY);
2889
2890         ASSERT(check_action_enabled(curr)); /* May have side effects? */
2891         curr = check_current_action(curr);
2892         ASSERT(curr);
2893
2894         if (curr_thrd->is_blocked() || curr_thrd->is_complete())
2895                 scheduler->remove_thread(curr_thrd);
2896
2897         return action_select_next_thread(curr);
2898 }
2899
2900 /**
2901  * Launch end-of-execution release sequence fixups only when
2902  * the execution is otherwise feasible AND there are:
2903  *
2904  * (1) pending release sequences
2905  * (2) pending assertions that could be invalidated by a change
2906  * in clock vectors (i.e., data races)
2907  * (3) no pending promises
2908  */
2909 void ModelExecution::fixup_release_sequences()
2910 {
2911         while (!pending_rel_seqs.empty() &&
2912                         is_feasible_prefix_ignore_relseq() &&
2913                         haveUnrealizedRaces()) {
2914                 model_print("*** WARNING: release sequence fixup action "
2915                                 "(%zu pending release seuqence(s)) ***\n",
2916                                 pending_rel_seqs.size());
2917                 ModelAction *fixup = new ModelAction(MODEL_FIXUP_RELSEQ,
2918                                 std::memory_order_seq_cst, NULL, VALUE_NONE,
2919                                 model_thread);
2920                 take_step(fixup);
2921         };
2922 }