action_list_t::reverse_iterator rit;
for (rit = list->rbegin(); rit != list->rend(); rit++) {
ModelAction *prev = *rit;
- if (prev->is_success_lock())
+ if (act->is_conflicting_lock(prev))
return prev;
}
} else if (type==ATOMIC_UNLOCK) {
action_list_t::reverse_iterator rit;
for (rit = list->rbegin(); rit != list->rend(); rit++) {
ModelAction *prev = *rit;
- if (prev->is_failed_trylock())
+ if (!act->same_thread(prev)&&prev->is_failed_trylock())
return prev;
}
}
void ModelChecker::set_backtracking(ModelAction *act)
{
- ModelAction *prev;
- Node *node;
Thread *t = get_thread(act);
-
- prev = get_last_conflict(act);
+ ModelAction * prev = get_last_conflict(act);
if (prev == NULL)
return;
- node = prev->get_node()->get_parent();
-
- while (!node->is_enabled(t))
- t = t->get_parent();
-
- /* Check if this has been explored already */
- if (node->has_been_explored(t->get_id()))
- return;
+ Node * node = prev->get_node()->get_parent();
- /* Cache the latest backtracking point */
- if (!priv->next_backtrack || *prev > *priv->next_backtrack)
- priv->next_backtrack = prev;
+ int low_tid, high_tid;
+ if (node->is_enabled(t)) {
+ low_tid=id_to_int(act->get_tid());
+ high_tid=low_tid+1;
+ } else {
+ low_tid=0;
+ high_tid=get_num_threads();
+ }
+
+ for(int i=low_tid;i<high_tid;i++) {
+ thread_id_t tid=int_to_id(i);
+ if (!node->is_enabled(tid))
+ continue;
- /* If this is a new backtracking point, mark the tree */
- if (!node->set_backtrack(t->get_id()))
- return;
- DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
- prev->get_tid(), t->get_id());
- if (DBG_ENABLED()) {
- prev->print();
- act->print();
+ /* Check if this has been explored already */
+ if (node->has_been_explored(tid))
+ continue;
+
+ /* Cache the latest backtracking point */
+ if (!priv->next_backtrack || *prev > *priv->next_backtrack)
+ priv->next_backtrack = prev;
+
+ /* If this is a new backtracking point, mark the tree */
+ if (!node->set_backtrack(tid))
+ continue;
+ DEBUG("Setting backtrack: conflict = %d, instead tid = %d\n",
+ prev->get_tid(), t->get_id());
+ if (DBG_ENABLED()) {
+ prev->print();
+ act->print();
+ }
}
}
case ATOMIC_TRYLOCK: {
bool success=!state->islocked;
curr->set_try_lock(success);
- if (!success)
+ if (!success) {
+ get_thread(curr)->set_return_value(0);
break;
+ }
+ get_thread(curr)->set_return_value(1);
}
//otherwise fall into the lock case
case ATOMIC_LOCK: {
state->islocked=true;
ModelAction *unlock=get_last_unlock(curr);
//synchronize with the previous unlock statement
- curr->synchronize_with(unlock);
+ if ( unlock != NULL )
+ curr->synchronize_with(unlock);
break;
}
case ATOMIC_UNLOCK: {
newcurr->copy_typeandorder(curr);
ASSERT(curr->get_location()==newcurr->get_location());
+ newcurr->copy_from_new(curr);
/* Discard duplicate ModelAction; use action from NodeStack */
delete curr;
if (!check_action_enabled(curr)) {
//we'll make the execution look like we chose to run this action
//much later...when a lock is actually available to release
- remove_thread(get_current_thread());
get_current_thread()->set_pending(curr);
+ remove_thread(get_current_thread());
return get_next_thread(NULL);
}
build_reads_from_past(curr);
curr = newcurr;
- /* Add the action to lists before any other model-checking tasks */
- if (!second_part_of_rmw)
- add_action_to_lists(newcurr);
-
- /* Build may_read_from set for newly-created actions */
- if (curr == newcurr && curr->is_read())
- build_reads_from_past(curr);
- curr = newcurr;
-
/* Thread specific actions */
switch (curr->get_type()) {
case THREAD_CREATE: {