trace_ref(NULL),
thrdmap_ref(NULL),
action_ref(NULL),
+ func_act_ref(NULL),
value(value),
type(type),
+ original_type(ATOMIC_NOP),
order(order),
original_order(order),
seq_number(ACTION_INITIAL_CLOCK)
action_ref(NULL),
value(value),
type(type),
+ original_type(ATOMIC_NOP),
order(order),
original_order(order),
seq_number(ACTION_INITIAL_CLOCK)
action_ref(NULL),
value(value),
type(type),
+ original_type(ATOMIC_NOP),
order(order),
original_order(order),
seq_number(ACTION_INITIAL_CLOCK)
action_ref(NULL),
value(value),
type(type),
+ original_type(ATOMIC_NOP),
order(order),
original_order(order),
seq_number(ACTION_INITIAL_CLOCK)
action_ref(NULL),
value(value),
type(type),
+ original_type(ATOMIC_NOP),
order(order),
original_order(order),
seq_number(ACTION_INITIAL_CLOCK)
else
return NULL;
}
+
+/** @brief Swap type with original type.
+ *  Exchanges the @c type and @c original_type fields, so a second call
+ *  restores the pre-swap state. Used to temporarily view an action under
+ *  its saved type (e.g. before it was marked READY_FREE or weakened from
+ *  an RMW to a plain write). */
+void ModelAction::use_original_type()
+{
+	action_type_t tmp = type;
+	type = original_type;
+	original_type = tmp;
+}
* iteself does not indicate no value.
*/
#define VALUE_NONE 0xdeadbeef
+#define HAS_REFERENCE ((void *)0x1)
/**
* @brief The "location" at which a fence occurs
thread_id_t get_tid() const { return tid; }
action_type get_type() const { return type; }
void set_type(action_type _type) { type = _type; }
+	/** @return the saved original type (the type before the action was
+	 *  marked READY_FREE or weakened from an RMW to a write).
+	 *  BUG FIX: previously returned @c type, making the getter useless
+	 *  and inconsistent with set_original_type()/use_original_type(). */
+	action_type get_original_type() const { return original_type; }
+	void set_original_type(action_type _type) { original_type = _type; }
void set_free() { type = READY_FREE; }
memory_order get_mo() const { return order; }
memory_order get_original_mo() const { return original_order; }
bool equals(const ModelAction *x) const { return this == x; }
void set_value(uint64_t val) { value = val; }
+ void use_original_type();
+
/* to accomodate pthread create and join */
Thread * thread_operand;
void set_thread_operand(Thread *th) { thread_operand = th; }
void setTraceRef(sllnode<ModelAction *> *ref) { trace_ref = ref; }
void setThrdMapRef(sllnode<ModelAction *> *ref) { thrdmap_ref = ref; }
void setActionRef(sllnode<ModelAction *> *ref) { action_ref = ref; }
+ void setFuncActRef(void *ref) { func_act_ref = ref; }
sllnode<ModelAction *> * getTraceRef() { return trace_ref; }
sllnode<ModelAction *> * getThrdMapRef() { return thrdmap_ref; }
sllnode<ModelAction *> * getActionRef() { return action_ref; }
+ void * getFuncActRef() { return func_act_ref; }
+
SNAPSHOTALLOC
private:
const char * get_type_str() const;
sllnode<ModelAction *> * trace_ref;
sllnode<ModelAction *> * thrdmap_ref;
sllnode<ModelAction *> * action_ref;
-
+ void * func_act_ref;
/** @brief The value written (for write or RMW; undefined for read) */
uint64_t value;
/** @brief Type of action (read, write, RMW, fence, thread create, etc.) */
action_type type;
+	/** @brief The original type of the action (read, write, RMW) before it
+	 * was set to READY_FREE or weakened from an RMW to a write */
+ action_type original_type;
+
/** @brief The memory order for this operation. */
memory_order order;
obj_map(),
condvar_waiters_map(),
obj_thrd_map(),
+ obj_wr_thrd_map(),
+ obj_last_sc_map(),
mutex_map(),
+ cond_map(),
thrd_last_action(1),
thrd_last_fence_release(),
priv(new struct model_snapshot_members ()),
Thread * ModelExecution::action_select_next_thread(const ModelAction *curr) const
{
/* Do not split atomic RMW */
- if (curr->is_rmwr() && !paused_by_fuzzer(curr))
+ if (curr->is_rmwr())
return get_thread(curr);
/* Follow CREATE with the created thread */
/* which is not needed, because model.cc takes care of this */
return NULL;
}
-/** @param act A read atomic action */
-bool ModelExecution::paused_by_fuzzer(const ModelAction * act) const
-{
- ASSERT(act->is_read());
-
- // Actions paused by fuzzer have their sequence number reset to 0
- return act->get_seq_number() == 0;
-}
-
/**
* Takes the next step in the execution, if possible.
* @param curr The current step to take
/** Compute which actions to free. */
void ModelExecution::collectActions() {
+ if (priv->used_sequence_numbers < params->traceminsize)
+ return;
+
//Compute minimal clock vector for all live threads
ClockVector *cvmin = computeMinimalCV();
SnapVector<CycleNode *> * queue = new SnapVector<CycleNode *>();
//Free if it is invisible or we have set a flag to remove visible actions.
if (actseq <= tid_clock || params->removevisible) {
+ // For read actions being used by ModelHistory, mark the reads_from as being used.
+ if (act->is_read() && act->getFuncActRef() != NULL) {
+ ModelAction * reads_from = act->get_reads_from();
+ if (reads_from->getFuncActRef() == NULL)
+ reads_from->setFuncActRef(HAS_REFERENCE);
+ }
+
ModelAction * write;
if (act->is_write()) {
write = act;
CycleNode * prevnode = node->getInEdge(i);
ModelAction * prevact = prevnode->getAction();
if (prevact->get_type() != READY_FREE) {
+ if (prevact->getFuncActRef() != NULL) {
+ // Copy the original type if being used by ModelHistory
+ prevact->set_original_type(prevact->get_type());
+ }
prevact->set_free();
queue->push_back(prevnode);
}
}
if (act->is_read()) {
+ // For read actions being used by ModelHistory, mark the reads_from as being used.
+ if (act->getFuncActRef() != NULL) {
+ ModelAction * reads_from = act->get_reads_from();
+ if (reads_from->getFuncActRef() == NULL)
+ reads_from->setFuncActRef(HAS_REFERENCE);
+ }
+
if (act->get_reads_from()->is_free()) {
if (act->is_rmw()) {
+ if (act->getFuncActRef() != NULL) {
+ // Copy the original type if being used by ModelHistory
+ act->set_original_type(act->get_type());
+ }
//Weaken a RMW from a freed store to a write
act->set_type(ATOMIC_WRITE);
} else {
if (islastact) {
fixupLastAct(act);
}
- delete act;
- continue;
+
+				//Only delete this action if it is not being used by ModelHistory.
+				//Otherwise, deletion of the action is deferred.
+ if (act->getFuncActRef() == NULL) {
+ delete act;
+ continue;
+ }
}
}
}
if (act->is_read()) {
if (act->get_reads_from()->is_free()) {
if (act->is_rmw()) {
+ if (act->getFuncActRef() != NULL) {
+ // Copy the original type if being used by ModelHistory
+ act->set_original_type(act->get_type());
+ }
act->set_type(ATOMIC_WRITE);
} else {
removeAction(act);
if (islastact) {
fixupLastAct(act);
}
- delete act;
- continue;
+ if (act->getFuncActRef() == NULL) {
+ delete act;
+ continue;
+ }
}
}
} else if (act->is_free()) {
if (islastact) {
fixupLastAct(act);
}
- delete act;
- continue;
+ if (act->getFuncActRef() == NULL) {
+ delete act;
+ continue;
+ }
} else if (act->is_write()) {
//Do nothing with write that hasn't been marked to be freed
} else if (islastact) {
Fuzzer * fuzzer;
Thread * action_select_next_thread(const ModelAction *curr) const;
- bool paused_by_fuzzer(const ModelAction * act) const;
bool isfinished;
};
for (sllnode<ModelAction *> * it = act_list->begin();it != NULL;it = it->getNext()) {
ModelAction * act = it->getVal();
+ act->setFuncActRef(NULL); // Remove func_act_ref so that this action can be removed
FuncInst * func_inst = get_inst(act);
void * loc = act->get_location();
}
/* Monitor the statuses of threads waiting for tid */
- monitor_waiting_thread(func_id, tid);
+ // monitor_waiting_thread(func_id, tid);
}
/* @param func_id a non-zero value */
return;
/* Monitor the statuses of threads waiting for tid */
- monitor_waiting_thread_counter(tid);
+ // monitor_waiting_thread_counter(tid);
/* Every write action should be processed, including
* nonatomic writes (which have no position) */
return;
/* Add to curr_inst_list */
- curr_act_list->push_back(act);
+ act->setFuncActRef(curr_act_list->add_back(act));
FuncNode * func_node = func_nodes[func_id];
func_node->add_inst(act);