*((volatile uint ## size ## _t *)obj) = val; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
- recordWrite(tid, (void *)(((char *)obj)+i)); \
+ atomraceCheckWrite(tid, (void *)(((char *)obj)+i)); \
} \
}
*((volatile uint ## size ## _t *)obj) = val; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
- recordWrite(tid, (void *)(((char *)obj)+i)); \
+ atomraceCheckWrite(tid, (void *)(((char *)obj)+i)); \
} \
}
#define CDSATOMICLOAD(size) \
uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
ensureModel(); \
- return (uint ## size ## _t)model->switch_to_master( \
+ uint ## size ## _t val = (uint ## size ## _t)model->switch_to_master( \
new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
+ thread_id_t tid = thread_current()->get_id(); \
+ for(int i=0;i < size / 8;i++) { \
+ atomraceCheckRead(tid, (void *)(((char *)obj)+i)); \
+ } \
+ return val; \
}
CDSATOMICLOAD(8)
*((volatile uint ## size ## _t *)obj) = val; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
- recordWrite(tid, (void *)(((char *)obj)+i)); \
+ atomraceCheckWrite(tid, (void *)(((char *)obj)+i)); \
} \
}
*((volatile uint ## size ## _t *)addr) = _copy; \
thread_id_t tid = thread_current()->get_id(); \
for(int i=0;i < size / 8;i++) { \
+ atomraceCheckRead(tid, (void *)(((char *)addr)+i)); \
recordWrite(tid, (void *)(((char *)addr)+i)); \
} \
- return _old; \
+ return _old; \
})
// cds atomic exchange
void cds_func_entry(const char * funcName) {
ensureModel();
+// NOTE(review): the function-history tracking below is intentionally
+// compiled out; this hunk only re-indents the disabled block.
/*
- Thread * th = thread_current();
- uint32_t func_id;
-
- ModelHistory *history = model->get_history();
- if ( !history->getFuncMap()->contains(funcName) ) {
- // add func id to func map
- func_id = history->get_func_counter();
- history->incr_func_counter();
- history->getFuncMap()->put(funcName, func_id);
-
- // add func id to reverse func map
- ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
- if ( func_map_rev->size() <= func_id )
- func_map_rev->resize( func_id + 1 );
- func_map_rev->at(func_id) = funcName;
- } else {
- func_id = history->getFuncMap()->get(funcName);
- }
-
- history->enter_function(func_id, th->get_id());
-*/
+ Thread * th = thread_current();
+ uint32_t func_id;
+
+ ModelHistory *history = model->get_history();
+ if ( !history->getFuncMap()->contains(funcName) ) {
+ // add func id to func map
+ func_id = history->get_func_counter();
+ history->incr_func_counter();
+ history->getFuncMap()->put(funcName, func_id);
+
+ // add func id to reverse func map
+ ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
+ if ( func_map_rev->size() <= func_id )
+ func_map_rev->resize( func_id + 1 );
+ func_map_rev->at(func_id) = funcName;
+ } else {
+ func_id = history->getFuncMap()->get(funcName);
+ }
+
+ history->enter_function(func_id, th->get_id());
+ */
}
void cds_func_exit(const char * funcName) {
ensureModel();
+// NOTE(review): the function-history tracking below is intentionally
+// compiled out; this hunk only re-indents the disabled block.
/* Thread * th = thread_current();
- uint32_t func_id;
+ uint32_t func_id;
- ModelHistory *history = model->get_history();
- func_id = history->getFuncMap()->get(funcName);
+ ModelHistory *history = model->get_history();
+ func_id = history->getFuncMap()->get(funcName);
- * func_id not found; this could happen in the case where a function calls cds_func_entry
- * when the model has been defined yet, but then an atomic inside the function initializes
- * the model. And then cds_func_exit is called upon the function exiting.
- *
- if (func_id == 0)
- return;
+ * func_id not found; this could happen in the case where a function calls cds_func_entry
+ * when the model has not been defined yet, but then an atomic inside the function initializes
+ * the model. And then cds_func_exit is called upon the function exiting.
+ *
+ if (func_id == 0)
+ return;
- history->exit_function(func_id, th->get_id());
-*/
+ history->exit_function(func_id, th->get_id());
+ */
}
}
}
+
+/** Race detection for an ATOMIC write on an expanded (full) race record.
+ *  Checks the pending reads and the last write for a race, then resets the
+ *  record: pending reads are cleared and `thread`/its current clock are
+ *  stamped as the last (atomic) writer.
+ *  @param shadow  shadow word holding the RaceRecord pointer
+ *  @return the detected race, or NULL (also NULL when the record was
+ *          already atomic — atomic-vs-atomic accesses cannot race) */
+struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
+{
+ struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+ struct DataRace * race = NULL;
+
+ /* Previous access was atomic: cannot race with this atomic write. */
+ if (record->isAtomic)
+ goto Exit;
+
+ /* Check for datarace against last read. */
+
+ for (int i = 0;i < record->numReads;i++) {
+ modelclock_t readClock = record->readClock[i];
+ thread_id_t readThread = record->thread[i];
+
+ /* Note that readClock can't actually be zero here, so it could be
+ optimized. */
+
+ if (clock_may_race(currClock, thread, readClock, readThread)) {
+ /* We have a datarace */
+ race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+ goto Exit;
+ }
+ }
+
+ /* Check for datarace against last write. */
+
+ {
+ modelclock_t writeClock = record->writeClock;
+ thread_id_t writeThread = record->writeThread;
+
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+ /* We have a datarace */
+ race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+ goto Exit;
+ }
+ }
+Exit:
+ /* Record this atomic write as the most recent access. */
+ record->numReads = 0;
+ record->writeThread = thread;
+ record->isAtomic = 1;
+ modelclock_t ourClock = currClock->getClock(thread);
+ record->writeClock = ourClock;
+ return race;
+}
+
+/** Race detection on an ATOMIC write to one shadow cell. Dispatches to the
+ *  expanded-record path when needed, otherwise decodes the packed shadow
+ *  word and checks the last read and last write against the writer's clock
+ *  vector. Side effect: the shadow word is updated to record this thread's
+ *  atomic write. Any race found is backtraced and reported. */
+void atomraceCheckWrite(thread_id_t thread, void *location)
+{
+ uint64_t *shadow = lookupAddressEntry(location);
+ uint64_t shadowval = *shadow;
+ ClockVector *currClock = get_execution()->get_cv(thread);
+ /* No clock vector yet for this thread: nothing to compare against. */
+ if (currClock == NULL)
+ return;
+
+ struct DataRace * race = NULL;
+ /* Do full record */
+ if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+ race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+ goto Exit;
+ }
+
+ {
+ int threadid = id_to_int(thread);
+ modelclock_t ourClock = currClock->getClock(thread);
+
+ /* Thread ID is too large or clock is too large. */
+ if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+ expandRecord(shadow);
+ race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+ goto Exit;
+ }
+
+ /* Can't race with atomic */
+ if (shadowval & ATOMICMASK)
+ goto ShadowExit;
+
+ {
+ /* Check for datarace against last read. */
+
+ modelclock_t readClock = READVECTOR(shadowval);
+ thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+ if (clock_may_race(currClock, thread, readClock, readThread)) {
+ /* We have a datarace */
+ race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+ goto ShadowExit;
+ }
+ }
+
+ {
+ /* Check for datarace against last write. */
+
+ modelclock_t writeClock = WRITEVECTOR(shadowval);
+ thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+ /* We have a datarace */
+ race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+ goto ShadowExit;
+ }
+ }
+
+ShadowExit:
+ /* Re-encode the shadow word: this thread is now the last (atomic)
+ writer, with no outstanding reads. */
+ *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
+ }
+
+Exit:
+ if (race) {
+ race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+ if (raceset->add(race))
+ assert_race(race);
+ else model_free(race);
+ }
+}
+
/** This function does race detection for a write on an expanded record. */
void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
struct RaceRecord *record = (struct RaceRecord *)(*shadow);
record->isAtomic = 1;
}
+/** Updates an expanded race record for a NON-atomic write: clears pending
+ *  reads, stamps `thread` and its current clock as the last writer, and
+ *  clears the atomic bit. Performs no race detection (used by
+ *  initialization paths such as recordCalloc). */
+void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
+ struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+ record->numReads = 0;
+ record->writeThread = thread;
+ modelclock_t ourClock = currClock->getClock(thread);
+ record->writeClock = ourClock;
+ record->isAtomic = 0;
+}
+
/** This function just updates metadata on atomic write. */
void recordWrite(thread_id_t thread, void *location) {
uint64_t *shadow = lookupAddressEntry(location);
*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
}
+/** Marks `size` bytes starting at `location` as non-atomically written by
+ *  the current thread (shadow metadata only; no race check). Presumably
+ *  called right after calloc zero-fills the memory.
+ *  NOTE(review): when a byte hits the expanded-record path the function
+ *  returns early, leaving the remaining bytes unprocessed — confirm this
+ *  matches intent.
+ *  NOTE(review): currClock is not NULL-checked here, unlike
+ *  atomraceCheckWrite — TODO confirm callers only run with a live
+ *  execution. */
+void recordCalloc(void *location, size_t size) {
+ thread_id_t thread = thread_current()->get_id();
+ for(;size != 0;size--) {
+ uint64_t *shadow = lookupAddressEntry(location);
+ uint64_t shadowval = *shadow;
+ ClockVector *currClock = get_execution()->get_cv(thread);
+ /* Do full record */
+ if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+ fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+ return;
+ }
+
+ int threadid = id_to_int(thread);
+ modelclock_t ourClock = currClock->getClock(thread);
+
+ /* Thread ID is too large or clock is too large. */
+ if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+ expandRecord(shadow);
+ fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+ return;
+ }
+
+ /* Short record: encode a plain (non-atomic) write by this thread. */
+ *shadow = ENCODEOP(0, 0, threadid, ourClock);
+ location = (void *)(((char *) location) + 1);
+ }
+}
+
/** This function does race detection on a read for an expanded record. */
else model_free(race);
}
}
+
+
+/** Race detection for an ATOMIC read on an expanded (full) race record.
+ *  Only the last write can race with this read; if the record's last
+ *  access was itself atomic, no race is possible.
+ *  @return the detected race, or NULL. Unlike the write variant, the
+ *          record is not modified. */
+struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
+{
+ struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+ struct DataRace * race = NULL;
+ /* Check for datarace against last write. */
+ /* Atomic-vs-atomic accesses never race. */
+ if (record->isAtomic)
+ return NULL;
+
+ modelclock_t writeClock = record->writeClock;
+ thread_id_t writeThread = record->writeThread;
+
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+ /* We have a datarace */
+ race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+ }
+ return race;
+}
+
+/** Race detection on an ATOMIC read of one shadow cell. Dispatches to the
+ *  expanded-record path when needed; otherwise checks the last plain
+ *  (non-atomic) write decoded from the packed shadow word against the
+ *  reader's clock vector. Atomic-vs-atomic accesses cannot race. Any race
+ *  found is backtraced and reported. */
+void atomraceCheckRead(thread_id_t thread, const void *location)
+{
+ uint64_t *shadow = lookupAddressEntry(location);
+ uint64_t shadowval = *shadow;
+ ClockVector *currClock = get_execution()->get_cv(thread);
+ /* No clock vector yet for this thread: nothing to compare against. */
+ if (currClock == NULL)
+ return;
+
+ struct DataRace * race = NULL;
+
+ /* Do full record */
+ if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+ race = atomfullRaceCheckRead(thread, location, shadow, currClock);
+ goto Exit;
+ }
+
+ /* Last access was atomic: cannot race. BUGFIX: the ATOMICMASK bit must
+ be tested with bitwise '&'; the previous 'shadowval && ATOMICMASK' was
+ true for ANY nonzero shadow value, which skipped the write-race check
+ below for nearly every short record (compare atomraceCheckWrite). */
+ if (shadowval & ATOMICMASK)
+ return;
+
+ {
+ /* Check for datarace against last write. */
+
+ modelclock_t writeClock = WRITEVECTOR(shadowval);
+ thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+ if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+ /* We have a datarace */
+ race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+ goto Exit;
+ }
+ }
+Exit:
+ if (race) {
+ race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+ if (raceset->add(race))
+ assert_race(race);
+ else model_free(race);
+ }
+}