From: bdemsky <bdemsky@uci.edu>
Date: Wed, 20 Nov 2019 19:50:00 +0000 (-0800)
Subject: Add datarace support for atomics and calloc
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=69d9b3609ff724140893c7dffb1e7a38c3090a81;p=c11tester.git

Add datarace support for atomics and calloc
---

diff --git a/cmodelint.cc b/cmodelint.cc
index 8774c5b3..1a03153f 100644
--- a/cmodelint.cc
+++ b/cmodelint.cc
@@ -113,7 +113,7 @@ VOLATILELOAD(64)
 		*((volatile uint ## size ## _t *)obj) = val;            \
 		thread_id_t tid = thread_current()->get_id();           \
 		for(int i=0;i < size / 8;i++) {                         \
-			recordWrite(tid, (void *)(((char *)obj)+i));          \
+			atomraceCheckWrite(tid, (void *)(((char *)obj)+i));          \
 		}                                                       \
 	}
 
@@ -130,7 +130,7 @@ VOLATILESTORE(64)
 		*((volatile uint ## size ## _t *)obj) = val;                                 \
 		thread_id_t tid = thread_current()->get_id();           \
 		for(int i=0;i < size / 8;i++) {                       \
-			recordWrite(tid, (void *)(((char *)obj)+i));          \
+			atomraceCheckWrite(tid, (void *)(((char *)obj)+i));          \
 		}                                                       \
 	}
 
@@ -143,8 +143,13 @@ CDSATOMICINT(64)
 #define CDSATOMICLOAD(size)                                             \
 	uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
 		ensureModel();                                                      \
-		return (uint ## size ## _t)model->switch_to_master( \
+		uint ## size ## _t val = (uint ## size ## _t)model->switch_to_master( \
 			new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
+		thread_id_t tid = thread_current()->get_id();           \
+		for(int i=0;i < size / 8;i++) {                         \
+			atomraceCheckRead(tid, (void *)(((char *)obj)+i));    \
+		}                                                       \
+		return val; \
 	}
 
 CDSATOMICLOAD(8)
@@ -160,7 +165,7 @@ CDSATOMICLOAD(64)
 		*((volatile uint ## size ## _t *)obj) = val;                     \
 		thread_id_t tid = thread_current()->get_id();           \
 		for(int i=0;i < size / 8;i++) {                       \
-			recordWrite(tid, (void *)(((char *)obj)+i));          \
+			atomraceCheckWrite(tid, (void *)(((char *)obj)+i));          \
 		}                                                       \
 	}
 
@@ -180,9 +185,10 @@ CDSATOMICSTORE(64)
 		*((volatile uint ## size ## _t *)addr) = _copy;                  \
 		thread_id_t tid = thread_current()->get_id();           \
 		for(int i=0;i < size / 8;i++) {                       \
+			atomraceCheckRead(tid,  (void *)(((char *)addr)+i));  \
 			recordWrite(tid, (void *)(((char *)addr)+i));         \
 		}                                                       \
-		return _old;                                                          \
+		return _old;                                            \
 	})
 
 // cds atomic exchange
@@ -336,45 +342,45 @@ void cds_atomic_thread_fence(int atomic_index, const char * position) {
 void cds_func_entry(const char * funcName) {
 	ensureModel();
 	/*
-	Thread * th = thread_current();
-	uint32_t func_id;
-
-	ModelHistory *history = model->get_history();
-	if ( !history->getFuncMap()->contains(funcName) ) {
-		// add func id to func map
-		func_id = history->get_func_counter();
-		history->incr_func_counter();
-		history->getFuncMap()->put(funcName, func_id);
-
-		// add func id to reverse func map
-		ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
-		if ( func_map_rev->size() <= func_id )
-			func_map_rev->resize( func_id + 1 );
-		func_map_rev->at(func_id) = funcName;
-	} else {
-		func_id = history->getFuncMap()->get(funcName);
-	}
-
-	history->enter_function(func_id, th->get_id());
-*/
+	   Thread * th = thread_current();
+	   uint32_t func_id;
+
+	   ModelHistory *history = model->get_history();
+	   if ( !history->getFuncMap()->contains(funcName) ) {
+	        // add func id to func map
+	        func_id = history->get_func_counter();
+	        history->incr_func_counter();
+	        history->getFuncMap()->put(funcName, func_id);
+
+	        // add func id to reverse func map
+	        ModelVector<const char *> * func_map_rev = history->getFuncMapRev();
+	        if ( func_map_rev->size() <= func_id )
+	                func_map_rev->resize( func_id + 1 );
+	        func_map_rev->at(func_id) = funcName;
+	   } else {
+	        func_id = history->getFuncMap()->get(funcName);
+	   }
+
+	   history->enter_function(func_id, th->get_id());
+	 */
 }
 
 void cds_func_exit(const char * funcName) {
 	ensureModel();
 
 /*	Thread * th = thread_current();
-	uint32_t func_id;
+        uint32_t func_id;
 
-	ModelHistory *history = model->get_history();
-	func_id = history->getFuncMap()->get(funcName);
+        ModelHistory *history = model->get_history();
+        func_id = history->getFuncMap()->get(funcName);
 
-	 * func_id not found; this could happen in the case where a function calls cds_func_entry
-	 * when the model has been defined yet, but then an atomic inside the function initializes
-	 * the model. And then cds_func_exit is called upon the function exiting.
-	 *
-	if (func_id == 0)
-		return;
+ * func_id not found; this could happen in the case where a function calls cds_func_entry
+ * when the model hasn't been defined yet, but then an atomic inside the function initializes
+ * the model. And then cds_func_exit is called upon the function exiting.
+ *
+        if (func_id == 0)
+                return;
 
-	history->exit_function(func_id, th->get_id());
-*/
+        history->exit_function(func_id, th->get_id());
+ */
 }
diff --git a/datarace.cc b/datarace.cc
index dd88c2fe..be0fc3b6 100644
--- a/datarace.cc
+++ b/datarace.cc
@@ -318,6 +318,123 @@ Exit:
 	}
 }
 
+
+/** This function does race detection for an atomic write on an expanded record. */
+struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
+{
+	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+	struct DataRace * race = NULL;
+
+	if (record->isAtomic)
+		goto Exit;	/* Atomic accesses never race with other atomic accesses */
+
+	/* Check for datarace against last read. */
+
+	for (int i = 0;i < record->numReads;i++) {
+		modelclock_t readClock = record->readClock[i];
+		thread_id_t readThread = record->thread[i];
+
+		/* Note that readClock can't actually be zero here, so it could be
+		         optimized. */
+
+		if (clock_may_race(currClock, thread, readClock, readThread)) {
+			/* We have a datarace */
+			race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+			goto Exit;
+		}
+	}
+
+	/* Check for datarace against last write. */
+
+	{
+		modelclock_t writeClock = record->writeClock;
+		thread_id_t writeThread = record->writeThread;
+
+		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+			/* We have a datarace */
+			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+			goto Exit;
+		}
+	}
+Exit:
+	record->numReads = 0;
+	record->writeThread = thread;
+	record->isAtomic = 1;	/* Remember that the last write was atomic */
+	modelclock_t ourClock = currClock->getClock(thread);
+	record->writeClock = ourClock;
+	return race;
+}
+
+/** This function does race detection for an atomic write. */
+void atomraceCheckWrite(thread_id_t thread, void *location)
+{
+	uint64_t *shadow = lookupAddressEntry(location);
+	uint64_t shadowval = *shadow;
+	ClockVector *currClock = get_execution()->get_cv(thread);
+	if (currClock == NULL)
+		return;	/* Thread has no clock vector yet; nothing to check */
+
+	struct DataRace * race = NULL;
+	/* Do full record */
+	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+		goto Exit;
+	}
+
+	{
+		int threadid = id_to_int(thread);
+		modelclock_t ourClock = currClock->getClock(thread);
+
+		/* Thread ID is too large or clock is too large. */
+		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+			expandRecord(shadow);
+			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
+			goto Exit;
+		}
+
+		/* Atomic write can't race with a previous atomic access */
+		if (shadowval & ATOMICMASK)
+			goto ShadowExit;
+
+		{
+			/* Check for datarace against last read. */
+
+			modelclock_t readClock = READVECTOR(shadowval);
+			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
+
+			if (clock_may_race(currClock, thread, readClock, readThread)) {
+				/* We have a datarace */
+				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
+				goto ShadowExit;
+			}
+		}
+
+		{
+			/* Check for datarace against last write. */
+
+			modelclock_t writeClock = WRITEVECTOR(shadowval);
+			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+				/* We have a datarace */
+				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
+				goto ShadowExit;
+			}
+		}
+
+ShadowExit:
+		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;	/* Mark last write as atomic */
+	}
+
+Exit:
+	if (race) {
+		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+		if (raceset->add(race))
+			assert_race(race);
+		else model_free(race);
+	}
+}
+
 /** This function does race detection for a write on an expanded record. */
 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
 	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
@@ -328,6 +445,16 @@ void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, Clock
 	record->isAtomic = 1;
 }
 
+/** This function records a non-atomic write on an expanded record (no race detection). */
+void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
+	struct RaceRecord *record = (struct RaceRecord *)(*shadow);
+	record->numReads = 0;
+	record->writeThread = thread;
+	modelclock_t ourClock = currClock->getClock(thread);
+	record->writeClock = ourClock;
+	record->isAtomic = 0;	/* Unlike fullRecordWrite, mark the write as non-atomic */
+}
+
 /** This function just updates metadata on atomic write. */
 void recordWrite(thread_id_t thread, void *location) {
 	uint64_t *shadow = lookupAddressEntry(location);
@@ -352,6 +479,34 @@ void recordWrite(thread_id_t thread, void *location) {
 	*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
 }
 
+/** This function records calloc's zero-initialization as a non-atomic write to every byte. */
+void recordCalloc(void *location, size_t size) {
+	thread_id_t thread = thread_current()->get_id();
+	for(;size != 0;size--) {
+		uint64_t *shadow = lookupAddressEntry(location);
+		uint64_t shadowval = *shadow;
+		ClockVector *currClock = get_execution()->get_cv(thread);	/* NOTE(review): assumes a non-NULL clock vector -- the race checkers guard this; verify for early calloc */
+		/* Do full record */
+		if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+			fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+			location = (void *)(((char *) location) + 1);	continue;	/* bug fix: was "return;", which left the remaining bytes unrecorded */
+		}
+
+		int threadid = id_to_int(thread);
+		modelclock_t ourClock = currClock->getClock(thread);
+
+		/* Thread ID is too large or clock is too large. */
+		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
+			expandRecord(shadow);
+			fullRecordWriteNonAtomic(thread, location, shadow, currClock);
+			location = (void *)(((char *) location) + 1);	continue;	/* bug fix: was "return;", which left the remaining bytes unrecorded */
+		}
+
+		*shadow = ENCODEOP(0, 0, threadid, ourClock);
+		location = (void *)(((char *) location) + 1);
+	}
+}
+
 
 
 /** This function does race detection on a read for an expanded record. */
@@ -483,3 +638,66 @@ Exit:
 		else model_free(race);
 	}
 }
+
+
+/** This function does race detection for an atomic read on an expanded record. */
+struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
+{
+	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
+	struct DataRace * race = NULL;
+	/* An atomic read can only race with a preceding non-atomic write. */
+	if (record->isAtomic)
+		return NULL;
+
+	modelclock_t writeClock = record->writeClock;
+	thread_id_t writeThread = record->writeThread;
+
+	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+		/* We have a datarace */
+		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+	}
+	return race;
+}
+
+/** This function does race detection for an atomic read. */
+void atomraceCheckRead(thread_id_t thread, const void *location)
+{
+	uint64_t *shadow = lookupAddressEntry(location);
+	uint64_t shadowval = *shadow;
+	ClockVector *currClock = get_execution()->get_cv(thread);
+	if (currClock == NULL)
+		return;	/* Thread has no clock vector yet; nothing to check */
+
+	struct DataRace * race = NULL;
+
+	/* Do full record */
+	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
+		race = atomfullRaceCheckRead(thread, location, shadow, currClock);
+		goto Exit;
+	}
+
+	if (shadowval & ATOMICMASK)	/* bug fix: bitwise AND (was &&, which is true for ANY nonzero record and made the check below unreachable) */
+		return;
+
+	{
+		/* Check for datarace against last write. */
+
+		modelclock_t writeClock = WRITEVECTOR(shadowval);
+		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
+
+		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
+			/* We have a datarace */
+			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
+			goto Exit;
+		}
+
+
+	}
+Exit:
+	if (race) {
+		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
+		if (raceset->add(race))
+			assert_race(race);
+		else model_free(race);
+	}
+}
diff --git a/datarace.h b/datarace.h
index 2de2a339..f026556a 100644
--- a/datarace.h
+++ b/datarace.h
@@ -44,8 +44,11 @@ struct DataRace {
 
 void initRaceDetector();
 void raceCheckWrite(thread_id_t thread, void *location);
+void atomraceCheckWrite(thread_id_t thread, void *location);
 void raceCheckRead(thread_id_t thread, const void *location);
+void atomraceCheckRead(thread_id_t thread, const void *location);
 void recordWrite(thread_id_t thread, void *location);
+void recordCalloc(void *location, size_t size);
 void assert_race(struct DataRace *race);
 bool hasNonAtomicStore(const void *location);
 void setAtomicStoreFlag(const void *location);
diff --git a/mymemory.cc b/mymemory.cc
index 1f8b6160..5185cd0f 100644
--- a/mymemory.cc
+++ b/mymemory.cc
@@ -11,6 +11,7 @@
 #include "common.h"
 #include "threads-model.h"
 #include "model.h"
+#include "datarace.h"
 
 #define REQUESTS_BEFORE_ALLOC 1024
 
@@ -234,10 +235,12 @@ void * calloc(size_t num, size_t size)
 	if (user_snapshot_space) {
 		void *tmp = mspace_calloc(user_snapshot_space, num, size);
 		ASSERT(tmp);
+		recordCalloc(tmp, num*size);
 		return tmp;
 	} else {
 		void *tmp = HandleEarlyAllocationRequest(size * num);
 		memset(tmp, 0, size * num);
+		recordCalloc(tmp, num*size);
 		return tmp;
 	}
 }