3 #include "threads-model.h"
7 #include "clockvector.h"
10 #include "execution.h"
11 #include "stl-model.h"
/* Root of the multi-level shadow table mapping application addresses to
 * race-detection metadata; lazily populated by lookupAddressEntry(). */
static struct ShadowTable *root;
/* Bump-allocator arena for shadow (base) tables; see table_calloc(). */
static void *memory_base;
static void *memory_top;
/* Set of distinct races reported so far, deduplicated by backtrace
 * (race_hash/race_equals). */
static RaceSet * raceset;
19 static const ModelExecution * get_execution()
21 return model->get_execution();
24 /** This function initialized the data race detector. */
25 void initRaceDetector()
27 root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28 memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29 memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30 raceset = new RaceSet();
33 void * table_calloc(size_t size)
35 if ((((char *)memory_base) + size) > memory_top) {
36 return snapshot_calloc(size, 1);
38 void *tmp = memory_base;
39 memory_base = ((char *)memory_base) + size;
44 /** This function looks up the entry in the shadow table corresponding to a
46 static uint64_t * lookupAddressEntry(const void *address)
48 struct ShadowTable *currtable = root;
50 currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51 if (currtable == NULL) {
52 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
56 struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57 if (basetable == NULL) {
58 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
60 return &basetable->array[((uintptr_t)address) & MASK16BIT];
64 bool hasNonAtomicStore(const void *address) {
65 uint64_t * shadow = lookupAddressEntry(address);
66 uint64_t shadowval = *shadow;
67 if (ISSHORTRECORD(shadowval)) {
68 //Do we have a non atomic write with a non-zero clock
69 return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
73 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74 return !record->isAtomic && record->writeClock != 0;
78 void setAtomicStoreFlag(const void *address) {
79 uint64_t * shadow = lookupAddressEntry(address);
80 uint64_t shadowval = *shadow;
81 if (ISSHORTRECORD(shadowval)) {
82 *shadow = shadowval | ATOMICMASK;
86 struct RaceRecord *record = (struct RaceRecord *)shadowval;
91 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
92 uint64_t * shadow = lookupAddressEntry(address);
93 uint64_t shadowval = *shadow;
94 if (ISSHORTRECORD(shadowval)) {
95 //Do we have a non atomic write with a non-zero clock
96 *thread = WRTHREADID(shadowval);
97 *clock = WRITEVECTOR(shadowval);
99 struct RaceRecord *record = (struct RaceRecord *)shadowval;
100 *thread = record->writeThread;
101 *clock = record->writeClock;
106 * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
107 * to check the potential for a data race.
108 * @param clock1 The current clock vector
109 * @param tid1 The current thread; paired with clock1
110 * @param clock2 The clock value for the potentially-racing action
111 * @param tid2 The thread ID for the potentially-racing action
112 * @return true if the current clock allows a race with the event at clock2/tid2
114 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
115 modelclock_t clock2, thread_id_t tid2)
117 return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
121 * Expands a record from the compact form to the full form. This is
122 * necessary for multiple readers or for very large thread ids or time
124 static void expandRecord(uint64_t *shadow)
126 uint64_t shadowval = *shadow;
128 modelclock_t readClock = READVECTOR(shadowval);
129 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
130 modelclock_t writeClock = WRITEVECTOR(shadowval);
131 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
133 struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
134 record->writeThread = writeThread;
135 record->writeClock = writeClock;
137 if (readClock != 0) {
138 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
139 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
140 record->numReads = 1;
141 ASSERT(readThread >= 0);
142 record->thread[0] = readThread;
143 record->readClock[0] = readClock;
145 if (shadowval & ATOMICMASK)
146 record->isAtomic = 1;
147 *shadow = (uint64_t) record;
150 #define FIRST_STACK_FRAME 2
152 unsigned int race_hash(struct DataRace *race) {
153 unsigned int hash = 0;
154 for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
155 hash ^= ((uintptr_t)race->backtrace[i]);
156 hash = (hash >> 3) | (hash << 29);
162 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
163 if (r1->numframes != r2->numframes)
165 for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
166 if (r1->backtrace[i] != r2->backtrace[i])
172 /** This function is called when we detect a data race.*/
173 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
175 struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
176 race->oldthread = oldthread;
177 race->oldclock = oldclock;
178 race->isoldwrite = isoldwrite;
179 race->newaction = newaction;
180 race->isnewwrite = isnewwrite;
181 race->address = address;
186 * @brief Assert a data race
188 * Asserts a data race which is currently realized, causing the execution to
189 * end and stashing a message in the model-checker's bug list
191 * @param race The race to report
193 void assert_race(struct DataRace *race)
195 model_print("Race detected at location: \n");
196 backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
197 model_print("\nData race detected @ address %p:\n"
198 " Access 1: %5s in thread %2d @ clock %3u\n"
199 " Access 2: %5s in thread %2d @ clock %3u\n\n",
201 race->isoldwrite ? "write" : "read",
202 id_to_int(race->oldthread),
204 race->isnewwrite ? "write" : "read",
205 id_to_int(race->newaction->get_tid()),
206 race->newaction->get_seq_number()
210 /** This function does race detection for a write on an expanded record. */
211 struct DataRace * fullRaceCheckWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock)
213 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
214 struct DataRace * race = NULL;
216 /* Check for datarace against last read. */
218 for (int i = 0;i < record->numReads;i++) {
219 modelclock_t readClock = record->readClock[i];
220 thread_id_t readThread = record->thread[i];
222 /* Note that readClock can't actuall be zero here, so it could be
225 if (clock_may_race(currClock, thread, readClock, readThread)) {
226 /* We have a datarace */
227 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
232 /* Check for datarace against last write. */
235 modelclock_t writeClock = record->writeClock;
236 thread_id_t writeThread = record->writeThread;
238 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
239 /* We have a datarace */
240 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
245 record->numReads = 0;
246 record->writeThread = thread;
247 record->isAtomic = 0;
248 modelclock_t ourClock = currClock->getClock(thread);
249 record->writeClock = ourClock;
253 /** This function does race detection on a write. */
254 void raceCheckWrite(thread_id_t thread, void *location)
256 uint64_t *shadow = lookupAddressEntry(location);
257 uint64_t shadowval = *shadow;
258 ClockVector *currClock = get_execution()->get_cv(thread);
259 struct DataRace * race = NULL;
261 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
262 race = fullRaceCheckWrite(thread, location, shadow, currClock);
267 int threadid = id_to_int(thread);
268 modelclock_t ourClock = currClock->getClock(thread);
270 /* Thread ID is too large or clock is too large. */
271 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
272 expandRecord(shadow);
273 race = fullRaceCheckWrite(thread, location, shadow, currClock);
280 /* Check for datarace against last read. */
282 modelclock_t readClock = READVECTOR(shadowval);
283 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
285 if (clock_may_race(currClock, thread, readClock, readThread)) {
286 /* We have a datarace */
287 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
293 /* Check for datarace against last write. */
295 modelclock_t writeClock = WRITEVECTOR(shadowval);
296 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
298 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
299 /* We have a datarace */
300 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
306 *shadow = ENCODEOP(0, 0, threadid, ourClock);
311 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
312 if (raceset->add(race))
314 else model_free(race);
318 /** This function does race detection for a write on an expanded record. */
319 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
320 struct RaceRecord *record = (struct RaceRecord *)(*shadow);
321 record->numReads = 0;
322 record->writeThread = thread;
323 modelclock_t ourClock = currClock->getClock(thread);
324 record->writeClock = ourClock;
325 record->isAtomic = 1;
328 /** This function just updates metadata on atomic write. */
329 void recordWrite(thread_id_t thread, void *location) {
330 uint64_t *shadow = lookupAddressEntry(location);
331 uint64_t shadowval = *shadow;
332 ClockVector *currClock = get_execution()->get_cv(thread);
334 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
335 fullRecordWrite(thread, location, shadow, currClock);
339 int threadid = id_to_int(thread);
340 modelclock_t ourClock = currClock->getClock(thread);
342 /* Thread ID is too large or clock is too large. */
343 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
344 expandRecord(shadow);
345 fullRecordWrite(thread, location, shadow, currClock);
349 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
354 /** This function does race detection on a read for an expanded record. */
355 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
357 struct RaceRecord *record = (struct RaceRecord *) (*shadow);
358 struct DataRace * race = NULL;
359 /* Check for datarace against last write. */
361 modelclock_t writeClock = record->writeClock;
362 thread_id_t writeThread = record->writeThread;
364 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
365 /* We have a datarace */
366 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
369 /* Shorten vector when possible */
373 for (int i = 0;i < record->numReads;i++) {
374 modelclock_t readClock = record->readClock[i];
375 thread_id_t readThread = record->thread[i];
377 /* Note that is not really a datarace check as reads cannot
378 actually race. It is just determining that this read subsumes
379 another in the sense that either this read races or neither
380 read races. Note that readClock can't actually be zero, so it
381 could be optimized. */
383 if (clock_may_race(currClock, thread, readClock, readThread)) {
384 /* Still need this read in vector */
385 if (copytoindex != i) {
386 ASSERT(record->thread[i] >= 0);
387 record->readClock[copytoindex] = record->readClock[i];
388 record->thread[copytoindex] = record->thread[i];
394 if (__builtin_popcount(copytoindex) <= 1) {
395 if (copytoindex == 0) {
396 int newCapacity = INITCAPACITY;
397 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
398 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
399 } else if (copytoindex>=INITCAPACITY) {
400 int newCapacity = copytoindex * 2;
401 thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
402 modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
403 std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
404 std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
405 snapshot_free(record->readClock);
406 snapshot_free(record->thread);
407 record->readClock = newreadClock;
408 record->thread = newthread;
412 modelclock_t ourClock = currClock->getClock(thread);
415 record->thread[copytoindex] = thread;
416 record->readClock[copytoindex] = ourClock;
417 record->numReads = copytoindex + 1;
421 /** This function does race detection on a read. */
422 void raceCheckRead(thread_id_t thread, const void *location)
424 uint64_t *shadow = lookupAddressEntry(location);
425 uint64_t shadowval = *shadow;
426 ClockVector *currClock = get_execution()->get_cv(thread);
427 struct DataRace * race = NULL;
430 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
431 race = fullRaceCheckRead(thread, location, shadow, currClock);
436 int threadid = id_to_int(thread);
437 modelclock_t ourClock = currClock->getClock(thread);
439 /* Thread ID is too large or clock is too large. */
440 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
441 expandRecord(shadow);
442 race = fullRaceCheckRead(thread, location, shadow, currClock);
446 /* Check for datarace against last write. */
448 modelclock_t writeClock = WRITEVECTOR(shadowval);
449 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
451 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
452 /* We have a datarace */
453 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
459 modelclock_t readClock = READVECTOR(shadowval);
460 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
462 if (clock_may_race(currClock, thread, readClock, readThread)) {
463 /* We don't subsume this read... Have to expand record. */
464 expandRecord(shadow);
465 fullRaceCheckRead(thread, location, shadow, currClock);
470 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
474 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
475 if (raceset->add(race))
477 else model_free(race);