1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
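/*
 * Shadow-memory state for the race detector.  root is the top of a
 * multi-level lookup table mapping each application byte to a 64-bit shadow
 * word (see lookupAddressEntry() below).  memory_base and memory_top bound a
 * pool of pre-allocated ShadowBaseTables handed out by table_calloc(), and
 * raceset de-duplicates detected races by backtrace so that each distinct
 * race is reported only once.
 */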
14 static struct ShadowTable *root;
15 static void *memory_base;
16 static void *memory_top;
17 static RaceSet * raceset;
18
19 static const ModelExecution * get_execution()
20 {
21         return model->get_execution();
22 }
23
24 /** This function initializes the data race detector. */
25 void initRaceDetector()
26 {
27         root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
28         memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
29         memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
30         raceset = new RaceSet();
31 }
32
33 void * table_calloc(size_t size)
34 {
35         if ((((char *)memory_base) + size) > memory_top) {
36                 return snapshot_calloc(size, 1);
37         } else {
38                 void *tmp = memory_base;
39                 memory_base = ((char *)memory_base) + size;
40                 return tmp;
41         }
42 }
43
44 /** This function looks up the entry in the shadow table corresponding to a
45  * given address.*/
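/* A minimal sketch of the lookup, assuming MASK16BIT is 0xffff as the shifts
 * below suggest: with BIT48 the address is split into three 16-bit indices,
 *
 *     bits 47-32 -> root->array[]       (second-level ShadowTable)
 *     bits 31-16 -> currtable->array[]  (ShadowBaseTable)
 *     bits 15-0  -> basetable->array[]  (one uint64_t shadow word per byte)
 *
 * e.g. address 0x7f123456789a uses indices 0x7f12, 0x3456 and 0x789a.
 * Missing intermediate tables are allocated lazily via table_calloc(). */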
46 static uint64_t * lookupAddressEntry(const void *address)
47 {
48         struct ShadowTable *currtable = root;
49 #if BIT48
50         currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51         if (currtable == NULL) {
52                 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
53         }
54 #endif
55
56         struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57         if (basetable == NULL) {
58                 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
59         }
60         return &basetable->array[((uintptr_t)address) & MASK16BIT];
61 }
62
63
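/*
 * Each shadow word uses one of two representations, assuming the usual
 * c11tester encoding from datarace.h:
 *
 *  - a "short" record (ISSHORTRECORD): the last reader's thread id and clock,
 *    the last writer's thread id and clock, and an ATOMICMASK bit packed into
 *    the 64-bit word itself (built with ENCODEOP, decoded with RDTHREADID/
 *    READVECTOR/WRTHREADID/WRITEVECTOR); or
 *  - a pointer to a heap-allocated struct RaceRecord, used once the compact
 *    form no longer fits (multiple readers, or thread id/clock overflow); see
 *    expandRecord() below.
 *
 * A zero word means the byte has not been accessed yet.
 */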
64 bool hasNonAtomicStore(const void *address) {
65         uint64_t * shadow = lookupAddressEntry(address);
66         uint64_t shadowval = *shadow;
67         if (ISSHORTRECORD(shadowval)) {
68                 // Do we have a non-atomic write with a non-zero clock?
69                 return !(ATOMICMASK & shadowval);
70         } else {
71                 if (shadowval == 0)
72                         return true;
73                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74                 return !record->isAtomic;
75         }
76 }
77
78 void setAtomicStoreFlag(const void *address) {
79         uint64_t * shadow = lookupAddressEntry(address);
80         uint64_t shadowval = *shadow;
81         if (ISSHORTRECORD(shadowval)) {
82                 *shadow = shadowval | ATOMICMASK;
83         } else {
84                 if (shadowval == 0) {
85                         *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
86                         return;
87                 }
88                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
89                 record->isAtomic = 1;
90         }
91 }
92
93 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
94         uint64_t * shadow = lookupAddressEntry(address);
95         uint64_t shadowval = *shadow;
96         if (ISSHORTRECORD(shadowval) || shadowval == 0) {
97                 // Compact (or empty) record: decode the writer thread and clock directly.
98                 *thread = WRTHREADID(shadowval);
99                 *clock = WRITEVECTOR(shadowval);
100         } else {
101                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
102                 *thread = record->writeThread;
103                 *clock = record->writeClock;
104         }
105 }
106
107 /**
108  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109  * to check the potential for a data race.
110  * @param clock1 The current clock vector
111  * @param tid1 The current thread; paired with clock1
112  * @param clock2 The clock value for the potentially-racing action
113  * @param tid2 The thread ID for the potentially-racing action
114  * @return true if the current clock allows a race with the event at clock2/tid2
115  */
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117                                                                                                          modelclock_t clock2, thread_id_t tid2)
118 {
119         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
120 }
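/* For example, if clock1->getClock(tid2) == 5, i.e. the current thread has
 * only synchronized with tid2 up through clock 5, then an access by tid2 at
 * clock 7 is not ordered before the current access and may race, whereas an
 * access at clock 4 is already ordered by happens-before and cannot race.
 * clock2 == 0 is treated as "no such access recorded". */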
121
122 /**
123  * Expands a record from the compact form to the full form.  This is
124  * necessary for multiple readers or for very large thread ids or time
125  * stamps. */
126 static void expandRecord(uint64_t *shadow)
127 {
128         uint64_t shadowval = *shadow;
129
130         modelclock_t readClock = READVECTOR(shadowval);
131         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
132         modelclock_t writeClock = WRITEVECTOR(shadowval);
133         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
134
135         struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
136         record->writeThread = writeThread;
137         record->writeClock = writeClock;
138
139         if (readClock != 0) {
140                 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
141                 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
142                 record->numReads = 1;
143                 ASSERT(readThread >= 0);
144                 record->thread[0] = readThread;
145                 record->readClock[0] = readClock;
146         }
147         if (shadowval & ATOMICMASK)
148                 record->isAtomic = 1;
149         *shadow = (uint64_t) record;
150 }
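/* Records are expanded lazily: the checkers below switch to the full
 * RaceRecord form when the thread id exceeds MAXTHREADID, the clock exceeds
 * MAXWRITEVECTOR, or a second live reader has to be remembered.  The read
 * arrays start at INITCAPACITY entries and are grown by fullRaceCheckRead()
 * as needed. */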
151
152 #define FIRST_STACK_FRAME 2
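/* Races are de-duplicated by backtrace: race_hash() and race_equals() compare
 * the captured call stacks while skipping the first FIRST_STACK_FRAME frames,
 * which presumably belong to the detector itself rather than to the racing
 * application code. */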
153
154 unsigned int race_hash(struct DataRace *race) {
155         unsigned int hash = 0;
156         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
157                 hash ^= ((uintptr_t)race->backtrace[i]);
158                 hash = (hash >> 3) | (hash << 29);
159         }
160         return hash;
161 }
162
163 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
164         if (r1->numframes != r2->numframes)
165                 return false;
166         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
167                 if (r1->backtrace[i] != r2->backtrace[i])
168                         return false;
169         }
170         return true;
171 }
172
173 /** This function is called when we detect a data race.  It allocates and fills in a DataRace record describing the two conflicting accesses. */
174 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
175 {
176         struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
177         race->oldthread = oldthread;
178         race->oldclock = oldclock;
179         race->isoldwrite = isoldwrite;
180         race->newaction = newaction;
181         race->isnewwrite = isnewwrite;
182         race->address = address;
183         return race;
184 }
185
186 /**
187  * @brief Assert a data race
188  *
189  * Asserts a data race which is currently realized, causing the execution to
190  * end and stashing a message in the model-checker's bug list
191  *
192  * @param race The race to report
193  */
194 void assert_race(struct DataRace *race)
195 {
196         model_print("Race detected at location: \n");
197         backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
198         model_print("\nData race detected @ address %p:\n"
199                                                         "    Access 1: %5s in thread %2d @ clock %3u\n"
200                                                         "    Access 2: %5s in thread %2d @ clock %3u\n\n",
201                                                         race->address,
202                                                         race->isoldwrite ? "write" : "read",
203                                                         id_to_int(race->oldthread),
204                                                         race->oldclock,
205                                                         race->isnewwrite ? "write" : "read",
206                                                         id_to_int(race->newaction->get_tid()),
207                                                         race->newaction->get_seq_number()
208                                                         );
209 }
210
211 /** This function does race detection for a write on an expanded record. */
212 struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
213 {
214         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
215         struct DataRace * race = NULL;
216
217         /* Check for datarace against last read. */
218
219         for (int i = 0;i < record->numReads;i++) {
220                 modelclock_t readClock = record->readClock[i];
221                 thread_id_t readThread = record->thread[i];
222
223                 /* Note that readClock can't actually be zero here, so it could be
224                          optimized. */
225
226                 if (clock_may_race(currClock, thread, readClock, readThread)) {
227                         /* We have a datarace */
228                         race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
229                         goto Exit;
230                 }
231         }
232
233         /* Check for datarace against last write. */
234         {
235                 modelclock_t writeClock = record->writeClock;
236                 thread_id_t writeThread = record->writeThread;
237
238                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
239                         /* We have a datarace */
240                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
241                         goto Exit;
242                 }
243         }
244 Exit:
245         record->numReads = 0;
246         record->writeThread = thread;
247         record->isAtomic = 0;
248         modelclock_t ourClock = currClock->getClock(thread);
249         record->writeClock = ourClock;
250         return race;
251 }
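/* On exit the record is collapsed to "this write is now the most recent
 * access": pending reads are dropped and the writer thread/clock are
 * overwritten.  atomfullRaceCheckWrite() below is the atomic-store variant:
 * it skips the checks entirely when the previous access was atomic and leaves
 * the record marked isAtomic. */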
252
253 /** This function does race detection on a write. */
254 void raceCheckWrite(thread_id_t thread, void *location)
255 {
256         uint64_t *shadow = lookupAddressEntry(location);
257         uint64_t shadowval = *shadow;
258         ClockVector *currClock = get_execution()->get_cv(thread);
259         if (currClock == NULL)
260                 return;
261
262         struct DataRace * race = NULL;
263         /* Do full record */
264         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
265                 race = fullRaceCheckWrite(thread, location, shadow, currClock);
266                 goto Exit;
267         }
268
269         {
270                 int threadid = id_to_int(thread);
271                 modelclock_t ourClock = currClock->getClock(thread);
272
273                 /* Thread ID is too large or clock is too large. */
274                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
275                         expandRecord(shadow);
276                         race = fullRaceCheckWrite(thread, location, shadow, currClock);
277                         goto Exit;
278                 }
279
280                 {
281                         /* Check for datarace against last read. */
282                         modelclock_t readClock = READVECTOR(shadowval);
283                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
284
285                         if (clock_may_race(currClock, thread, readClock, readThread)) {
286                                 /* We have a datarace */
287                                 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
288                                 goto ShadowExit;
289                         }
290                 }
291
292                 {
293                         /* Check for datarace against last write. */
294                         modelclock_t writeClock = WRITEVECTOR(shadowval);
295                         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
296
297                         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
298                                 /* We have a datarace */
299                                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
300                                 goto ShadowExit;
301                         }
302                 }
303
304 ShadowExit:
305                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
306         }
307
308 Exit:
309         if (race) {
310                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
311                 if (raceset->add(race))
312                         assert_race(race);
313                 else model_free(race);
314         }
315 }
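/* The reporting boilerplate here (and in the other checkers below) captures a
 * backtrace for the race and inserts it into raceset; assuming RaceSet::add()
 * returns true only for newly inserted entries, assert_race() fires the first
 * time an equivalent race is seen and duplicates are freed immediately. */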
316
317 /** This function does race detection for an atomic write on an expanded record. */
318 struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
319 {
320         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
321         struct DataRace * race = NULL;
322
323         if (record->isAtomic)
324                 goto Exit;
325
326         /* Check for datarace against last read. */
327
328         for (int i = 0;i < record->numReads;i++) {
329                 modelclock_t readClock = record->readClock[i];
330                 thread_id_t readThread = record->thread[i];
331
332                 /* Note that readClock can't actually be zero here, so it could be
333                          optimized. */
334
335                 if (clock_may_race(currClock, thread, readClock, readThread)) {
336                         /* We have a datarace */
337                         race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
338                         goto Exit;
339                 }
340         }
341
342         /* Check for datarace against last write. */
343
344         {
345                 modelclock_t writeClock = record->writeClock;
346                 thread_id_t writeThread = record->writeThread;
347
348                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
349                         /* We have a datarace */
350                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
351                         goto Exit;
352                 }
353         }
354 Exit:
355         record->numReads = 0;
356         record->writeThread = thread;
357         record->isAtomic = 1;
358         modelclock_t ourClock = currClock->getClock(thread);
359         record->writeClock = ourClock;
360         return race;
361 }
362
363 /** This function does race detection on an atomic write. */
364 void atomraceCheckWrite(thread_id_t thread, void *location)
365 {
366         uint64_t *shadow = lookupAddressEntry(location);
367         uint64_t shadowval = *shadow;
368         ClockVector *currClock = get_execution()->get_cv(thread);
369         if (currClock == NULL)
370                 return;
371
372         struct DataRace * race = NULL;
373         /* Do full record */
374         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
375                 race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
376                 goto Exit;
377         }
378
379         {
380                 int threadid = id_to_int(thread);
381                 modelclock_t ourClock = currClock->getClock(thread);
382
383                 /* Thread ID is too large or clock is too large. */
384                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
385                         expandRecord(shadow);
386                         race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
387                         goto Exit;
388                 }
389
390                 /* Can't race with atomic */
391                 if (shadowval & ATOMICMASK)
392                         goto ShadowExit;
393
394                 {
395                         /* Check for datarace against last read. */
396                         modelclock_t readClock = READVECTOR(shadowval);
397                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
398
399                         if (clock_may_race(currClock, thread, readClock, readThread)) {
400                                 /* We have a datarace */
401                                 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
402                                 goto ShadowExit;
403                         }
404                 }
405
406                 {
407                         /* Check for datarace against last write. */
408                         modelclock_t writeClock = WRITEVECTOR(shadowval);
409                         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
410
411                         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
412                                 /* We have a datarace */
413                                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
414                                 goto ShadowExit;
415                         }
416                 }
417
418 ShadowExit:
419                 *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
420         }
421
422 Exit:
423         if (race) {
424                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
425                 if (raceset->add(race))
426                         assert_race(race);
427                 else model_free(race);
428         }
429 }
430
431 /** This function records an atomic write to an expanded record; it performs no race checks. */
432 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
433         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
434         record->numReads = 0;
435         record->writeThread = thread;
436         modelclock_t ourClock = currClock->getClock(thread);
437         record->writeClock = ourClock;
438         record->isAtomic = 1;
439 }
440
441 /** This function records a non-atomic write to an expanded record; it performs no race checks. */
442 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
443         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
444         record->numReads = 0;
445         record->writeThread = thread;
446         modelclock_t ourClock = currClock->getClock(thread);
447         record->writeClock = ourClock;
448         record->isAtomic = 0;
449 }
450
451 /** This function just updates shadow metadata for an atomic write; no race check is performed. */
452 void recordWrite(thread_id_t thread, void *location) {
453         uint64_t *shadow = lookupAddressEntry(location);
454         uint64_t shadowval = *shadow;
455         ClockVector *currClock = get_execution()->get_cv(thread);
456         /* Do full record */
457         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
458                 fullRecordWrite(thread, location, shadow, currClock);
459                 return;
460         }
461
462         int threadid = id_to_int(thread);
463         modelclock_t ourClock = currClock->getClock(thread);
464
465         /* Thread ID is too large or clock is too large. */
466         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
467                 expandRecord(shadow);
468                 fullRecordWrite(thread, location, shadow, currClock);
469                 return;
470         }
471
472         *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
473 }
474
475 /** This function marks each byte of a freshly calloc'd region as a non-atomic write by the current thread. */
476 void recordCalloc(void *location, size_t size) {
477         thread_id_t thread = thread_current()->get_id();
478         for(;size != 0;size--) {
479                 uint64_t *shadow = lookupAddressEntry(location);
480                 uint64_t shadowval = *shadow;
481                 ClockVector *currClock = get_execution()->get_cv(thread);
482                 /* Do full record */
483                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
484                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
485                         return;
486                 }
487
488                 int threadid = id_to_int(thread);
489                 modelclock_t ourClock = currClock->getClock(thread);
490
491                 /* Thread ID is too large or clock is too large. */
492                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
493                         expandRecord(shadow);
494                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
495                         return;
496                 }
497
498                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
499                 location = (void *)(((char *) location) + 1);
500         }
501 }
502
503 /** This function does race detection on a read for an expanded record. */
504 struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
505 {
506         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
507         struct DataRace * race = NULL;
508         /* Check for datarace against last write. */
509
510         modelclock_t writeClock = record->writeClock;
511         thread_id_t writeThread = record->writeThread;
512
513         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
514                 /* We have a datarace */
515                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
516         }
517
518         /* Shorten vector when possible */
519
520         int copytoindex = 0;
521
522         for (int i = 0;i < record->numReads;i++) {
523                 modelclock_t readClock = record->readClock[i];
524                 thread_id_t readThread = record->thread[i];
525
526                 /*  Note that this is not really a datarace check as reads cannot
527                                 actually race.  It is just determining that this read subsumes
528                                 another in the sense that either this read races or neither
529                                 read races. Note that readClock can't actually be zero, so it
530                                 could be optimized.  */
531
532                 if (clock_may_race(currClock, thread, readClock, readThread)) {
533                         /* Still need this read in vector */
534                         if (copytoindex != i) {
535                                 ASSERT(record->thread[i] >= 0);
536                                 record->readClock[copytoindex] = record->readClock[i];
537                                 record->thread[copytoindex] = record->thread[i];
538                         }
539                         copytoindex++;
540                 }
541         }
542
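        /* Capacity-management sketch: the capacity of the read arrays is not
           stored explicitly.  It starts at INITCAPACITY and is only ever
           doubled, so (assuming INITCAPACITY is a power of two) the arrays can
           be full only when the retained count is itself a power of two, i.e.
           __builtin_popcount(copytoindex) <= 1.  The branch below
           conservatively (re)allocates in exactly those cases. */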
543         if (__builtin_popcount(copytoindex) <= 1) {
544                 if (copytoindex == 0) {
545                         int newCapacity = INITCAPACITY;
546                         record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
547                         record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
548                 } else if (copytoindex>=INITCAPACITY) {
549                         int newCapacity = copytoindex * 2;
550                         thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
551                         modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
552                         std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
553                         std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
554                         snapshot_free(record->readClock);
555                         snapshot_free(record->thread);
556                         record->readClock = newreadClock;
557                         record->thread = newthread;
558                 }
559         }
560
561         modelclock_t ourClock = currClock->getClock(thread);
562
563         ASSERT(thread >= 0);
564         record->thread[copytoindex] = thread;
565         record->readClock[copytoindex] = ourClock;
566         record->numReads = copytoindex + 1;
567         return race;
568 }
569
570 /** This function does race detection on a read. */
571 void raceCheckRead(thread_id_t thread, const void *location)
572 {
573         uint64_t *shadow = lookupAddressEntry(location);
574         uint64_t shadowval = *shadow;
575         ClockVector *currClock = get_execution()->get_cv(thread);
576         if (currClock == NULL)
577                 return;
578
579         struct DataRace * race = NULL;
580
581         /* Do full record */
582         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
583                 race = fullRaceCheckRead(thread, location, shadow, currClock);
584                 goto Exit;
585         }
586
587         {
588                 int threadid = id_to_int(thread);
589                 modelclock_t ourClock = currClock->getClock(thread);
590
591                 /* Thread ID is too large or clock is too large. */
592                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
593                         expandRecord(shadow);
594                         race = fullRaceCheckRead(thread, location, shadow, currClock);
595                         goto Exit;
596                 }
597
598                 /* Check for datarace against last write. */
599
600                 modelclock_t writeClock = WRITEVECTOR(shadowval);
601                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
602
603                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
604                         /* We have a datarace */
605                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
606                         goto ShadowExit;
607                 }
608
609 ShadowExit:
610                 {
611                         modelclock_t readClock = READVECTOR(shadowval);
612                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
613
614                         if (clock_may_race(currClock, thread, readClock, readThread)) {
615                                 /* We don't subsume this read... Have to expand record. */
616                                 expandRecord(shadow);
617                                 fullRaceCheckRead(thread, location, shadow, currClock);
618                                 goto Exit;
619                         }
620                 }
621
622                 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
623         }
624 Exit:
625         if (race) {
626                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
627                 if (raceset->add(race))
628                         assert_race(race);
629                 else model_free(race);
630         }
631 }
632
633
634 /** This function does race detection on an atomic read for an expanded record. */
635 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
636 {
637         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
638         struct DataRace * race = NULL;
639         /* Check for datarace against last write. */
640         if (record->isAtomic)
641                 return NULL;
642
643         modelclock_t writeClock = record->writeClock;
644         thread_id_t writeThread = record->writeThread;
645
646         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
647                 /* We have a datarace */
648                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
649         }
650         return race;
651 }
652
653 /** This function does race detection on an atomic read. */
654 void atomraceCheckRead(thread_id_t thread, const void *location)
655 {
656         uint64_t *shadow = lookupAddressEntry(location);
657         uint64_t shadowval = *shadow;
658         ClockVector *currClock = get_execution()->get_cv(thread);
659         if (currClock == NULL)
660                 return;
661
662         struct DataRace * race = NULL;
663
664         /* Do full record */
665         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
666                 race = atomfullRaceCheckRead(thread, location, shadow, currClock);
667                 goto Exit;
668         }
669
670         if (shadowval & ATOMICMASK)
671                 return;
672
673         {
674                 /* Check for datarace against last write. */
675                 modelclock_t writeClock = WRITEVECTOR(shadowval);
676                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
677
678                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
679                         /* We have a datarace */
680                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
681                         goto Exit;
682                 }
683
684
685         }
686 Exit:
687         if (race) {
688                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
689                 if (raceset->add(race))
690                         assert_race(race);
691                 else model_free(race);
692         }
693 }
694
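/*
 * Multi-byte accesses use a fast path.  raceCheckRead_firstIt() fully checks
 * the first byte and reports, via old_val/new_val, the shadow-word transition
 * it applied (they stay INVALIDSHADOWVAL when a slow path was taken).  The
 * sized entry points below (raceCheckRead64/32/16/8) then try to apply the
 * same old->new transition to the remaining bytes' shadow words; any byte
 * whose shadow word differs falls back to the per-byte slow path
 * raceCheckRead_otherIt().  CHECKBOUNDARY and INVALIDSHADOWVAL are assumed to
 * come from datarace.h: the former presumably checks that all bytes of the
 * access fall in the same ShadowBaseTable, so that shadow[1..n] are valid
 * contiguous entries, and the latter is a sentinel that never equals a real
 * shadow word.
 */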
695 static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
696 {
697         uint64_t *shadow = lookupAddressEntry(location);
698         uint64_t shadowval = *shadow;
699
700         ClockVector *currClock = get_execution()->get_cv(thread);
701         if (currClock == NULL)
702                 return shadow;
703
704         struct DataRace * race = NULL;
705
706         /* Do full record */
707         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
708                 race = fullRaceCheckRead(thread, location, shadow, currClock);
709                 goto Exit;
710         }
711
712         {
713                 int threadid = id_to_int(thread);
714                 modelclock_t ourClock = currClock->getClock(thread);
715
716                 /* Thread ID is too large or clock is too large. */
717                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
718                         expandRecord(shadow);
719                         race = fullRaceCheckRead(thread, location, shadow, currClock);
720                         goto Exit;
721                 }
722
723                 /* Check for datarace against last write. */
724                 modelclock_t writeClock = WRITEVECTOR(shadowval);
725                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
726
727                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
728                         /* We have a datarace */
729                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
730                 }
731
732                 modelclock_t readClock = READVECTOR(shadowval);
733                 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
734
735                 if (clock_may_race(currClock, thread, readClock, readThread)) {
736                         /* We don't subsume this read... Have to expand record. */
737                         expandRecord(shadow);
738                         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
739                         record->thread[1] = thread;
740                         record->readClock[1] = ourClock;
741                         record->numReads++;
742
743                         goto Exit;
744                 }
745
746                 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
747
748                 *old_val = shadowval;
749                 *new_val = *shadow;
750         }
751 Exit:
752         if (race) {
753                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
754                 if (raceset->add(race))
755                         assert_race(race);
756                 else model_free(race);
757         }
758
759         return shadow;
760 }
761
762 static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
763         uint64_t *shadow = lookupAddressEntry(location);
764
765         uint64_t shadowval = *shadow;
766
767         ClockVector *currClock = get_execution()->get_cv(thread);
768         if (currClock == NULL)
769                 return;
770
771         struct DataRace * race = NULL;
772
773         /* Do full record */
774         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
775                 race = fullRaceCheckRead(thread, location, shadow, currClock);
776                 goto Exit;
777         }
778
779         {
780                 int threadid = id_to_int(thread);
781                 modelclock_t ourClock = currClock->getClock(thread);
782
783                 /* Thread ID is too large or clock is too large. */
784                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
785                         expandRecord(shadow);
786                         race = fullRaceCheckRead(thread, location, shadow, currClock);
787                         goto Exit;
788                 }
789
790                 /* Check for datarace against last write. */
791                 modelclock_t writeClock = WRITEVECTOR(shadowval);
792                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
793
794                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
795                         /* We have a datarace */
796                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
797                 }
798
799                 modelclock_t readClock = READVECTOR(shadowval);
800                 thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
801
802                 if (clock_may_race(currClock, thread, readClock, readThread)) {
803                         /* We don't subsume this read... Have to expand record. */
804                         expandRecord(shadow);
805                         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
806                         record->thread[1] = thread;
807                         record->readClock[1] = ourClock;
808                         record->numReads++;
809
810                         goto Exit;
811                 }
812
813                 *shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
814         }
815 Exit:
816         if (race) {
817                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
818                 if (raceset->add(race))
819                         assert_race(race);
820                 else model_free(race);
821         }
822 }
823
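/* A sketch of the fall-through ladder used by raceCheckRead64 and
 * raceCheckRead32 below (and their write counterparts): if, say, shadow[3]
 * does not match old_shadowval, control jumps to L3 and then falls through
 * L4..L7, so byte 3 and every later byte get the full per-byte check, while
 * the bytes before the first mismatch were already updated on the fast path.
 * If CHECKBOUNDARY() fails, execution starts at L1 and every trailing byte is
 * checked individually. */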
824 void raceCheckRead64(thread_id_t thread, const void *location)
825 {
826         uint64_t old_shadowval, new_shadowval;
827         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
828
829         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
830         if (CHECKBOUNDARY(location, 7)) {
831                 if (shadow[1]==old_shadowval)
832                         shadow[1] = new_shadowval;
833                 else goto L1;
834                 if (shadow[2]==old_shadowval)
835                         shadow[2] = new_shadowval;
836                 else goto L2;
837                 if (shadow[3]==old_shadowval)
838                         shadow[3] = new_shadowval;
839                 else goto L3;
840                 if (shadow[4]==old_shadowval)
841                         shadow[4] = new_shadowval;
842                 else goto L4;
843                 if (shadow[5]==old_shadowval)
844                         shadow[5] = new_shadowval;
845                 else goto L5;
846                 if (shadow[6]==old_shadowval)
847                         shadow[6] = new_shadowval;
848                 else goto L6;
849                 if (shadow[7]==old_shadowval)
850                         shadow[7] = new_shadowval;
851                 else goto L7;
852                 return;
853         }
854
855 L1:
856         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
857 L2:
858         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
859 L3:
860         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
861 L4:
862         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
863 L5:
864         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
865 L6:
866         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
867 L7:
868         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
869 }
870
871 void raceCheckRead32(thread_id_t thread, const void *location)
872 {
873         uint64_t old_shadowval, new_shadowval;
874         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
875
876         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
877         if (CHECKBOUNDARY(location, 3)) {
878                 if (shadow[1]==old_shadowval)
879                         shadow[1] = new_shadowval;
880                 else goto L1;
881                 if (shadow[2]==old_shadowval)
882                         shadow[2] = new_shadowval;
883                 else goto L2;
884                 if (shadow[3]==old_shadowval)
885                         shadow[3] = new_shadowval;
886                 else goto L3;
887                 return;
888         }
889
890 L1:
891         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
892 L2:
893         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
894 L3:
895         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
896 }
897
898 void raceCheckRead16(thread_id_t thread, const void *location)
899 {
900         uint64_t old_shadowval, new_shadowval;
901         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
902
903
904         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
905         if (CHECKBOUNDARY(location, 1)) {
906                 if (shadow[1]==old_shadowval) {
907                         shadow[1] = new_shadowval;
908                         return;
909                 }
910         }
911         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
912 }
913
914 void raceCheckRead8(thread_id_t thread, const void *location)
915 {
916         uint64_t old_shadowval, new_shadowval;
917         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
918
919         raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
920 }
921
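/* The write-side helpers below mirror the read-side pair above:
 * raceCheckWrite_firstIt() checks the first byte and records the shadow-word
 * transition, and raceCheckWrite_otherIt() is the per-byte fallback used by
 * raceCheckWrite64/32/16/8. */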
922 static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
923 {
924         uint64_t *shadow = lookupAddressEntry(location);
925         uint64_t shadowval = *shadow;
926         ClockVector *currClock = get_execution()->get_cv(thread);
927         if (currClock == NULL)
928                 return shadow;
929
930         struct DataRace * race = NULL;
931         /* Do full record */
932         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
933                 race = fullRaceCheckWrite(thread, location, shadow, currClock);
934                 goto Exit;
935         }
936
937         {
938                 int threadid = id_to_int(thread);
939                 modelclock_t ourClock = currClock->getClock(thread);
940
941                 /* Thread ID is too large or clock is too large. */
942                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
943                         expandRecord(shadow);
944                         race = fullRaceCheckWrite(thread, location, shadow, currClock);
945                         goto Exit;
946                 }
947
948                 {
949                         /* Check for datarace against last read. */
950                         modelclock_t readClock = READVECTOR(shadowval);
951                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
952
953                         if (clock_may_race(currClock, thread, readClock, readThread)) {
954                                 /* We have a datarace */
955                                 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
956                                 goto ShadowExit;
957                         }
958                 }
959
960                 {
961                         /* Check for datarace against last write. */
962                         modelclock_t writeClock = WRITEVECTOR(shadowval);
963                         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
964
965                         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
966                                 /* We have a datarace */
967                                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
968                                 goto ShadowExit;
969                         }
970                 }
971
972 ShadowExit:
973                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
974
975                 *old_val = shadowval;
976                 *new_val = *shadow;
977         }
978
979 Exit:
980         if (race) {
981                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
982                 if (raceset->add(race))
983                         assert_race(race);
984                 else model_free(race);
985         }
986
987         return shadow;
988 }
989
990 static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
991         uint64_t *shadow = lookupAddressEntry(location);
992
993         uint64_t shadowval = *shadow;
994
995         ClockVector *currClock = get_execution()->get_cv(thread);
996         if (currClock == NULL)
997                 return;
998
999         struct DataRace * race = NULL;
1000         /* Do full record */
1001         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
1002                 race = fullRaceCheckWrite(thread, location, shadow, currClock);
1003                 goto Exit;
1004         }
1005
1006         {
1007                 int threadid = id_to_int(thread);
1008                 modelclock_t ourClock = currClock->getClock(thread);
1009
1010                 /* Thread ID is too large or clock is too large. */
1011                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
1012                         expandRecord(shadow);
1013                         race = fullRaceCheckWrite(thread, location, shadow, currClock);
1014                         goto Exit;
1015                 }
1016
1017                 {
1018                         /* Check for datarace against last read. */
1019                         modelclock_t readClock = READVECTOR(shadowval);
1020                         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
1021
1022                         if (clock_may_race(currClock, thread, readClock, readThread)) {
1023                                 /* We have a datarace */
1024                                 race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
1025                                 goto ShadowExit;
1026                         }
1027                 }
1028
1029                 {
1030                         /* Check for datarace against last write. */
1031                         modelclock_t writeClock = WRITEVECTOR(shadowval);
1032                         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
1033
1034                         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
1035                                 /* We have a datarace */
1036                                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
1037                                 goto ShadowExit;
1038                         }
1039                 }
1040
1041 ShadowExit:
1042                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
1043         }
1044
1045 Exit:
1046         if (race) {
1047                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
1048                 if (raceset->add(race))
1049                         assert_race(race);
1050                 else model_free(race);
1051         }
1052 }
1053
1054 void raceCheckWrite64(thread_id_t thread, const void *location)
1055 {
1056         uint64_t old_shadowval, new_shadowval;
1057         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1058
1059         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1060         if (CHECKBOUNDARY(location, 7)) {
1061                 if (shadow[1]==old_shadowval)
1062                         shadow[1] = new_shadowval;
1063                 else goto L1;
1064                 if (shadow[2]==old_shadowval)
1065                         shadow[2] = new_shadowval;
1066                 else goto L2;
1067                 if (shadow[3]==old_shadowval)
1068                         shadow[3] = new_shadowval;
1069                 else goto L3;
1070                 if (shadow[4]==old_shadowval)
1071                         shadow[4] = new_shadowval;
1072                 else goto L4;
1073                 if (shadow[5]==old_shadowval)
1074                         shadow[5] = new_shadowval;
1075                 else goto L5;
1076                 if (shadow[6]==old_shadowval)
1077                         shadow[6] = new_shadowval;
1078                 else goto L6;
1079                 if (shadow[7]==old_shadowval)
1080                         shadow[7] = new_shadowval;
1081                 else goto L7;
1082                 return;
1083         }
1084
1085 L1:
1086         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1087 L2:
1088         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1089 L3:
1090         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1091 L4:
1092         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1093 L5:
1094         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1095 L6:
1096         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1097 L7:
1098         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
1099 }
1100
1101 void raceCheckWrite32(thread_id_t thread, const void *location)
1102 {
1103         uint64_t old_shadowval, new_shadowval;
1104         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1105
1106         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1107         if (CHECKBOUNDARY(location, 3)) {
1108                 if (shadow[1]==old_shadowval)
1109                         shadow[1] = new_shadowval;
1110                 else goto L1;
1111                 if (shadow[2]==old_shadowval)
1112                         shadow[2] = new_shadowval;
1113                 else goto L2;
1114                 if (shadow[3]==old_shadowval)
1115                         shadow[3] = new_shadowval;
1116                 else goto L3;
1117                 return;
1118         }
1119
1120 L1:
1121         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1122 L2:
1123         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1124 L3:
1125         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1126 }
1127
1128 void raceCheckWrite16(thread_id_t thread, const void *location)
1129 {
1130         uint64_t old_shadowval, new_shadowval;
1131         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1132
1133         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1134         if (CHECKBOUNDARY(location, 1)) {
1135                 if (shadow[1]==old_shadowval) {
1136                         shadow[1] = new_shadowval;
1137                         return;
1138                 }
1139         }
1140         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1141 }
1142
1143 void raceCheckWrite8(thread_id_t thread, const void *location)
1144 {
1145         uint64_t old_shadowval, new_shadowval;
1146         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1147
1148         raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1149 }