/* [c11tester.git] / datarace.cc */
1 #include "datarace.h"
2 #include "model.h"
3 #include "threads-model.h"
4 #include <stdio.h>
5 #include <cstring>
6 #include "mymemory.h"
7 #include "clockvector.h"
8 #include "config.h"
9 #include "action.h"
10 #include "execution.h"
11 #include "stl-model.h"
12 #include <execinfo.h>
13
14 static struct ShadowTable *root;
15 static void *memory_base;
16 static void *memory_top;
17 static RaceSet * raceset;
18
/** @brief Convenience accessor for the model-checker's current execution. */
static const ModelExecution * get_execution()
{
	return model->get_execution();
}
23
/** This function initializes the data race detector. */
void initRaceDetector()
{
	/* Top-level shadow table; lower-level tables are filled in lazily. */
	root = (struct ShadowTable *)snapshot_calloc(sizeof(struct ShadowTable), 1);
	/* Pre-reserve a zeroed bump-allocation arena for base tables (see table_calloc). */
	memory_base = snapshot_calloc(sizeof(struct ShadowBaseTable) * SHADOWBASETABLES, 1);
	memory_top = ((char *)memory_base) + sizeof(struct ShadowBaseTable) * SHADOWBASETABLES;
	/* Set of already-reported races, deduplicated by backtrace. */
	raceset = new RaceSet();
}
32
33 void * table_calloc(size_t size)
34 {
35         if ((((char *)memory_base) + size) > memory_top) {
36                 return snapshot_calloc(size, 1);
37         } else {
38                 void *tmp = memory_base;
39                 memory_base = ((char *)memory_base) + size;
40                 return tmp;
41         }
42 }
43
44 /** This function looks up the entry in the shadow table corresponding to a
45  * given address.*/
46 static uint64_t * lookupAddressEntry(const void *address)
47 {
48         struct ShadowTable *currtable = root;
49 #if BIT48
50         currtable = (struct ShadowTable *) currtable->array[(((uintptr_t)address) >> 32) & MASK16BIT];
51         if (currtable == NULL) {
52                 currtable = (struct ShadowTable *)(root->array[(((uintptr_t)address) >> 32) & MASK16BIT] = table_calloc(sizeof(struct ShadowTable)));
53         }
54 #endif
55
56         struct ShadowBaseTable *basetable = (struct ShadowBaseTable *)currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT];
57         if (basetable == NULL) {
58                 basetable = (struct ShadowBaseTable *)(currtable->array[(((uintptr_t)address) >> 16) & MASK16BIT] = table_calloc(sizeof(struct ShadowBaseTable)));
59         }
60         return &basetable->array[((uintptr_t)address) & MASK16BIT];
61 }
62
63
64 bool hasNonAtomicStore(const void *address) {
65         uint64_t * shadow = lookupAddressEntry(address);
66         uint64_t shadowval = *shadow;
67         if (ISSHORTRECORD(shadowval)) {
68                 //Do we have a non atomic write with a non-zero clock
69                 return !(ATOMICMASK & shadowval);
70         } else {
71                 if (shadowval == 0)
72                         return true;
73                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
74                 return !record->isAtomic;
75         }
76 }
77
78 void setAtomicStoreFlag(const void *address) {
79         uint64_t * shadow = lookupAddressEntry(address);
80         uint64_t shadowval = *shadow;
81         if (ISSHORTRECORD(shadowval)) {
82                 *shadow = shadowval | ATOMICMASK;
83         } else {
84                 if (shadowval == 0) {
85                         *shadow = ATOMICMASK | ENCODEOP(0, 0, 0, 0);
86                         return;
87                 }
88                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
89                 record->isAtomic = 1;
90         }
91 }
92
93 void getStoreThreadAndClock(const void *address, thread_id_t * thread, modelclock_t * clock) {
94         uint64_t * shadow = lookupAddressEntry(address);
95         uint64_t shadowval = *shadow;
96         if (ISSHORTRECORD(shadowval) || shadowval == 0) {
97                 //Do we have a non atomic write with a non-zero clock
98                 *thread = WRTHREADID(shadowval);
99                 *clock = WRITEVECTOR(shadowval);
100         } else {
101                 struct RaceRecord *record = (struct RaceRecord *)shadowval;
102                 *thread = record->writeThread;
103                 *clock = record->writeClock;
104         }
105 }
106
107 /**
108  * Compares a current clock-vector/thread-ID pair with a clock/thread-ID pair
109  * to check the potential for a data race.
110  * @param clock1 The current clock vector
111  * @param tid1 The current thread; paired with clock1
112  * @param clock2 The clock value for the potentially-racing action
113  * @param tid2 The thread ID for the potentially-racing action
114  * @return true if the current clock allows a race with the event at clock2/tid2
115  */
116 static bool clock_may_race(ClockVector *clock1, thread_id_t tid1,
117                                                                                                          modelclock_t clock2, thread_id_t tid2)
118 {
119         return tid1 != tid2 && clock2 != 0 && clock1->getClock(tid2) <= clock2;
120 }
121
122 /**
123  * Expands a record from the compact form to the full form.  This is
124  * necessary for multiple readers or for very large thread ids or time
125  * stamps. */
126 static void expandRecord(uint64_t *shadow)
127 {
128         uint64_t shadowval = *shadow;
129
130         modelclock_t readClock = READVECTOR(shadowval);
131         thread_id_t readThread = int_to_id(RDTHREADID(shadowval));
132         modelclock_t writeClock = WRITEVECTOR(shadowval);
133         thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
134
135         struct RaceRecord *record = (struct RaceRecord *)snapshot_calloc(1, sizeof(struct RaceRecord));
136         record->writeThread = writeThread;
137         record->writeClock = writeClock;
138
139         if (readClock != 0) {
140                 record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * INITCAPACITY);
141                 record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * INITCAPACITY);
142                 record->numReads = 1;
143                 ASSERT(readThread >= 0);
144                 record->thread[0] = readThread;
145                 record->readClock[0] = readClock;
146         }
147         if (shadowval & ATOMICMASK)
148                 record->isAtomic = 1;
149         *shadow = (uint64_t) record;
150 }
151
152 #define FIRST_STACK_FRAME 2
153
154 unsigned int race_hash(struct DataRace *race) {
155         unsigned int hash = 0;
156         for(int i=FIRST_STACK_FRAME;i < race->numframes;i++) {
157                 hash ^= ((uintptr_t)race->backtrace[i]);
158                 hash = (hash >> 3) | (hash << 29);
159         }
160         return hash;
161 }
162
163
164 bool race_equals(struct DataRace *r1, struct DataRace *r2) {
165         if (r1->numframes != r2->numframes)
166                 return false;
167         for(int i=FIRST_STACK_FRAME;i < r1->numframes;i++) {
168                 if (r1->backtrace[i] != r2->backtrace[i])
169                         return false;
170         }
171         return true;
172 }
173
174 /** This function is called when we detect a data race.*/
175 static struct DataRace * reportDataRace(thread_id_t oldthread, modelclock_t oldclock, bool isoldwrite, ModelAction *newaction, bool isnewwrite, const void *address)
176 {
177         struct DataRace *race = (struct DataRace *)model_malloc(sizeof(struct DataRace));
178         race->oldthread = oldthread;
179         race->oldclock = oldclock;
180         race->isoldwrite = isoldwrite;
181         race->newaction = newaction;
182         race->isnewwrite = isnewwrite;
183         race->address = address;
184         return race;
185 }
186
/**
 * @brief Assert a data race
 *
 * Asserts a data race which is currently realized, causing the execution to
 * end and stashing a message in the model-checker's bug list
 *
 * @param race The race to report
 */
void assert_race(struct DataRace *race)
{
	model_print("Race detected at location: \n");
	/* Symbolize the saved backtrace directly to the model-checker's output fd. */
	backtrace_symbols_fd(race->backtrace, race->numframes, model_out);
	/* NOTE(review): for Access 2 the "clock" column prints the action's
	 * sequence number rather than a clock value — confirm this is intended. */
	model_print("\nData race detected @ address %p:\n"
							"    Access 1: %5s in thread %2d @ clock %3u\n"
							"    Access 2: %5s in thread %2d @ clock %3u\n\n",
							race->address,
							race->isoldwrite ? "write" : "read",
							id_to_int(race->oldthread),
							race->oldclock,
							race->isnewwrite ? "write" : "read",
							id_to_int(race->newaction->get_tid()),
							race->newaction->get_seq_number()
							);
}
211
212 /** This function does race detection for a write on an expanded record. */
213 struct DataRace * fullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
214 {
215         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
216         struct DataRace * race = NULL;
217
218         /* Check for datarace against last read. */
219
220         for (int i = 0;i < record->numReads;i++) {
221                 modelclock_t readClock = record->readClock[i];
222                 thread_id_t readThread = record->thread[i];
223
224                 /* Note that readClock can't actuall be zero here, so it could be
225                          optimized. */
226
227                 if (clock_may_race(currClock, thread, readClock, readThread)) {
228                         /* We have a datarace */
229                         race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
230                         goto Exit;
231                 }
232         }
233
234         /* Check for datarace against last write. */
235
236         {
237                 modelclock_t writeClock = record->writeClock;
238                 thread_id_t writeThread = record->writeThread;
239
240                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
241                         /* We have a datarace */
242                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
243                         goto Exit;
244                 }
245         }
246 Exit:
247         record->numReads = 0;
248         record->writeThread = thread;
249         record->isAtomic = 0;
250         modelclock_t ourClock = currClock->getClock(thread);
251         record->writeClock = ourClock;
252         return race;
253 }
254
/** This function does race detection on a write. */
void raceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read. */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Record this write in the compact encoding.  The atomic bit is
		 * deliberately cleared: this is a plain, non-atomic write. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
		/* Deduplicate by backtrace: report only the first occurrence of a
		 * race at this stack; otherwise release the descriptor. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
321
322
323 /** This function does race detection for a write on an expanded record. */
324 struct DataRace * atomfullRaceCheckWrite(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
325 {
326         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
327         struct DataRace * race = NULL;
328
329         if (record->isAtomic)
330                 goto Exit;
331
332         /* Check for datarace against last read. */
333
334         for (int i = 0;i < record->numReads;i++) {
335                 modelclock_t readClock = record->readClock[i];
336                 thread_id_t readThread = record->thread[i];
337
338                 /* Note that readClock can't actuall be zero here, so it could be
339                          optimized. */
340
341                 if (clock_may_race(currClock, thread, readClock, readThread)) {
342                         /* We have a datarace */
343                         race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
344                         goto Exit;
345                 }
346         }
347
348         /* Check for datarace against last write. */
349
350         {
351                 modelclock_t writeClock = record->writeClock;
352                 thread_id_t writeThread = record->writeThread;
353
354                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
355                         /* We have a datarace */
356                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
357                         goto Exit;
358                 }
359         }
360 Exit:
361         record->numReads = 0;
362         record->writeThread = thread;
363         record->isAtomic = 1;
364         modelclock_t ourClock = currClock->getClock(thread);
365         record->writeClock = ourClock;
366         return race;
367 }
368
/** This function does race detection on a write. */
void atomraceCheckWrite(thread_id_t thread, void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;
	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = atomfullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Can't race with atomic */
		if (shadowval & ATOMICMASK)
			goto ShadowExit;

		{
			/* Check for datarace against last read. */

			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */

			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Record this atomic write in the compact encoding (atomic bit set). */
		*shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
	}

Exit:
	if (race) {
		/* Deduplicate by backtrace before asserting; free duplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
438
439 /** This function does race detection for a write on an expanded record. */
440 void fullRecordWrite(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
441         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
442         record->numReads = 0;
443         record->writeThread = thread;
444         modelclock_t ourClock = currClock->getClock(thread);
445         record->writeClock = ourClock;
446         record->isAtomic = 1;
447 }
448
449 /** This function does race detection for a write on an expanded record. */
450 void fullRecordWriteNonAtomic(thread_id_t thread, void *location, uint64_t *shadow, ClockVector *currClock) {
451         struct RaceRecord *record = (struct RaceRecord *)(*shadow);
452         record->numReads = 0;
453         record->writeThread = thread;
454         modelclock_t ourClock = currClock->getClock(thread);
455         record->writeClock = ourClock;
456         record->isAtomic = 0;
457 }
458
459 /** This function just updates metadata on atomic write. */
460 void recordWrite(thread_id_t thread, void *location) {
461         uint64_t *shadow = lookupAddressEntry(location);
462         uint64_t shadowval = *shadow;
463         ClockVector *currClock = get_execution()->get_cv(thread);
464         /* Do full record */
465         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
466                 fullRecordWrite(thread, location, shadow, currClock);
467                 return;
468         }
469
470         int threadid = id_to_int(thread);
471         modelclock_t ourClock = currClock->getClock(thread);
472
473         /* Thread ID is too large or clock is too large. */
474         if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
475                 expandRecord(shadow);
476                 fullRecordWrite(thread, location, shadow, currClock);
477                 return;
478         }
479
480         *shadow = ENCODEOP(0, 0, threadid, ourClock) | ATOMICMASK;
481 }
482
483 /** This function just updates metadata on atomic write. */
484 void recordCalloc(void *location, size_t size) {
485         thread_id_t thread = thread_current()->get_id();
486         for(;size != 0;size--) {
487                 uint64_t *shadow = lookupAddressEntry(location);
488                 uint64_t shadowval = *shadow;
489                 ClockVector *currClock = get_execution()->get_cv(thread);
490                 /* Do full record */
491                 if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
492                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
493                         return;
494                 }
495
496                 int threadid = id_to_int(thread);
497                 modelclock_t ourClock = currClock->getClock(thread);
498
499                 /* Thread ID is too large or clock is too large. */
500                 if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
501                         expandRecord(shadow);
502                         fullRecordWriteNonAtomic(thread, location, shadow, currClock);
503                         return;
504                 }
505
506                 *shadow = ENCODEOP(0, 0, threadid, ourClock);
507                 location = (void *)(((char *) location) + 1);
508         }
509 }
510
511
512
/** This function does race detection on a read for an expanded record. */
struct DataRace * fullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
{
	struct RaceRecord *record = (struct RaceRecord *) (*shadow);
	struct DataRace * race = NULL;
	/* Check for datarace against last write. */

	modelclock_t writeClock = record->writeClock;
	thread_id_t writeThread = record->writeThread;

	if (clock_may_race(currClock, thread, writeClock, writeThread)) {
		/* We have a datarace */
		race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
	}

	/* Shorten vector when possible */

	int copytoindex = 0;

	/* Compact the read list in place, dropping reads subsumed by this one. */
	for (int i = 0;i < record->numReads;i++) {
		modelclock_t readClock = record->readClock[i];
		thread_id_t readThread = record->thread[i];

		/*  Note that is not really a datarace check as reads cannot
				actually race.  It is just determining that this read subsumes
				another in the sense that either this read races or neither
				read races. Note that readClock can't actually be zero, so it
				could be optimized.  */

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* Still need this read in vector */
			if (copytoindex != i) {
				ASSERT(record->thread[i] >= 0);
				record->readClock[copytoindex] = record->readClock[i];
				record->thread[copytoindex] = record->thread[i];
			}
			copytoindex++;
		}
	}

	/* Capacity management: capacity is not stored explicitly — the arrays are
	 * full exactly when the live count is 0 or a power of two, hence the
	 * popcount test below. */
	if (__builtin_popcount(copytoindex) <= 1) {
		if (copytoindex == 0) {
			/* NOTE(review): any previously-allocated arrays are overwritten
			 * here without snapshot_free — presumably tolerated because this
			 * is snapshot-managed memory, but it looks like a leak; confirm. */
			int newCapacity = INITCAPACITY;
			record->thread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			record->readClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
		} else if (copytoindex>=INITCAPACITY) {
			/* Arrays are full: double the capacity and copy the live entries. */
			int newCapacity = copytoindex * 2;
			thread_id_t *newthread = (thread_id_t *)snapshot_malloc(sizeof(thread_id_t) * newCapacity);
			modelclock_t *newreadClock = (modelclock_t *)snapshot_malloc(sizeof(modelclock_t) * newCapacity);
			std::memcpy(newthread, record->thread, copytoindex * sizeof(thread_id_t));
			std::memcpy(newreadClock, record->readClock, copytoindex * sizeof(modelclock_t));
			snapshot_free(record->readClock);
			snapshot_free(record->thread);
			record->readClock = newreadClock;
			record->thread = newthread;
		}
	}

	/* Append the current read to the (possibly compacted) list. */
	modelclock_t ourClock = currClock->getClock(thread);

	ASSERT(thread >= 0);
	record->thread[copytoindex] = thread;
	record->readClock[copytoindex] = ourClock;
	record->numReads = copytoindex + 1;
	return race;
}
579
/** This function does race detection on a read. */
void raceCheckRead(thread_id_t thread, const void *location)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
			goto ShadowExit;
		}

		/* Note: the goto above is effectively a fall-through — whether or not
		 * a write race was found, we still decide below if this read subsumes
		 * the recorded read before updating the compact encoding. */
ShadowExit:
		{
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We don't subsume this read... Have to expand record. */
				expandRecord(shadow);
				fullRaceCheckRead(thread, location, shadow, currClock);
				goto Exit;
			}
		}

		/* Replace the recorded read with this one; keep the write fields and
		 * the atomic bit unchanged. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
		/* Deduplicate by backtrace before asserting; free duplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
642
643
644 /** This function does race detection on a read for an expanded record. */
645 struct DataRace * atomfullRaceCheckRead(thread_id_t thread, const void *location, uint64_t *shadow, ClockVector *currClock)
646 {
647         struct RaceRecord *record = (struct RaceRecord *) (*shadow);
648         struct DataRace * race = NULL;
649         /* Check for datarace against last write. */
650         if (record->isAtomic)
651                 return NULL;
652
653         modelclock_t writeClock = record->writeClock;
654         thread_id_t writeThread = record->writeThread;
655
656         if (clock_may_race(currClock, thread, writeClock, writeThread)) {
657                 /* We have a datarace */
658                 race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
659         }
660         return race;
661 }
662
663 /** This function does race detection on a read. */
664 void atomraceCheckRead(thread_id_t thread, const void *location)
665 {
666         uint64_t *shadow = lookupAddressEntry(location);
667         uint64_t shadowval = *shadow;
668         ClockVector *currClock = get_execution()->get_cv(thread);
669         if (currClock == NULL)
670                 return;
671
672         struct DataRace * race = NULL;
673
674         /* Do full record */
675         if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
676                 race = atomfullRaceCheckRead(thread, location, shadow, currClock);
677                 goto Exit;
678         }
679
680         if (shadowval & ATOMICMASK)
681                 return;
682
683         {
684                 /* Check for datarace against last write. */
685
686                 modelclock_t writeClock = WRITEVECTOR(shadowval);
687                 thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));
688
689                 if (clock_may_race(currClock, thread, writeClock, writeThread)) {
690                         /* We have a datarace */
691                         race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
692                         goto Exit;
693                 }
694
695
696         }
697 Exit:
698         if (race) {
699                 race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
700                 if (raceset->add(race))
701                         assert_race(race);
702                 else model_free(race);
703         }
704 }
705
/* First-iteration variant of the read race check: behaves like raceCheckRead
 * but additionally reports the before/after shadow encodings through
 * old_val/new_val (only on the successful compact-encoding path) so that
 * later iterations can short-circuit.  Returns the shadow-word address. */
static inline uint64_t * raceCheckRead_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;

	struct DataRace * race = NULL;

	/* Do full record */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */

		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			/* Append this read directly at index 1.
			 * NOTE(review): assumes expandRecord left exactly one read
			 * (numReads == 1) with array capacity >= 2 — readClock != 0 on
			 * this path makes that hold provided INITCAPACITY >= 2; confirm. */
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		/* Replace the recorded read with this one; keep the write fields and
		 * the atomic bit unchanged, and report both encodings to the caller. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);

		*old_val = shadowval;
		*new_val = *shadow;
	}
Exit:
	if (race) {
		/* Deduplicate by backtrace before asserting; free duplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}

	return shadow;
}
773
/* Read check for a subsequent byte of a multi-byte access (same logic as
 * raceCheckRead_firstIt, but does not export the old/new shadow words since
 * no fast-path decision follows). */
static inline void raceCheckRead_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* Thread has no clock vector yet; nothing to check */

	struct DataRace * race = NULL;

	/* Do full record: nonzero entry without the short-record bit is a
	 * pointer to an expanded RaceRecord. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckRead(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large for the compact
		 * encoding; fall back to the expanded record. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckRead(thread, location, shadow, currClock);
			goto Exit;
		}

		/* Check for datarace against last write. */
		modelclock_t writeClock = WRITEVECTOR(shadowval);
		thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

		if (clock_may_race(currClock, thread, writeClock, writeThread)) {
			/* We have a datarace */
			race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), false, location);
		}

		modelclock_t readClock = READVECTOR(shadowval);
		thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

		if (clock_may_race(currClock, thread, readClock, readThread)) {
			/* We don't subsume this read... Have to expand record. */
			expandRecord(shadow);
			struct RaceRecord *record = (struct RaceRecord *) (*shadow);
			record->thread[1] = thread;
			record->readClock[1] = ourClock;
			record->numReads++;

			goto Exit;
		}

		/* Record this read as the last read, keep the previous write
		 * info, and preserve the atomic flag bit. */
		*shadow = ENCODEOP(threadid, ourClock, id_to_int(writeThread), writeClock) | (shadowval & ATOMICMASK);
	}
Exit:
	if (race) {
		/* Capture a backtrace for reporting; raceset deduplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
835
836
837
838 void raceCheckRead64(thread_id_t thread, const void *location)
839 {
840         uint64_t old_shadowval, new_shadowval;
841         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
842
843
844         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
845         if (CHECKBOUNDARY(location, 7)) {
846                 if (shadow[1]==old_shadowval)
847                         shadow[1] = new_shadowval;
848                 else goto L1;
849                 if (shadow[2]==old_shadowval)
850                         shadow[2] = new_shadowval;
851                 else goto L2;
852                 if (shadow[3]==old_shadowval)
853                         shadow[3] = new_shadowval;
854                 else goto L3;
855                 if (shadow[4]==old_shadowval)
856                         shadow[4] = new_shadowval;
857                 else goto L4;
858                 if (shadow[5]==old_shadowval)
859                         shadow[5] = new_shadowval;
860                 else goto L5;
861                 if (shadow[6]==old_shadowval)
862                         shadow[6] = new_shadowval;
863                 else goto L6;
864                 if (shadow[7]==old_shadowval)
865                         shadow[7] = new_shadowval;
866                 else goto L7;
867                 return;
868         }
869
870 L1:
871         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
872 L2:
873         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
874 L3:
875         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
876 L4:
877         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
878 L5:
879         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
880 L6:
881         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
882 L7:
883         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
884 }
885
886 void raceCheckRead32(thread_id_t thread, const void *location)
887 {
888         uint64_t old_shadowval, new_shadowval;
889         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
890
891
892         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
893         if (CHECKBOUNDARY(location, 3)) {
894                 if (shadow[1]==old_shadowval)
895                         shadow[1] = new_shadowval;
896                 else goto L1;
897                 if (shadow[2]==old_shadowval)
898                         shadow[2] = new_shadowval;
899                 else goto L2;
900                 if (shadow[3]==old_shadowval)
901                         shadow[3] = new_shadowval;
902                 else goto L3;
903                 return;
904         }
905
906 L1:
907         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
908 L2:
909         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
910 L3:
911         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
912 }
913
914 void raceCheckRead16(thread_id_t thread, const void *location)
915 {
916         uint64_t old_shadowval, new_shadowval;
917         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
918
919
920         uint64_t * shadow = raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
921         if (CHECKBOUNDARY(location, 1)) {
922                 if (shadow[1]==old_shadowval) {
923                         shadow[1] = new_shadowval;
924                         return;
925                 }
926         }
927         raceCheckRead_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
928 }
929
930 void raceCheckRead8(thread_id_t thread, const void *location)
931 {
932         uint64_t old_shadowval, new_shadowval;
933         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
934
935
936         raceCheckRead_firstIt(thread, location, &old_shadowval, &new_shadowval);
937 }
938
/* First-iteration (lowest-address byte) write check for a multi-byte store.
 * Checks `location` for races against both the last read and the last write
 * recorded in its shadow entry, then installs this write as the new
 * last-write.  The pre-/post-update shadow words are exported via
 * old_val/new_val so the caller can fast-path neighboring bytes.  Returns
 * the shadow entry pointer for `location`. */
static inline uint64_t * raceCheckWrite_firstIt(thread_id_t thread, const void * location, uint64_t *old_val, uint64_t *new_val)
{
	uint64_t *shadow = lookupAddressEntry(location);
	uint64_t shadowval = *shadow;
	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return shadow;	/* Thread has no clock vector yet; nothing to check */

	struct DataRace * race = NULL;
	/* Do full record: nonzero entry without the short-record bit is a
	 * pointer to an expanded RaceRecord. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large for the compact
		 * encoding; fall back to the expanded record. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read.  On a race we
			 * skip the write-write check and go straight to the
			 * shadow update (only the first race found is kept). */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Install this write as the new last-write and clear the read
		 * vector.  NOTE(review): unlike the read path, the ATOMICMASK
		 * bit of shadowval is NOT preserved here — confirm intentional. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);

		*old_val = shadowval;
		*new_val = *shadow;
	}

Exit:
	if (race) {
		/* Capture a backtrace for reporting; only assert races we have
		 * not seen before (raceset deduplicates). */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}

	return shadow;
}
1006
/* Write check for a subsequent byte of a multi-byte store (same logic as
 * raceCheckWrite_firstIt, but does not export the old/new shadow words since
 * no fast-path decision follows). */
static inline void raceCheckWrite_otherIt(thread_id_t thread, const void * location) {
	uint64_t *shadow = lookupAddressEntry(location);

	uint64_t shadowval = *shadow;

	ClockVector *currClock = get_execution()->get_cv(thread);
	if (currClock == NULL)
		return;	/* Thread has no clock vector yet; nothing to check */

	struct DataRace * race = NULL;
	/* Do full record: nonzero entry without the short-record bit is a
	 * pointer to an expanded RaceRecord. */
	if (shadowval != 0 && !ISSHORTRECORD(shadowval)) {
		race = fullRaceCheckWrite(thread, location, shadow, currClock);
		goto Exit;
	}

	{
		int threadid = id_to_int(thread);
		modelclock_t ourClock = currClock->getClock(thread);

		/* Thread ID is too large or clock is too large for the compact
		 * encoding; fall back to the expanded record. */
		if (threadid > MAXTHREADID || ourClock > MAXWRITEVECTOR) {
			expandRecord(shadow);
			race = fullRaceCheckWrite(thread, location, shadow, currClock);
			goto Exit;
		}

		{
			/* Check for datarace against last read; on a race, skip
			 * the write-write check (only the first race is kept). */
			modelclock_t readClock = READVECTOR(shadowval);
			thread_id_t readThread = int_to_id(RDTHREADID(shadowval));

			if (clock_may_race(currClock, thread, readClock, readThread)) {
				/* We have a datarace */
				race = reportDataRace(readThread, readClock, false, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

		{
			/* Check for datarace against last write. */
			modelclock_t writeClock = WRITEVECTOR(shadowval);
			thread_id_t writeThread = int_to_id(WRTHREADID(shadowval));

			if (clock_may_race(currClock, thread, writeClock, writeThread)) {
				/* We have a datarace */
				race = reportDataRace(writeThread, writeClock, true, get_execution()->get_parent_action(thread), true, location);
				goto ShadowExit;
			}
		}

ShadowExit:
		/* Install this write as the new last-write and clear the read
		 * vector.  NOTE(review): ATOMICMASK bit not preserved here,
		 * unlike the read path — confirm intentional. */
		*shadow = ENCODEOP(0, 0, threadid, ourClock);
	}

Exit:
	if (race) {
		/* Capture a backtrace for reporting; raceset deduplicates. */
		race->numframes=backtrace(race->backtrace, sizeof(race->backtrace)/sizeof(void*));
		if (raceset->add(race))
			assert_race(race);
		else model_free(race);
	}
}
1070
1071 void raceCheckWrite64(thread_id_t thread, const void *location)
1072 {
1073         uint64_t old_shadowval, new_shadowval;
1074         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1075
1076
1077         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1078         if (CHECKBOUNDARY(location, 7)) {
1079                 if (shadow[1]==old_shadowval)
1080                         shadow[1] = new_shadowval;
1081                 else goto L1;
1082                 if (shadow[2]==old_shadowval)
1083                         shadow[2] = new_shadowval;
1084                 else goto L2;
1085                 if (shadow[3]==old_shadowval)
1086                         shadow[3] = new_shadowval;
1087                 else goto L3;
1088                 if (shadow[4]==old_shadowval)
1089                         shadow[4] = new_shadowval;
1090                 else goto L4;
1091                 if (shadow[5]==old_shadowval)
1092                         shadow[5] = new_shadowval;
1093                 else goto L5;
1094                 if (shadow[6]==old_shadowval)
1095                         shadow[6] = new_shadowval;
1096                 else goto L6;
1097                 if (shadow[7]==old_shadowval)
1098                         shadow[7] = new_shadowval;
1099                 else goto L7;
1100                 return;
1101         }
1102
1103 L1:
1104         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1105 L2:
1106         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1107 L3:
1108         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1109 L4:
1110         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 4));
1111 L5:
1112         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 5));
1113 L6:
1114         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 6));
1115 L7:
1116         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 7));
1117 }
1118
1119 void raceCheckWrite32(thread_id_t thread, const void *location)
1120 {
1121         uint64_t old_shadowval, new_shadowval;
1122         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1123
1124
1125         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1126         if (CHECKBOUNDARY(location, 3)) {
1127                 if (shadow[1]==old_shadowval)
1128                         shadow[1] = new_shadowval;
1129                 else goto L1;
1130                 if (shadow[2]==old_shadowval)
1131                         shadow[2] = new_shadowval;
1132                 else goto L2;
1133                 if (shadow[3]==old_shadowval)
1134                         shadow[3] = new_shadowval;
1135                 else goto L3;
1136                 return;
1137         }
1138
1139 L1:
1140         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1141 L2:
1142         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 2));
1143 L3:
1144         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 3));
1145 }
1146
1147 void raceCheckWrite16(thread_id_t thread, const void *location)
1148 {
1149         uint64_t old_shadowval, new_shadowval;
1150         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1151
1152
1153         uint64_t * shadow = raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1154         if (CHECKBOUNDARY(location, 1)) {
1155                 if (shadow[1]==old_shadowval) {
1156                         shadow[1] = new_shadowval;
1157                         return;
1158                 }
1159         }
1160         raceCheckWrite_otherIt(thread, (const void *)(((uintptr_t)location) + 1));
1161 }
1162
1163 void raceCheckWrite8(thread_id_t thread, const void *location)
1164 {
1165         uint64_t old_shadowval, new_shadowval;
1166         old_shadowval = new_shadowval = INVALIDSHADOWVAL;
1167
1168
1169         raceCheckWrite_firstIt(thread, location, &old_shadowval, &new_shadowval);
1170 }