//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development, for the details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to run-time library before every memory access.
//     - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
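//
// Illustrative example (not taken from a real test): a plain 4-byte load
//   %val = load i32* %ptr
// is rewritten by this pass into
//   call void @__tsan_read4(i8* %ptr.i8)   ; %ptr.i8 is %ptr cast to i8*
//   %val = load i32* %ptr
// so the run-time library observes the access just before it happens.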
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"
using namespace llvm;

#define DEBUG_TYPE "tsan"
static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
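// These are hidden developer flags; they can be toggled when running the pass
// directly through opt, or from the clang driver via -mllvm
// (e.g. -mllvm -tsan-instrument-memory-accesses=0).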
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
namespace {

/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
  ThreadSanitizer() : FunctionPass(ID), DL(nullptr) {}
  const char *getPassName() const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);
  bool instrumentLoadOrStore(Instruction *I);
  bool instrumentAtomic(Instruction *I);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
                                      SmallVectorImpl<Instruction*> &All);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr);

  const DataLayout *DL;
  Type *IntptrTy;
  IntegerType *OrdTy;
  // Callbacks to run-time library are computed in doInitialization.
  Function *TsanFuncEntry;
  Function *TsanFuncExit;
  // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
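  // Each callback array is indexed by log2(access size in bytes), so index i
  // corresponds to a 2^i-byte access (e.g. TsanRead[2] is __tsan_read4).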
  Function *TsanRead[kNumberOfAccessSizes];
  Function *TsanWrite[kNumberOfAccessSizes];
  Function *TsanAtomicLoad[kNumberOfAccessSizes];
  Function *TsanAtomicStore[kNumberOfAccessSizes];
  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
  Function *TsanAtomicCAS[kNumberOfAccessSizes];
  Function *TsanAtomicThreadFence;
  Function *TsanAtomicSignalFence;
  Function *TsanVptrUpdate;
  Function *TsanVptrLoad;
  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
};
}  // namespace

char ThreadSanitizer::ID = 0;
INITIALIZE_PASS(ThreadSanitizer, "tsan",
    "ThreadSanitizer: detects data races.",
    false, false)

const char *ThreadSanitizer::getPassName() const {
  return "ThreadSanitizer";
}

FunctionPass *llvm::createThreadSanitizerPass() {
  return new ThreadSanitizer();
}

static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (Function *F = dyn_cast<Function>(FuncOrBitcast))
    return F;
  FuncOrBitcast->dump();
  report_fatal_error("ThreadSanitizer interface function redefined");
}

void ThreadSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(M.getContext());
  // Initialize the callbacks.
  TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
  TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_exit", IRB.getVoidTy(), NULL));
  OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const size_t ByteSize = 1 << i;
    const size_t BitSize = ByteSize * 8;
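    // The run-time callbacks are specialized by access size: __tsan_readN and
    // __tsan_writeN are named after the size in bytes, while the atomic
    // callbacks (__tsan_atomicN_*) are named after the size in bits.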
    SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
    TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));

    SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
    TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
                                   "_load");
    TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicLoadName, Ty, PtrTy, OrdTy, NULL));

    SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
                                    "_store");
    TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
        NULL));
    for (int op = AtomicRMWInst::FIRST_BINOP;
        op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = nullptr;
      const char *NamePart = nullptr;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
          RMWName, Ty, PtrTy, Ty, OrdTy, NULL));
    }
    SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, NULL));
  }
  TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), NULL));
  TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
  TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
  TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));

  MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, NULL));
  MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IntptrTy, NULL));
  MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
      IntptrTy, NULL));
}

bool ThreadSanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  // Always insert a call to __tsan_init into the module's CTORs.
  IRBuilder<> IRB(M.getContext());
  IntptrTy = IRB.getIntPtrTy(DL);
  Value *TsanInit = M.getOrInsertFunction("__tsan_init",
                                          IRB.getVoidTy(), NULL);
  appendToGlobalCtors(M, cast<Function>(TsanInit), 0);

  return true;
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals can not race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer can not race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
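//
// Illustrative example: in "int t = x; x = t + 1;" the read of x is not
// instrumented, because the following write to x in the same BB will be, and
// the instrumented write is enough for the run-time to report a race on x.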
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction*> &Local,
    SmallVectorImpl<Instruction*> &All) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any writes.
        continue;
      }
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSynchScope() == CrossThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSynchScope() == CrossThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

bool ThreadSanitizer::runOnFunction(Function &F) {
  if (!DL) return false;
  initializeCallbacks(*F.getParent());
  SmallVector<Instruction*, 8> RetVec;
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<ReturnInst>(Inst))
        RetVec.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (auto Inst : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(Inst);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);
    for (auto RetInst : RetVec) {
      IRBuilder<> IRBRet(RetInst);
      IRBRet.CreateCall(TsanFuncExit);
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();
  int Idx = getMemoryAccessFuncIndex(Addr);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall2(TsanVptrUpdate,
                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else         NumInstrumentedReads++;
  return true;
}

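// The integer values produced below follow the memory-order encoding used by
// the tsan run-time's atomic interface (0 = relaxed/monotonic, 2 = acquire,
// 3 = release, 4 = acq_rel, 5 = seq_cst); 1 is reserved for consume.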
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic:              assert(false);
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
    // case Consume:                v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 3; break;
    case AcquireRelease:         v = 4; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
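//
// Illustrative example: a call to @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n,
// i32 1, i1 false) is replaced with a plain call to @memset(i8* %p, i32 0,
// i64 %n), which the tsan run-time intercepts like any other libc call.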
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall3(MemsetFn,
      IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
      IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards. For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
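//
// Illustrative example: "load atomic i32* %p acquire, align 4" is replaced
// with "call i32 @__tsan_atomic32_load(i32* %p, i32 2)", where 2 is the
// createOrdering() encoding of acquire.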
bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicLoad[Idx],
                                   ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);

  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx],
                                   ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());

    Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    Function *F = FI->getSynchScope() == SingleThread ?
        TsanAtomicSignalFence : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  }
  return true;
}

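// Maps the pointee type's store size to an index into the size-specialized
// callback arrays: 1 byte -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4. Accesses of
// any other size are counted in NumAccessesWithBadSize and left
// uninstrumented (callers check for a negative return value).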
int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8  && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}