//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to the run-time library before every memory access.
//     - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//
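
// Illustrative example (added comment): a plain 4-byte store
//   store i32 %v, i32* %p, align 4
// is rewritten by this pass into
//   %p.i8 = bitcast i32* %p to i8*
//   call void @__tsan_write4(i8* %p.i8)
//   store i32 %v, i32* %p, align 4
// so the run-time library can record the access and report conflicting
// unsynchronized accesses from other threads.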

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "tsan"

static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
  ThreadSanitizer() : FunctionPass(ID) {}
  const char *getPassName() const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);
  bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
  bool instrumentAtomic(Instruction *I, const DataLayout &DL);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
                                      SmallVectorImpl<Instruction *> &All,
                                      const DataLayout &DL);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);

  Type *IntptrTy;
  IntegerType *OrdTy;
  // Callbacks to run-time library are computed in doInitialization.
  Function *TsanFuncEntry;
  Function *TsanFuncExit;
  // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
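  // For example, a 4-byte access maps to index countTrailingZeros(4) == 2,
  // so TsanRead[2] / TsanWrite[2] hold __tsan_read4 / __tsan_write4
  // (see getMemoryAccessFuncIndex below).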
  Function *TsanRead[kNumberOfAccessSizes];
  Function *TsanWrite[kNumberOfAccessSizes];
  Function *TsanUnalignedRead[kNumberOfAccessSizes];
  Function *TsanUnalignedWrite[kNumberOfAccessSizes];
  Function *TsanAtomicLoad[kNumberOfAccessSizes];
  Function *TsanAtomicStore[kNumberOfAccessSizes];
  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
  Function *TsanAtomicCAS[kNumberOfAccessSizes];
  Function *TsanAtomicThreadFence;
  Function *TsanAtomicSignalFence;
  Function *TsanVptrUpdate;
  Function *TsanVptrLoad;
  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
};

}  // namespace

char ThreadSanitizer::ID = 0;
INITIALIZE_PASS(ThreadSanitizer, "tsan",
                "ThreadSanitizer: detects data races.",
                false, false)

const char *ThreadSanitizer::getPassName() const {
  return "ThreadSanitizer";
}

FunctionPass *llvm::createThreadSanitizerPass() {
  return new ThreadSanitizer();
}

static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (Function *F = dyn_cast<Function>(FuncOrBitcast))
    return F;
  FuncOrBitcast->dump();
  report_fatal_error("ThreadSanitizer interface function redefined");
}

void ThreadSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(M.getContext());
  // Initialize the callbacks.
  TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
  TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_exit", IRB.getVoidTy(), nullptr));
  OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const size_t ByteSize = 1 << i;
    const size_t BitSize = ByteSize * 8;
    SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
    TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
    TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<64> UnalignedReadName("__tsan_unaligned_read" +
                                      itostr(ByteSize));
    TsanUnalignedRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        UnalignedReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    SmallString<64> UnalignedWriteName("__tsan_unaligned_write" +
                                       itostr(ByteSize));
    TsanUnalignedWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        UnalignedWriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
                                   "_load");
    TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicLoadName, Ty, PtrTy, OrdTy, nullptr));

    SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
                                    "_store");
    TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy, nullptr));

    for (int op = AtomicRMWInst::FIRST_BINOP;
         op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = nullptr;
      const char *NamePart = nullptr;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;  // No run-time callback for the remaining RMW operations.
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
          RMWName, Ty, PtrTy, Ty, OrdTy, nullptr));
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, nullptr));
  }
  TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), nullptr));
  TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), nullptr));
  TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, nullptr));
  TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, nullptr));

  MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, nullptr));
  MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IntptrTy, nullptr));
  MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
      IntptrTy, nullptr));
}

bool ThreadSanitizer::doInitialization(Module &M) {
  const DataLayout &DL = M.getDataLayout();

  // Always insert a call to __tsan_init into the module's CTORs.
  IRBuilder<> IRB(M.getContext());
  IntptrTy = IRB.getIntPtrTy(DL);
  Value *TsanInit = M.getOrInsertFunction("__tsan_init",
                                          IRB.getVoidTy(), nullptr);
  appendToGlobalCtors(M, cast<Function>(TsanInit), 0);

  return true;
}

static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals cannot race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer cannot race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//  - variables that are not captured
//
// We do not handle some of the patterns that should not survive
// the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
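//
// Illustrative example (added comment): for "x++", which lowers to a load of
// x immediately followed by a store to x in the same basic block, only the
// store is instrumented; the load is counted in NumOmittedReadsBeforeWrite.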
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
    const DataLayout &DL) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it cannot race with any
        // writes.
        continue;
      }
    }
    Value *Addr = isa<StoreInst>(*I)
        ? cast<StoreInst>(I)->getPointerOperand()
        : cast<LoadInst>(I)->getPointerOperand();
    if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
        !PointerMayBeCaptured(Addr, true, true)) {
      // The variable is addressable but not captured, so it cannot be
      // referenced from a different thread and participate in a data race
      // (see llvm/Analysis/CaptureTracking.h for details).
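      // Illustrative example (added comment): a local "int buf[16]" whose
      // address is never stored or passed out of the function cannot be
      // accessed by another thread, so it needs no instrumentation.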
      NumOmittedNonCaptured++;
      continue;
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSynchScope() == CrossThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSynchScope() == CrossThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

bool ThreadSanitizer::runOnFunction(Function &F) {
  initializeCallbacks(*F.getParent());
  SmallVector<Instruction*, 8> RetVec;
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;
  bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeThread);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (isAtomic(&Inst))
        AtomicAccesses.push_back(&Inst);
      else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
        LocalLoadsAndStores.push_back(&Inst);
      else if (isa<ReturnInst>(Inst))
        RetVec.push_back(&Inst);
      else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
        if (isa<MemIntrinsic>(Inst))
          MemIntrinCalls.push_back(&Inst);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores,
                                       DL);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses only if we want to report bugs in the function.
  if (ClInstrumentMemoryAccesses && SanitizeFunction)
    for (auto Inst : AllLoadsAndStores) {
      Res |= instrumentLoadOrStore(Inst, DL);
    }

  // Instrument atomic memory accesses in any case (they can be used to
  // implement synchronization).
  if (ClInstrumentAtomics)
    for (auto Inst : AtomicAccesses) {
      Res |= instrumentAtomic(Inst, DL);
    }

  if (ClInstrumentMemIntrinsics && SanitizeFunction)
    for (auto Inst : MemIntrinCalls) {
      Res |= instrumentMemIntrinsic(Inst);
    }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);
    for (auto RetInst : RetVec) {
      IRBuilder<> IRBRet(RetInst);
      IRBRet.CreateCall(TsanFuncExit);
    }
    Res = true;
  }
  return Res;
}

bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
                                            const DataLayout &DL) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();
  int Idx = getMemoryAccessFuncIndex(Addr, DL);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall2(TsanVptrUpdate,
                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  const unsigned Alignment = IsWrite
      ? cast<StoreInst>(I)->getAlignment()
      : cast<LoadInst>(I)->getAlignment();
  Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
  const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  Value *OnAccessFunc = nullptr;
  if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0)
    OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  else
    OnAccessFunc = IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else NumInstrumentedReads++;
  return true;
}
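
// Encodes an LLVM atomic ordering as the integer the __tsan_atomic*
// callbacks expect; the values follow the C++11 memory_order enumeration
// (e.g. Monotonic -> 0 == memory_order_relaxed, Acquire -> 2 ==
// memory_order_acquire, SequentiallyConsistent -> 5 == memory_order_seq_cst).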
static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic: llvm_unreachable("unexpected atomic ordering!");
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
    // case Consume:             v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 3; break;
    case AcquireRelease:         v = 4; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated);
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan runs after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
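//
// Illustrative example (added comment, assuming the usual intrinsic lowering):
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %n, i32 4, i1 false)
// becomes
//   call i8* @memset(i8* %p, i32 0, i64 %n)
// which the run-time's memset interceptor can observe and check for races.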
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall3(MemsetFn,
        IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  }
  return false;
}

// Both LLVM and ThreadSanitizer atomic operations are based on the C++11/C11
// memory model. For background see the C++11 standard. A slightly older,
// publicly available draft of the standard (not entirely up-to-date, but
// close enough for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
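//
// Illustrative example (added comment):
//   %v = load atomic i32, i32* %p acquire, align 4
// is replaced with
//   %v = call i32 @__tsan_atomic32_load(i32* %p, i32 2)
// where the constant 2 encodes memory_order_acquire (see createOrdering).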
bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicLoad[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx], Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (!F)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr, DL);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
                     createOrdering(&IRB, CASI->getSuccessOrdering()),
                     createOrdering(&IRB, CASI->getFailureOrdering())};
    CallInst *C = IRB.CreateCall(TsanAtomicCAS[Idx], Args);
    Value *Success = IRB.CreateICmpEQ(C, CASI->getCompareOperand());

    Value *Res = IRB.CreateInsertValue(UndefValue::get(CASI->getType()), C, 0);
    Res = IRB.CreateInsertValue(Res, Success, 1);

    I->replaceAllUsesWith(Res);
    I->eraseFromParent();
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    Function *F = FI->getSynchScope() == SingleThread ?
        TsanAtomicSignalFence : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, Args);
    ReplaceInstWithInst(I, C);
  }
  return true;
}

int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr,
                                              const DataLayout &DL) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8 && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}