//===-- ThreadSanitizer.cpp - race detector -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer, a race detector.
//
// The tool is under development, for the details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to run-time library before every memory access.
//      - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//
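//
// For illustration only (a sketch of the intended effect, not code from this
// pass): a plain 4-byte load such as
//     %v = load i32* %p
// ends up preceded by a call into the run-time library,
//     %0 = bitcast i32* %p to i8*
//     call void @__tsan_read4(i8* %0)
//     %v = load i32* %p
// and, when anything in a function was instrumented, that function also gets
// a __tsan_func_entry call on entry and a __tsan_func_exit call before every
// return.
//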
#define DEBUG_TYPE "tsan"

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"

using namespace llvm;

static cl::opt<std::string> ClBlacklistFile("tsan-blacklist",
    cl::desc("Blacklist file"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemoryAccesses(
    "tsan-instrument-memory-accesses", cl::init(true),
    cl::desc("Instrument memory accesses"), cl::Hidden);
static cl::opt<bool> ClInstrumentFuncEntryExit(
    "tsan-instrument-func-entry-exit", cl::init(true),
    cl::desc("Instrument function entry and exit"), cl::Hidden);
static cl::opt<bool> ClInstrumentAtomics(
    "tsan-instrument-atomics", cl::init(true),
    cl::desc("Instrument atomics"), cl::Hidden);
static cl::opt<bool> ClInstrumentMemIntrinsics(
    "tsan-instrument-memintrinsics", cl::init(true),
    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);

STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");

namespace {

/// ThreadSanitizer: instrument the code in module to find races.
struct ThreadSanitizer : public FunctionPass {
  ThreadSanitizer(StringRef BlacklistFile = StringRef())
      : FunctionPass(ID),
        DL(0),
        BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
                                            : BlacklistFile) { }
  const char *getPassName() const override;
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);
  bool instrumentLoadOrStore(Instruction *I);
  bool instrumentAtomic(Instruction *I);
  bool instrumentMemIntrinsic(Instruction *I);
  void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
                                      SmallVectorImpl<Instruction*> &All);
  bool addrPointsToConstantData(Value *Addr);
  int getMemoryAccessFuncIndex(Value *Addr);

  const DataLayout *DL;
  Type *IntptrTy;
  IntegerType *OrdTy;
  SmallString<64> BlacklistFile;
  OwningPtr<SpecialCaseList> BL;
  // Callbacks to run-time library are computed in doInitialization.
  Function *TsanFuncEntry;
  Function *TsanFuncExit;
  // Accesses sizes are powers of two: 1, 2, 4, 8, 16.
  static const size_t kNumberOfAccessSizes = 5;
  Function *TsanRead[kNumberOfAccessSizes];
  Function *TsanWrite[kNumberOfAccessSizes];
  Function *TsanAtomicLoad[kNumberOfAccessSizes];
  Function *TsanAtomicStore[kNumberOfAccessSizes];
  Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
  Function *TsanAtomicCAS[kNumberOfAccessSizes];
  Function *TsanAtomicThreadFence;
  Function *TsanAtomicSignalFence;
  Function *TsanVptrUpdate;
  Function *TsanVptrLoad;
  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
};
}  // namespace

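// Typical use (a sketch, not prescribed by this file): the public entry point
// is createThreadSanitizerPass() below, e.g.
//     PassManager PM;
//     PM.add(createThreadSanitizerPass());  // optionally takes a blacklist file
//     PM.run(M);
// Note that doInitialization() below returns early when no DataLayout analysis
// is available, in which case the pass instruments nothing.
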
char ThreadSanitizer::ID = 0;
INITIALIZE_PASS(ThreadSanitizer, "tsan",
    "ThreadSanitizer: detects data races.",
    false, false)

const char *ThreadSanitizer::getPassName() const {
  return "ThreadSanitizer";
}

FunctionPass *llvm::createThreadSanitizerPass(StringRef BlacklistFile) {
  return new ThreadSanitizer(BlacklistFile);
}

static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
  if (Function *F = dyn_cast<Function>(FuncOrBitcast))
    return F;
  FuncOrBitcast->dump();
  report_fatal_error("ThreadSanitizer interface function redefined");
}

void ThreadSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(M.getContext());
  // Initialize the callbacks.
  TsanFuncEntry = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_entry", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
  TsanFuncExit = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_func_exit", IRB.getVoidTy(), NULL));
  OrdTy = IRB.getInt32Ty();
  for (size_t i = 0; i < kNumberOfAccessSizes; ++i) {
    const size_t ByteSize = 1 << i;
    const size_t BitSize = ByteSize * 8;
    SmallString<32> ReadName("__tsan_read" + itostr(ByteSize));
    TsanRead[i] = checkInterfaceFunction(M.getOrInsertFunction(
        ReadName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));

    SmallString<32> WriteName("__tsan_write" + itostr(ByteSize));
    TsanWrite[i] = checkInterfaceFunction(M.getOrInsertFunction(
        WriteName, IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));

    Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    SmallString<32> AtomicLoadName("__tsan_atomic" + itostr(BitSize) +
                                   "_load");
    TsanAtomicLoad[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicLoadName, Ty, PtrTy, OrdTy, NULL));

    SmallString<32> AtomicStoreName("__tsan_atomic" + itostr(BitSize) +
                                    "_store");
    TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy,
        NULL));

    for (int op = AtomicRMWInst::FIRST_BINOP;
        op <= AtomicRMWInst::LAST_BINOP; ++op) {
      TsanAtomicRMW[op][i] = NULL;
      const char *NamePart = NULL;
      if (op == AtomicRMWInst::Xchg)
        NamePart = "_exchange";
      else if (op == AtomicRMWInst::Add)
        NamePart = "_fetch_add";
      else if (op == AtomicRMWInst::Sub)
        NamePart = "_fetch_sub";
      else if (op == AtomicRMWInst::And)
        NamePart = "_fetch_and";
      else if (op == AtomicRMWInst::Or)
        NamePart = "_fetch_or";
      else if (op == AtomicRMWInst::Xor)
        NamePart = "_fetch_xor";
      else if (op == AtomicRMWInst::Nand)
        NamePart = "_fetch_nand";
      else
        continue;
      SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
      TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction(
          RMWName, Ty, PtrTy, Ty, OrdTy, NULL));
    }

    SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) +
                                  "_compare_exchange_val");
    TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction(
        AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, NULL));
  }
  TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), NULL));
  TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
  TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
  TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
      "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));

  MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy, NULL));
  MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IntptrTy, NULL));
  MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
      "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
      IntptrTy, NULL));
}

bool ThreadSanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    return false;
  DL = &DLP->getDataLayout();
  BL.reset(SpecialCaseList::createOrDie(BlacklistFile));

  // Always insert a call to __tsan_init into the module's CTORs.
  IRBuilder<> IRB(M.getContext());
  IntptrTy = IRB.getIntPtrTy(DL);
  Value *TsanInit = M.getOrInsertFunction("__tsan_init",
                                          IRB.getVoidTy(), NULL);
  appendToGlobalCtors(M, cast<Function>(TsanInit), 0);

  return true;
}

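// appendToGlobalCtors above registers __tsan_init in llvm.global_ctors, so the
// run-time is initialized before any instrumented code runs.
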
static bool isVtableAccess(Instruction *I) {
  if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
    return Tag->isTBAAVtableAccess();
  return false;
}

bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
  // If this is a GEP, just analyze its pointer operand.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
    Addr = GEP->getPointerOperand();

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    if (GV->isConstant()) {
      // Reads from constant globals can not race with any writes.
      NumOmittedReadsFromConstantGlobals++;
      return true;
    }
  } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
    if (isVtableAccess(L)) {
      // Reads from a vtable pointer can not race with any writes.
      NumOmittedReadsFromVtable++;
      return true;
    }
  }
  return false;
}

// Instrumenting some of the accesses may be proven redundant.
// Currently handled:
//  - read-before-write (within same BB, no calls between)
//
// We do not handle some of the patterns that should not survive
// after the classic compiler optimizations.
// E.g. two reads from the same temp should be eliminated by CSE,
// two writes should be eliminated by DSE, etc.
//
// 'Local' is a vector of insns within the same BB (no calls between).
// 'All' is a vector of insns that will be instrumented.
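//
// For example (a sketch): within one call-free window
//     %x = load i32* %p
//     ...
//     store i32 %y, i32* %p
// the load is dropped from 'All' (only NumOmittedReadsBeforeWrite is bumped),
// since the later write to the same address will be instrumented anyway.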
void ThreadSanitizer::chooseInstructionsToInstrument(
    SmallVectorImpl<Instruction*> &Local,
    SmallVectorImpl<Instruction*> &All) {
  SmallSet<Value*, 8> WriteTargets;
  // Iterate from the end.
  for (SmallVectorImpl<Instruction*>::reverse_iterator It = Local.rbegin(),
       E = Local.rend(); It != E; ++It) {
    Instruction *I = *It;
    if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
      WriteTargets.insert(Store->getPointerOperand());
    } else {
      LoadInst *Load = cast<LoadInst>(I);
      Value *Addr = Load->getPointerOperand();
      if (WriteTargets.count(Addr)) {
        // We will write to this temp, so no reason to analyze the read.
        NumOmittedReadsBeforeWrite++;
        continue;
      }
      if (addrPointsToConstantData(Addr)) {
        // Addr points to some constant data -- it can not race with any writes.
        continue;
      }
    }
    All.push_back(I);
  }
  Local.clear();
}

static bool isAtomic(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isAtomic() && LI->getSynchScope() == CrossThread;
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isAtomic() && SI->getSynchScope() == CrossThread;
  if (isa<AtomicRMWInst>(I))
    return true;
  if (isa<AtomicCmpXchgInst>(I))
    return true;
  if (isa<FenceInst>(I))
    return true;
  return false;
}

bool ThreadSanitizer::runOnFunction(Function &F) {
  if (!DL) return false;
  if (BL->isIn(F)) return false;
  initializeCallbacks(*F.getParent());
  SmallVector<Instruction*, 8> RetVec;
  SmallVector<Instruction*, 8> AllLoadsAndStores;
  SmallVector<Instruction*, 8> LocalLoadsAndStores;
  SmallVector<Instruction*, 8> AtomicAccesses;
  SmallVector<Instruction*, 8> MemIntrinCalls;
  bool Res = false;
  bool HasCalls = false;

  // Traverse all instructions, collect loads/stores/returns, check for calls.
  for (Function::iterator FI = F.begin(), FE = F.end();
       FI != FE; ++FI) {
    BasicBlock &BB = *FI;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
         BI != BE; ++BI) {
      if (isAtomic(BI))
        AtomicAccesses.push_back(BI);
      else if (isa<LoadInst>(BI) || isa<StoreInst>(BI))
        LocalLoadsAndStores.push_back(BI);
      else if (isa<ReturnInst>(BI))
        RetVec.push_back(BI);
      else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
        if (isa<MemIntrinsic>(BI))
          MemIntrinCalls.push_back(BI);
        HasCalls = true;
        chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
      }
    }
    chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
  }

  // We have collected all loads and stores.
  // FIXME: many of these accesses do not need to be checked for races
  // (e.g. variables that do not escape, etc).

  // Instrument memory accesses.
  if (ClInstrumentMemoryAccesses && F.hasFnAttribute(Attribute::SanitizeThread))
    for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) {
      Res |= instrumentLoadOrStore(AllLoadsAndStores[i]);
    }

  // Instrument atomic memory accesses.
  if (ClInstrumentAtomics)
    for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
      Res |= instrumentAtomic(AtomicAccesses[i]);
    }

  if (ClInstrumentMemIntrinsics)
    for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
      Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
    }

  // Instrument function entry/exit points if there were instrumented accesses.
  if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ReturnAddress = IRB.CreateCall(
        Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
        IRB.getInt32(0));
    IRB.CreateCall(TsanFuncEntry, ReturnAddress);
    for (size_t i = 0, n = RetVec.size(); i < n; ++i) {
      IRBuilder<> IRBRet(RetVec[i]);
      IRBRet.CreateCall(TsanFuncExit);
    }
    Res = true;
  }
  return Res;
}

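// The entry/exit instrumentation above roughly produces (a sketch):
//     %ra = call i8* @llvm.returnaddress(i32 0)
//     call void @__tsan_func_entry(i8* %ra)
//     ...
//     call void @__tsan_func_exit()   ; immediately before each ret
//     ret void
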
bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
  IRBuilder<> IRB(I);
  bool IsWrite = isa<StoreInst>(*I);
  Value *Addr = IsWrite
      ? cast<StoreInst>(I)->getPointerOperand()
      : cast<LoadInst>(I)->getPointerOperand();
  int Idx = getMemoryAccessFuncIndex(Addr);
  if (Idx < 0)
    return false;
  if (IsWrite && isVtableAccess(I)) {
    DEBUG(dbgs() << "  VPTR : " << *I << "\n");
    Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
    // StoredValue may be a vector type if we are storing several vptrs at once.
    // In this case, just take the first element of the vector since this is
    // enough to find vptr races.
    if (isa<VectorType>(StoredValue->getType()))
      StoredValue = IRB.CreateExtractElement(
          StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
    if (StoredValue->getType()->isIntegerTy())
      StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
    // Call TsanVptrUpdate.
    IRB.CreateCall2(TsanVptrUpdate,
                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                    IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy()));
    NumInstrumentedVtableWrites++;
    return true;
  }
  if (!IsWrite && isVtableAccess(I)) {
    IRB.CreateCall(TsanVptrLoad,
                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
    NumInstrumentedVtableReads++;
    return true;
  }
  Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
  IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
  if (IsWrite) NumInstrumentedWrites++;
  else         NumInstrumentedReads++;
  return true;
}

static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic:              assert(false);
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
    // case Consume:                v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 3; break;
    case AcquireRelease:         v = 4; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

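// The constants above are passed straight through to the __tsan_atomic*
// callbacks as their memory-order argument; they are assumed to match the
// run-time's memory-order numbering (0 = relaxed, 2 = acquire, 3 = release,
// 4 = acq_rel, 5 = seq_cst).
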
static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
  uint32_t v = 0;
  switch (ord) {
    case NotAtomic:              assert(false);
    case Unordered:              // Fall-through.
    case Monotonic:              v = 0; break;
    // case Consume:                v = 1; break;  // Not specified yet.
    case Acquire:                v = 2; break;
    case Release:                v = 0; break;
    case AcquireRelease:         v = 2; break;
    case SequentiallyConsistent: v = 5; break;
  }
  return IRB->getInt32(v);
}

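// createFailOrdering demotes the ordering used on the failure path of a
// compare-exchange: a failed CAS performs no store, so the release component
// is dropped (Release -> relaxed, AcquireRelease -> acquire), mirroring how
// C++11 constrains the failure order.
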
// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
// So, we either need to ensure the intrinsic is not inlined, or instrument it.
// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
// instead we simply replace them with regular function calls, which are then
// intercepted by the run-time.
// Since tsan is running after everyone else, the calls should not be
// replaced back with intrinsics. If that becomes wrong at some point,
// we will need to call e.g. __tsan_memset to avoid the intrinsics.
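//
// For example (a sketch): a call to
//     call void @llvm.memset.p0i8.i64(i8* %dst, i8 0, i64 %n, i32 1, i1 false)
// is rewritten into a plain libc call,
//     call i8* @memset(i8* %dst, i32 0, i64 %n)
// which the tsan run-time intercepts like any other memset.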
bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
    IRB.CreateCall3(MemsetFn,
        IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
        IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
    IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
        IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
        IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
        IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
    I->eraseFromParent();
  }
  return false;
}

// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
// standards.  For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
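//
// For example (a sketch): an acquire load
//     %v = load atomic i32* %p acquire, align 4
// is replaced with a call into the run-time,
//     %v = call i32 @__tsan_atomic32_load(i32* %p, i32 2)   ; 2 == acquire
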
bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
  IRBuilder<> IRB(I);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    Value *Addr = LI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     createOrdering(&IRB, LI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicLoad[Idx],
                                   ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);

  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    Value *Addr = SI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(SI->getValueOperand(), Ty, false),
                     createOrdering(&IRB, SI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicStore[Idx],
                                   ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    Value *Addr = RMWI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
    if (F == NULL)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(RMWI->getValOperand(), Ty, false),
                     createOrdering(&IRB, RMWI->getOrdering())};
    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
    Value *Addr = CASI->getPointerOperand();
    int Idx = getMemoryAccessFuncIndex(Addr);
    if (Idx < 0)
      return false;
    const size_t ByteSize = 1 << Idx;
    const size_t BitSize = ByteSize * 8;
    Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
    Type *PtrTy = Ty->getPointerTo();
    Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
                     IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false),
                     IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false),
                     createOrdering(&IRB, CASI->getOrdering()),
                     createFailOrdering(&IRB, CASI->getOrdering())};
    CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
    Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
    Function *F = FI->getSynchScope() == SingleThread ?
        TsanAtomicSignalFence : TsanAtomicThreadFence;
    CallInst *C = CallInst::Create(F, ArrayRef<Value*>(Args));
    ReplaceInstWithInst(I, C);
  }
  return true;
}

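// Worked example for getMemoryAccessFuncIndex below (a sketch): for an access
// through an i64*, the store size is 64 bits, 64 / 8 == 8 bytes, and
// countTrailingZeros(8) == 3, so slot 3 of the callback tables is used
// (e.g. __tsan_write8).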
int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
  Type *OrigPtrTy = Addr->getType();
  Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
  assert(OrigTy->isSized());
  uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
  if (TypeSize != 8  && TypeSize != 16 &&
      TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
    NumAccessesWithBadSize++;
    // Ignore all unusual sizes.
    return -1;
  }
  size_t Idx = countTrailingZeros(TypeSize / 8);
  assert(Idx < kNumberOfAccessSizes);
  return Idx;
}