//===-- CDSPass.cpp - Instrument memory accesses for the CDS runtime -----===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
//
// This file is a modified version of ThreadSanitizer.cpp, part of a race
// detector.
//
// The tool is under development; for details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to the run-time library before every memory access.
//   - Optimizations may be applied to avoid instrumenting some of the
//     accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//
//===----------------------------------------------------------------------===//
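
// As a rough illustration (using the runtime hook names this pass registers
// below), an acquire load such as
//
//     %v = load atomic i32, i32* %p acquire, align 4
//
// is replaced with a call into the CDS runtime:
//
//     %v = call i32 @cds_atomic_load32(i32* %p, i32 2, i8* %pos)
//
// where 2 is the C ABI index of memory_order_acquire and %pos points to a
// global string describing the source location.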
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/EscapeEnumerator.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/IR/DebugLoc.h"

using namespace llvm;

#define DEBUG_TYPE "CDS"
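
// Build an i8* global string holding the source position (as printed by the
// instruction's DebugLoc) of instruction I; this is the position argument
// passed to the runtime hooks.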
Value *getPosition(Instruction * I, IRBuilder<> &IRB, bool print = false)
{
	const DebugLoc & debug_location = I->getDebugLoc();
	std::string position_string;
	{
		llvm::raw_string_ostream position_stream(position_string);
		debug_location.print(position_stream);
	}

	if (print) {
		errs() << position_string << "\n";
	}

	return IRB.CreateGlobalStringPtr(position_string);
}
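
// Sanity-check a call we are about to emit: report any mismatch between the
// callee's declared parameter types and the types of the actual arguments.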
static inline bool checkSignature(Function * func, Value * args[]) {
	FunctionType * FType = func->getFunctionType();
	for (unsigned i = 0; i < FType->getNumParams(); i++) {
		if (FType->getParamType(i) != args[i]->getType()) {
			errs() << "expects: " << *FType->getParamType(i)
				   << "\tbut receives: " << *args[i]->getType() << "\n";
			return false;
		}
	}

	return true;
}
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
// STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
// STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
// static const char *const kCDSModuleCtorName = "cds.module_ctor";
// static const char *const kCDSInitName = "cds_init";
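
// The runtime provides hooks for accesses of 1, 2, 4, and 8 bytes, mapped to
// indices 0 through 3 by getMemoryAccessFuncIndex() below.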
static const size_t kNumberOfAccessSizes = 4;
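
// Map an LLVM atomic ordering onto the C ABI memory-order index used by the
// runtime (relaxed = 0, consume = 1, acquire = 2, release = 3, acq_rel = 4,
// seq_cst = 5).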
int getAtomicOrderIndex(AtomicOrdering order) {
	switch (order) {
	case AtomicOrdering::Monotonic:
		return (int)AtomicOrderingCABI::relaxed;
	// case AtomicOrdering::Consume:	// not specified yet
	//	return (int)AtomicOrderingCABI::consume;
	case AtomicOrdering::Acquire:
		return (int)AtomicOrderingCABI::acquire;
	case AtomicOrdering::Release:
		return (int)AtomicOrderingCABI::release;
	case AtomicOrdering::AcquireRelease:
		return (int)AtomicOrderingCABI::acq_rel;
	case AtomicOrdering::SequentiallyConsistent:
		return (int)AtomicOrderingCABI::seq_cst;
	default:
		// unordered or Not Atomic
		return -1;
	}
}
AtomicOrderingCABI indexToAtomicOrder(int index) {
	if (index == (int)AtomicOrderingCABI::relaxed)
		return AtomicOrderingCABI::relaxed;
	else if (index == (int)AtomicOrderingCABI::consume)
		return AtomicOrderingCABI::consume;
	else if (index == (int)AtomicOrderingCABI::acquire)
		return AtomicOrderingCABI::acquire;
	else if (index == (int)AtomicOrderingCABI::release)
		return AtomicOrderingCABI::release;
	else if (index == (int)AtomicOrderingCABI::acq_rel)
		return AtomicOrderingCABI::acq_rel;
	else if (index == (int)AtomicOrderingCABI::seq_cst)
		return AtomicOrderingCABI::seq_cst;

	errs() << "Bad Atomic index\n";
	return AtomicOrderingCABI::seq_cst;
}
/* According to atomic_base.h: __cmpexch_failure_order */
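/* For example, a compare-exchange whose success order is acq_rel performs its
 * failing load with acquire semantics, and one whose success order is release
 * fails with relaxed semantics; all other orders are reused unchanged. */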
int AtomicCasFailureOrderIndex(int index) {
	AtomicOrderingCABI succ_order = indexToAtomicOrder(index);
	AtomicOrderingCABI fail_order;
	if (succ_order == AtomicOrderingCABI::acq_rel)
		fail_order = AtomicOrderingCABI::acquire;
	else if (succ_order == AtomicOrderingCABI::release)
		fail_order = AtomicOrderingCABI::relaxed;
	else
		fail_order = succ_order;

	return (int) fail_order;
}
/* The original function checkSanitizerInterfaceFunction was defined
 * in llvm/Transforms/Utils/ModuleUtils.h. */
static Function * checkCDSPassInterfaceFunction(Value *FuncOrBitcast) {
	if (isa<Function>(FuncOrBitcast))
		return cast<Function>(FuncOrBitcast);
	FuncOrBitcast->print(errs());
	errs() << '\n';
	std::string Err;
	raw_string_ostream Stream(Err);
	Stream << "CDSPass interface function redefined: " << *FuncOrBitcast;
	report_fatal_error(Stream.str());
}
namespace {
struct CDSPass : public FunctionPass {
	static char ID;
	CDSPass() : FunctionPass(ID) {}
	StringRef getPassName() const override;
	bool runOnFunction(Function &F) override;
	bool doInitialization(Module &M) override;

private:
	void initializeCallbacks(Module &M);
	bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
	bool instrumentVolatile(Instruction *I, const DataLayout &DL);
	bool instrumentMemIntrinsic(Instruction *I);
	bool instrumentAtomic(Instruction *I, const DataLayout &DL);
	bool shouldInstrumentBeforeAtomics(Instruction *I);
	void InsertRuntimeIgnores(Function &F);
	void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
										SmallVectorImpl<Instruction *> &All,
										const DataLayout &DL);
	bool addrPointsToConstantData(Value *Addr);
	int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);

	Type * OrdTy;
	Type * IntPtrTy;
	Type * Int8PtrTy;
	Type * Int16PtrTy;
	Type * Int32PtrTy;
	Type * Int64PtrTy;
	Type * VoidTy;

	Function * CDSFuncEntry;
	Function * CDSFuncExit;
	// Ignore-range callbacks used by InsertRuntimeIgnores; assumed to be
	// registered with the runtime elsewhere.
	Function * CDSIgnoreBegin;
	Function * CDSIgnoreEnd;

	Function * CDSLoad[kNumberOfAccessSizes];
	Function * CDSStore[kNumberOfAccessSizes];
	Function * CDSVolatileLoad[kNumberOfAccessSizes];
	Function * CDSVolatileStore[kNumberOfAccessSizes];
	Function * CDSAtomicInit[kNumberOfAccessSizes];
	Function * CDSAtomicLoad[kNumberOfAccessSizes];
	Function * CDSAtomicStore[kNumberOfAccessSizes];
	Function * CDSAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
	Function * CDSAtomicCAS_V1[kNumberOfAccessSizes];
	Function * CDSAtomicCAS_V2[kNumberOfAccessSizes];
	Function * CDSAtomicThreadFence;
	Function * MemmoveFn, * MemcpyFn, * MemsetFn;
	// Function * CDSCtorFunction;

	std::vector<StringRef> AtomicFuncNames;
	std::vector<StringRef> PartialAtomicFuncNames;
};
} // namespace
StringRef CDSPass::getPassName() const {
	return "CDSPass";
}
void CDSPass::initializeCallbacks(Module &M) {
	LLVMContext &Ctx = M.getContext();
	AttributeList Attr;
	Attr = Attr.addAttribute(Ctx, AttributeList::FunctionIndex,
			Attribute::NoUnwind);

	Type * Int1Ty = Type::getInt1Ty(Ctx);
	Type * Int32Ty = Type::getInt32Ty(Ctx);
	OrdTy = Type::getInt32Ty(Ctx);

	Int8PtrTy  = Type::getInt8PtrTy(Ctx);
	Int16PtrTy = Type::getInt16PtrTy(Ctx);
	Int32PtrTy = Type::getInt32PtrTy(Ctx);
	Int64PtrTy = Type::getInt64PtrTy(Ctx);

	VoidTy = Type::getVoidTy(Ctx);

	CDSFuncEntry = checkCDSPassInterfaceFunction(
		M.getOrInsertFunction("cds_func_entry",
			Attr, VoidTy, Int8PtrTy).getCallee());
	CDSFuncExit = checkCDSPassInterfaceFunction(
		M.getOrInsertFunction("cds_func_exit",
			Attr, VoidTy, Int8PtrTy).getCallee());
	// Get the functions to call from our runtime library.
	for (unsigned i = 0; i < kNumberOfAccessSizes; i++) {
		const unsigned ByteSize = 1U << i;
		const unsigned BitSize = ByteSize * 8;

		std::string ByteSizeStr = utostr(ByteSize);
		std::string BitSizeStr = utostr(BitSize);

		Type *Ty = Type::getIntNTy(Ctx, BitSize);
		Type *PtrTy = Ty->getPointerTo();

		// uint8_t cds_atomic_load8 (void * obj, int atomic_index, const char * position);
		// void cds_atomic_store8 (void * obj, uint8_t val, int atomic_index, const char * position);
		SmallString<32> LoadName("cds_load" + BitSizeStr);
		SmallString<32> StoreName("cds_store" + BitSizeStr);
		SmallString<32> VolatileLoadName("cds_volatile_load" + BitSizeStr);
		SmallString<32> VolatileStoreName("cds_volatile_store" + BitSizeStr);
		SmallString<32> AtomicInitName("cds_atomic_init" + BitSizeStr);
		SmallString<32> AtomicLoadName("cds_atomic_load" + BitSizeStr);
		SmallString<32> AtomicStoreName("cds_atomic_store" + BitSizeStr);

		CDSLoad[i] = checkCDSPassInterfaceFunction(
			M.getOrInsertFunction(LoadName, Attr, VoidTy, Int8PtrTy).getCallee());
		CDSStore[i] = checkCDSPassInterfaceFunction(
			M.getOrInsertFunction(StoreName, Attr, VoidTy, Int8PtrTy).getCallee());
		CDSVolatileLoad[i] = checkCDSPassInterfaceFunction(
			M.getOrInsertFunction(VolatileLoadName,
				Attr, Ty, PtrTy, Int8PtrTy).getCallee());
		CDSVolatileStore[i] = checkCDSPassInterfaceFunction(
			M.getOrInsertFunction(VolatileStoreName,
				Attr, VoidTy, PtrTy, Ty, Int8PtrTy).getCallee());
		CDSAtomicInit[i] = checkCDSPassInterfaceFunction(
			M.getOrInsertFunction(AtomicInitName,
				Attr, VoidTy, PtrTy, Ty, Int8PtrTy).getCallee());
		CDSAtomicLoad[i] = checkCDSPassInterfaceFunction(
			M.getOrInsertFunction(AtomicLoadName,
				Attr, Ty, PtrTy, OrdTy, Int8PtrTy).getCallee());
		CDSAtomicStore[i] = checkCDSPassInterfaceFunction(
			M.getOrInsertFunction(AtomicStoreName,
				Attr, VoidTy, PtrTy, Ty, OrdTy, Int8PtrTy).getCallee());
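
		// Register one hook per (RMW operation, access size) pair; for
		// example, a 4-byte atomic fetch_add maps to cds_atomic_fetch_add32.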
		for (unsigned op = AtomicRMWInst::FIRST_BINOP;
			op <= AtomicRMWInst::LAST_BINOP; ++op) {
			CDSAtomicRMW[op][i] = nullptr;
			std::string NamePart;

			if (op == AtomicRMWInst::Xchg)
				NamePart = "_exchange";
			else if (op == AtomicRMWInst::Add)
				NamePart = "_fetch_add";
			else if (op == AtomicRMWInst::Sub)
				NamePart = "_fetch_sub";
			else if (op == AtomicRMWInst::And)
				NamePart = "_fetch_and";
			else if (op == AtomicRMWInst::Or)
				NamePart = "_fetch_or";
			else if (op == AtomicRMWInst::Xor)
				NamePart = "_fetch_xor";
			else
				continue;

			SmallString<32> AtomicRMWName("cds_atomic" + NamePart + BitSizeStr);
			CDSAtomicRMW[op][i] = checkCDSPassInterfaceFunction(
				M.getOrInsertFunction(AtomicRMWName,
					Attr, Ty, PtrTy, Ty, OrdTy, Int8PtrTy).getCallee());
		}
		// Only the strong version of compare-exchange is supported.
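		// The two variants differ in calling convention: _v1 returns the value
		// read (the caller compares it against the expected value), while _v2
		// takes the expected value by pointer and returns an i1 success flag.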
		SmallString<32> AtomicCASName_V1("cds_atomic_compare_exchange" + BitSizeStr + "_v1");
		SmallString<32> AtomicCASName_V2("cds_atomic_compare_exchange" + BitSizeStr + "_v2");
		CDSAtomicCAS_V1[i] = checkCDSPassInterfaceFunction(
			M.getOrInsertFunction(AtomicCASName_V1,
				Attr, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, Int8PtrTy).getCallee());
		CDSAtomicCAS_V2[i] = checkCDSPassInterfaceFunction(
			M.getOrInsertFunction(AtomicCASName_V2,
				Attr, Int1Ty, PtrTy, PtrTy, Ty, OrdTy, OrdTy, Int8PtrTy).getCallee());
	}

	CDSAtomicThreadFence = checkCDSPassInterfaceFunction(
		M.getOrInsertFunction("cds_atomic_thread_fence",
			Attr, VoidTy, OrdTy, Int8PtrTy).getCallee());

	MemmoveFn = checkCDSPassInterfaceFunction(
		M.getOrInsertFunction("memmove", Attr, Int8PtrTy, Int8PtrTy,
			Int8PtrTy, IntPtrTy).getCallee());
	MemcpyFn = checkCDSPassInterfaceFunction(
		M.getOrInsertFunction("memcpy", Attr, Int8PtrTy, Int8PtrTy,
			Int8PtrTy, IntPtrTy).getCallee());
	MemsetFn = checkCDSPassInterfaceFunction(
		M.getOrInsertFunction("memset", Attr, Int8PtrTy, Int8PtrTy,
			Int32Ty, IntPtrTy).getCallee());
}
bool CDSPass::doInitialization(Module &M) {
	const DataLayout &DL = M.getDataLayout();
	IntPtrTy = DL.getIntPtrType(M.getContext());

	// createSanitizerCtorAndInitFunctions is defined in
	// "llvm/Transforms/Utils/ModuleUtils.h"; we do not support it yet.
	/*
	std::tie(CDSCtorFunction, std::ignore) = createSanitizerCtorAndInitFunctions(
		M, kCDSModuleCtorName, kCDSInitName, {}, {});

	appendToGlobalCtors(M, CDSCtorFunction, 0);
	*/
358 "atomic_init", "atomic_load", "atomic_store",
359 "atomic_fetch_", "atomic_exchange", "atomic_compare_exchange_"
362 PartialAtomicFuncNames =
364 "load", "store", "fetch", "exchange", "compare_exchange_"
static bool isVtableAccess(Instruction *I) {
	if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
		return Tag->isTBAAVtableAccess();
	return false;
}
// Do not instrument known races/"benign races" that come from compiler
// instrumentation. The user has no way of suppressing them.
static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
	// Peel off GEPs and BitCasts.
	Addr = Addr->stripInBoundsOffsets();

	if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
		if (GV->hasSection()) {
			StringRef SectionName = GV->getSection();
			// Check if the global is in the PGO counters section.
			auto OF = Triple(M->getTargetTriple()).getObjectFormat();
			if (SectionName.endswith(
					getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
				return false;
		}

		// Check if the global is private gcov data.
		if (GV->getName().startswith("__llvm_gcov") ||
			GV->getName().startswith("__llvm_gcda"))
			return false;
	}

	// Do not instrument accesses from different address spaces; we cannot deal
	// with them.
	if (Addr) {
		Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
		if (PtrTy->getPointerAddressSpace() != 0)
			return false;
	}

	return true;
}
bool CDSPass::addrPointsToConstantData(Value *Addr) {
	// If this is a GEP, just analyze its pointer operand.
	if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
		Addr = GEP->getPointerOperand();

	if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
		if (GV->isConstant()) {
			// Reads from constant globals can not race with any writes.
			NumOmittedReadsFromConstantGlobals++;
			return true;
		}
	} else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
		if (isVtableAccess(L)) {
			// Reads from a vtable pointer can not race with any writes.
			NumOmittedReadsFromVtable++;
			return true;
		}
	}

	return false;
}
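
// Pending plain loads and stores must be flushed to the instrumentation list
// before any acquire-or-stronger atomic operation, since such an atomic may
// synchronize with another thread.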
bool CDSPass::shouldInstrumentBeforeAtomics(Instruction * Inst) {
	if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
		AtomicOrdering ordering = LI->getOrdering();
		if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
			return true;
	} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
		AtomicOrdering ordering = SI->getOrdering();
		if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
			return true;
	} else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Inst)) {
		AtomicOrdering ordering = RMWI->getOrdering();
		if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
			return true;
	} else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
		AtomicOrdering ordering = CASI->getSuccessOrdering();
		if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
			return true;
	} else if (FenceInst *FI = dyn_cast<FenceInst>(Inst)) {
		AtomicOrdering ordering = FI->getOrdering();
		if ( isAtLeastOrStrongerThan(ordering, AtomicOrdering::Acquire) )
			return true;
	}

	return false;
}
void CDSPass::chooseInstructionsToInstrument(
	SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
	const DataLayout &DL) {
	SmallPtrSet<Value*, 8> WriteTargets;
	// Iterate from the end.
	for (Instruction *I : reverse(Local)) {
		if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
			Value *Addr = Store->getPointerOperand();
			if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
				continue;
			WriteTargets.insert(Addr);
		} else {
			LoadInst *Load = cast<LoadInst>(I);
			Value *Addr = Load->getPointerOperand();
			if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
				continue;
			if (WriteTargets.count(Addr)) {
				// We will write to this temp, so no reason to analyze the read.
				NumOmittedReadsBeforeWrite++;
				continue;
			}
			if (addrPointsToConstantData(Addr)) {
				// Addr points to some constant data -- it can not race with any writes.
				continue;
			}
		}
		Value *Addr = isa<StoreInst>(*I)
			? cast<StoreInst>(I)->getPointerOperand()
			: cast<LoadInst>(I)->getPointerOperand();
		if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
				!PointerMayBeCaptured(Addr, true, true)) {
			// The variable is addressable but not captured, so it cannot be
			// referenced from a different thread and participate in a data race
			// (see llvm/Analysis/CaptureTracking.h for details).
			NumOmittedNonCaptured++;
			continue;
		}
		All.push_back(I);
	}
	Local.clear();
}
// Assumption: mirrors ThreadSanitizer's -handle-cxx-exceptions option, which
// defaults to handling C++ exceptions.
static const bool ClHandleCxxExceptions = true;

void CDSPass::InsertRuntimeIgnores(Function &F) {
	IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
	IRB.CreateCall(CDSIgnoreBegin);
	EscapeEnumerator EE(F, "cds_ignore_cleanup", ClHandleCxxExceptions);
	while (IRBuilder<> *AtExit = EE.Next()) {
		AtExit->CreateCall(CDSIgnoreEnd);
	}
}
bool CDSPass::runOnFunction(Function &F) {
	initializeCallbacks( *F.getParent() );
	SmallVector<Instruction*, 8> AllLoadsAndStores;
	SmallVector<Instruction*, 8> LocalLoadsAndStores;
	SmallVector<Instruction*, 8> VolatileLoadsAndStores;
	SmallVector<Instruction*, 8> AtomicAccesses;
	SmallVector<Instruction*, 8> MemIntrinCalls;

	bool Res = false;
	bool HasAtomic = false;
	bool HasVolatile = false;
	const DataLayout &DL = F.getParent()->getDataLayout();
	for (auto &BB : F) {
		for (auto &Inst : BB) {
			if ( (&Inst)->isAtomic() ) {
				AtomicAccesses.push_back(&Inst);
				HasAtomic = true;

				if (shouldInstrumentBeforeAtomics(&Inst)) {
					chooseInstructionsToInstrument(LocalLoadsAndStores,
						AllLoadsAndStores, DL);
				}
			} else if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst)) {
				LoadInst *LI = dyn_cast<LoadInst>(&Inst);
				StoreInst *SI = dyn_cast<StoreInst>(&Inst);
				bool isVolatile = ( LI ? LI->isVolatile() : SI->isVolatile() );

				if (isVolatile) {
					VolatileLoadsAndStores.push_back(&Inst);
					HasVolatile = true;
				} else
					LocalLoadsAndStores.push_back(&Inst);
			} else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
				if (isa<MemIntrinsic>(Inst))
					MemIntrinCalls.push_back(&Inst);

				/*if (CallInst *CI = dyn_cast<CallInst>(&Inst))
					maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);*/

				chooseInstructionsToInstrument(LocalLoadsAndStores,
					AllLoadsAndStores, DL);
			}
		}

		chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
	}
	for (auto Inst : AllLoadsAndStores) {
		Res |= instrumentLoadOrStore(Inst, DL);
	}

	for (auto Inst : VolatileLoadsAndStores) {
		Res |= instrumentVolatile(Inst, DL);
	}

	for (auto Inst : AtomicAccesses) {
		Res |= instrumentAtomic(Inst, DL);
	}

	for (auto Inst : MemIntrinCalls) {
		Res |= instrumentMemIntrinsic(Inst);
	}
	// Instrument function entry and exit for functions containing atomics or volatiles
	if (Res && ( HasAtomic || HasVolatile) ) {
		IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
		Value *ReturnAddress = IRB.CreateCall(
			Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
			IRB.getInt32(0));
		(void)ReturnAddress;	// currently unused

		Value * FuncName = IRB.CreateGlobalStringPtr(F.getName());
		IRB.CreateCall(CDSFuncEntry, FuncName);

		EscapeEnumerator EE(F, "cds_cleanup", true);
		while (IRBuilder<> *AtExit = EE.Next()) {
			AtExit->CreateCall(CDSFuncExit, FuncName);
		}
	}

	return Res;
}
bool CDSPass::instrumentLoadOrStore(Instruction *I,
									const DataLayout &DL) {
	IRBuilder<> IRB(I);
	bool IsWrite = isa<StoreInst>(*I);
	Value *Addr = IsWrite
		? cast<StoreInst>(I)->getPointerOperand()
		: cast<LoadInst>(I)->getPointerOperand();

	// swifterror memory addresses are mem2reg promoted by instruction selection.
	// As such they cannot have regular uses like an instrumentation function and
	// it makes no sense to track them as memory.
	if (Addr->isSwiftError())
		return false;

	int Idx = getMemoryAccessFuncIndex(Addr, DL);
	if (Idx < 0)
		return false;

	if (IsWrite && isVtableAccess(I)) {
		/* TODO: vtable writes are not supported yet; the ThreadSanitizer
		 * version of this code is kept for reference.
		LLVM_DEBUG(dbgs() << "  VPTR : " << *I << "\n");
		Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
		// StoredValue may be a vector type if we are storing several vptrs at once.
		// In this case, just take the first element of the vector since this is
		// enough to find vptr races.
		if (isa<VectorType>(StoredValue->getType()))
			StoredValue = IRB.CreateExtractElement(
					StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
		if (StoredValue->getType()->isIntegerTy())
			StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
		// Call TsanVptrUpdate.
		IRB.CreateCall(TsanVptrUpdate,
						{IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
						 IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
		NumInstrumentedVtableWrites++;
		*/
		return true;
	}

	if (!IsWrite && isVtableAccess(I)) {
		/* TODO: vtable reads are not supported yet.
		IRB.CreateCall(TsanVptrLoad,
					IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
		NumInstrumentedVtableReads++;
		*/
		return true;
	}

	// TODO: unaligned reads and writes
	FunctionCallee OnAccessFunc = nullptr;
	OnAccessFunc = IsWrite ? CDSStore[Idx] : CDSLoad[Idx];
	IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
	if (IsWrite) NumInstrumentedWrites++;
	else NumInstrumentedReads++;
	return true;
}
bool CDSPass::instrumentVolatile(Instruction * I, const DataLayout &DL) {
	IRBuilder<> IRB(I);
	Value *position = getPosition(I, IRB);

	if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
		Value *Addr = LI->getPointerOperand();
		int Idx = getMemoryAccessFuncIndex(Addr, DL);
		if (Idx < 0)
			return false;

		const unsigned ByteSize = 1U << Idx;
		const unsigned BitSize = ByteSize * 8;
		Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
		Type *PtrTy = Ty->getPointerTo();
		Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), position};

		Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
		Value *C = IRB.CreateCall(CDSVolatileLoad[Idx], Args);
		Value *Cast = IRB.CreateBitOrPointerCast(C, OrigTy);
		I->replaceAllUsesWith(Cast);
		I->eraseFromParent();
	} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
		Value *Addr = SI->getPointerOperand();
		int Idx = getMemoryAccessFuncIndex(Addr, DL);
		if (Idx < 0)
			return false;

		const unsigned ByteSize = 1U << Idx;
		const unsigned BitSize = ByteSize * 8;
		Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
		Type *PtrTy = Ty->getPointerTo();
		Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
						 IRB.CreateBitOrPointerCast(SI->getValueOperand(), Ty),
						 position};
		CallInst *C = CallInst::Create(CDSVolatileStore[Idx], Args);
		ReplaceInstWithInst(I, C);
	} else {
		return false;
	}

	return true;
}
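
// As in ThreadSanitizer, memset/memcpy/memmove intrinsics are not instrumented
// directly; they are replaced with calls to the corresponding library
// functions, which the runtime can then intercept.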
bool CDSPass::instrumentMemIntrinsic(Instruction *I) {
	IRBuilder<> IRB(I);
	if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
		IRB.CreateCall(
			MemsetFn,
			{IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
			 IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
			 IRB.CreateIntCast(M->getArgOperand(2), IntPtrTy, false)});
		I->eraseFromParent();
	} else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
		IRB.CreateCall(
			isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
			{IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
			 IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
			 IRB.CreateIntCast(M->getArgOperand(2), IntPtrTy, false)});
		I->eraseFromParent();
	}

	return false;
}
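
// Rewrite an atomic instruction (load, store, RMW, cmpxchg, or fence) into the
// matching cds_atomic_* runtime call, passing the memory order as a C ABI
// index together with the source position string.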
bool CDSPass::instrumentAtomic(Instruction * I, const DataLayout &DL) {
	IRBuilder<> IRB(I);

	Value *position = getPosition(I, IRB);

	if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
		Value *Addr = LI->getPointerOperand();
		int Idx = getMemoryAccessFuncIndex(Addr, DL);
		if (Idx < 0)
			return false;

		int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
		Value *Args[] = {Addr, order, position};
		Instruction* funcInst = CallInst::Create(CDSAtomicLoad[Idx], Args);
		ReplaceInstWithInst(LI, funcInst);
	} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
		Value *Addr = SI->getPointerOperand();
		int Idx = getMemoryAccessFuncIndex(Addr, DL);
		if (Idx < 0)
			return false;

		int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
		Value *val = SI->getValueOperand();
		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
		Value *Args[] = {Addr, val, order, position};
		Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], Args);
		ReplaceInstWithInst(SI, funcInst);
	} else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
		Value *Addr = RMWI->getPointerOperand();
		int Idx = getMemoryAccessFuncIndex(Addr, DL);
		if (Idx < 0)
			return false;

		int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
		Value *val = RMWI->getValOperand();
		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
		Value *Args[] = {Addr, val, order, position};
		Instruction* funcInst = CallInst::Create(CDSAtomicRMW[RMWI->getOperation()][Idx], Args);
		ReplaceInstWithInst(RMWI, funcInst);
	} else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
		IRBuilder<> IRB(CASI);

		Value *Addr = CASI->getPointerOperand();
		int Idx = getMemoryAccessFuncIndex(Addr, DL);
		if (Idx < 0)
			return false;

		const unsigned ByteSize = 1U << Idx;
		const unsigned BitSize = ByteSize * 8;
		Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
		Type *PtrTy = Ty->getPointerTo();

		Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
		Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);

		int atomic_order_index_succ = getAtomicOrderIndex(CASI->getSuccessOrdering());
		int atomic_order_index_fail = getAtomicOrderIndex(CASI->getFailureOrdering());
		Value *order_succ = ConstantInt::get(OrdTy, atomic_order_index_succ);
		Value *order_fail = ConstantInt::get(OrdTy, atomic_order_index_fail);

		Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
						 CmpOperand, NewOperand,
						 order_succ, order_fail, position};

		CallInst *funcInst = IRB.CreateCall(CDSAtomicCAS_V1[Idx], Args);
		Value *Success = IRB.CreateICmpEQ(funcInst, CmpOperand);

		Value *OldVal = funcInst;
		Type *OrigOldValTy = CASI->getNewValOperand()->getType();
		if (Ty != OrigOldValTy) {
			// The value is a pointer, so we need to cast the return value.
			OldVal = IRB.CreateIntToPtr(funcInst, OrigOldValTy);
		}

		Value *Res =
			IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
		Res = IRB.CreateInsertValue(Res, Success, 1);

		I->replaceAllUsesWith(Res);
		I->eraseFromParent();
	} else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
		int atomic_order_index = getAtomicOrderIndex(FI->getOrdering());
		Value *order = ConstantInt::get(OrdTy, atomic_order_index);
		Value *Args[] = {order, position};

		CallInst *funcInst = CallInst::Create(CDSAtomicThreadFence, Args);
		ReplaceInstWithInst(FI, funcInst);
		// errs() << "Thread Fences replaced\n";
	}

	return true;
}
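
// Map the store size of the type behind Addr onto an index into the callback
// arrays: 8/16/32/64-bit accesses yield indices 0-3. Anything else returns -1
// (including 128-bit accesses, whose index 4 falls outside
// kNumberOfAccessSizes).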
int CDSPass::getMemoryAccessFuncIndex(Value *Addr,
									const DataLayout &DL) {
	Type *OrigPtrTy = Addr->getType();
	Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
	assert(OrigTy->isSized());
	uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
	if (TypeSize != 8  && TypeSize != 16 &&
		TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
		NumAccessesWithBadSize++;
		// Ignore all unusual sizes.
		return -1;
	}

	size_t Idx = countTrailingZeros(TypeSize / 8);
	// assert(Idx < kNumberOfAccessSizes);
	if (Idx >= kNumberOfAccessSizes) {
		return -1;
	}

	return Idx;
}
char CDSPass::ID = 0;

// Automatically enable the pass.
static void registerCDSPass(const PassManagerBuilder &,
							legacy::PassManagerBase &PM) {
	PM.add(new CDSPass());
}

/* Enable the pass when opt level is greater than 0 */
static RegisterStandardPasses
	RegisterMyPass1(PassManagerBuilder::EP_OptimizerLast,
					registerCDSPass);

/* Enable the pass when opt level is 0 */
static RegisterStandardPasses
	RegisterMyPass2(PassManagerBuilder::EP_EnabledOnOptLevel0,
					registerCDSPass);
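
// A minimal usage sketch, assuming the pass is built as a shared library
// (the library file name below is hypothetical):
//
//     clang -Xclang -load -Xclang ./libCDSPass.so -c foo.c
//
// Loading the plugin triggers the RegisterStandardPasses hooks above, so the
// pass runs at the end of the optimization pipeline (or at -O0).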