1 //===-- CDSPass.cpp - xxx -------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 // This file is distributed under the University of Illinois Open Source
7 // License. See LICENSE.TXT for details.
9 //===----------------------------------------------------------------------===//
11 // This file is a modified version of ThreadSanitizer.cpp, a part of a race detector.
13 // The tool is under development, for the details about previous versions see
14 // http://code.google.com/p/data-race-test
16 // The instrumentation phase is quite simple:
17 // - Insert calls to run-time library before every memory access.
18 // - Optimizations may apply to avoid instrumenting some of the accesses.
19 // - Insert calls at function entry/exit.
20 // The rest is handled by the run-time library.
21 //===----------------------------------------------------------------------===//
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/SmallString.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/IR/BasicBlock.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/IRBuilder.h"
31 #include "llvm/IR/Instructions.h"
32 #include "llvm/IR/LLVMContext.h"
33 #include "llvm/IR/LegacyPassManager.h"
34 #include "llvm/IR/Module.h"
35 #include "llvm/IR/PassManager.h"
36 #include "llvm/Pass.h"
37 #include "llvm/ProfileData/InstrProf.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/Support/AtomicOrdering.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Transforms/Scalar.h"
42 #include "llvm/Transforms/Utils/Local.h"
43 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
44 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
45 #include "llvm/Transforms/Utils/EscapeEnumerator.h"
50 #define DEBUG_TYPE "CDS"
51 #include <llvm/IR/DebugLoc.h>
// Build (as an i8* global string) the debug-location string ("file:line"
// form produced by DebugLoc::print) of instruction I; it is passed to the
// run-time callbacks so CDS can report where the access occurred.
// When `print` is true the position is also echoed to errs().
// NOTE(review): IRB is taken by value, not by reference — confirm intended.
53 Value *getPosition( Instruction * I, IRBuilder <> IRB, bool print = false)
55 const DebugLoc & debug_location = I->getDebugLoc ();
56 std::string position_string;
58 llvm::raw_string_ostream position_stream (position_string);
59 debug_location . print (position_stream);
63 errs() << position_string << "\n";
66 return IRB.CreateGlobalStringPtr (position_string);
// Pass statistics (visible via -stats): counts of instrumented accesses and
// of accesses deliberately skipped by the optimizations below.
69 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
70 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
71 STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
72 // STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
73 // STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
75 STATISTIC(NumOmittedReadsBeforeWrite,
76 "Number of reads ignored due to following writes");
77 STATISTIC(NumOmittedReadsFromConstantGlobals,
78 "Number of reads from constant globals");
79 STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
80 STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
// Four supported access sizes: 1, 2, 4 and 8 bytes, indexed by log2(bytes).
91 static const size_t kNumberOfAccessSizes = 4;
// Map an LLVM AtomicOrdering to the C ABI memory-order integer
// (AtomicOrderingCABI) that the CDS run-time library expects as its
// `atomic_index` / order argument.
93 int getAtomicOrderIndex(AtomicOrdering order){
95 case AtomicOrdering::Monotonic:
96 return (int)AtomicOrderingCABI::relaxed;
97 // case AtomicOrdering::Consume: // not specified yet
98 // return AtomicOrderingCABI::consume;
99 case AtomicOrdering::Acquire:
100 return (int)AtomicOrderingCABI::acquire;
101 case AtomicOrdering::Release:
102 return (int)AtomicOrderingCABI::release;
103 case AtomicOrdering::AcquireRelease:
104 return (int)AtomicOrderingCABI::acq_rel;
105 case AtomicOrdering::SequentiallyConsistent:
106 return (int)AtomicOrderingCABI::seq_cst;
// Fallthrough for orderings with no CABI equivalent:
108 // unordered or Not Atomic
// CDSPass: a legacy FunctionPass (derived from ThreadSanitizer.cpp) that
// rewrites memory accesses — plain, volatile, and atomic — into calls to the
// CDS run-time library so executions can be checked for data races.
114 struct CDSPass : public FunctionPass {
116 CDSPass() : FunctionPass(ID) {}
117 bool runOnFunction(Function &F) override;
// Declares the cds_* run-time callbacks in the module (see definition below).
120 void initializeCallbacks(Module &M);
// Per-instruction instrumentation entry points; each returns true when the
// instruction was replaced/instrumented.
121 bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
122 bool instrumentVolatile(Instruction *I, const DataLayout &DL);
123 bool isAtomicCall(Instruction *I);
124 bool instrumentAtomic(Instruction *I, const DataLayout &DL);
125 bool instrumentAtomicCall(CallInst *CI, const DataLayout &DL);
// Filters the candidate loads/stores of one basic block (Local) into the
// set that actually needs instrumentation (All).
126 void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
127 SmallVectorImpl<Instruction *> &All,
128 const DataLayout &DL);
129 bool addrPointsToConstantData(Value *Addr);
// Returns log2(access size in bytes), or a failure value for unusual sizes.
130 int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
132 // Callbacks to run-time library are computed in doInitialization.
133 Constant * CDSFuncEntry;
134 Constant * CDSFuncExit;
// One callback per access size (index = log2 of the byte size).
136 Constant * CDSLoad[kNumberOfAccessSizes];
137 Constant * CDSStore[kNumberOfAccessSizes];
138 Constant * CDSVolatileLoad[kNumberOfAccessSizes];
139 Constant * CDSVolatileStore[kNumberOfAccessSizes];
140 Constant * CDSAtomicInit[kNumberOfAccessSizes];
141 Constant * CDSAtomicLoad[kNumberOfAccessSizes];
142 Constant * CDSAtomicStore[kNumberOfAccessSizes];
// RMW callbacks indexed by [binary operation][access size].
143 Constant * CDSAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
// Two compare-exchange flavors; see initializeCallbacks for signatures.
144 Constant * CDSAtomicCAS_V1[kNumberOfAccessSizes];
145 Constant * CDSAtomicCAS_V2[kNumberOfAccessSizes];
146 Constant * CDSAtomicThreadFence;
// Function-name patterns used by isAtomicCall / instrumentAtomicCall to
// recognize C/C++ atomic library calls (populated in runOnFunction).
148 std::vector<StringRef> AtomicFuncNames;
149 std::vector<StringRef> PartialAtomicFuncNames;
// True when I's TBAA metadata marks it as a vtable-pointer access.
153 static bool isVtableAccess(Instruction *I) {
154 if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
155 return Tag->isTBAAVtableAccess();
// Declare (getOrInsertFunction) every CDS run-time callback in module M and
// cache the resulting Constant*s in the pass members, for each of the
// kNumberOfAccessSizes access widths (8/16/32/64 bit).
159 void CDSPass::initializeCallbacks(Module &M) {
160 LLVMContext &Ctx = M.getContext();
162 Type * Int1Ty = Type::getInt1Ty(Ctx);
// Memory orders are passed to the run-time as i32 (OrdTy).
163 OrdTy = Type::getInt32Ty(Ctx);
165 Int8PtrTy = Type::getInt8PtrTy(Ctx);
166 Int16PtrTy = Type::getInt16PtrTy(Ctx);
167 Int32PtrTy = Type::getInt32PtrTy(Ctx);
168 Int64PtrTy = Type::getInt64PtrTy(Ctx);
170 VoidTy = Type::getVoidTy(Ctx);
// Function entry/exit hooks used to bracket instrumented functions.
172 CDSFuncEntry = M.getOrInsertFunction("cds_func_entry",
174 CDSFuncExit = M.getOrInsertFunction("cds_func_exit",
177 // Get the function to call from our run-time library.
178 for (unsigned i = 0; i < kNumberOfAccessSizes; i++) {
179 const unsigned ByteSize = 1U << i;
180 const unsigned BitSize = ByteSize * 8;
182 std::string ByteSizeStr = utostr(ByteSize);
183 std::string BitSizeStr = utostr(BitSize);
185 Type *Ty = Type::getIntNTy(Ctx, BitSize);
186 Type *PtrTy = Ty->getPointerTo();
188 // uint8_t cds_atomic_load8 (void * obj, int atomic_index)
189 // void cds_atomic_store8 (void * obj, int atomic_index, uint8_t val)
190 SmallString<32> LoadName("cds_load" + BitSizeStr);
191 SmallString<32> StoreName("cds_store" + BitSizeStr);
192 SmallString<32> VolatileLoadName("cds_volatile_load" + BitSizeStr);
193 SmallString<32> VolatileStoreName("cds_volatile_store" + BitSizeStr);
194 SmallString<32> AtomicInitName("cds_atomic_init" + BitSizeStr);
195 SmallString<32> AtomicLoadName("cds_atomic_load" + BitSizeStr);
196 SmallString<32> AtomicStoreName("cds_atomic_store" + BitSizeStr);
// Plain loads/stores take only the address; volatile/atomic variants also
// take a position string (i8*) and, for atomics, the memory order (i32).
198 CDSLoad[i] = M.getOrInsertFunction(LoadName, VoidTy, PtrTy);
199 CDSStore[i] = M.getOrInsertFunction(StoreName, VoidTy, PtrTy);
200 CDSVolatileLoad[i] = M.getOrInsertFunction(VolatileLoadName,
201 Ty, PtrTy, Int8PtrTy);
202 CDSVolatileStore[i] = M.getOrInsertFunction(VolatileStoreName,
203 VoidTy, PtrTy, Ty, Int8PtrTy);
204 CDSAtomicInit[i] = M.getOrInsertFunction(AtomicInitName,
205 VoidTy, PtrTy, Ty, Int8PtrTy);
206 CDSAtomicLoad[i] = M.getOrInsertFunction(AtomicLoadName,
207 Ty, PtrTy, OrdTy, Int8PtrTy);
208 CDSAtomicStore[i] = M.getOrInsertFunction(AtomicStoreName,
209 VoidTy, PtrTy, Ty, OrdTy, Int8PtrTy);
// Declare one RMW callback per supported binary operation; unsupported
// operations keep a nullptr entry.
211 for (int op = AtomicRMWInst::FIRST_BINOP;
212 op <= AtomicRMWInst::LAST_BINOP; ++op) {
213 CDSAtomicRMW[op][i] = nullptr;
214 std::string NamePart;
216 if (op == AtomicRMWInst::Xchg)
217 NamePart = "_exchange";
218 else if (op == AtomicRMWInst::Add)
219 NamePart = "_fetch_add";
220 else if (op == AtomicRMWInst::Sub)
221 NamePart = "_fetch_sub";
222 else if (op == AtomicRMWInst::And)
223 NamePart = "_fetch_and";
224 else if (op == AtomicRMWInst::Or)
225 NamePart = "_fetch_or";
226 else if (op == AtomicRMWInst::Xor)
227 NamePart = "_fetch_xor";
231 SmallString<32> AtomicRMWName("cds_atomic" + NamePart + BitSizeStr);
232 CDSAtomicRMW[op][i] = M.getOrInsertFunction(AtomicRMWName,
233 Ty, PtrTy, Ty, OrdTy, Int8PtrTy);
// Only the strong version of compare-exchange is supported.
// v1 takes the expected value by value and returns the old value (Ty);
// v2 takes a pointer to the expected value and returns success (i1).
237 SmallString<32> AtomicCASName_V1("cds_atomic_compare_exchange" + BitSizeStr + "_v1");
238 SmallString<32> AtomicCASName_V2("cds_atomic_compare_exchange" + BitSizeStr + "_v2");
239 CDSAtomicCAS_V1[i] = M.getOrInsertFunction(AtomicCASName_V1,
240 Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, Int8PtrTy);
241 CDSAtomicCAS_V2[i] = M.getOrInsertFunction(AtomicCASName_V2,
242 Int1Ty, PtrTy, PtrTy, Ty, OrdTy, OrdTy, Int8PtrTy);
245 CDSAtomicThreadFence = M.getOrInsertFunction("cds_atomic_thread_fence",
246 VoidTy, OrdTy, Int8PtrTy);
// Decide whether an access to Addr is worth instrumenting at all.
// Skips PGO counter sections, gcov bookkeeping globals, and non-default
// address spaces (same filtering as ThreadSanitizer).
249 static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
250 // Peel off GEPs and BitCasts.
251 Addr = Addr->stripInBoundsOffsets();
253 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
254 if (GV->hasSection()) {
255 StringRef SectionName = GV->getSection();
256 // Check if the global is in the PGO counters section.
257 auto OF = Triple(M->getTargetTriple()).getObjectFormat();
258 if (SectionName.endswith(
259 getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
263 // Check if the global is private gcov data.
264 if (GV->getName().startswith("__llvm_gcov") ||
265 GV->getName().startswith("__llvm_gcda"))
269 // Do not instrument accesses from different address spaces; we cannot deal
272 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
273 if (PtrTy->getPointerAddressSpace() != 0)
// True when Addr is known to reference constant data (a constant global or
// a vtable pointer load), in which case reads through it cannot race and
// need no instrumentation. Also bumps the corresponding statistics.
280 bool CDSPass::addrPointsToConstantData(Value *Addr) {
281 // If this is a GEP, just analyze its pointer operand.
282 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
283 Addr = GEP->getPointerOperand();
285 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
286 if (GV->isConstant()) {
287 // Reads from constant globals can not race with any writes.
288 NumOmittedReadsFromConstantGlobals++;
291 } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
292 if (isVtableAccess(L)) {
293 // Reads from a vtable pointer can not race with any writes.
294 NumOmittedReadsFromVtable++;
// Per-function driver: renames main -> user_main (so the CDS run-time can
// supply its own main), collects atomic / volatile / plain accesses,
// instruments each category, and finally inserts cds_func_entry /
// cds_func_exit hooks into functions that were instrumented.
301 bool CDSPass::runOnFunction(Function &F) {
302 if (F.getName() == "main") {
303 F.setName("user_main");
304 errs() << "main replaced by user_main\n";
308 initializeCallbacks( *F.getParent() );
// Name patterns of C11/C++ atomic library functions to intercept.
312 "atomic_init", "atomic_load", "atomic_store",
313 "atomic_fetch_", "atomic_exchange", "atomic_compare_exchange_"
316 PartialAtomicFuncNames =
318 "load", "store", "fetch", "exchange", "compare_exchange_"
321 SmallVector<Instruction*, 8> AllLoadsAndStores;
322 SmallVector<Instruction*, 8> LocalLoadsAndStores;
323 SmallVector<Instruction*, 8> VolatileLoadsAndStores;
324 SmallVector<Instruction*, 8> AtomicAccesses;
326 std::vector<Instruction *> worklist;
// NOTE(review): HasAtomic starts false; the statement setting it to true is
// not visible in this excerpt — confirm it is set when atomics are found,
// otherwise the entry/exit hooks below are never inserted.
329 bool HasAtomic = false;
330 const DataLayout &DL = F.getParent()->getDataLayout();
332 // errs() << "--- " << F.getName() << "---\n";
// Classify every instruction: atomics first, then loads/stores split into
// volatile and plain, then (unhandled) calls/invokes.
336 if ( (&I)->isAtomic() || isAtomicCall(&I) ) {
337 AtomicAccesses.push_back(&I);
339 } else if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
340 LoadInst *LI = dyn_cast<LoadInst>(&I);
341 StoreInst *SI = dyn_cast<StoreInst>(&I);
342 bool isVolatile = ( LI ? LI->isVolatile() : SI->isVolatile() );
345 VolatileLoadsAndStores.push_back(&I);
347 LocalLoadsAndStores.push_back(&I);
348 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
349 // not implemented yet
// Filter the per-block candidates down to the ones worth instrumenting.
353 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
356 for (auto Inst : AllLoadsAndStores) {
357 Res |= instrumentLoadOrStore(Inst, DL);
360 for (auto Inst : VolatileLoadsAndStores) {
361 Res |= instrumentVolatile(Inst, DL);
364 for (auto Inst : AtomicAccesses) {
365 Res |= instrumentAtomic(Inst, DL);
368 // only instrument functions that contain atomics
369 if (Res && HasAtomic) {
370 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
372 Value *ReturnAddress = IRB.CreateCall(
373 Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),
377 Value * FuncName = IRB.CreateGlobalStringPtr(F.getName());
378 IRB.CreateCall(CDSFuncEntry, FuncName);
// EscapeEnumerator yields an IRBuilder at every function exit (returns and
// unwinds), so cds_func_exit is called on every path out of F.
380 EscapeEnumerator EE(F, "cds_cleanup", true);
381 while (IRBuilder<> *AtExit = EE.Next()) {
382 AtExit->CreateCall(CDSFuncExit, FuncName);
// From the candidate loads/stores of one basic block (Local, in program
// order), append to All only those that need instrumentation. Walking
// backwards lets a read be dropped when a later write to the same address
// exists in the same block; reads of constant data and accesses to
// non-captured allocas are dropped as well (mirrors ThreadSanitizer).
394 void CDSPass::chooseInstructionsToInstrument(
395 SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
396 const DataLayout &DL) {
397 SmallPtrSet<Value*, 8> WriteTargets;
398 // Iterate from the end.
399 for (Instruction *I : reverse(Local)) {
400 if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
401 Value *Addr = Store->getPointerOperand();
402 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
// Remember written addresses so earlier reads of them can be skipped.
404 WriteTargets.insert(Addr);
406 LoadInst *Load = cast<LoadInst>(I);
407 Value *Addr = Load->getPointerOperand();
408 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
410 if (WriteTargets.count(Addr)) {
411 // We will write to this temp, so no reason to analyze the read.
412 NumOmittedReadsBeforeWrite++;
415 if (addrPointsToConstantData(Addr)) {
416 // Addr points to some constant data -- it can not race with any writes.
420 Value *Addr = isa<StoreInst>(*I)
421 ? cast<StoreInst>(I)->getPointerOperand()
422 : cast<LoadInst>(I)->getPointerOperand();
423 if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
424 !PointerMayBeCaptured(Addr, true, true)) {
425 // The variable is addressable but not captured, so it cannot be
426 // referenced from a different thread and participate in a data race
427 // (see llvm/Analysis/CaptureTracking.h for details).
428 NumOmittedNonCaptured++;
// Instrument a plain (non-atomic, non-volatile) load or store by inserting a
// call to cds_load{8,16,32,64} / cds_store{8,16,32,64} with the accessed
// address. Returns true when a call was inserted.
437 bool CDSPass::instrumentLoadOrStore(Instruction *I,
438 const DataLayout &DL) {
440 bool IsWrite = isa<StoreInst>(*I);
441 Value *Addr = IsWrite
442 ? cast<StoreInst>(I)->getPointerOperand()
443 : cast<LoadInst>(I)->getPointerOperand();
445 // swifterror memory addresses are mem2reg promoted by instruction selection.
446 // As such they cannot have regular uses like an instrumentation function and
447 // it makes no sense to track them as memory.
448 if (Addr->isSwiftError())
451 int Idx = getMemoryAccessFuncIndex(Addr, DL);
455 // not supported by CDS yet
456 /* if (IsWrite && isVtableAccess(I)) {
457 LLVM_DEBUG(dbgs() << " VPTR : " << *I << "\n");
458 Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
459 // StoredValue may be a vector type if we are storing several vptrs at once.
460 // In this case, just take the first element of the vector since this is
461 // enough to find vptr races.
462 if (isa<VectorType>(StoredValue->getType()))
463 StoredValue = IRB.CreateExtractElement(
464 StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
465 if (StoredValue->getType()->isIntegerTy())
466 StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
467 // Call TsanVptrUpdate.
468 IRB.CreateCall(TsanVptrUpdate,
469 {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
470 IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
471 NumInstrumentedVtableWrites++;
475 if (!IsWrite && isVtableAccess(I)) {
476 IRB.CreateCall(TsanVptrLoad,
477 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
478 NumInstrumentedVtableReads++;
483 Value *OnAccessFunc = nullptr;
484 OnAccessFunc = IsWrite ? CDSStore[Idx] : CDSLoad[Idx];
// NOTE(review): casting Addr to its own type (Addr->getType()) is a no-op;
// the intent appears to be a cast to the IntN pointer type matching Idx —
// confirm whether only i8*/i16*/i32*/i64* typed addresses reach this point.
486 Type *ArgType = IRB.CreatePointerCast(Addr, Addr->getType())->getType();
488 if ( ArgType != Int8PtrTy && ArgType != Int16PtrTy &&
489 ArgType != Int32PtrTy && ArgType != Int64PtrTy ) {
490 // if other types of load or stores are passed in
493 IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, Addr->getType()));
494 if (IsWrite) NumInstrumentedWrites++;
495 else NumInstrumentedReads++;
// Replace a volatile load/store with a call to the cds_volatile_load{N} /
// cds_volatile_store{N} run-time function, passing the address, (for stores)
// the stored value, and the source-position string.
499 bool CDSPass::instrumentVolatile(Instruction * I, const DataLayout &DL) {
501 Value *position = getPosition(I, IRB);
503 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
504 assert( LI->isVolatile() );
505 Value *Addr = LI->getPointerOperand();
506 int Idx=getMemoryAccessFuncIndex(Addr, DL);
// ReplaceInstWithInst substitutes the new call for the original
// instruction, transferring its uses.
510 Value *args[] = {Addr, position};
511 Instruction* funcInst=CallInst::Create(CDSVolatileLoad[Idx], args);
512 ReplaceInstWithInst(LI, funcInst);
513 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
514 assert( SI->isVolatile() );
515 Value *Addr = SI->getPointerOperand();
516 int Idx=getMemoryAccessFuncIndex(Addr, DL);
520 Value *val = SI->getValueOperand();
521 Value *args[] = {Addr, val, position};
522 Instruction* funcInst=CallInst::Create(CDSVolatileStore[Idx], args);
523 ReplaceInstWithInst(SI, funcInst);
// Replace an atomic IR instruction (load, store, RMW, cmpxchg, fence) with
// the corresponding cds_atomic_* run-time call; atomic library CallInsts are
// delegated to instrumentAtomicCall. The memory order is converted to its
// CABI integer via getAtomicOrderIndex.
531 bool CDSPass::instrumentAtomic(Instruction * I, const DataLayout &DL) {
534 if (auto *CI = dyn_cast<CallInst>(I)) {
535 return instrumentAtomicCall(CI, DL);
538 Value *position = getPosition(I, IRB);
540 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
541 Value *Addr = LI->getPointerOperand();
542 int Idx=getMemoryAccessFuncIndex(Addr, DL);
546 int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
547 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
548 Value *args[] = {Addr, order, position};
549 Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
550 ReplaceInstWithInst(LI, funcInst);
551 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
552 Value *Addr = SI->getPointerOperand();
553 int Idx=getMemoryAccessFuncIndex(Addr, DL);
557 int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
558 Value *val = SI->getValueOperand();
559 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
560 Value *args[] = {Addr, val, order, position};
561 Instruction* funcInst=CallInst::Create(CDSAtomicStore[Idx], args);
562 ReplaceInstWithInst(SI, funcInst);
563 } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
// RMW: dispatch on the operation to the matching cds_atomic_fetch_* entry.
564 Value *Addr = RMWI->getPointerOperand();
565 int Idx=getMemoryAccessFuncIndex(Addr, DL);
569 int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
570 Value *val = RMWI->getValOperand();
571 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
572 Value *args[] = {Addr, val, order, position};
573 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[RMWI->getOperation()][Idx], args);
574 ReplaceInstWithInst(RMWI, funcInst);
575 } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
576 IRBuilder<> IRB(CASI);
578 Value *Addr = CASI->getPointerOperand();
579 int Idx=getMemoryAccessFuncIndex(Addr, DL);
583 const unsigned ByteSize = 1U << Idx;
584 const unsigned BitSize = ByteSize * 8;
585 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
586 Type *PtrTy = Ty->getPointerTo();
// Operands are normalized to the IntN type expected by the run-time.
588 Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
589 Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
591 int atomic_order_index_succ = getAtomicOrderIndex(CASI->getSuccessOrdering());
592 int atomic_order_index_fail = getAtomicOrderIndex(CASI->getFailureOrdering());
593 Value *order_succ = ConstantInt::get(OrdTy, atomic_order_index_succ);
594 Value *order_fail = ConstantInt::get(OrdTy, atomic_order_index_fail);
596 Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
597 CmpOperand, NewOperand,
598 order_succ, order_fail, position};
// CAS v1 returns the old value; rebuild cmpxchg's {value, success} result
// pair so existing users of the instruction keep working.
600 CallInst *funcInst = IRB.CreateCall(CDSAtomicCAS_V1[Idx], Args);
601 Value *Success = IRB.CreateICmpEQ(funcInst, CmpOperand);
603 Value *OldVal = funcInst;
604 Type *OrigOldValTy = CASI->getNewValOperand()->getType();
605 if (Ty != OrigOldValTy) {
606 // The value is a pointer, so we need to cast the return value.
607 OldVal = IRB.CreateIntToPtr(funcInst, OrigOldValTy);
611 IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
612 Res = IRB.CreateInsertValue(Res, Success, 1);
614 I->replaceAllUsesWith(Res);
615 I->eraseFromParent();
616 } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
617 int atomic_order_index = getAtomicOrderIndex(FI->getOrdering());
618 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
619 Value *Args[] = {order, position};
621 CallInst *funcInst = CallInst::Create(CDSAtomicThreadFence, Args);
622 ReplaceInstWithInst(FI, funcInst);
623 // errs() << "Thread Fences replaced\n";
// Heuristically detect calls to atomic library functions by substring match
// against AtomicFuncNames, or against PartialAtomicFuncNames when the name
// also contains "atomic" (covers mangled C++ std::atomic member calls).
628 bool CDSPass::isAtomicCall(Instruction *I) {
629 if ( auto *CI = dyn_cast<CallInst>(I) ) {
630 Function *fun = CI->getCalledFunction();
634 StringRef funName = fun->getName();
636 // todo: come up with better rules for function name checking
637 for (StringRef name : AtomicFuncNames) {
638 if ( funName.contains(name) )
642 for (StringRef PartialName : PartialAtomicFuncNames) {
643 if (funName.contains(PartialName) &&
644 funName.contains("atomic") )
// Rewrite a call to a C11/C++ atomic library function (atomic_init,
// atomic_load[_explicit], atomic_store[_explicit], atomic_fetch_*,
// atomic_exchange, atomic_compare_exchange_*) into the matching cds_atomic_*
// run-time call. The object pointer is always argument 0; for the
// non-"_explicit" variants the order defaults to seq_cst.
652 bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
654 Function *fun = CI->getCalledFunction();
655 StringRef funName = fun->getName();
656 std::vector<Value *> parameters;
// Collect the original call arguments for reuse below.
658 User::op_iterator begin = CI->arg_begin();
659 User::op_iterator end = CI->arg_end();
660 for (User::op_iterator it = begin; it != end; ++it) {
662 parameters.push_back(param);
665 // obtain source line number of the CallInst
666 Value *position = getPosition(CI, IRB);
668 // the pointer to the address is always the first argument
669 Value *OrigPtr = parameters[0];
671 int Idx = getMemoryAccessFuncIndex(OrigPtr, DL);
// Derive the IntN value/pointer types for this access width.
675 const unsigned ByteSize = 1U << Idx;
676 const unsigned BitSize = ByteSize * 8;
677 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
678 Type *PtrTy = Ty->getPointerTo();
680 // atomic_init; args = {obj, order}
681 if (funName.contains("atomic_init")) {
682 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
683 Value *val = IRB.CreateBitOrPointerCast(parameters[1], Ty);
684 Value *args[] = {ptr, val, position};
686 Instruction* funcInst = CallInst::Create(CDSAtomicInit[Idx], args);
687 ReplaceInstWithInst(CI, funcInst);
692 // atomic_load; args = {obj, order}
693 if (funName.contains("atomic_load")) {
694 bool isExplicit = funName.contains("atomic_load_explicit");
696 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
// Explicit variant carries the order as parameters[1]; otherwise seq_cst.
699 order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
701 order = ConstantInt::get(OrdTy,
702 (int) AtomicOrderingCABI::seq_cst);
703 Value *args[] = {ptr, order, position};
705 Instruction* funcInst = CallInst::Create(CDSAtomicLoad[Idx], args);
706 ReplaceInstWithInst(CI, funcInst);
// Mangled C++ load (e.g. std::atomic<T*>::load): the result may be a
// pointer, so the integer return value is cast back before RAUW.
709 } else if (funName.contains("atomic") &&
710 funName.contains("load") ) {
711 // does this version of call always have an atomic order as an argument?
712 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
713 Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
714 Value *args[] = {ptr, order, position};
716 if (!CI->getType()->isPointerTy()) {
720 CallInst *funcInst = IRB.CreateCall(CDSAtomicLoad[Idx], args);
721 Value *RetVal = IRB.CreateIntToPtr(funcInst, CI->getType());
723 CI->replaceAllUsesWith(RetVal);
724 CI->eraseFromParent();
729 // atomic_store; args = {obj, val, order}
730 if (funName.contains("atomic_store")) {
731 bool isExplicit = funName.contains("atomic_store_explicit");
732 Value *OrigVal = parameters[1];
734 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
735 Value *val = IRB.CreatePointerCast(OrigVal, Ty);
738 order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
740 order = ConstantInt::get(OrdTy,
741 (int) AtomicOrderingCABI::seq_cst);
742 Value *args[] = {ptr, val, order, position};
744 Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
745 ReplaceInstWithInst(CI, funcInst);
// "EEEE5store" matches the Itanium-mangled std::atomic<...>::store name.
748 } else if (funName.contains("atomic") &&
749 funName.contains("EEEE5store") ) {
750 // does this version of call always have an atomic order as an argument?
751 Value *OrigVal = parameters[1];
753 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
754 Value *val = IRB.CreatePointerCast(OrigVal, Ty);
755 Value *order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
756 Value *args[] = {ptr, val, order, position};
758 Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
759 ReplaceInstWithInst(CI, funcInst);
764 // atomic_fetch_*; args = {obj, val, order}
765 if (funName.contains("atomic_fetch_") ||
766 funName.contains("atomic_exchange") ) {
767 bool isExplicit = funName.contains("_explicit");
768 Value *OrigVal = parameters[1];
// Select the RMW operation from the function name.
771 if ( funName.contains("_fetch_add") )
772 op = AtomicRMWInst::Add;
773 else if ( funName.contains("_fetch_sub") )
774 op = AtomicRMWInst::Sub;
775 else if ( funName.contains("_fetch_and") )
776 op = AtomicRMWInst::And;
777 else if ( funName.contains("_fetch_or") )
778 op = AtomicRMWInst::Or;
779 else if ( funName.contains("_fetch_xor") )
780 op = AtomicRMWInst::Xor;
781 else if ( funName.contains("atomic_exchange") )
782 op = AtomicRMWInst::Xchg;
784 errs() << "Unknown atomic read-modify-write operation\n";
788 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
789 Value *val = IRB.CreatePointerCast(OrigVal, Ty);
792 order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
794 order = ConstantInt::get(OrdTy,
795 (int) AtomicOrderingCABI::seq_cst);
796 Value *args[] = {ptr, val, order, position};
798 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[op][Idx], args);
799 ReplaceInstWithInst(CI, funcInst);
// NOTE(review): the message below says "exchange" in the "fetch" branch —
// it appears copy-pasted from the branch that follows; likely should read
// "atomic fetch captured". (Runtime string left unchanged here.)
802 } else if (funName.contains("fetch")) {
803 errs() << "atomic exchange captured. Not implemented yet. ";
804 errs() << "See source file :";
805 getPosition(CI, IRB, true);
806 } else if (funName.contains("exchange") &&
807 !funName.contains("compare_exchange") ) {
808 errs() << "atomic exchange captured. Not implemented yet. ";
809 errs() << "See source file :";
810 getPosition(CI, IRB, true);
813 /* atomic_compare_exchange_*;
814 args = {obj, expected, new value, order1, order2}
// CAS v2 takes the `expected` slot by pointer and returns i1 success,
// matching the C11 atomic_compare_exchange_* contract.
816 if ( funName.contains("atomic_compare_exchange_") ) {
817 bool isExplicit = funName.contains("_explicit");
819 Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
820 Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
821 Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);
823 Value *order_succ, *order_fail;
825 order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
826 order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
828 order_succ = ConstantInt::get(OrdTy,
829 (int) AtomicOrderingCABI::seq_cst);
830 order_fail = ConstantInt::get(OrdTy,
831 (int) AtomicOrderingCABI::seq_cst);
834 Value *args[] = {Addr, CmpOperand, NewOperand,
835 order_succ, order_fail, position};
837 Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
838 ReplaceInstWithInst(CI, funcInst);
// C++ std::atomic compare_exchange_strong/weak: orders always explicit.
841 } else if ( funName.contains("compare_exchange_strong") ||
842 funName.contains("compare_exchange_weak") ) {
843 Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
844 Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
845 Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);
847 Value *order_succ, *order_fail;
848 order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
849 order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
851 Value *args[] = {Addr, CmpOperand, NewOperand,
852 order_succ, order_fail, position};
853 Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
854 ReplaceInstWithInst(CI, funcInst);
// Map the pointee type's store size to the callback-array index:
// 8/16/32/64-bit accesses give Idx = log2(bytes) = 0..3. Unusual sizes bump
// NumAccessesWithBadSize and are rejected (128-bit passes the size check but
// is then rejected by the Idx bound check, since kNumberOfAccessSizes == 4).
862 int CDSPass::getMemoryAccessFuncIndex(Value *Addr,
863 const DataLayout &DL) {
864 Type *OrigPtrTy = Addr->getType();
865 Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
866 assert(OrigTy->isSized());
867 uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
868 if (TypeSize != 8 && TypeSize != 16 &&
869 TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
870 NumAccessesWithBadSize++;
871 // Ignore all unusual sizes.
874 size_t Idx = countTrailingZeros(TypeSize / 8);
875 //assert(Idx < kNumberOfAccessSizes);
876 if (Idx >= kNumberOfAccessSizes) {
// Legacy pass-manager registration: hook CDSPass into the standard pipeline
// at both extension points so it runs regardless of optimization level.
883 char CDSPass::ID = 0;
885 // Automatically enable the pass.
886 static void registerCDSPass(const PassManagerBuilder &,
887 legacy::PassManagerBase &PM) {
888 PM.add(new CDSPass());
891 /* Enable the pass when opt level is greater than 0 */
892 static RegisterStandardPasses
893 RegisterMyPass1(PassManagerBuilder::EP_OptimizerLast,
896 /* Enable the pass when opt level is 0 */
897 static RegisterStandardPasses
898 RegisterMyPass2(PassManagerBuilder::EP_EnabledOnOptLevel0,