//===-- CDSPass.cpp - CDS race-detector instrumentation pass --------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 // This file is distributed under the University of Illinois Open Source
7 // License. See LICENSE.TXT for details.
9 //===----------------------------------------------------------------------===//
11 // This file is a modified version of ThreadSanitizer.cpp, a part of a race detector.
13 // The tool is under development, for the details about previous versions see
14 // http://code.google.com/p/data-race-test
16 // The instrumentation phase is quite simple:
17 // - Insert calls to run-time library before every memory access.
18 // - Optimizations may apply to avoid instrumenting some of the accesses.
19 // - Insert calls at function entry/exit.
20 // The rest is handled by the run-time library.
21 //===----------------------------------------------------------------------===//
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/SmallString.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/IR/BasicBlock.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/IRBuilder.h"
31 #include "llvm/IR/Instructions.h"
32 #include "llvm/IR/LLVMContext.h"
33 #include "llvm/IR/LegacyPassManager.h"
34 #include "llvm/IR/Module.h"
35 #include "llvm/IR/PassManager.h"
36 #include "llvm/Pass.h"
37 #include "llvm/ProfileData/InstrProf.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/Support/AtomicOrdering.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Transforms/Scalar.h"
42 #include "llvm/Transforms/Utils/Local.h"
43 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
44 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
49 #define DEBUG_TYPE "CDS"
50 #include <llvm/IR/DebugLoc.h>
52 Value *getPosition( Instruction * I, IRBuilder <> IRB, bool print = false)
54 const DebugLoc & debug_location = I->getDebugLoc ();
55 std::string position_string;
57 llvm::raw_string_ostream position_stream (position_string);
58 debug_location . print (position_stream);
62 errs() << position_string;
65 return IRB.CreateGlobalStringPtr (position_string);
// Pass statistics (printed with -stats). The counters mirror the ones kept
// by ThreadSanitizer.cpp, from which this pass is derived.
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
// STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
// STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");

// Callback tables below are indexed by access size: 1, 2, 4 and 8 bytes
// (index i corresponds to a (1 << i)-byte access).
static const size_t kNumberOfAccessSizes = 4;
92 int getAtomicOrderIndex(AtomicOrdering order){
94 case AtomicOrdering::Monotonic:
95 return (int)AtomicOrderingCABI::relaxed;
96 // case AtomicOrdering::Consume: // not specified yet
97 // return AtomicOrderingCABI::consume;
98 case AtomicOrdering::Acquire:
99 return (int)AtomicOrderingCABI::acquire;
100 case AtomicOrdering::Release:
101 return (int)AtomicOrderingCABI::release;
102 case AtomicOrdering::AcquireRelease:
103 return (int)AtomicOrderingCABI::acq_rel;
104 case AtomicOrdering::SequentiallyConsistent:
105 return (int)AtomicOrderingCABI::seq_cst;
107 // unordered or Not Atomic
113 struct CDSPass : public FunctionPass {
115 CDSPass() : FunctionPass(ID) {}
116 bool runOnFunction(Function &F) override;
119 void initializeCallbacks(Module &M);
120 bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
121 bool isAtomicCall(Instruction *I);
122 bool instrumentAtomic(Instruction *I, const DataLayout &DL);
123 bool instrumentAtomicCall(CallInst *CI, const DataLayout &DL);
124 void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
125 SmallVectorImpl<Instruction *> &All,
126 const DataLayout &DL);
127 bool addrPointsToConstantData(Value *Addr);
128 int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
130 // Callbacks to run-time library are computed in doInitialization.
131 Constant * CDSFuncEntry;
132 Constant * CDSFuncExit;
134 Constant * CDSLoad[kNumberOfAccessSizes];
135 Constant * CDSStore[kNumberOfAccessSizes];
136 Constant * CDSAtomicInit[kNumberOfAccessSizes];
137 Constant * CDSAtomicLoad[kNumberOfAccessSizes];
138 Constant * CDSAtomicStore[kNumberOfAccessSizes];
139 Constant * CDSAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
140 Constant * CDSAtomicCAS_V1[kNumberOfAccessSizes];
141 Constant * CDSAtomicCAS_V2[kNumberOfAccessSizes];
142 Constant * CDSAtomicThreadFence;
144 std::vector<StringRef> AtomicFuncNames;
145 std::vector<StringRef> PartialAtomicFuncNames;
149 static bool isVtableAccess(Instruction *I) {
150 if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
151 return Tag->isTBAAVtableAccess();
// Looks up (creating declarations if necessary) every run-time library entry
// point this pass calls, and caches the LLVM types it needs. Invoked from
// runOnFunction() on the enclosing module.
// NOTE(review): this excerpt is missing several fragments — the remaining
// arguments of the cds_func_entry getOrInsertFunction call, and the closing
// braces of the RMW loop, the size loop and the function — confirm against
// the full file.
void CDSPass::initializeCallbacks(Module &M) {
	LLVMContext &Ctx = M.getContext();

	Type * Int1Ty = Type::getInt1Ty(Ctx);
	OrdTy = Type::getInt32Ty(Ctx);

	Int8PtrTy  = Type::getInt8PtrTy(Ctx);
	Int16PtrTy = Type::getInt16PtrTy(Ctx);
	Int32PtrTy = Type::getInt32PtrTy(Ctx);
	Int64PtrTy = Type::getInt64PtrTy(Ctx);

	VoidTy = Type::getVoidTy(Ctx);

	CDSFuncEntry = M.getOrInsertFunction("cds_func_entry",
	// Get the function to call from our runtime library.
	for (unsigned i = 0; i < kNumberOfAccessSizes; i++) {
		const unsigned ByteSize = 1U << i;
		const unsigned BitSize = ByteSize * 8;

		std::string ByteSizeStr = utostr(ByteSize);
		std::string BitSizeStr = utostr(BitSize);

		Type *Ty = Type::getIntNTy(Ctx, BitSize);
		Type *PtrTy = Ty->getPointerTo();

		// Run-time entry points are size-suffixed, e.g.:
		// uint8_t cds_atomic_load8 (void * obj, int atomic_index)
		// void cds_atomic_store8 (void * obj, int atomic_index, uint8_t val)
		SmallString<32> LoadName("cds_load" + BitSizeStr);
		SmallString<32> StoreName("cds_store" + BitSizeStr);
		SmallString<32> AtomicInitName("cds_atomic_init" + BitSizeStr);
		SmallString<32> AtomicLoadName("cds_atomic_load" + BitSizeStr);
		SmallString<32> AtomicStoreName("cds_atomic_store" + BitSizeStr);

		CDSLoad[i]  = M.getOrInsertFunction(LoadName, VoidTy, PtrTy);
		CDSStore[i] = M.getOrInsertFunction(StoreName, VoidTy, PtrTy);
		CDSAtomicInit[i] = M.getOrInsertFunction(AtomicInitName,
								VoidTy, PtrTy, Ty, Int8PtrTy);
		CDSAtomicLoad[i]  = M.getOrInsertFunction(AtomicLoadName,
								Ty, PtrTy, OrdTy, Int8PtrTy);
		CDSAtomicStore[i] = M.getOrInsertFunction(AtomicStoreName,
								VoidTy, PtrTy, Ty, OrdTy, Int8PtrTy);

		// One RMW callback per (operation, size) pair; unsupported ops keep
		// a nullptr slot.
		for (int op = AtomicRMWInst::FIRST_BINOP;
			op <= AtomicRMWInst::LAST_BINOP; ++op) {
			CDSAtomicRMW[op][i] = nullptr;
			std::string NamePart;

			if (op == AtomicRMWInst::Xchg)
				NamePart = "_exchange";
			else if (op == AtomicRMWInst::Add)
				NamePart = "_fetch_add";
			else if (op == AtomicRMWInst::Sub)
				NamePart = "_fetch_sub";
			else if (op == AtomicRMWInst::And)
				NamePart = "_fetch_and";
			else if (op == AtomicRMWInst::Or)
				NamePart = "_fetch_or";
			else if (op == AtomicRMWInst::Xor)
				NamePart = "_fetch_xor";

			SmallString<32> AtomicRMWName("cds_atomic" + NamePart + BitSizeStr);
			CDSAtomicRMW[op][i] = M.getOrInsertFunction(AtomicRMWName,
								Ty, PtrTy, Ty, OrdTy, Int8PtrTy);

		// only supports the strong version
		SmallString<32> AtomicCASName_V1("cds_atomic_compare_exchange" + BitSizeStr + "_v1");
		SmallString<32> AtomicCASName_V2("cds_atomic_compare_exchange" + BitSizeStr + "_v2");
		CDSAtomicCAS_V1[i] = M.getOrInsertFunction(AtomicCASName_V1,
								Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, Int8PtrTy);
		CDSAtomicCAS_V2[i] = M.getOrInsertFunction(AtomicCASName_V2,
								Int1Ty, PtrTy, PtrTy, Ty, OrdTy, OrdTy, Int8PtrTy);

	CDSAtomicThreadFence = M.getOrInsertFunction("cds_atomic_thread_fence",
							VoidTy, OrdTy, Int8PtrTy);
237 static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
238 // Peel off GEPs and BitCasts.
239 Addr = Addr->stripInBoundsOffsets();
241 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
242 if (GV->hasSection()) {
243 StringRef SectionName = GV->getSection();
244 // Check if the global is in the PGO counters section.
245 auto OF = Triple(M->getTargetTriple()).getObjectFormat();
246 if (SectionName.endswith(
247 getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
251 // Check if the global is private gcov data.
252 if (GV->getName().startswith("__llvm_gcov") ||
253 GV->getName().startswith("__llvm_gcda"))
257 // Do not instrument acesses from different address spaces; we cannot deal
260 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
261 if (PtrTy->getPointerAddressSpace() != 0)
268 bool CDSPass::addrPointsToConstantData(Value *Addr) {
269 // If this is a GEP, just analyze its pointer operand.
270 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
271 Addr = GEP->getPointerOperand();
273 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
274 if (GV->isConstant()) {
275 // Reads from constant globals can not race with any writes.
276 NumOmittedReadsFromConstantGlobals++;
279 } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
280 if (isVtableAccess(L)) {
281 // Reads from a vtable pointer can not race with any writes.
282 NumOmittedReadsFromVtable++;
// Per-function driver: renames main() to user_main() (the CDS run time
// supplies its own main), collects atomic operations and plain loads/stores,
// then rewrites the atomics into run-time library calls.
// NOTE(review): this excerpt is missing several fragments — the
// basic-block/instruction iteration header feeding the classification
// below, the brace-initializer framing of AtomicFuncNames /
// PartialAtomicFuncNames, the declaration of Res, where HasAtomic is set,
// and the closing braces. `TsanFuncExit` and `ClHandleCxxExceptions` are
// not declared anywhere visible and look like leftovers from
// ThreadSanitizer.cpp (the CDS equivalent would presumably be CDSFuncExit)
// — confirm against the full file.
bool CDSPass::runOnFunction(Function &F) {
	// The CDS run time owns the real main(); user code runs as user_main().
	if (F.getName() == "main") {
		F.setName("user_main");
		errs() << "main replaced by user_main\n";

	initializeCallbacks( *F.getParent() );

		"atomic_init", "atomic_load", "atomic_store",
		"atomic_fetch_", "atomic_exchange", "atomic_compare_exchange_"

	PartialAtomicFuncNames =
		"load", "store", "fetch", "exchange", "compare_exchange_"

	SmallVector<Instruction*, 8> AllLoadsAndStores;
	SmallVector<Instruction*, 8> LocalLoadsAndStores;
	SmallVector<Instruction*, 8> AtomicAccesses;

	std::vector<Instruction *> worklist;

	bool HasAtomic = false;
	const DataLayout &DL = F.getParent()->getDataLayout();

	// errs() << "--- " << F.getName() << "---\n";

		// Classify each instruction: atomics (including recognized atomic
		// library calls) are instrumented; plain loads/stores are filtered
		// through chooseInstructionsToInstrument per basic block.
		if ( (&I)->isAtomic() || isAtomicCall(&I) ) {
			AtomicAccesses.push_back(&I);
		} else if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
			LocalLoadsAndStores.push_back(&I);
		} else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
			// not implemented yet
		chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);

	// Plain load/store instrumentation is currently disabled.
	for (auto Inst : AllLoadsAndStores) {
		// Res |= instrumentLoadOrStore(Inst, DL);
		// errs() << "load and store are replaced\n";

	for (auto Inst : AtomicAccesses) {
		Res |= instrumentAtomic(Inst, DL);

	// only instrument functions that contain atomics
	if (Res && HasAtomic) {
		IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
		Value *ReturnAddress = IRB.CreateCall(
			Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress),

		Value * FuncName = IRB.CreateGlobalStringPtr(F.getName());

		//errs() << "function name: " << F.getName() << "\n";
		//IRB.CreateCall(CDSFuncEntry, FuncName);

		// Instrument every function exit path (returns and EH unwinds).
		EscapeEnumerator EE(F, "tsan_cleanup", ClHandleCxxExceptions);
		while (IRBuilder<> *AtExit = EE.Next()) {
			AtExit->CreateCall(TsanFuncExit, {});
371 void CDSPass::chooseInstructionsToInstrument(
372 SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
373 const DataLayout &DL) {
374 SmallPtrSet<Value*, 8> WriteTargets;
375 // Iterate from the end.
376 for (Instruction *I : reverse(Local)) {
377 if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
378 Value *Addr = Store->getPointerOperand();
379 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
381 WriteTargets.insert(Addr);
383 LoadInst *Load = cast<LoadInst>(I);
384 Value *Addr = Load->getPointerOperand();
385 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
387 if (WriteTargets.count(Addr)) {
388 // We will write to this temp, so no reason to analyze the read.
389 NumOmittedReadsBeforeWrite++;
392 if (addrPointsToConstantData(Addr)) {
393 // Addr points to some constant data -- it can not race with any writes.
397 Value *Addr = isa<StoreInst>(*I)
398 ? cast<StoreInst>(I)->getPointerOperand()
399 : cast<LoadInst>(I)->getPointerOperand();
400 if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
401 !PointerMayBeCaptured(Addr, true, true)) {
402 // The variable is addressable but not captured, so it cannot be
403 // referenced from a different thread and participate in a data race
404 // (see llvm/Analysis/CaptureTracking.h for details).
405 NumOmittedNonCaptured++;
414 bool CDSPass::instrumentLoadOrStore(Instruction *I,
415 const DataLayout &DL) {
417 bool IsWrite = isa<StoreInst>(*I);
418 Value *Addr = IsWrite
419 ? cast<StoreInst>(I)->getPointerOperand()
420 : cast<LoadInst>(I)->getPointerOperand();
422 // swifterror memory addresses are mem2reg promoted by instruction selection.
423 // As such they cannot have regular uses like an instrumentation function and
424 // it makes no sense to track them as memory.
425 if (Addr->isSwiftError())
428 int Idx = getMemoryAccessFuncIndex(Addr, DL);
430 // not supported by CDS yet
431 /* if (IsWrite && isVtableAccess(I)) {
432 LLVM_DEBUG(dbgs() << " VPTR : " << *I << "\n");
433 Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
434 // StoredValue may be a vector type if we are storing several vptrs at once.
435 // In this case, just take the first element of the vector since this is
436 // enough to find vptr races.
437 if (isa<VectorType>(StoredValue->getType()))
438 StoredValue = IRB.CreateExtractElement(
439 StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
440 if (StoredValue->getType()->isIntegerTy())
441 StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
442 // Call TsanVptrUpdate.
443 IRB.CreateCall(TsanVptrUpdate,
444 {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
445 IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
446 NumInstrumentedVtableWrites++;
450 if (!IsWrite && isVtableAccess(I)) {
451 IRB.CreateCall(TsanVptrLoad,
452 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
453 NumInstrumentedVtableReads++;
458 Value *OnAccessFunc = nullptr;
459 OnAccessFunc = IsWrite ? CDSStore[Idx] : CDSLoad[Idx];
461 Type *ArgType = IRB.CreatePointerCast(Addr, Addr->getType())->getType();
463 if ( ArgType != Int8PtrTy && ArgType != Int16PtrTy &&
464 ArgType != Int32PtrTy && ArgType != Int64PtrTy ) {
465 //errs() << "A load or store of type ";
466 //errs() << *ArgType;
467 //errs() << " is passed in\n";
468 return false; // if other types of load or stores are passed in
470 IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, Addr->getType()));
471 if (IsWrite) NumInstrumentedWrites++;
472 else NumInstrumentedReads++;
476 bool CDSPass::instrumentAtomic(Instruction * I, const DataLayout &DL) {
479 if (auto *CI = dyn_cast<CallInst>(I)) {
480 return instrumentAtomicCall(CI, DL);
483 Value *position = getPosition(I, IRB);
485 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
486 Value *Addr = LI->getPointerOperand();
487 int Idx=getMemoryAccessFuncIndex(Addr, DL);
488 int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
489 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
490 Value *args[] = {Addr, order, position};
491 Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
492 ReplaceInstWithInst(LI, funcInst);
493 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
494 Value *Addr = SI->getPointerOperand();
495 int Idx=getMemoryAccessFuncIndex(Addr, DL);
496 int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
497 Value *val = SI->getValueOperand();
498 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
499 Value *args[] = {Addr, val, order, position};
500 Instruction* funcInst=CallInst::Create(CDSAtomicStore[Idx], args);
501 ReplaceInstWithInst(SI, funcInst);
502 } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
503 Value *Addr = RMWI->getPointerOperand();
504 int Idx=getMemoryAccessFuncIndex(Addr, DL);
505 int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
506 Value *val = RMWI->getValOperand();
507 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
508 Value *args[] = {Addr, val, order, position};
509 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[RMWI->getOperation()][Idx], args);
510 ReplaceInstWithInst(RMWI, funcInst);
511 } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
512 IRBuilder<> IRB(CASI);
514 Value *Addr = CASI->getPointerOperand();
515 int Idx=getMemoryAccessFuncIndex(Addr, DL);
517 const unsigned ByteSize = 1U << Idx;
518 const unsigned BitSize = ByteSize * 8;
519 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
520 Type *PtrTy = Ty->getPointerTo();
522 Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
523 Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
525 int atomic_order_index_succ = getAtomicOrderIndex(CASI->getSuccessOrdering());
526 int atomic_order_index_fail = getAtomicOrderIndex(CASI->getFailureOrdering());
527 Value *order_succ = ConstantInt::get(OrdTy, atomic_order_index_succ);
528 Value *order_fail = ConstantInt::get(OrdTy, atomic_order_index_fail);
530 Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
531 CmpOperand, NewOperand,
532 order_succ, order_fail, position};
534 CallInst *funcInst = IRB.CreateCall(CDSAtomicCAS_V1[Idx], Args);
535 Value *Success = IRB.CreateICmpEQ(funcInst, CmpOperand);
537 Value *OldVal = funcInst;
538 Type *OrigOldValTy = CASI->getNewValOperand()->getType();
539 if (Ty != OrigOldValTy) {
540 // The value is a pointer, so we need to cast the return value.
541 OldVal = IRB.CreateIntToPtr(funcInst, OrigOldValTy);
545 IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
546 Res = IRB.CreateInsertValue(Res, Success, 1);
548 I->replaceAllUsesWith(Res);
549 I->eraseFromParent();
550 } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
551 int atomic_order_index = getAtomicOrderIndex(FI->getOrdering());
552 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
553 Value *Args[] = {order, position};
555 CallInst *funcInst = CallInst::Create(CDSAtomicThreadFence, Args);
556 ReplaceInstWithInst(FI, funcInst);
557 // errs() << "Thread Fences replaced\n";
562 bool CDSPass::isAtomicCall(Instruction *I) {
563 if ( auto *CI = dyn_cast<CallInst>(I) ) {
564 Function *fun = CI->getCalledFunction();
568 StringRef funName = fun->getName();
570 // todo: come up with better rules for function name checking
571 for (StringRef name : AtomicFuncNames) {
572 if ( funName.contains(name) )
576 for (StringRef PartialName : PartialAtomicFuncNames) {
577 if (funName.contains(PartialName) &&
578 funName.contains("atomic") )
// Rewrites a call to a C/C++ atomic library function (atomic_init,
// atomic_load/store, atomic_fetch_*, atomic_exchange,
// atomic_compare_exchange_*, and mangled std::atomic members) into the
// corresponding CDS run-time call. Dispatch is purely name-based — see
// isAtomicCall().
// NOTE(review): this excerpt is missing a number of fragments — the
// declaration of `param` in the argument-collection loop, the IRBuilder
// declaration behind `IRB`, the `Value *order` / `int op` declarations,
// the `if (isExplicit)` framing around the order-selection pairs, the
// `return true;` statements ending each branch, and various closing
// braces. Confirm every hedged point against the full file.
bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
	Function *fun = CI->getCalledFunction();
	StringRef funName = fun->getName();

	// Collect the call's actual arguments.
	std::vector<Value *> parameters;

	User::op_iterator begin = CI->arg_begin();
	User::op_iterator end = CI->arg_end();
	for (User::op_iterator it = begin; it != end; ++it) {
		parameters.push_back(param);

	// obtain source line number of the CallInst
	Value *position = getPosition(CI, IRB);

	// the pointer to the address is always the first argument
	Value *OrigPtr = parameters[0];

	int Idx = getMemoryAccessFuncIndex(OrigPtr, DL);

	// Integer type matching the access size, used to cast operands for the
	// size-suffixed run-time entry points.
	const unsigned ByteSize = 1U << Idx;
	const unsigned BitSize = ByteSize * 8;
	Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
	Type *PtrTy = Ty->getPointerTo();

	// atomic_init; args = {obj, order}
	if (funName.contains("atomic_init")) {
		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
		Value *val = IRB.CreateBitOrPointerCast(parameters[1], Ty);
		Value *args[] = {ptr, val, position};

		Instruction* funcInst = CallInst::Create(CDSAtomicInit[Idx], args);
		ReplaceInstWithInst(CI, funcInst);

	// atomic_load; args = {obj, order}
	if (funName.contains("atomic_load")) {
		bool isExplicit = funName.contains("atomic_load_explicit");

		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
		// Explicit variants carry the order as an argument; otherwise it
		// defaults to seq_cst.
			order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
			order = ConstantInt::get(OrdTy,
									 (int) AtomicOrderingCABI::seq_cst);
		Value *args[] = {ptr, order, position};

		Instruction* funcInst = CallInst::Create(CDSAtomicLoad[Idx], args);
		ReplaceInstWithInst(CI, funcInst);

	} else if (funName.contains("atomic") &&
			funName.contains("load") ) {
		// does this version of call always have an atomic order as an argument?
		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
		Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
		Value *args[] = {ptr, order, position};

		if (!CI->getType()->isPointerTy()) {

		// Pointer-typed results are rebuilt from the integer the run-time
		// call returns.
		CallInst *funcInst = IRB.CreateCall(CDSAtomicLoad[Idx], args);
		Value *RetVal = IRB.CreateIntToPtr(funcInst, CI->getType());

		CI->replaceAllUsesWith(RetVal);
		CI->eraseFromParent();

	// atomic_store; args = {obj, val, order}
	if (funName.contains("atomic_store")) {
		bool isExplicit = funName.contains("atomic_store_explicit");
		Value *OrigVal = parameters[1];

		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
		Value *val = IRB.CreatePointerCast(OrigVal, Ty);
			order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
			order = ConstantInt::get(OrdTy,
									 (int) AtomicOrderingCABI::seq_cst);
		Value *args[] = {ptr, val, order, position};

		Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
		ReplaceInstWithInst(CI, funcInst);

	} else if (funName.contains("atomic") &&
			funName.contains("EEEE5store") ) {
		// Mangled std::atomic<T>::store member call.
		// does this version of call always have an atomic order as an argument?
		Value *OrigVal = parameters[1];

		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
		Value *val = IRB.CreatePointerCast(OrigVal, Ty);
		Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
		Value *args[] = {ptr, val, order, position};

		Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
		ReplaceInstWithInst(CI, funcInst);

	// atomic_fetch_*; args = {obj, val, order}
	if (funName.contains("atomic_fetch_") ||
		funName.contains("atomic_exchange") ) {
		bool isExplicit = funName.contains("_explicit");
		Value *OrigVal = parameters[1];

		// Map the library function name onto the RMW operation kind.
		if ( funName.contains("_fetch_add") )
			op = AtomicRMWInst::Add;
		else if ( funName.contains("_fetch_sub") )
			op = AtomicRMWInst::Sub;
		else if ( funName.contains("_fetch_and") )
			op = AtomicRMWInst::And;
		else if ( funName.contains("_fetch_or") )
			op = AtomicRMWInst::Or;
		else if ( funName.contains("_fetch_xor") )
			op = AtomicRMWInst::Xor;
		else if ( funName.contains("atomic_exchange") )
			op = AtomicRMWInst::Xchg;

			errs() << "Unknown atomic read-modify-write operation\n";

		Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
		Value *val = IRB.CreatePointerCast(OrigVal, Ty);
			order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
			order = ConstantInt::get(OrdTy,
									 (int) AtomicOrderingCABI::seq_cst);
		Value *args[] = {ptr, val, order, position};

		Instruction* funcInst = CallInst::Create(CDSAtomicRMW[op][Idx], args);
		ReplaceInstWithInst(CI, funcInst);

	} else if (funName.contains("fetch")) {
		errs() << "atomic exchange captured. Not implemented yet. ";
		errs() << "See source file :";
		getPosition(CI, IRB, true);
	} else if (funName.contains("exchange") &&
			!funName.contains("compare_exchange") ) {
		errs() << "atomic exchange captured. Not implemented yet. ";
		errs() << "See source file :";
		getPosition(CI, IRB, true);

	/* atomic_compare_exchange_*;
	   args = {obj, expected, new value, order1, order2} */
	if ( funName.contains("atomic_compare_exchange_") ) {
		bool isExplicit = funName.contains("_explicit");

		Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
		Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
		Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);

		Value *order_succ, *order_fail;
			order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
			order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
			order_succ = ConstantInt::get(OrdTy,
										  (int) AtomicOrderingCABI::seq_cst);
			order_fail = ConstantInt::get(OrdTy,
										  (int) AtomicOrderingCABI::seq_cst);

		Value *args[] = {Addr, CmpOperand, NewOperand,
						 order_succ, order_fail, position};

		Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
		ReplaceInstWithInst(CI, funcInst);

	} else if ( funName.contains("compare_exchange_strong") ||
			funName.contains("compare_exchange_weak") ) {
		// Mangled std::atomic member CAS: orders are always explicit here.
		Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
		Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
		Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);

		Value *order_succ, *order_fail;
		order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
		order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);

		Value *args[] = {Addr, CmpOperand, NewOperand,
						 order_succ, order_fail, position};
		Instruction* funcInst = CallInst::Create(CDSAtomicCAS_V2[Idx], args);
		ReplaceInstWithInst(CI, funcInst);
796 int CDSPass::getMemoryAccessFuncIndex(Value *Addr,
797 const DataLayout &DL) {
798 Type *OrigPtrTy = Addr->getType();
799 Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
800 assert(OrigTy->isSized());
801 uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
802 if (TypeSize != 8 && TypeSize != 16 &&
803 TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
804 NumAccessesWithBadSize++;
805 // Ignore all unusual sizes.
808 size_t Idx = countTrailingZeros(TypeSize / 8);
809 assert(Idx < kNumberOfAccessSizes);
814 char CDSPass::ID = 0;
816 // Automatically enable the pass.
817 static void registerCDSPass(const PassManagerBuilder &,
818 legacy::PassManagerBase &PM) {
819 PM.add(new CDSPass());
821 static RegisterStandardPasses
822 RegisterMyPass(PassManagerBuilder::EP_OptimizerLast,