1 //===-- CDSPass.cpp - xxx -------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 // This file is distributed under the University of Illinois Open Source
7 // License. See LICENSE.TXT for details.
9 //===----------------------------------------------------------------------===//
11 // This file is a modified version of ThreadSanitizer.cpp, a part of a race detector.
13 // The tool is under development, for the details about previous versions see
14 // http://code.google.com/p/data-race-test
16 // The instrumentation phase is quite simple:
17 // - Insert calls to run-time library before every memory access.
18 // - Optimizations may apply to avoid instrumenting some of the accesses.
19 // - Insert calls at function entry/exit.
20 // The rest is handled by the run-time library.
21 //===----------------------------------------------------------------------===//
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/SmallString.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/IR/BasicBlock.h"
29 #include "llvm/IR/Function.h"
30 #include "llvm/IR/IRBuilder.h"
31 #include "llvm/IR/Instructions.h"
32 #include "llvm/IR/LLVMContext.h"
33 #include "llvm/IR/LegacyPassManager.h"
34 #include "llvm/IR/Module.h"
35 #include "llvm/IR/PassManager.h"
36 #include "llvm/Pass.h"
37 #include "llvm/ProfileData/InstrProf.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/Support/AtomicOrdering.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Transforms/Scalar.h"
42 #include "llvm/Transforms/Utils/Local.h"
43 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
44 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
49 #define DEBUG_TYPE "CDS"
50 #include <llvm/IR/DebugLoc.h>
52 Value *getPosition( Instruction * I, IRBuilder <> IRB)
54 const DebugLoc & debug_location = I->getDebugLoc ();
55 std::string position_string;
57 llvm::raw_string_ostream position_stream (position_string);
58 debug_location . print (position_stream);
61 return IRB . CreateGlobalStringPtr (position_string);
// Pass statistics (visible with -stats).
64 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
65 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
66 STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
67 // STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
68 // STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
70 STATISTIC(NumOmittedReadsBeforeWrite,
71 "Number of reads ignored due to following writes");
72 STATISTIC(NumOmittedReadsFromConstantGlobals,
73 "Number of reads from constant globals");
74 STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
75 STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
// Handles to the run-time callback functions, one slot per access size:
// index i corresponds to a (1 << i)-byte access, i.e. 1/2/4/8 bytes.
// They are filled in by CDSPass::initializeCallbacks().
86 static const size_t kNumberOfAccessSizes = 4;
87 Constant * CDSLoad[kNumberOfAccessSizes];
88 Constant * CDSStore[kNumberOfAccessSizes];
89 Constant * CDSAtomicInit[kNumberOfAccessSizes];
90 Constant * CDSAtomicLoad[kNumberOfAccessSizes];
91 Constant * CDSAtomicStore[kNumberOfAccessSizes];
// RMW callbacks are additionally indexed by the atomicrmw binary operation.
92 Constant * CDSAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes];
93 Constant * CDSAtomicCAS_V1[kNumberOfAccessSizes];
94 Constant * CDSAtomicCAS_V2[kNumberOfAccessSizes];
95 Constant * CDSAtomicThreadFence;
97 int getAtomicOrderIndex(AtomicOrdering order){
99 case AtomicOrdering::Monotonic:
100 return (int)AtomicOrderingCABI::relaxed;
101 // case AtomicOrdering::Consume: // not specified yet
102 // return AtomicOrderingCABI::consume;
103 case AtomicOrdering::Acquire:
104 return (int)AtomicOrderingCABI::acquire;
105 case AtomicOrdering::Release:
106 return (int)AtomicOrderingCABI::release;
107 case AtomicOrdering::AcquireRelease:
108 return (int)AtomicOrderingCABI::acq_rel;
109 case AtomicOrdering::SequentiallyConsistent:
110 return (int)AtomicOrderingCABI::seq_cst;
112 // unordered or Not Atomic
// Legacy FunctionPass that rewrites memory accesses into calls to the CDS
// run-time library (cds_load*/cds_store*/cds_atomic_*).
// NOTE(review): this paste is missing lines (gaps in the embedded
// numbering) — the type-cache members (OrdTy, Int8PtrTy, ..., VoidTy),
// `static char ID;` and the closing brace fall in a gap; restore from the
// upstream file before building.
118 struct CDSPass : public FunctionPass {
120 CDSPass() : FunctionPass(ID) {}
// Entry point: instruments one function; returns true if the IR changed.
121 bool runOnFunction(Function &F) override;
// Declares/caches the run-time callback functions in module M.
124 void initializeCallbacks(Module &M);
// Replace a plain load/store with a cds_load*/cds_store* call.
125 bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
// Replace an atomic instruction (load/store/rmw/cmpxchg/fence) with a call.
126 bool instrumentAtomic(Instruction *I, const DataLayout &DL);
// Replace a call to a libc/libc++ atomic_* function with a cds_atomic_* call.
127 bool instrumentAtomicCall(CallInst *CI, const DataLayout &DL);
// Filter Local (loads/stores of one BB) into All, dropping accesses that
// provably cannot race (constant globals, non-captured allocas, ...).
128 void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
129 SmallVectorImpl<Instruction *> &All,
130 const DataLayout &DL);
131 bool addrPointsToConstantData(Value *Addr);
// Map an address to the callback-array slot for its access size, or -1.
132 int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
136 static bool isVtableAccess(Instruction *I) {
137 if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
138 return Tag->isTBAAVtableAccess();
// Declare (or look up) every run-time callback in module M and cache the
// handles in the CDSLoad/CDSStore/CDSAtomic* arrays, one entry per access
// size (1/2/4/8 bytes).  Also caches the common IR types (OrdTy etc.).
// NOTE(review): this paste drops lines at the embedded-numbering gaps
// (e.g. 144, 147, 152, 154, 159, ...) — most are likely blank lines or
// braces, but verify against the upstream file before building.
142 void CDSPass::initializeCallbacks(Module &M) {
143 LLVMContext &Ctx = M.getContext();
145 Type * Int1Ty = Type::getInt1Ty(Ctx);
// Memory orders are passed to the run-time as 32-bit integers.
146 OrdTy = Type::getInt32Ty(Ctx);
148 Int8PtrTy = Type::getInt8PtrTy(Ctx);
149 Int16PtrTy = Type::getInt16PtrTy(Ctx);
150 Int32PtrTy = Type::getInt32PtrTy(Ctx);
151 Int64PtrTy = Type::getInt64PtrTy(Ctx);
153 VoidTy = Type::getVoidTy(Ctx);
155 // Get the function to call from our runtime library.
156 for (unsigned i = 0; i < kNumberOfAccessSizes; i++) {
// Slot i handles (1 << i)-byte accesses; names are suffixed with the bit
// width, e.g. cds_atomic_load32.
157 const unsigned ByteSize = 1U << i;
158 const unsigned BitSize = ByteSize * 8;
160 std::string ByteSizeStr = utostr(ByteSize);
161 std::string BitSizeStr = utostr(BitSize);
163 Type *Ty = Type::getIntNTy(Ctx, BitSize);
164 Type *PtrTy = Ty->getPointerTo();
166 // uint8_t cds_atomic_load8 (void * obj, int atomic_index)
167 // void cds_atomic_store8 (void * obj, int atomic_index, uint8_t val)
168 SmallString<32> LoadName("cds_load" + BitSizeStr);
169 SmallString<32> StoreName("cds_store" + BitSizeStr);
170 SmallString<32> AtomicInitName("cds_atomic_init" + BitSizeStr);
171 SmallString<32> AtomicLoadName("cds_atomic_load" + BitSizeStr);
172 SmallString<32> AtomicStoreName("cds_atomic_store" + BitSizeStr);
// The trailing Int8PtrTy parameter on the atomic callbacks carries the
// source-position string built by getPosition().
174 CDSLoad[i] = M.getOrInsertFunction(LoadName, VoidTy, PtrTy);
175 CDSStore[i] = M.getOrInsertFunction(StoreName, VoidTy, PtrTy);
176 CDSAtomicInit[i] = M.getOrInsertFunction(AtomicInitName,
177 VoidTy, PtrTy, Ty, Int8PtrTy);
178 CDSAtomicLoad[i] = M.getOrInsertFunction(AtomicLoadName,
179 Ty, PtrTy, OrdTy, Int8PtrTy);
180 CDSAtomicStore[i] = M.getOrInsertFunction(AtomicStoreName,
181 VoidTy, PtrTy, Ty, OrdTy, Int8PtrTy);
// One callback per atomicrmw binop; unsupported ops keep nullptr.
183 for (int op = AtomicRMWInst::FIRST_BINOP;
184 op <= AtomicRMWInst::LAST_BINOP; ++op) {
185 CDSAtomicRMW[op][i] = nullptr;
186 std::string NamePart;
188 if (op == AtomicRMWInst::Xchg)
189 NamePart = "_exchange";
190 else if (op == AtomicRMWInst::Add)
191 NamePart = "_fetch_add";
192 else if (op == AtomicRMWInst::Sub)
193 NamePart = "_fetch_sub";
194 else if (op == AtomicRMWInst::And)
195 NamePart = "_fetch_and";
196 else if (op == AtomicRMWInst::Or)
197 NamePart = "_fetch_or";
198 else if (op == AtomicRMWInst::Xor)
199 NamePart = "_fetch_xor";
// NOTE(review): the `else continue;` for unmatched ops falls in a gap
// (lines 200-202) — without it NamePart would be empty here; confirm.
203 SmallString<32> AtomicRMWName("cds_atomic" + NamePart + BitSizeStr);
204 CDSAtomicRMW[op][i] = M.getOrInsertFunction(AtomicRMWName,
205 Ty, PtrTy, Ty, OrdTy, Int8PtrTy);
208 // only supports strong version
// V1 returns the old value (used for the cmpxchg instruction); V2 takes a
// pointer to `expected` and returns a success flag (used for the C API).
209 SmallString<32> AtomicCASName_V1("cds_atomic_compare_exchange" + BitSizeStr + "_v1");
210 SmallString<32> AtomicCASName_V2("cds_atomic_compare_exchange" + BitSizeStr + "_v2");
211 CDSAtomicCAS_V1[i] = M.getOrInsertFunction(AtomicCASName_V1,
212 Ty, PtrTy, Ty, Ty, OrdTy, OrdTy, Int8PtrTy);
213 CDSAtomicCAS_V2[i] = M.getOrInsertFunction(AtomicCASName_V2,
214 Int1Ty, PtrTy, PtrTy, Ty, OrdTy, OrdTy, Int8PtrTy);
217 CDSAtomicThreadFence = M.getOrInsertFunction("cds_atomic_thread_fence",
218 VoidTy, OrdTy, Int8PtrTy);
221 void printArgs(CallInst *);
// Heuristic: does instruction I call a function that looks like a C/C++
// atomic-library entry point (name contains "atomic")?  Used by
// runOnFunction to route calls to instrumentAtomicCall.
// NOTE(review): the embedded-numbering gaps (226-228, 232, 234-240) hide
// the null-callee guard and the return statements — presumably
// `if (fun == NULL) return false;` and `return true;` in each branch with
// a final `return false;`; confirm against the upstream file.
223 bool isAtomicCall(Instruction *I) {
224 if ( auto *CI = dyn_cast<CallInst>(I) ) {
225 Function *fun = CI->getCalledFunction();
229 StringRef funName = fun->getName();
230 // todo: come up with better rules for function name checking
231 if ( funName.contains("atomic_") ) {
233 } else if (funName.contains("atomic") ) {
241 void printArgs (CallInst *CI) {
242 Function *fun = CI->getCalledFunction();
243 StringRef funName = fun->getName();
245 User::op_iterator begin = CI->arg_begin();
246 User::op_iterator end = CI->arg_end();
248 if ( funName.contains("atomic_") ) {
249 std::vector<Value *> parameters;
251 for (User::op_iterator it = begin; it != end; ++it) {
253 parameters.push_back(param);
254 errs() << *param << " type: " << *param->getType() << "\n";
// Rewrite a call to a C/C++ atomic library function (atomic_init,
// atomic_load[_explicit], atomic_store[_explicit], atomic_fetch_*/
// atomic_exchange, atomic_compare_exchange_*) into the matching
// cds_atomic_* run-time call, forwarding the memory order (or seq_cst when
// the non-_explicit form is used) plus the source position string.
// NOTE(review): this paste drops many lines at the embedded-numbering gaps
// (IRBuilder construction at 261, `Value *param = *it;` at 269, the
// `if (Idx < 0) return false;` guard near 279-281, the `if (isExplicit)`
// /`else` headers, and the `return true;`/closing braces of each branch).
// Do not build from this copy; restore the gaps from the upstream file.
260 bool CDSPass::instrumentAtomicCall(CallInst *CI, const DataLayout &DL) {
262 Function *fun = CI->getCalledFunction();
263 StringRef funName = fun->getName();
264 std::vector<Value *> parameters;
266 User::op_iterator begin = CI->arg_begin();
267 User::op_iterator end = CI->arg_end();
268 for (User::op_iterator it = begin; it != end; ++it) {
270 parameters.push_back(param);
273 // obtain source line number of the CallInst
274 Value *position = getPosition(CI, IRB);
276 // the pointer to the address is always the first argument
277 Value *OrigPtr = parameters[0];
278 int Idx = getMemoryAccessFuncIndex(OrigPtr, DL);
// Idx selects the size-specific callback; Ty/PtrTy are the matching
// integer type and pointer-to-integer type for casts below.
282 const unsigned ByteSize = 1U << Idx;
283 const unsigned BitSize = ByteSize * 8;
284 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
285 Type *PtrTy = Ty->getPointerTo();
287 // atomic_init; args = {obj, order}
288 if (funName.contains("atomic_init")) {
289 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
290 Value *val = IRB.CreateBitOrPointerCast(parameters[1], Ty);
291 Value *args[] = {ptr, val, position};
293 Instruction* funcInst=CallInst::Create(CDSAtomicInit[Idx], args);
294 ReplaceInstWithInst(CI, funcInst);
299 // atomic_load; args = {obj, order}
300 if (funName.contains("atomic_load")) {
// _explicit variants carry the order as an argument; otherwise seq_cst.
301 bool isExplicit = funName.contains("atomic_load_explicit");
303 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
306 order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
308 order = ConstantInt::get(OrdTy,
309 (int) AtomicOrderingCABI::seq_cst);
310 Value *args[] = {ptr, order, position};
312 Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
313 ReplaceInstWithInst(CI, funcInst);
// C++ std::atomic<T>::load mangled-name match.
316 } else if (funName.contains("atomic") &&
317 funName.contains("load")) {
318 // does this version of call always have an atomic order as an argument?
319 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
320 Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
321 Value *args[] = {ptr, order, position};
323 //Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
324 CallInst *funcInst = IRB.CreateCall(CDSAtomicLoad[Idx], args);
// The run-time returns an integer; cast back to the call's original type.
325 Value *RetVal = IRB.CreateIntToPtr(funcInst, CI->getType());
327 CI->replaceAllUsesWith(RetVal);
328 CI->eraseFromParent();
333 // atomic_store; args = {obj, val, order}
334 if (funName.contains("atomic_store")) {
335 bool isExplicit = funName.contains("atomic_store_explicit");
336 Value *OrigVal = parameters[1];
338 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
339 Value *val = IRB.CreatePointerCast(OrigVal, Ty);
342 order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
344 order = ConstantInt::get(OrdTy,
345 (int) AtomicOrderingCABI::seq_cst);
346 Value *args[] = {ptr, val, order, position};
348 Instruction* funcInst=CallInst::Create(CDSAtomicStore[Idx], args);
349 ReplaceInstWithInst(CI, funcInst);
// "EEEE5store" matches the Itanium-mangled std::atomic<T>::store.
352 } else if (funName.contains("atomic") &&
353 funName.contains("EEEE5store")) {
354 // does this version of call always have an atomic order as an argument?
355 Value *OrigVal = parameters[1];
357 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
358 Value *val = IRB.CreatePointerCast(OrigVal, Ty);
// NOTE(review): order is taken from parameters[1] here (same slot as the
// value) rather than parameters[2] — looks like a latent bug; confirm.
359 Value *order = IRB.CreateBitOrPointerCast(parameters[1], OrdTy);
360 Value *args[] = {ptr, val, order, position};
362 Instruction* funcInst = CallInst::Create(CDSAtomicStore[Idx], args);
363 ReplaceInstWithInst(CI, funcInst);
369 // atomic_fetch_*; args = {obj, val, order}
370 if (funName.contains("atomic_fetch_") ||
371 funName.contains("atomic_exchange") ) {
372 bool isExplicit = funName.contains("_explicit");
373 Value *OrigVal = parameters[1];
// Select the RMW callback by substring of the library function name.
376 if ( funName.contains("_fetch_add") )
377 op = AtomicRMWInst::Add;
378 else if ( funName.contains("_fetch_sub") )
379 op = AtomicRMWInst::Sub;
380 else if ( funName.contains("_fetch_and") )
381 op = AtomicRMWInst::And;
382 else if ( funName.contains("_fetch_or") )
383 op = AtomicRMWInst::Or;
384 else if ( funName.contains("_fetch_xor") )
385 op = AtomicRMWInst::Xor;
386 else if ( funName.contains("atomic_exchange") )
387 op = AtomicRMWInst::Xchg;
389 errs() << "Unknown atomic read modify write operation\n";
393 Value *ptr = IRB.CreatePointerCast(OrigPtr, PtrTy);
394 Value *val = IRB.CreatePointerCast(OrigVal, Ty);
397 order = IRB.CreateBitOrPointerCast(parameters[2], OrdTy);
399 order = ConstantInt::get(OrdTy,
400 (int) AtomicOrderingCABI::seq_cst);
401 Value *args[] = {ptr, val, order, position};
403 Instruction* funcInst=CallInst::Create(CDSAtomicRMW[op][Idx], args);
404 ReplaceInstWithInst(CI, funcInst);
// Unhandled C++ member fetch_*/exchange calls: warn, do not instrument.
407 } else if (funName.contains("fetch")) {
408 errs() << "atomic exchange captured. Not implemented yet. ";
409 errs() << "See source file :";
410 getPositionPrint(CI, IRB);
411 } else if (funName.contains("exchange") &&
412 !funName.contains("compare_exchange") ) {
413 errs() << "atomic exchange captured. Not implemented yet. ";
414 errs() << "See source file :";
415 getPositionPrint(CI, IRB);
418 /* atomic_compare_exchange_*;
419 args = {obj, expected, new value, order1, order2}
421 if ( funName.contains("atomic_compare_exchange_") ) {
422 bool isExplicit = funName.contains("_explicit");
// V2 callback: `expected` is passed by pointer, returns a success flag.
424 Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
425 Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
426 Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);
428 Value *order_succ, *order_fail;
430 order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
431 order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
433 order_succ = ConstantInt::get(OrdTy,
434 (int) AtomicOrderingCABI::seq_cst);
435 order_fail = ConstantInt::get(OrdTy,
436 (int) AtomicOrderingCABI::seq_cst);
439 Value *args[] = {Addr, CmpOperand, NewOperand,
440 order_succ, order_fail, position};
442 Instruction* funcInst=CallInst::Create(CDSAtomicCAS_V2[Idx], args);
443 ReplaceInstWithInst(CI, funcInst);
// C++ std::atomic compare_exchange_strong/weak member functions.
446 } else if ( funName.contains("compare_exchange_strong") ||
447 funName.contains("compare_exchange_weak") ) {
448 Value *Addr = IRB.CreatePointerCast(OrigPtr, PtrTy);
449 Value *CmpOperand = IRB.CreatePointerCast(parameters[1], PtrTy);
450 Value *NewOperand = IRB.CreateBitOrPointerCast(parameters[2], Ty);
452 Value *order_succ, *order_fail;
453 order_succ = IRB.CreateBitOrPointerCast(parameters[3], OrdTy);
454 order_fail = IRB.CreateBitOrPointerCast(parameters[4], OrdTy);
456 Value *args[] = {Addr, CmpOperand, NewOperand,
457 order_succ, order_fail, position};
458 Instruction* funcInst=CallInst::Create(CDSAtomicCAS_V2[Idx], args);
459 ReplaceInstWithInst(CI, funcInst);
468 static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
469 // Peel off GEPs and BitCasts.
470 Addr = Addr->stripInBoundsOffsets();
472 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
473 if (GV->hasSection()) {
474 StringRef SectionName = GV->getSection();
475 // Check if the global is in the PGO counters section.
476 auto OF = Triple(M->getTargetTriple()).getObjectFormat();
477 if (SectionName.endswith(
478 getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
482 // Check if the global is private gcov data.
483 if (GV->getName().startswith("__llvm_gcov") ||
484 GV->getName().startswith("__llvm_gcda"))
488 // Do not instrument acesses from different address spaces; we cannot deal
491 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
492 if (PtrTy->getPointerAddressSpace() != 0)
499 bool CDSPass::addrPointsToConstantData(Value *Addr) {
500 // If this is a GEP, just analyze its pointer operand.
501 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
502 Addr = GEP->getPointerOperand();
504 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
505 if (GV->isConstant()) {
506 // Reads from constant globals can not race with any writes.
507 NumOmittedReadsFromConstantGlobals++;
510 } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
511 if (isVtableAccess(L)) {
512 // Reads from a vtable pointer can not race with any writes.
513 NumOmittedReadsFromVtable++;
// Per-function driver: renames main -> user_main (the run-time supplies its
// own main), collects atomic accesses and plain loads/stores, filters the
// plain accesses, then instruments the atomic ones.
// NOTE(review): this paste is missing lines at the embedded-numbering gaps
// (e.g. the `bool Res = false;` init, the basic-block/instruction loop
// headers around 539-541, closing braces, and the tail after 563 including
// the final return).  Restore from the upstream file before building.
520 bool CDSPass::runOnFunction(Function &F) {
521 if (F.getName() == "main") {
522 F.setName("user_main");
523 errs() << "main replaced by user_main\n";
527 initializeCallbacks( *F.getParent() );
529 SmallVector<Instruction*, 8> AllLoadsAndStores;
530 SmallVector<Instruction*, 8> LocalLoadsAndStores;
531 SmallVector<Instruction*, 8> AtomicAccesses;
533 std::vector<Instruction *> worklist;
536 const DataLayout &DL = F.getParent()->getDataLayout();
538 errs() << "--- " << F.getName() << "---\n";
// Classify each instruction: atomics (incl. atomic_* library calls) vs
// plain loads/stores; other calls are not handled yet.
542 if ( (&I)->isAtomic() || isAtomicCall(&I) ) {
543 AtomicAccesses.push_back(&I);
544 } else if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
545 LocalLoadsAndStores.push_back(&I);
546 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
547 // not implemented yet
// Drop plain accesses that provably cannot race.
551 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
// Plain load/store instrumentation is currently disabled.
554 for (auto Inst : AllLoadsAndStores) {
555 // Res |= instrumentLoadOrStore(Inst, DL);
556 // errs() << "load and store are replaced\n";
559 for (auto Inst : AtomicAccesses) {
560 Res |= instrumentAtomic(Inst, DL);
563 if (F.getName() == "user_main") {
572 void CDSPass::chooseInstructionsToInstrument(
573 SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
574 const DataLayout &DL) {
575 SmallPtrSet<Value*, 8> WriteTargets;
576 // Iterate from the end.
577 for (Instruction *I : reverse(Local)) {
578 if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
579 Value *Addr = Store->getPointerOperand();
580 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
582 WriteTargets.insert(Addr);
584 LoadInst *Load = cast<LoadInst>(I);
585 Value *Addr = Load->getPointerOperand();
586 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
588 if (WriteTargets.count(Addr)) {
589 // We will write to this temp, so no reason to analyze the read.
590 NumOmittedReadsBeforeWrite++;
593 if (addrPointsToConstantData(Addr)) {
594 // Addr points to some constant data -- it can not race with any writes.
598 Value *Addr = isa<StoreInst>(*I)
599 ? cast<StoreInst>(I)->getPointerOperand()
600 : cast<LoadInst>(I)->getPointerOperand();
601 if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
602 !PointerMayBeCaptured(Addr, true, true)) {
603 // The variable is addressable but not captured, so it cannot be
604 // referenced from a different thread and participate in a data race
605 // (see llvm/Analysis/CaptureTracking.h for details).
606 NumOmittedNonCaptured++;
615 bool CDSPass::instrumentLoadOrStore(Instruction *I,
616 const DataLayout &DL) {
618 bool IsWrite = isa<StoreInst>(*I);
619 Value *Addr = IsWrite
620 ? cast<StoreInst>(I)->getPointerOperand()
621 : cast<LoadInst>(I)->getPointerOperand();
623 // swifterror memory addresses are mem2reg promoted by instruction selection.
624 // As such they cannot have regular uses like an instrumentation function and
625 // it makes no sense to track them as memory.
626 if (Addr->isSwiftError())
629 int Idx = getMemoryAccessFuncIndex(Addr, DL);
632 // not supported by CDS yet
633 /* if (IsWrite && isVtableAccess(I)) {
634 LLVM_DEBUG(dbgs() << " VPTR : " << *I << "\n");
635 Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
636 // StoredValue may be a vector type if we are storing several vptrs at once.
637 // In this case, just take the first element of the vector since this is
638 // enough to find vptr races.
639 if (isa<VectorType>(StoredValue->getType()))
640 StoredValue = IRB.CreateExtractElement(
641 StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
642 if (StoredValue->getType()->isIntegerTy())
643 StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
644 // Call TsanVptrUpdate.
645 IRB.CreateCall(TsanVptrUpdate,
646 {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
647 IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
648 NumInstrumentedVtableWrites++;
652 if (!IsWrite && isVtableAccess(I)) {
653 IRB.CreateCall(TsanVptrLoad,
654 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
655 NumInstrumentedVtableReads++;
660 Value *OnAccessFunc = nullptr;
661 OnAccessFunc = IsWrite ? CDSStore[Idx] : CDSLoad[Idx];
663 Type *ArgType = IRB.CreatePointerCast(Addr, Addr->getType())->getType();
665 if ( ArgType != Int8PtrTy && ArgType != Int16PtrTy &&
666 ArgType != Int32PtrTy && ArgType != Int64PtrTy ) {
667 //errs() << "A load or store of type ";
668 //errs() << *ArgType;
669 //errs() << " is passed in\n";
670 return false; // if other types of load or stores are passed in
672 IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, Addr->getType()));
673 if (IsWrite) NumInstrumentedWrites++;
674 else NumInstrumentedReads++;
// Replace an atomic IR instruction (atomic load/store, atomicrmw, cmpxchg,
// fence) — or a call to an atomic library function — with the matching
// cds_atomic_* run-time call, translating the memory ordering via
// getAtomicOrderIndex and attaching the source-position string.
// NOTE(review): lines at the embedded-numbering gaps are missing from this
// paste (the function-level `IRBuilder<> IRB(I);`, the `Value *Res =`
// receiver on line 747 ahead of the CreateInsertValue at 748, closing
// braces, and the final return).  Restore from upstream before building.
678 bool CDSPass::instrumentAtomic(Instruction * I, const DataLayout &DL) {
680 // LLVMContext &Ctx = IRB.getContext();
// Calls to atomic_* library functions are handled separately.
682 if (auto *CI = dyn_cast<CallInst>(I)) {
683 return instrumentAtomicCall(CI, DL);
686 Value *position = getPosition(I, IRB);
688 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
689 Value *Addr = LI->getPointerOperand();
690 int Idx=getMemoryAccessFuncIndex(Addr, DL);
691 int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
692 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
693 Value *args[] = {Addr, order, position};
694 Instruction* funcInst=CallInst::Create(CDSAtomicLoad[Idx], args);
695 ReplaceInstWithInst(LI, funcInst);
696 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
697 Value *Addr = SI->getPointerOperand();
698 int Idx=getMemoryAccessFuncIndex(Addr, DL);
699 int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
700 Value *val = SI->getValueOperand();
701 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
702 Value *args[] = {Addr, val, order, position};
703 Instruction* funcInst=CallInst::Create(CDSAtomicStore[Idx], args);
704 ReplaceInstWithInst(SI, funcInst);
705 } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
706 Value *Addr = RMWI->getPointerOperand();
707 int Idx=getMemoryAccessFuncIndex(Addr, DL);
708 int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
709 Value *val = RMWI->getValOperand();
710 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
711 Value *args[] = {Addr, val, order, position};
// Callback chosen by both the RMW operation and the access size.
712 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[RMWI->getOperation()][Idx], args);
713 ReplaceInstWithInst(RMWI, funcInst);
714 } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
715 IRBuilder<> IRB(CASI);
717 Value *Addr = CASI->getPointerOperand();
718 int Idx=getMemoryAccessFuncIndex(Addr, DL);
720 const unsigned ByteSize = 1U << Idx;
721 const unsigned BitSize = ByteSize * 8;
722 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
723 Type *PtrTy = Ty->getPointerTo();
725 Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
726 Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
728 int atomic_order_index_succ = getAtomicOrderIndex(CASI->getSuccessOrdering());
729 int atomic_order_index_fail = getAtomicOrderIndex(CASI->getFailureOrdering());
730 Value *order_succ = ConstantInt::get(OrdTy, atomic_order_index_succ);
731 Value *order_fail = ConstantInt::get(OrdTy, atomic_order_index_fail);
733 Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
734 CmpOperand, NewOperand,
735 order_succ, order_fail, position};
// V1 callback returns the old value; success flag is recomputed below.
737 CallInst *funcInst = IRB.CreateCall(CDSAtomicCAS_V1[Idx], Args);
738 Value *Success = IRB.CreateICmpEQ(funcInst, CmpOperand);
740 Value *OldVal = funcInst;
741 Type *OrigOldValTy = CASI->getNewValOperand()->getType();
742 if (Ty != OrigOldValTy) {
743 // The value is a pointer, so we need to cast the return value.
744 OldVal = IRB.CreateIntToPtr(funcInst, OrigOldValTy);
// Rebuild cmpxchg's {old value, success} aggregate result.
748 IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
749 Res = IRB.CreateInsertValue(Res, Success, 1);
751 I->replaceAllUsesWith(Res);
752 I->eraseFromParent();
753 } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
754 int atomic_order_index = getAtomicOrderIndex(FI->getOrdering());
755 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
756 Value *Args[] = {order, position};
758 CallInst *funcInst = CallInst::Create(CDSAtomicThreadFence, Args);
759 ReplaceInstWithInst(FI, funcInst);
760 // errs() << "Thread Fences replaced\n";
765 int CDSPass::getMemoryAccessFuncIndex(Value *Addr,
766 const DataLayout &DL) {
767 Type *OrigPtrTy = Addr->getType();
768 Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
769 assert(OrigTy->isSized());
770 uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
771 if (TypeSize != 8 && TypeSize != 16 &&
772 TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
773 NumAccessesWithBadSize++;
774 // Ignore all unusual sizes.
777 size_t Idx = countTrailingZeros(TypeSize / 8);
778 assert(Idx < kNumberOfAccessSizes);
// Pass identifier used by the legacy pass manager (address, not value).
783 char CDSPass::ID = 0;
785 // Automatically enable the pass.
// Registered at EP_OptimizerLast so CDS instrumentation runs after the
// optimizer, on the final IR (mirrors how sanitizers hook in).
786 static void registerCDSPass(const PassManagerBuilder &,
787 legacy::PassManagerBase &PM) {
788 PM.add(new CDSPass());
// NOTE(review): the RegisterStandardPasses call is truncated in this paste
// (the registerCDSPass argument and closing parenthesis fall past line 791).
790 static RegisterStandardPasses
791 RegisterMyPass(PassManagerBuilder::EP_OptimizerLast,