//===-- CDSPass.cpp - xxx -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a modified version of ThreadSanitizer.cpp, a part of a race detector.
//
// The tool is under development, for the details about previous versions see
// http://code.google.com/p/data-race-test
//
// The instrumentation phase is quite simple:
//   - Insert calls to run-time library before every memory access.
//   - Optimizations may apply to avoid instrumenting some of the accesses.
//   - Insert calls at function entry/exit.
// The rest is handled by the run-time library.
//===----------------------------------------------------------------------===//
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Analysis/CaptureTracking.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/CFG.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/LLVMContext.h"
37 #include "llvm/IR/LegacyPassManager.h"
38 #include "llvm/IR/Module.h"
39 #include "llvm/IR/PassManager.h"
40 #include "llvm/Pass.h"
41 #include "llvm/ProfileData/InstrProf.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Support/AtomicOrdering.h"
44 #include "llvm/Support/Debug.h"
45 #include "llvm/Transforms/Scalar.h"
46 #include "llvm/Transforms/Utils/Local.h"
47 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
48 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
51 // #include "llvm/Support/MathExtras.h"
#define DEBUG_TYPE "CDS"

// Callback tables hold one entry per power-of-two access size:
// index 0..3 correspond to 8/16/32/64-bit accesses (ByteSize = 1U << i below).
#define FUNCARRAYSIZE 4

// Pass statistics, reported with -stats.
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
// STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
// STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
STATISTIC(NumOmittedReadsBeforeWrite,
          "Number of reads ignored due to following writes");
STATISTIC(NumOmittedReadsFromConstantGlobals,
          "Number of reads from constant globals");
STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
84 Constant * CDSLoad[FUNCARRAYSIZE];
85 Constant * CDSStore[FUNCARRAYSIZE];
86 Constant * CDSAtomicLoad[FUNCARRAYSIZE];
87 Constant * CDSAtomicStore[FUNCARRAYSIZE];
88 Constant * CDSAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][FUNCARRAYSIZE];
89 Constant * CDSAtomicCAS[FUNCARRAYSIZE];
90 Constant * CDSAtomicThreadFence;
94 Constant * CdsLoad[FUNCARRAYSIZE];
95 Constant * CdsStore[FUNCARRAYSIZE];
96 Constant * CdsAtomicLoad[FUNCARRAYSIZE];
97 Constant * CdsAtomicStore[FUNCARRAYSIZE];
98 Constant * CdsAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][FUNCARRAYSIZE];
99 Constant * CdsAtomicCAS[FUNCARRAYSIZE];
100 Constant * CdsAtomicThreadFence;
101 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
103 int getAtomicOrderIndex(AtomicOrdering order){
105 case AtomicOrdering::Monotonic:
106 return (int)AtomicOrderingCABI::relaxed;
107 // case AtomicOrdering::Consume: // not specified yet
108 // return AtomicOrderingCABI::consume;
109 case AtomicOrdering::Acquire:
110 return (int)AtomicOrderingCABI::acquire;
111 case AtomicOrdering::Release:
112 return (int)AtomicOrderingCABI::release;
113 case AtomicOrdering::AcquireRelease:
114 return (int)AtomicOrderingCABI::acq_rel;
115 case AtomicOrdering::SequentiallyConsistent:
116 return (int)AtomicOrderingCABI::seq_cst;
118 // unordered or Not Atomic
123 int getTypeSize(Type* type) {
124 if (type==Int32PtrTy) {
125 return sizeof(int)*8;
126 } else if (type==Int8PtrTy) {
127 return sizeof(char)*8;
128 } else if (type==Int16PtrTy) {
129 return sizeof(short)*8;
130 } else if (type==Int64PtrTy) {
131 return sizeof(long long int)*8;
133 return sizeof(void*)*8;
// Map a bit-size (8/16/32/64) to the callback-table index (0..3) used by the
// CDS* arrays above (index i covers accesses of 8 * (1 << i) bits).
// Returns -1 for sizes with no runtime callback.
static int sizetoindex(int size) {
	switch (size) {
	case 8:  return 0;
	case 16: return 1;
	case 32: return 2;
	case 64: return 3;
	default: return -1;
	}
}
151 struct CDSPass : public FunctionPass {
153 CDSPass() : FunctionPass(ID) {}
155 struct CdsPass : public FunctionPass {
157 CdsPass() : FunctionPass(ID) {}
158 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
159 bool runOnFunction(Function &F) override;
162 void initializeCallbacks(Module &M);
163 bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL);
164 bool instrumentAtomic(Instruction *I);
165 void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
166 SmallVectorImpl<Instruction *> &All,
167 const DataLayout &DL);
168 bool addrPointsToConstantData(Value *Addr);
173 void CDSPass::initializeCallbacks(Module &M) {
175 void CdsPass::initializeCallbacks(Module &M) {
176 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
177 LLVMContext &Ctx = M.getContext();
179 Int8Ty = Type::getInt8Ty(Ctx);
180 Int16Ty = Type::getInt16Ty(Ctx);
181 Int32Ty = Type::getInt32Ty(Ctx);
182 Int64Ty = Type::getInt64Ty(Ctx);
183 OrdTy = Type::getInt32Ty(Ctx);
185 Int8PtrTy = Type::getInt8PtrTy(Ctx);
186 Int16PtrTy = Type::getInt16PtrTy(Ctx);
187 Int32PtrTy = Type::getInt32PtrTy(Ctx);
188 Int64PtrTy = Type::getInt64PtrTy(Ctx);
190 VoidTy = Type::getVoidTy(Ctx);
193 // Get the function to call from our untime library.
194 for (unsigned i = 0; i < FUNCARRAYSIZE; i++) {
195 const unsigned ByteSize = 1U << i;
196 const unsigned BitSize = ByteSize * 8;
197 // errs() << BitSize << "\n";
198 std::string ByteSizeStr = utostr(ByteSize);
199 std::string BitSizeStr = utostr(BitSize);
201 Type *Ty = Type::getIntNTy(Ctx, BitSize);
202 Type *PtrTy = Ty->getPointerTo();
204 // uint8_t cds_atomic_load8 (void * obj, int atomic_index)
205 // void cds_atomic_store8 (void * obj, int atomic_index, uint8_t val)
206 SmallString<32> LoadName("cds_load" + BitSizeStr);
207 SmallString<32> StoreName("cds_store" + BitSizeStr);
208 SmallString<32> AtomicLoadName("cds_atomic_load" + BitSizeStr);
209 SmallString<32> AtomicStoreName("cds_atomic_store" + BitSizeStr);
212 // CDSLoad[i] = M.getOrInsertFunction(LoadName, Ty, PtrTy);
213 // CDSStore[i] = M.getOrInsertFunction(StoreName, VoidTy, PtrTy, Ty);
214 CDSLoad[i] = M.getOrInsertFunction(LoadName, VoidTy, PtrTy);
215 CDSStore[i] = M.getOrInsertFunction(StoreName, VoidTy, PtrTy);
216 CDSAtomicLoad[i] = M.getOrInsertFunction(AtomicLoadName, Ty, PtrTy, OrdTy);
217 CDSAtomicStore[i] = M.getOrInsertFunction(AtomicStoreName, VoidTy, PtrTy, OrdTy, Ty);
219 for (int op = AtomicRMWInst::FIRST_BINOP; op <= AtomicRMWInst::LAST_BINOP; ++op) {
220 CDSAtomicRMW[op][i] = nullptr;
222 // CdsLoad[i] = M.getOrInsertFunction(LoadName, Ty, PtrTy);
223 // CdsStore[i] = M.getOrInsertFunction(StoreName, VoidTy, PtrTy, Ty);
224 CdsLoad[i] = M.getOrInsertFunction(LoadName, VoidTy, PtrTy);
225 CdsStore[i] = M.getOrInsertFunction(StoreName, VoidTy, PtrTy);
226 CdsAtomicLoad[i] = M.getOrInsertFunction(AtomicLoadName, Ty, PtrTy, OrdTy);
227 CdsAtomicStore[i] = M.getOrInsertFunction(AtomicStoreName, VoidTy, PtrTy, OrdTy, Ty);
229 for (int op = AtomicRMWInst::FIRST_BINOP; op <= AtomicRMWInst::LAST_BINOP; ++op) {
230 CdsAtomicRMW[op][i] = nullptr;
231 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
232 std::string NamePart;
234 if (op == AtomicRMWInst::Xchg)
235 NamePart = "_exchange";
236 else if (op == AtomicRMWInst::Add)
237 NamePart = "_fetch_add";
238 else if (op == AtomicRMWInst::Sub)
239 NamePart = "_fetch_sub";
240 else if (op == AtomicRMWInst::And)
241 NamePart = "_fetch_and";
242 else if (op == AtomicRMWInst::Or)
243 NamePart = "_fetch_or";
244 else if (op == AtomicRMWInst::Xor)
245 NamePart = "_fetch_xor";
249 SmallString<32> AtomicRMWName("cds_atomic" + NamePart + BitSizeStr);
251 CDSAtomicRMW[op][i] = M.getOrInsertFunction(AtomicRMWName, Ty, PtrTy, OrdTy, Ty);
253 CdsAtomicRMW[op][i] = M.getOrInsertFunction(AtomicRMWName, Ty, PtrTy, OrdTy, Ty);
254 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
257 // only supportes strong version
258 SmallString<32> AtomicCASName("cds_atomic_compare_exchange" + BitSizeStr);
260 CDSAtomicCAS[i] = M.getOrInsertFunction(AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy);
263 CDSAtomicThreadFence = M.getOrInsertFunction("cds_atomic_thread_fence", VoidTy, OrdTy);
265 CdsAtomicCAS[i] = M.getOrInsertFunction(AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, OrdTy);
268 CdsAtomicThreadFence = M.getOrInsertFunction("cds_atomic_thread_fence", VoidTy, OrdTy);
269 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
272 static bool isVtableAccess(Instruction *I) {
273 if (MDNode *Tag = I->getMetadata(LLVMContext::MD_tbaa))
274 return Tag->isTBAAVtableAccess();
278 static bool shouldInstrumentReadWriteFromAddress(const Module *M, Value *Addr) {
279 // Peel off GEPs and BitCasts.
280 Addr = Addr->stripInBoundsOffsets();
282 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
283 if (GV->hasSection()) {
284 StringRef SectionName = GV->getSection();
285 // Check if the global is in the PGO counters section.
286 auto OF = Triple(M->getTargetTriple()).getObjectFormat();
287 if (SectionName.endswith(
288 getInstrProfSectionName(IPSK_cnts, OF, /*AddSegmentInfo=*/false)))
292 // Check if the global is private gcov data.
293 if (GV->getName().startswith("__llvm_gcov") ||
294 GV->getName().startswith("__llvm_gcda"))
298 // Do not instrument acesses from different address spaces; we cannot deal
301 Type *PtrTy = cast<PointerType>(Addr->getType()->getScalarType());
302 if (PtrTy->getPointerAddressSpace() != 0)
310 bool CDSPass::addrPointsToConstantData(Value *Addr) {
312 bool CdsPass::addrPointsToConstantData(Value *Addr) {
313 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
314 // If this is a GEP, just analyze its pointer operand.
315 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr))
316 Addr = GEP->getPointerOperand();
318 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
319 if (GV->isConstant()) {
320 // Reads from constant globals can not race with any writes.
321 NumOmittedReadsFromConstantGlobals++;
324 } else if (LoadInst *L = dyn_cast<LoadInst>(Addr)) {
325 if (isVtableAccess(L)) {
326 // Reads from a vtable pointer can not race with any writes.
327 NumOmittedReadsFromVtable++;
335 bool CDSPass::runOnFunction(Function &F) {
336 if (F.getName() == "main") {
337 F.setName("user_main");
338 errs() << "main replaced by user_main\n";
340 initializeCallbacks( *F.getParent() );
342 SmallVector<Instruction*, 8> AllLoadsAndStores;
343 SmallVector<Instruction*, 8> LocalLoadsAndStores;
344 SmallVector<Instruction*, 8> AtomicAccesses;
346 std::vector<Instruction *> worklist;
349 const DataLayout &DL = F.getParent()->getDataLayout();
351 errs() << "Before\n";
356 if ( (&I)->isAtomic() ) {
357 AtomicAccesses.push_back(&I);
358 } else if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
359 LocalLoadsAndStores.push_back(&I);
360 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
361 // not implemented yet
364 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
367 for (auto Inst : AllLoadsAndStores) {
368 // Res |= instrumentLoadOrStore(Inst, DL);
369 // errs() << "load and store are not replaced\n";
372 for (auto Inst : AtomicAccesses) {
373 Res |= instrumentAtomic(Inst);
377 errs() << F.getName();
378 errs() << " has above instructions replaced\n";
381 // errs() << "After\n";
384 bool CdsPass::runOnFunction(Function &F) {
385 if (F.getName() == "main")
386 F.setName("user_main");
388 initializeCallbacks( *F.getParent() );
390 SmallVector<Instruction*, 8> AllLoadsAndStores;
391 SmallVector<Instruction*, 8> LocalLoadsAndStores;
392 SmallVector<Instruction*, 8> AtomicAccesses;
394 std::vector<Instruction *> worklist;
397 const DataLayout &DL = F.getParent()->getDataLayout();
399 errs() << "Before\n";
404 if ( (&I)->isAtomic() ) {
405 AtomicAccesses.push_back(&I);
406 } else if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
407 LocalLoadsAndStores.push_back(&I);
410 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, DL);
413 for (auto Inst : AllLoadsAndStores) {
414 Res |= instrumentLoadOrStore(Inst, DL);
417 for (auto Inst : AtomicAccesses) {
418 Res |= instrumentAtomic(Inst);
423 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
429 void CDSPass::chooseInstructionsToInstrument(
431 void CdsPass::chooseInstructionsToInstrument(
432 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
433 SmallVectorImpl<Instruction *> &Local, SmallVectorImpl<Instruction *> &All,
434 const DataLayout &DL) {
435 SmallPtrSet<Value*, 8> WriteTargets;
436 // Iterate from the end.
437 for (Instruction *I : reverse(Local)) {
438 if (StoreInst *Store = dyn_cast<StoreInst>(I)) {
439 Value *Addr = Store->getPointerOperand();
440 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
442 WriteTargets.insert(Addr);
444 LoadInst *Load = cast<LoadInst>(I);
445 Value *Addr = Load->getPointerOperand();
446 if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
448 if (WriteTargets.count(Addr)) {
449 // We will write to this temp, so no reason to analyze the read.
450 NumOmittedReadsBeforeWrite++;
453 if (addrPointsToConstantData(Addr)) {
454 // Addr points to some constant data -- it can not race with any writes.
458 Value *Addr = isa<StoreInst>(*I)
459 ? cast<StoreInst>(I)->getPointerOperand()
460 : cast<LoadInst>(I)->getPointerOperand();
461 if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
462 !PointerMayBeCaptured(Addr, true, true)) {
463 // The variable is addressable but not captured, so it cannot be
464 // referenced from a different thread and participate in a data race
465 // (see llvm/Analysis/CaptureTracking.h for details).
466 NumOmittedNonCaptured++;
476 bool CDSPass::instrumentLoadOrStore(Instruction *I,
478 bool CdsPass::instrumentLoadOrStore(Instruction *I,
479 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
480 const DataLayout &DL) {
482 bool IsWrite = isa<StoreInst>(*I);
483 Value *Addr = IsWrite
484 ? cast<StoreInst>(I)->getPointerOperand()
485 : cast<LoadInst>(I)->getPointerOperand();
487 // swifterror memory addresses are mem2reg promoted by instruction selection.
488 // As such they cannot have regular uses like an instrumentation function and
489 // it makes no sense to track them as memory.
490 if (Addr->isSwiftError())
493 int size = getTypeSize(Addr->getType());
494 int index = sizetoindex(size);
497 // not supported by CDS yet
499 // not supported by Cds yet
500 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
501 /* if (IsWrite && isVtableAccess(I)) {
502 LLVM_DEBUG(dbgs() << " VPTR : " << *I << "\n");
503 Value *StoredValue = cast<StoreInst>(I)->getValueOperand();
504 // StoredValue may be a vector type if we are storing several vptrs at once.
505 // In this case, just take the first element of the vector since this is
506 // enough to find vptr races.
507 if (isa<VectorType>(StoredValue->getType()))
508 StoredValue = IRB.CreateExtractElement(
509 StoredValue, ConstantInt::get(IRB.getInt32Ty(), 0));
510 if (StoredValue->getType()->isIntegerTy())
511 StoredValue = IRB.CreateIntToPtr(StoredValue, IRB.getInt8PtrTy());
512 // Call TsanVptrUpdate.
513 IRB.CreateCall(TsanVptrUpdate,
514 {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
515 IRB.CreatePointerCast(StoredValue, IRB.getInt8PtrTy())});
516 NumInstrumentedVtableWrites++;
520 if (!IsWrite && isVtableAccess(I)) {
521 IRB.CreateCall(TsanVptrLoad,
522 IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
523 NumInstrumentedVtableReads++;
528 Value *OnAccessFunc = nullptr;
530 OnAccessFunc = IsWrite ? CDSStore[index] : CDSLoad[index];
532 Type *ArgType = IRB.CreatePointerCast(Addr, Addr->getType())->getType();
534 if ( ArgType != Int8PtrTy && ArgType != Int16PtrTy &&
535 ArgType != Int32PtrTy && ArgType != Int64PtrTy ) {
536 //errs() << "A load or store of type ";
537 //errs() << *ArgType;
538 //errs() << " is passed in\n";
539 return false; // if other types of load or stores are passed in
542 OnAccessFunc = IsWrite ? CdsStore[index] : CdsLoad[index];
544 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
545 IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, Addr->getType()));
546 if (IsWrite) NumInstrumentedWrites++;
547 else NumInstrumentedReads++;
553 bool CDSPass::instrumentAtomic(Instruction * I) {
555 bool CdsPass::instrumentAtomic(Instruction * I) {
556 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
558 // LLVMContext &Ctx = IRB.getContext();
560 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
561 int atomic_order_index = getAtomicOrderIndex(SI->getOrdering());
563 Value *val = SI->getValueOperand();
564 Value *ptr = SI->getPointerOperand();
565 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
566 Value *args[] = {ptr, order, val};
568 int size=getTypeSize(ptr->getType());
569 int index=sizetoindex(size);
572 Instruction* funcInst=CallInst::Create(CDSAtomicStore[index], args,"");
574 Instruction* funcInst=CallInst::Create(CdsAtomicStore[index], args,"");
575 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
576 ReplaceInstWithInst(SI, funcInst);
577 errs() << "Store replaced\n";
578 } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
579 int atomic_order_index = getAtomicOrderIndex(LI->getOrdering());
581 Value *ptr = LI->getPointerOperand();
582 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
583 Value *args[] = {ptr, order};
585 int size=getTypeSize(ptr->getType());
586 int index=sizetoindex(size);
589 Instruction* funcInst=CallInst::Create(CDSAtomicLoad[index], args, "");
591 Instruction* funcInst=CallInst::Create(CdsAtomicLoad[index], args, "");
592 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
593 ReplaceInstWithInst(LI, funcInst);
594 errs() << "Load Replaced\n";
595 } else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) {
596 int atomic_order_index = getAtomicOrderIndex(RMWI->getOrdering());
598 Value *val = RMWI->getValOperand();
599 Value *ptr = RMWI->getPointerOperand();
600 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
601 Value *args[] = {ptr, order, val};
603 int size = getTypeSize(ptr->getType());
604 int index = sizetoindex(size);
607 Instruction* funcInst = CallInst::Create(CDSAtomicRMW[RMWI->getOperation()][index], args, "");
609 Instruction* funcInst = CallInst::Create(CdsAtomicRMW[RMWI->getOperation()][index], args, "");
610 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
611 ReplaceInstWithInst(RMWI, funcInst);
612 errs() << RMWI->getOperationName(RMWI->getOperation());
613 errs() << " replaced\n";
614 } else if (AtomicCmpXchgInst *CASI = dyn_cast<AtomicCmpXchgInst>(I)) {
615 IRBuilder<> IRB(CASI);
617 Value *Addr = CASI->getPointerOperand();
619 int size = getTypeSize(Addr->getType());
620 int index = sizetoindex(size);
621 const unsigned ByteSize = 1U << index;
622 const unsigned BitSize = ByteSize * 8;
623 Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize);
624 Type *PtrTy = Ty->getPointerTo();
626 Value *CmpOperand = IRB.CreateBitOrPointerCast(CASI->getCompareOperand(), Ty);
627 Value *NewOperand = IRB.CreateBitOrPointerCast(CASI->getNewValOperand(), Ty);
629 int atomic_order_index_succ = getAtomicOrderIndex(CASI->getSuccessOrdering());
630 int atomic_order_index_fail = getAtomicOrderIndex(CASI->getFailureOrdering());
631 Value *order_succ = ConstantInt::get(OrdTy, atomic_order_index_succ);
632 Value *order_fail = ConstantInt::get(OrdTy, atomic_order_index_fail);
634 Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy),
635 CmpOperand, NewOperand,
636 order_succ, order_fail};
639 CallInst *funcInst = IRB.CreateCall(CDSAtomicCAS[index], Args);
641 CallInst *funcInst = IRB.CreateCall(CdsAtomicCAS[index], Args);
642 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
643 Value *Success = IRB.CreateICmpEQ(funcInst, CmpOperand);
645 Value *OldVal = funcInst;
646 Type *OrigOldValTy = CASI->getNewValOperand()->getType();
647 if (Ty != OrigOldValTy) {
648 // The value is a pointer, so we need to cast the return value.
649 OldVal = IRB.CreateIntToPtr(funcInst, OrigOldValTy);
653 IRB.CreateInsertValue(UndefValue::get(CASI->getType()), OldVal, 0);
654 Res = IRB.CreateInsertValue(Res, Success, 1);
656 I->replaceAllUsesWith(Res);
657 I->eraseFromParent();
658 } else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
659 int atomic_order_index = getAtomicOrderIndex(FI->getOrdering());
660 Value *order = ConstantInt::get(OrdTy, atomic_order_index);
661 Value *Args[] = {order};
664 CallInst *funcInst = CallInst::Create(CDSAtomicThreadFence, Args);
666 CallInst *funcInst = CallInst::Create(CdsAtomicThreadFence, Args);
667 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec
668 ReplaceInstWithInst(FI, funcInst);
669 errs() << "Thread Fences replaced\n";
677 char CDSPass::ID = 0;
679 // Automatically enable the pass.
680 // http://adriansampson.net/blog/clangpass.html
681 static void registerCDSPass(const PassManagerBuilder &,
682 legacy::PassManagerBase &PM) {
683 PM.add(new CDSPass());
685 static RegisterStandardPasses
686 RegisterMyPass(PassManagerBuilder::EP_EarlyAsPossible,
689 char CdsPass::ID = 0;
691 // Automatically enable the pass.
692 // http://adriansampson.net/blog/clangpass.html
693 static void registerCdsPass(const PassManagerBuilder &,
694 legacy::PassManagerBase &PM) {
695 PM.add(new CdsPass());
697 static RegisterStandardPasses
698 RegisterMyPass(PassManagerBuilder::EP_EarlyAsPossible,
700 >>>>>>> 0d737ead79278a1a67c5829f9c6bf84ee6a90cec