Adds fake conditional branches right after relaxed loads if necessary
[oota-llvm.git] / lib / CodeGen / AtomicExpandPass.cpp
index b2134a7859afc1a0b3db484afafcd31661ddd57e..c8308afe9c1450d1fa2b7215250779e7be1ef703 100644 (file)
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/CodeGen/AtomicExpandUtils.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Module.h"
+#include "llvm/IR/NoFolder.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
 
 using namespace llvm;
 
@@ -82,6 +89,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
   TLI = TM->getSubtargetImpl(F)->getTargetLowering();
 
   SmallVector<Instruction *, 1> AtomicInsts;
+  SmallVector<LoadInst*, 1> MonotonicLoadInsts;
 
   // Changing control-flow while iterating through it is a bad idea, so gather a
   // list of all atomic instructions before we start.
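The comment above spells out the collect-then-transform discipline: splitting blocks or rewriting instructions while walking the function would invalidate the iterator, so candidates (now including the relaxed loads gathered into MonotonicLoadInsts) are recorded first and only touched once the scan is finished. A minimal sketch of that pattern follows; rewriteRelaxedLoad is a hypothetical stand-in for whatever transformation runs after the scan and is not a function in this file:

    // Pattern sketch only (needs "llvm/IR/InstIterator.h"); rewriteRelaxedLoad
    // is a placeholder, not part of this patch.
    SmallVector<LoadInst *, 8> Worklist;
    for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II)
      if (auto *LI = dyn_cast<LoadInst>(&*II))
        if (LI->isAtomic() && LI->getOrdering() == Monotonic)
          Worklist.push_back(LI);          // read-only scan of F
    for (LoadInst *LI : Worklist)
      rewriteRelaxedLoad(LI);              // safe: F is no longer being walked
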
@@ -91,6 +99,10 @@ bool AtomicExpand::runOnFunction(Function &F) {
     if (I->isAtomic()) {
       switch (I->getOpcode()) {
         case Instruction::AtomicCmpXchg: {
+          // XXX-comment: On AArch64, AtomicCmpXchg is lowered to a
+          // conditional branch that already depends on the loaded value, so
+          // we don't need to do anything here.
+          /*
           auto* CmpXchg = dyn_cast<AtomicCmpXchgInst>(&*I);
           auto SuccOrdering = CmpXchg->getSuccessOrdering();
           if (SuccOrdering == Monotonic) {
@@ -98,19 +110,30 @@ bool AtomicExpand::runOnFunction(Function &F) {
           } else if (SuccOrdering == Release) {
             CmpXchg->setSuccessOrdering(AcquireRelease);
           }
+          */
           break;
         }
         case Instruction::AtomicRMW: {
+          // XXX-comment: Similar to AtomicCmpXchg. On AArch64 these are
+          // lowered to a loop whose exit condition depends on whether the
+          // store succeeded, which in turn depends on the loaded value.
+          /*
           auto* RMW = dyn_cast<AtomicRMWInst>(&*I);
           if (RMW->getOrdering() == Monotonic) {
             RMW->setOrdering(Acquire);
           }
+          */
           break;
         }
         case Instruction::Load: {
           auto* LI = dyn_cast<LoadInst>(&*I);
           if (LI->getOrdering() == Monotonic) {
+            /*
+            DEBUG(dbgs() << "Transforming relaxed loads to acquire loads: "
+                         << *LI << '\n');
             LI->setOrdering(Acquire);
+            */
+            MonotonicLoadInsts.push_back(LI);
           }
           break;
         }
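Taken together, the XXX-comments above argue that the AArch64 expansions of cmpxchg and RMW already end in a branch that depends on the loaded value, so only plain monotonic loads need help; instead of promoting them to acquire (the now commented-out path), they are queued in MonotonicLoadInsts so a fake conditional branch can be planted after each one. Below is a rough, purely hypothetical source-level illustration of the intended shape (the pass works on IR, and the new NoFolder include suggests the synthetic comparison is built so it cannot be constant-folded; an ordinary compiler would be free to delete the dead branch in the C++ shown here):

    #include <atomic>

    // Illustration only: "a fake conditional branch right after a relaxed
    // load". The branch condition is computed from the loaded value and both
    // arms rejoin immediately, so behaviour is unchanged, but the
    // load->branch control dependency keeps the later store from being
    // reordered before the load on AArch64.
    void publish(std::atomic<int> &Flag, int *Out) {
      int V = Flag.load(std::memory_order_relaxed);
      if (V == -1) {
        // intentionally empty: falls through to the same continuation
      }
      Out[0] = 1;   // store ordered after the load via the dependent branch
    }
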
@@ -136,7 +159,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
     if (TLI->getInsertFencesForAtomic()) {
       if (LI && isAtLeastAcquire(LI->getOrdering())) {
         FenceOrdering = LI->getOrdering();
-        LI->setOrdering(Monotonic);
+//        AddFakeConditionalBranch(
         IsStore = false;
         IsLoad = true;
       } else if (SI && isAtLeastRelease(SI->getOrdering())) {
@@ -204,6 +227,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
       MadeChange |= expandAtomicCmpXchg(CASI);
     }
   }
+
   return MadeChange;
 }
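
The fence-insertion hunk above leaves only a commented-out call to AddFakeConditionalBranch, and this section does not show the helper's body, so the following is no more than a guess at its shape, inferred from the newly added headers: SplitBlock from BasicBlockUtils to carve out the continuation block, and an IRBuilder parameterised with NoFolder so the synthetic comparison is not folded to a constant. Every detail below (the empty arm, the icmp against zero, the integer-load assumption) is an assumption, not the patch's actual code:

    // Hypothetical sketch of AddFakeConditionalBranch -- NOT the code from
    // this patch. Assumes an integer-typed relaxed load and the headers added
    // above ("llvm/IR/NoFolder.h", "llvm/Transforms/Utils/BasicBlockUtils.h")
    // plus "llvm/IR/IRBuilder.h".
    static void AddFakeConditionalBranch(LoadInst *LI) {
      BasicBlock *BB = LI->getParent();
      // Move everything after the load into a new continuation block.
      BasicBlock *Tail = SplitBlock(BB, LI->getNextNode());
      // SplitBlock terminated BB with an unconditional branch to Tail; drop
      // it so it can be replaced with the fake conditional branch.
      BB->getTerminator()->eraseFromParent();

      // An empty arm that immediately rejoins the continuation.
      BasicBlock *FakeBB = BasicBlock::Create(BB->getContext(), "fake.branch",
                                              BB->getParent(), Tail);
      BranchInst::Create(Tail, FakeBB);

      // NoFolder keeps the comparison as a real instruction instead of
      // folding it, so the branch visibly depends on the loaded value.
      IRBuilder<true, NoFolder> Builder(BB);
      Value *Zero = Constant::getNullValue(LI->getType());
      Value *Cond = Builder.CreateICmpEQ(LI, Zero, "fakecond");
      Builder.CreateCondBr(Cond, FakeBB, Tail);
    }

Presumably runOnFunction would then walk MonotonicLoadInsts once the scan is complete, call this helper for each collected load, and set MadeChange accordingly.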