//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
TLI = TM->getSubtargetImpl(F)->getTargetLowering();
SmallVector<Instruction *, 1> AtomicInsts;
+ SmallVector<LoadInst*, 1> MonotonicLoadInsts;
// Changing control-flow while iterating through it is a bad idea, so gather a
// list of all atomic instructions before we start.
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
- if (I->isAtomic())
+      // XXX-update: For relaxed atomic operations, change them to acquire.
+      // This covers relaxed loads, relaxed atomic RMW & relaxed atomic
+      // compare exchange.
+ if (I->isAtomic()) {
+ switch (I->getOpcode()) {
+ case Instruction::AtomicCmpXchg: {
+ // XXX-comment: AtomicCmpXchg in AArch64 will be translated to a
+ // conditional branch that contains the value of the load anyway, so
+ // we don't need to do anything.
+ /*
+ auto* CmpXchg = dyn_cast<AtomicCmpXchgInst>(&*I);
+ auto SuccOrdering = CmpXchg->getSuccessOrdering();
+ if (SuccOrdering == Monotonic) {
+ CmpXchg->setSuccessOrdering(Acquire);
+ } else if (SuccOrdering == Release) {
+ CmpXchg->setSuccessOrdering(AcquireRelease);
+ }
+ */
+ break;
+ }
+ case Instruction::AtomicRMW: {
+ // XXX-comment: Similar to AtomicCmpXchg. These instructions in
+ // AArch64 will be translated to a loop whose condition depends on the
+ // store status, which further depends on the load value.
+ /*
+ auto* RMW = dyn_cast<AtomicRMWInst>(&*I);
+ if (RMW->getOrdering() == Monotonic) {
+ RMW->setOrdering(Acquire);
+ }
+ */
+ break;
+ }
+ case Instruction::Load: {
+ auto* LI = dyn_cast<LoadInst>(&*I);
+ if (LI->getOrdering() == Monotonic) {
+ /*
+ DEBUG(dbgs() << "Transforming relaxed loads to acquire loads: "
+ << *LI << '\n');
+ LI->setOrdering(Acquire);
+ */
+ MonotonicLoadInsts.push_back(LI);
+ }
+ break;
+ }
+ default: {
+ break;
+ }
+ }
AtomicInsts.push_back(&*I);
+ }
}
bool MadeChange = false;
if (TLI->getInsertFencesForAtomic()) {
if (LI && isAtLeastAcquire(LI->getOrdering())) {
FenceOrdering = LI->getOrdering();
- LI->setOrdering(Monotonic);
+// AddFakeConditionalBranch(
IsStore = false;
IsLoad = true;
} else if (SI && isAtLeastRelease(SI->getOrdering())) {
MadeChange |= expandAtomicCmpXchg(CASI);
}
}
+
return MadeChange;
}