//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
-// appropriate (intrinsic-based) ldrex/strex loops.
+// target specific instructions which implement the same semantics in a way
+// that better fits the target backend. This can include the use of
+// (intrinsic-based) load-linked/store-conditional loops, AtomicCmpXchg, or
+// type coercions.
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
namespace {
class AtomicExpand: public FunctionPass {
const TargetMachine *TM;
+ const TargetLowering *TLI;
public:
static char ID; // Pass identification, replacement for typeid
explicit AtomicExpand(const TargetMachine *TM = nullptr)
- : FunctionPass(ID), TM(TM) {
+ : FunctionPass(ID), TM(TM), TLI(nullptr) {
initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
- bool expandAtomicInsts(Function &F);
- bool expandAtomicLoad(LoadInst *LI);
- bool expandAtomicStore(StoreInst *LI);
- bool expandAtomicRMW(AtomicRMWInst *AI);
+ private:
+ bool bracketInstWithFences(Instruction *I, AtomicOrdering Order,
+ bool IsStore, bool IsLoad);
+ IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
+ LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
+ bool tryExpandAtomicLoad(LoadInst *LI);
+ bool expandAtomicLoadToLL(LoadInst *LI);
+ bool expandAtomicLoadToCmpXchg(LoadInst *LI);
+ StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
+ bool expandAtomicStore(StoreInst *SI);
+ bool tryExpandAtomicRMW(AtomicRMWInst *AI);
+ bool expandAtomicOpToLLSC(
+ Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
+ std::function<Value *(IRBuilder<> &, Value *)> PerformOp);
bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
-
- AtomicOrdering insertLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
- void insertTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord);
+ bool isIdempotentRMW(AtomicRMWInst *AI);
+ bool simplifyIdempotentRMW(AtomicRMWInst *AI);
};
}
return new AtomicExpand(TM);
}
+
+namespace {
+
+bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal);
+Value* GetUntaintedAddress(Value* CurrentAddress);
+
+// The depth we trace down a variable to look for its dependence set.
+const unsigned kDependenceDepth = 4;
+
+// Recursively looks for variables that 'Val' depends on at the given depth
+// 'Depth', and adds them in 'DepSet'. If 'InsertOnlyLeafNodes' is true, only
+// inserts the leaf node values; otherwise, all visited nodes are included in
+// 'DepSet'. Note that constants will be ignored.
+template <typename SetType>
+void recursivelyFindDependence(SetType* DepSet, Value* Val,
+ bool InsertOnlyLeafNodes = false,
+ unsigned Depth = kDependenceDepth) {
+ if (Val == nullptr) {
+ return;
+ }
+ if (!InsertOnlyLeafNodes && !isa<Constant>(Val)) {
+ DepSet->insert(Val);
+ }
+ if (Depth == 0) {
+ // Cannot go deeper. Insert the leaf nodes.
+ if (InsertOnlyLeafNodes && !isa<Constant>(Val)) {
+ DepSet->insert(Val);
+ }
+ return;
+ }
+
+ // Go one step further to explore the dependence of the operands.
+ Instruction* I = nullptr;
+ if ((I = dyn_cast<Instruction>(Val))) {
+    if (isa<LoadInst>(I)) {
+      // A load is considered a leaf load of the dependence tree. Done.
+      DepSet->insert(Val);
+      return;
+    } else if (I->isBinaryOp()) {
+      BinaryOperator* BO = dyn_cast<BinaryOperator>(Val);
+      Value *Op0 = BO->getOperand(0), *Op1 = BO->getOperand(1);
+      recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes, Depth - 1);
+      recursivelyFindDependence(DepSet, Op1, InsertOnlyLeafNodes, Depth - 1);
+    } else if (I->isCast()) {
+      Value* Op0 = I->getOperand(0);
+      recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes, Depth - 1);
+    } else if (I->getOpcode() == Instruction::Select) {
+      Value* Op0 = I->getOperand(0);
+      Value* Op1 = I->getOperand(1);
+      Value* Op2 = I->getOperand(2);
+      recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes, Depth - 1);
+      recursivelyFindDependence(DepSet, Op1, InsertOnlyLeafNodes, Depth - 1);
+      recursivelyFindDependence(DepSet, Op2, InsertOnlyLeafNodes, Depth - 1);
+    } else if (I->getOpcode() == Instruction::GetElementPtr) {
+      for (unsigned i = 0; i < I->getNumOperands(); i++) {
+        recursivelyFindDependence(DepSet, I->getOperand(i),
+                                  InsertOnlyLeafNodes, Depth - 1);
+      }
+    } else if (I->getOpcode() == Instruction::Store) {
+      auto* SI = dyn_cast<StoreInst>(Val);
+      recursivelyFindDependence(DepSet, SI->getPointerOperand(),
+                                InsertOnlyLeafNodes, Depth - 1);
+      recursivelyFindDependence(DepSet, SI->getValueOperand(),
+                                InsertOnlyLeafNodes, Depth - 1);
+ } else {
+ Value* Op0 = nullptr;
+ Value* Op1 = nullptr;
+ switch (I->getOpcode()) {
+ case Instruction::ICmp:
+ case Instruction::FCmp: {
+ Op0 = I->getOperand(0);
+ Op1 = I->getOperand(1);
+          recursivelyFindDependence(DepSet, Op0, InsertOnlyLeafNodes,
+                                    Depth - 1);
+          recursivelyFindDependence(DepSet, Op1, InsertOnlyLeafNodes,
+                                    Depth - 1);
+ break;
+ }
+ default: {
+ // Be conservative. Add it and be done with it.
+ DepSet->insert(Val);
+ return;
+ }
+ }
+ }
+ } else if (isa<Constant>(Val)) {
+ // Not interested in constant values. Done.
+ return;
+ } else {
+ // Be conservative. Add it and be done with it.
+ DepSet->insert(Val);
+ return;
+ }
+}
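+
+// As an illustrative sketch of the traversal above (the value names are
+// hypothetical), given:
+//   %a = load i32, i32* @x
+//   %b = add i32 %a, 1
+//   %c = select i1 %flag, i32 %b, i32 0
+// a leaf-only query on '%c' yields {%a, %flag}: the select and the add are
+// expanded into their operands, the load '%a' stops the recursion, the
+// non-instruction value '%flag' is kept conservatively, and the constants
+// are dropped.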
+
+// Helper function to create a Cast instruction.
+Value* createCast(IRBuilder<true, NoFolder>& Builder, Value* DepVal,
+ Type* TargetIntegerType) {
+ Instruction::CastOps CastOp = Instruction::BitCast;
+ switch (DepVal->getType()->getTypeID()) {
+ case Type::IntegerTyID: {
+ CastOp = Instruction::SExt;
+ break;
+ }
+ case Type::FloatTyID:
+ case Type::DoubleTyID: {
+ CastOp = Instruction::FPToSI;
+ break;
+ }
+ case Type::PointerTyID: {
+ CastOp = Instruction::PtrToInt;
+ break;
+ }
+ default: { break; }
+ }
+
+ return Builder.CreateCast(CastOp, DepVal, TargetIntegerType);
+}
+
+// Given a value, if it's a tainted address, this function returns the
+// instruction that ORs the "dependence value" with the "original address".
+// Otherwise, returns nullptr. This instruction is the first OR instruction
+// where one of its operand is an AND instruction with an operand being 0.
+//
+// E.g., it returns '%4 = or i32 %3, %2' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Instruction* getOrAddress(Value* CurrentAddress) {
+  // Is it a cast from integer to pointer type?
+  Instruction* OrAddress = nullptr;
+  Instruction* AndDep = nullptr;
+  Constant* ZeroConst = nullptr;
+
+ const Instruction* CastToPtr = dyn_cast<Instruction>(CurrentAddress);
+ if (CastToPtr && CastToPtr->getOpcode() == Instruction::IntToPtr) {
+ // Is it an OR instruction: %1 = or %and, %actualAddress.
+ if ((OrAddress = dyn_cast<Instruction>(CastToPtr->getOperand(0))) &&
+ OrAddress->getOpcode() == Instruction::Or) {
+      // The first operand should be an AND instruction.
+      AndDep = dyn_cast<Instruction>(OrAddress->getOperand(0));
+      if (AndDep && AndDep->getOpcode() == Instruction::And) {
+        // Also make sure the second operand of the "AND" is 0, or the "AND"
+        // is marked explicitly by "NoInstCombine".
+ if ((ZeroConst = dyn_cast<Constant>(AndDep->getOperand(1))) &&
+ ZeroConst->isNullValue()) {
+ return OrAddress;
+ }
+ }
+ }
+ }
+  // Looks like it hasn't been tainted.
+ return nullptr;
+}
+
+// Given a value, if it's a tainted address, this function returns the
+// instruction that taints the "dependence value". Otherwise, returns nullptr.
+// This instruction is the last AND instruction where one of its operand is 0.
+// E.g., it returns '%3' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Instruction* getAndDependence(Value* CurrentAddress) {
+ // If 'CurrentAddress' is tainted, get the OR instruction.
+ auto* OrAddress = getOrAddress(CurrentAddress);
+ if (OrAddress == nullptr) {
+ return nullptr;
+ }
+
+  // getOrAddress() already checked that operand 0 is an AND instruction.
+ auto* AndDepInst = dyn_cast<Instruction>(OrAddress->getOperand(0));
+ assert(AndDepInst);
+ return AndDepInst;
+}
+
+// Given a value, if it's a tainted address, this function returns
+// the "dependence value", which is the first operand in the AND instruction.
+// E.g., it returns '%1' given 'CurrentAddress' is '%5'.
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+Value* getDependence(Value* CurrentAddress) {
+ auto* AndInst = getAndDependence(CurrentAddress);
+ if (AndInst == nullptr) {
+ return nullptr;
+ }
+ return AndInst->getOperand(0);
+}
+
+// Given an address that has been tainted, returns the only condition it depends
+// on, if any; otherwise, returns nullptr.
+Value* getConditionDependence(Value* Address) {
+ auto* Dep = getDependence(Address);
+ if (Dep == nullptr) {
+ // 'Address' has not been dependence-tainted.
+ return nullptr;
+ }
+
+ Value* Operand = Dep;
+ while (true) {
+ auto* Inst = dyn_cast<Instruction>(Operand);
+ if (Inst == nullptr) {
+ // Non-instruction type does not have condition dependence.
+ return nullptr;
+ }
+ if (Inst->getOpcode() == Instruction::ICmp) {
+ return Inst;
+ } else {
+ if (Inst->getNumOperands() != 1) {
+ return nullptr;
+ } else {
+ Operand = Inst->getOperand(0);
+ }
+ }
+ }
+}
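+
+// E.g., in the tainting pattern shown above:
+//   %cmp = icmp ne i32 %0, 42
+//   %1 = sext i1 %cmp to i32
+//   %3 = and i32 %1, 0
+// getConditionDependence() starts from the dependence '%1', walks through
+// the single-operand sext, and returns the icmp '%cmp'.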
+
+// Conservatively decides whether the dependence set of 'Val1' includes the
+// dependence set of 'Val2'. 'Val1ExpandLevel' and 'Val2ExpandLevel' bound
+// how deeply each value is expanded ('Val2' is expanded to leaf nodes only).
+// If it returns true, the dependence set of 'Val1' includes that of 'Val2';
+// otherwise, it only means we cannot conclusively decide it.
+bool dependenceSetInclusion(Value* Val1, Value* Val2,
+ int Val1ExpandLevel = 2 * kDependenceDepth,
+ int Val2ExpandLevel = kDependenceDepth) {
+ typedef SmallSet<Value*, 8> IncludingSet;
+ typedef SmallSet<Value*, 4> IncludedSet;
+
+ IncludingSet DepSet1;
+ IncludedSet DepSet2;
+ // Look for more depths for the including set.
+ recursivelyFindDependence(&DepSet1, Val1, false /*Insert all visited nodes*/,
+ Val1ExpandLevel);
+ recursivelyFindDependence(&DepSet2, Val2, true /*Only insert leaf nodes*/,
+ Val2ExpandLevel);
+
+  auto set_inclusion = [](const IncludingSet &FullSet,
+                          const IncludedSet &Subset) {
+ for (auto* Dep : Subset) {
+ if (0 == FullSet.count(Dep)) {
+ return false;
+ }
+ }
+ return true;
+ };
+ bool inclusion = set_inclusion(DepSet1, DepSet2);
+ DEBUG(dbgs() << "[dependenceSetInclusion]: " << inclusion << "\n");
+ DEBUG(dbgs() << "Including set for: " << *Val1 << "\n");
+ DEBUG(for (const auto* Dep : DepSet1) { dbgs() << "\t\t" << *Dep << "\n"; });
+ DEBUG(dbgs() << "Included set for: " << *Val2 << "\n");
+ DEBUG(for (const auto* Dep : DepSet2) { dbgs() << "\t\t" << *Dep << "\n"; });
+
+ return inclusion;
+}
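+
+// For instance (hypothetical values), if '%b = add i32 %a, 1' and
+// '%c = xor i32 %b, %b' where '%a' is a load, the fully expanded set of
+// '%c' is {%c, %b, %a} and the leaf-only set of '%b' is {%a}, so
+// dependenceSetInclusion(%c, %b) returns true.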
+
+// Recursively iterates through the operands spawned from 'DepVal'. If there
+// exists a single value that 'DepVal' only depends on, we call that value the
+// root dependence of 'DepVal' and return it. Otherwise, return 'DepVal'.
+Value* getRootDependence(Value* DepVal) {
+ SmallSet<Value*, 8> DepSet;
+ for (unsigned depth = kDependenceDepth; depth > 0; --depth) {
+ recursivelyFindDependence(&DepSet, DepVal, true /*Only insert leaf nodes*/,
+ depth);
+ if (DepSet.size() == 1) {
+ return *DepSet.begin();
+ }
+ DepSet.clear();
+ }
+ return DepVal;
+}
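+
+// E.g., for "getelementptr inbounds %struct, %struct* %0, i64 0, i32 123",
+// all operands other than '%0' are constants, so '%0' is the root
+// dependence.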
+
+// This function actually taints the address of 'SI' with 'DepVal'. If the
+// address of 'SI' already depends on whatever 'DepVal' depends on, this
+// function doesn't do anything and returns false. Otherwise, returns true.
+//
+// This effect forces the store and any stores that come later to depend on
+// 'DepVal'. For example, we have a condition "cond", and a store instruction
+// "s: STORE addr, val". If we want "s" (and any later store) to depend on
+// "cond", we do the following:
+// %conv = sext i1 %cond to i32
+// %addrVal = ptrtoint i32* %addr to i32
+// %andCond = and i32 %conv, 0
+// %orAddr = or i32 %andCond, %addrVal
+// %NewAddr = inttoptr i32 %orAddr to i32*
+//
+// This is a more concrete example:
+// ------
+// %0 = load i32, i32* @y, align 4, !tbaa !1
+// %cmp = icmp ne i32 %0, 42 // <== this is like the condition
+// %1 = sext i1 %cmp to i32
+// %2 = ptrtoint i32* @x to i32
+// %3 = and i32 %1, 0
+// %4 = or i32 %3, %2
+// %5 = inttoptr i32 %4 to i32*
+// store i32 1, i32* %5, align 4
+bool taintStoreAddress(StoreInst* SI, Value* DepVal,
+ const char* calling_func = __builtin_FUNCTION()) {
+ DEBUG(dbgs() << "Called from " << calling_func << '\n');
+ IRBuilder<true, NoFolder> Builder(SI);
+ BasicBlock* BB = SI->getParent();
+ Value* Address = SI->getPointerOperand();
+ Type* TargetIntegerType =
+ IntegerType::get(Address->getContext(),
+ BB->getModule()->getDataLayout().getPointerSizeInBits());
+
+  // Does SI's address already depend on whatever 'DepVal' depends on?
+ if (StoreAddressDependOnValue(SI, DepVal)) {
+ return false;
+ }
+
+ // Figure out if there's a root variable 'DepVal' depends on. For example, we
+ // can extract "getelementptr inbounds %struct, %struct* %0, i64 0, i32 123"
+ // to be "%struct* %0" since all other operands are constant.
+ DepVal = getRootDependence(DepVal);
+
+ // Is this already a dependence-tainted store?
+ Value* OldDep = getDependence(Address);
+ if (OldDep) {
+ // The address of 'SI' has already been tainted. Just need to absorb the
+ // DepVal to the existing dependence in the address of SI.
+ Instruction* AndDep = getAndDependence(Address);
+ IRBuilder<true, NoFolder> Builder(AndDep);
+ Value* NewDep = nullptr;
+ if (DepVal->getType() == AndDep->getType()) {
+ NewDep = Builder.CreateAnd(OldDep, DepVal);
+ } else {
+ NewDep = Builder.CreateAnd(
+ OldDep, createCast(Builder, DepVal, TargetIntegerType));
+ }
+
+    // Use the new AND instruction as the dependence.
+ AndDep->setOperand(0, NewDep);
+ return true;
+ }
+
+ // SI's address has not been tainted. Now taint it with 'DepVal'.
+ Value* CastDepToInt = createCast(Builder, DepVal, TargetIntegerType);
+ Value* PtrToIntCast = Builder.CreatePtrToInt(Address, TargetIntegerType);
+ Value* AndDepVal =
+ Builder.CreateAnd(CastDepToInt, ConstantInt::get(TargetIntegerType, 0));
+ auto AndInst = dyn_cast<Instruction>(AndDepVal);
+ // XXX-comment: The original IR InstCombiner would change our and instruction
+ // to a select and then the back end optimize the condition out. We attach a
+ // flag to instructions and set it here to inform the InstCombiner to not to
+ // touch this and instruction at all.
+ Value* OrAddr = Builder.CreateOr(AndDepVal, PtrToIntCast);
+ Value* NewAddr = Builder.CreateIntToPtr(OrAddr, Address->getType());
+
+ DEBUG(dbgs() << "[taintStoreAddress]\n"
+ << "Original store: " << *SI << '\n');
+ SI->setOperand(1, NewAddr);
+
+ // Debug output.
+ DEBUG(dbgs() << "\tTargetIntegerType: " << *TargetIntegerType << '\n'
+ << "\tCast dependence value to integer: " << *CastDepToInt
+ << '\n'
+ << "\tCast address to integer: " << *PtrToIntCast << '\n'
+ << "\tAnd dependence value: " << *AndDepVal << '\n'
+ << "\tOr address: " << *OrAddr << '\n'
+ << "\tCast or instruction to address: " << *NewAddr << "\n\n");
+
+ return true;
+}
+
+// Looks for the previous store in the 'if' block 'BrBB' that makes the
+// speculative store 'StoreToHoist' safe.
+Value* getSpeculativeStoreInPrevBB(StoreInst* StoreToHoist, BasicBlock* BrBB) {
+ assert(StoreToHoist && "StoreToHoist must be a real store");
+
+ Value* StorePtr = StoreToHoist->getPointerOperand();
+
+ // Look for a store to the same pointer in BrBB.
+ for (BasicBlock::reverse_iterator RI = BrBB->rbegin(), RE = BrBB->rend();
+ RI != RE; ++RI) {
+ Instruction* CurI = &*RI;
+
+ StoreInst* SI = dyn_cast<StoreInst>(CurI);
+    // Found a previous store; make sure it stores to the same location.
+ // XXX-update: If the previous store's original untainted address are the
+ // same as 'StorePtr', we are also good to hoist the store.
+ if (SI && (SI->getPointerOperand() == StorePtr ||
+ GetUntaintedAddress(SI->getPointerOperand()) == StorePtr)) {
+ // Found the previous store, return its value operand.
+ return SI;
+ }
+ }
+
+  llvm_unreachable(
+      "We should not reach here since this store is safe to speculate");
+}
+
+// XXX-comment: Returns true if it changes the code, false otherwise (the
+// branch condition already depends on 'DepVal').
+bool taintConditionalBranch(BranchInst* BI, Value* DepVal) {
+ assert(BI->isConditional());
+ auto* Cond = BI->getOperand(0);
+ if (dependenceSetInclusion(Cond, DepVal)) {
+ // The dependence/ordering is self-evident.
+ return false;
+ }
+
+ IRBuilder<true, NoFolder> Builder(BI);
+ auto* AndDep =
+ Builder.CreateAnd(DepVal, ConstantInt::get(DepVal->getType(), 0));
+ auto* TruncAndDep =
+ Builder.CreateTrunc(AndDep, IntegerType::get(DepVal->getContext(), 1));
+ auto* OrCond = Builder.CreateOr(TruncAndDep, Cond);
+ BI->setOperand(0, OrCond);
+ return true;
+}
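+
+// Roughly, with an i32 dependence value '%dep', the transformation above
+// turns:
+//   br i1 %cond, label %if.then, label %if.end
+// into:
+//   %1 = and i32 %dep, 0
+//   %2 = trunc i32 %1 to i1
+//   %3 = or i1 %2, %cond
+//   br i1 %3, label %if.then, label %if.end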
+
+// XXX-update: For a relaxed load 'LI', find the first immediately following
+// atomic store or conditional branch. Returns nullptr if 'LI' can be left
+// as is; returns 'LI' itself if there is no such following store/branch
+// instruction, in which case we can only enforce the load with 'acquire'.
+Instruction* findFirstStoreCondBranchInst(LoadInst* LI) {
+ // In some situations, relaxed loads can be left as is:
+  // 1. The relaxed load is used to calculate the address of the immediately
+  // following store;
+  // 2. The relaxed load is used as a condition in the immediately following
+  // conditional branch, and there are no stores in between. This is actually
+  // quite common. E.g.,
+ // int r1 = x.load(relaxed);
+ // if (r1 != 0) {
+ // y.store(1, relaxed);
+ // }
+
+ auto* BB = LI->getParent();
+ auto BE = BB->end();
+ auto BBI = BasicBlock::iterator(LI);
+ BBI++;
+ while (true) {
+    for (; BBI != BE; BBI++) {
+      Instruction* Inst = &*BBI;
+ if (Inst->getOpcode() == Instruction::Store) {
+ return Inst;
+ } else if (Inst->getOpcode() == Instruction::Br) {
+ auto* BrInst = dyn_cast<BranchInst>(Inst);
+ if (BrInst->isConditional()) {
+ return Inst;
+ } else {
+ // Reinitialize iterators with the destination of the unconditional
+ // branch.
+ BB = BrInst->getSuccessor(0);
+ BBI = BB->begin();
+ BE = BB->end();
+ break;
+ }
+ }
+ }
+ if (BBI == BE) {
+ return LI;
+ }
+ }
+}
+
+void taintMonotonicLoads(const SmallVector<LoadInst*, 1>& MonotonicLoadInsts) {
+ for (auto* LI : MonotonicLoadInsts) {
+ auto* FirstInst = findFirstStoreCondBranchInst(LI);
+ if (FirstInst == nullptr) {
+ // No need to worry about the relaxed load.
+ continue;
+ }
+ if (FirstInst == LI) {
+ // We don't seem to be able to taint a following store/conditional branch
+ // instruction. Simply make it acquire.
+ LI->setOrdering(Acquire);
+ continue;
+ }
+    // Taint 'FirstInst', which could be a store or a conditional branch
+    // instruction.
+ if (FirstInst->getOpcode() == Instruction::Store) {
+ taintStoreAddress(dyn_cast<StoreInst>(FirstInst), LI);
+ } else if (FirstInst->getOpcode() == Instruction::Br) {
+ taintConditionalBranch(dyn_cast<BranchInst>(FirstInst), LI);
+ } else {
+      llvm_unreachable("findFirstStoreCondBranchInst() should return a "
+                       "store/conditional branch instruction");
+ }
+ }
+}
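+
+// As a sketch (hypothetical IR), for
+//   %r = load atomic i32, i32* @x monotonic, align 4
+//   store i32 1, i32* @y, align 4
+// the store is the first store/branch after the relaxed load, so its
+// address '@y' is tainted with '%r' via taintStoreAddress(), creating an
+// address dependence that orders the store after the load.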
+
+/**** Implementations of public methods for dependence tainting ****/
+Value* GetUntaintedAddress(Value* CurrentAddress) {
+ auto* OrAddress = getOrAddress(CurrentAddress);
+ if (OrAddress == nullptr) {
+ // Is it tainted by a select instruction?
+ auto* Inst = dyn_cast<Instruction>(CurrentAddress);
+ if (nullptr != Inst && Inst->getOpcode() == Instruction::Select) {
+ // A selection instruction.
+ if (Inst->getOperand(1) == Inst->getOperand(2)) {
+ return Inst->getOperand(1);
+ }
+ }
+
+ return CurrentAddress;
+ }
+
+ auto* CastToInt = dyn_cast<Instruction>(OrAddress->getOperand(1));
+ if (CastToInt && CastToInt->getOpcode() == Instruction::PtrToInt) {
+ return CastToInt->getOperand(0);
+ } else {
+    // This should be a PtrToInt constant expression.
+ ConstantExpr* PtrToIntExpr =
+ dyn_cast<ConstantExpr>(OrAddress->getOperand(1));
+ if (PtrToIntExpr && PtrToIntExpr->getOpcode() == Instruction::PtrToInt) {
+ return PtrToIntExpr->getOperand(0);
+ }
+ }
+
+  // Looks like it hasn't been dependence-tainted. Return the address itself.
+ return CurrentAddress;
+}
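+
+// E.g., in the tainting pattern shown above getOrAddress(), calling
+// GetUntaintedAddress() on '%5' follows the inttoptr/or/ptrtoint chain and
+// returns the original address '@x'.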
+
+MemoryLocation GetUntaintedMemoryLocation(StoreInst* SI) {
+ AAMDNodes AATags;
+ SI->getAAMetadata(AATags);
+ const auto& DL = SI->getModule()->getDataLayout();
+ const auto* OriginalAddr = GetUntaintedAddress(SI->getPointerOperand());
+ DEBUG(if (OriginalAddr != SI->getPointerOperand()) {
+ dbgs() << "[GetUntaintedMemoryLocation]\n"
+ << "Storing address: " << *SI->getPointerOperand()
+ << "\nUntainted address: " << *OriginalAddr << "\n";
+ });
+ return MemoryLocation(OriginalAddr,
+ DL.getTypeStoreSize(SI->getValueOperand()->getType()),
+ AATags);
+}
+
+bool TaintDependenceToStore(StoreInst* SI, Value* DepVal) {
+ if (dependenceSetInclusion(SI, DepVal)) {
+ return false;
+ }
+
+ bool tainted = taintStoreAddress(SI, DepVal);
+ assert(tainted);
+ return tainted;
+}
+
+bool TaintDependenceToStoreAddress(StoreInst* SI, Value* DepVal) {
+ if (dependenceSetInclusion(SI->getPointerOperand(), DepVal)) {
+ return false;
+ }
+
+ bool tainted = taintStoreAddress(SI, DepVal);
+ assert(tainted);
+ return tainted;
+}
+
+bool CompressTaintedStore(BasicBlock* BB) {
+  // This function looks for windows of adjacent stores in 'BB' that satisfy
+  // the following condition (and then does the optimization):
+  // *Addr(d1) = v1, d1 is a condition and is the only dependence the store's
+  // address depends on && Dep(v1) includes Dep(d1);
+  // *Addr(d2) = v2, d2 is a condition and is the only dependence the store's
+  // address depends on && Dep(v2) includes Dep(d2) &&
+  // Dep(d2) includes Dep(d1);
+  // ...
+  // *Addr(dN) = vN, dN is a condition and is the only dependence the store's
+  // address depends on && Dep(dN) includes Dep(d(N-1)).
+  //
+  // As a result, Dep(dN) includes [Dep(d1) V ... V Dep(d(N-1))], so we can
+ // safely transform the above to the following. In between these stores, we
+ // can omit untainted stores to the same address 'Addr' since they internally
+ // have dependence on the previous stores on the same address.
+ // =>
+ // *Addr = v1
+ // *Addr = v2
+ // *Addr(d3) = v3
+ for (auto BI = BB->begin(), BE = BB->end(); BI != BE; BI++) {
+    // Look for the first store in such a window of adjacent stores.
+ auto* FirstSI = dyn_cast<StoreInst>(&*BI);
+ if (!FirstSI) {
+ continue;
+ }
+
+ // The first store in the window must be tainted.
+ auto* UntaintedAddress = GetUntaintedAddress(FirstSI->getPointerOperand());
+ if (UntaintedAddress == FirstSI->getPointerOperand()) {
+ continue;
+ }
+
+ // The first store's address must directly depend on and only depend on a
+ // condition.
+ auto* FirstSIDepCond = getConditionDependence(FirstSI->getPointerOperand());
+ if (nullptr == FirstSIDepCond) {
+ continue;
+ }
+
+ // Dep(first store's storing value) includes Dep(tainted dependence).
+ if (!dependenceSetInclusion(FirstSI->getValueOperand(), FirstSIDepCond)) {
+ continue;
+ }
+
+ // Look for subsequent stores to the same address that satisfy the condition
+ // of "compressing the dependence".
+    SmallVector<StoreInst*, 8> AdjacentStores;
+    AdjacentStores.push_back(FirstSI);
+ auto BII = BasicBlock::iterator(FirstSI);
+ for (BII++; BII != BE; BII++) {
+ auto* CurrSI = dyn_cast<StoreInst>(&*BII);
+ if (!CurrSI) {
+ if (BII->mayHaveSideEffects()) {
+ // Be conservative. Instructions with side effects are similar to
+ // stores.
+ break;
+ }
+ continue;
+ }
+
+ auto* OrigAddress = GetUntaintedAddress(CurrSI->getPointerOperand());
+ auto* CurrSIDepCond = getConditionDependence(CurrSI->getPointerOperand());
+ // All other stores must satisfy either:
+ // A. 'CurrSI' is an untainted store to the same address, or
+ // B. the combination of the following 5 subconditions:
+ // 1. Tainted;
+ // 2. Untainted address is the same as the group's address;
+ // 3. The address is tainted with a sole value which is a condition;
+ // 4. The storing value depends on the condition in 3.
+ // 5. The condition in 3 depends on the previous stores dependence
+ // condition.
+
+ // Condition A. Should ignore this store directly.
+ if (OrigAddress == CurrSI->getPointerOperand() &&
+ OrigAddress == UntaintedAddress) {
+ continue;
+ }
+ // Check condition B.
+ if (OrigAddress == CurrSI->getPointerOperand() ||
+ OrigAddress != UntaintedAddress || CurrSIDepCond == nullptr ||
+ !dependenceSetInclusion(CurrSI->getValueOperand(), CurrSIDepCond)) {
+ // Check condition 1, 2, 3 & 4.
+ break;
+ }
+
+ // Check condition 5.
+      StoreInst* PrevSI = AdjacentStores[AdjacentStores.size() - 1];
+      auto* PrevSIDepCond = getConditionDependence(PrevSI->getPointerOperand());
+      assert(PrevSIDepCond &&
+             "Store in the group must already depend on a condition");
+ if (!dependenceSetInclusion(CurrSIDepCond, PrevSIDepCond)) {
+ break;
+ }
+
+      AdjacentStores.push_back(CurrSI);
+ }
+
+    if (AdjacentStores.size() == 1) {
+ // The outer loop should keep looking from the next store.
+ continue;
+ }
+
+ // Now we have such a group of tainted stores to the same address.
+ DEBUG(dbgs() << "[CompressTaintedStore]\n");
+ DEBUG(dbgs() << "Original BB\n");
+ DEBUG(dbgs() << *BB << '\n');
+    auto* LastSI = AdjacentStores[AdjacentStores.size() - 1];
+    for (unsigned i = 0; i < AdjacentStores.size() - 1; ++i) {
+      auto* SI = AdjacentStores[i];
+
+ // Use the original address for stores before the last one.
+ SI->setOperand(1, UntaintedAddress);
+
+    DEBUG(dbgs() << "Store address has been restored: " << *SI << '\n';);
+ }
+ // XXX-comment: Try to make the last store use fewer registers.
+ // If LastSI's storing value is a select based on the condition with which
+ // its address is tainted, transform the tainted address to a select
+ // instruction, as follows:
+ // r1 = Select Cond ? A : B
+ // r2 = Cond & 0
+ // r3 = Addr | r2
+ // *r3 = r1
+ // ==>
+ // r1 = Select Cond ? A : B
+ // r2 = Select Cond ? Addr : Addr
+ // *r2 = r1
+ // The idea is that both Select instructions depend on the same condition,
+ // so hopefully the backend can generate two cmov instructions for them (and
+ // this saves the number of registers needed).
+ auto* LastSIDep = getConditionDependence(LastSI->getPointerOperand());
+ auto* LastSIValue = dyn_cast<Instruction>(LastSI->getValueOperand());
+ if (LastSIValue && LastSIValue->getOpcode() == Instruction::Select &&
+ LastSIValue->getOperand(0) == LastSIDep) {
+ // XXX-comment: Maybe it's better for us to just leave it as an and/or
+ // dependence pattern.
+ IRBuilder<true, NoFolder> Builder(LastSI);
+ auto* Address =
+ Builder.CreateSelect(LastSIDep, UntaintedAddress, UntaintedAddress);
+ LastSI->setOperand(1, Address);
+    DEBUG(dbgs() << "The last store becomes: " << *LastSI << "\n\n";);
+ }
+ }
+
+ return true;
+}
+
+bool PassDependenceToStore(Value* OldAddress, StoreInst* NewStore) {
+ Value* OldDep = getDependence(OldAddress);
+ // Return false when there's no dependence to pass from the OldAddress.
+ if (!OldDep) {
+ return false;
+ }
+
+ // No need to pass the dependence to NewStore's address if it already depends
+ // on whatever 'OldAddress' depends on.
+ if (StoreAddressDependOnValue(NewStore, OldDep)) {
+ return false;
+ }
+ return taintStoreAddress(NewStore, OldAddress);
+}
+
+SmallSet<Value*, 8> FindDependence(Value* Val) {
+ SmallSet<Value*, 8> DepSet;
+ recursivelyFindDependence(&DepSet, Val, true /*Only insert leaf nodes*/);
+ return DepSet;
+}
+
+bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal) {
+ return dependenceSetInclusion(SI->getPointerOperand(), DepVal);
+}
+
+bool StoreDependOnValue(StoreInst* SI, Value* Dep) {
+ return dependenceSetInclusion(SI, Dep);
+}
+
+} // namespace
+
+
bool AtomicExpand::runOnFunction(Function &F) {
- if (!TM || !TM->getSubtargetImpl()->enableAtomicExpand())
+ if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand())
return false;
+ TLI = TM->getSubtargetImpl(F)->getTargetLowering();
SmallVector<Instruction *, 1> AtomicInsts;
+ SmallVector<LoadInst*, 1> MonotonicLoadInsts;
// Changing control-flow while iterating through it is a bad idea, so gather a
// list of all atomic instructions before we start.
- for (BasicBlock &BB : F)
- for (Instruction &Inst : BB) {
- if (isa<AtomicRMWInst>(&Inst) || isa<AtomicCmpXchgInst>(&Inst) ||
- (isa<LoadInst>(&Inst) && cast<LoadInst>(&Inst)->isAtomic()) ||
- (isa<StoreInst>(&Inst) && cast<StoreInst>(&Inst)->isAtomic()))
- AtomicInsts.push_back(&Inst);
+ for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
+    // XXX-update: For relaxed loads, collect them so that we can enforce an
+    // ordering on them afterwards (see taintMonotonicLoads()). Relaxed atomic
+    // RMWs and compare exchanges need no extra treatment; see the comments in
+    // the cases below.
+ if (I->isAtomic()) {
+ switch (I->getOpcode()) {
+ case Instruction::AtomicCmpXchg: {
+ // XXX-comment: AtomicCmpXchg in AArch64 will be translated to a
+ // conditional branch that contains the value of the load anyway, so
+ // we don't need to do anything.
+ /*
+ auto* CmpXchg = dyn_cast<AtomicCmpXchgInst>(&*I);
+ auto SuccOrdering = CmpXchg->getSuccessOrdering();
+ if (SuccOrdering == Monotonic) {
+ CmpXchg->setSuccessOrdering(Acquire);
+ } else if (SuccOrdering == Release) {
+ CmpXchg->setSuccessOrdering(AcquireRelease);
+ }
+ */
+ break;
+ }
+ case Instruction::AtomicRMW: {
+ // XXX-comment: Similar to AtomicCmpXchg. These instructions in
+ // AArch64 will be translated to a loop whose condition depends on the
+ // store status, which further depends on the load value.
+ /*
+ auto* RMW = dyn_cast<AtomicRMWInst>(&*I);
+ if (RMW->getOrdering() == Monotonic) {
+ RMW->setOrdering(Acquire);
+ }
+ */
+ break;
+ }
+ case Instruction::Load: {
+ auto* LI = dyn_cast<LoadInst>(&*I);
+ if (LI->getOrdering() == Monotonic) {
+ /*
+ DEBUG(dbgs() << "Transforming relaxed loads to acquire loads: "
+ << *LI << '\n');
+ LI->setOrdering(Acquire);
+ */
+ MonotonicLoadInsts.push_back(LI);
+ }
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ AtomicInsts.push_back(&*I);
}
+ }
bool MadeChange = false;
- for (Instruction *Inst : AtomicInsts) {
- if (!TM->getSubtargetImpl()->getTargetLowering()->shouldExpandAtomicInIR(
- Inst))
- continue;
+ for (auto I : AtomicInsts) {
+ auto LI = dyn_cast<LoadInst>(I);
+ auto SI = dyn_cast<StoreInst>(I);
+ auto RMWI = dyn_cast<AtomicRMWInst>(I);
+ auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
+ assert((LI || SI || RMWI || CASI || isa<FenceInst>(I)) &&
+ "Unknown atomic instruction");
- if (AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(Inst))
- MadeChange |= expandAtomicRMW(AI);
- else if (AtomicCmpXchgInst *CI = dyn_cast<AtomicCmpXchgInst>(Inst))
- MadeChange |= expandAtomicCmpXchg(CI);
- else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
- MadeChange |= expandAtomicLoad(LI);
- else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
- MadeChange |= expandAtomicStore(SI);
- else
- llvm_unreachable("Unknown atomic instruction");
+ auto FenceOrdering = Monotonic;
+    bool IsStore = false, IsLoad = false;
+ if (TLI->getInsertFencesForAtomic()) {
+ if (LI && isAtLeastAcquire(LI->getOrdering())) {
+ FenceOrdering = LI->getOrdering();
+ LI->setOrdering(Monotonic);
+ IsStore = false;
+ IsLoad = true;
+ } else if (SI && isAtLeastRelease(SI->getOrdering())) {
+ FenceOrdering = SI->getOrdering();
+ SI->setOrdering(Monotonic);
+ IsStore = true;
+ IsLoad = false;
+ } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
+ isAtLeastAcquire(RMWI->getOrdering()))) {
+ FenceOrdering = RMWI->getOrdering();
+ RMWI->setOrdering(Monotonic);
+ IsStore = IsLoad = true;
+ } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
+ (isAtLeastRelease(CASI->getSuccessOrdering()) ||
+ isAtLeastAcquire(CASI->getSuccessOrdering()))) {
+ // If a compare and swap is lowered to LL/SC, we can do smarter fence
+ // insertion, with a stronger one on the success path than on the
+ // failure path. As a result, fence insertion is directly done by
+ // expandAtomicCmpXchg in that case.
+ FenceOrdering = CASI->getSuccessOrdering();
+ CASI->setSuccessOrdering(Monotonic);
+ CASI->setFailureOrdering(Monotonic);
+ IsStore = IsLoad = true;
+ }
+
+ if (FenceOrdering != Monotonic) {
+ MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
+ }
+ }
+
+ if (LI) {
+ if (LI->getType()->isFloatingPointTy()) {
+ // TODO: add a TLI hook to control this so that each target can
+ // convert to lowering the original type one at a time.
+ LI = convertAtomicLoadToIntegerType(LI);
+ assert(LI->getType()->isIntegerTy() && "invariant broken");
+ MadeChange = true;
+ }
+
+ MadeChange |= tryExpandAtomicLoad(LI);
+ } else if (SI) {
+ if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
+ // TODO: add a TLI hook to control this so that each target can
+ // convert to lowering the original type one at a time.
+ SI = convertAtomicStoreToIntegerType(SI);
+ assert(SI->getValueOperand()->getType()->isIntegerTy() &&
+ "invariant broken");
+ MadeChange = true;
+ }
+
+ if (TLI->shouldExpandAtomicStoreInIR(SI))
+ MadeChange |= expandAtomicStore(SI);
+ } else if (RMWI) {
+ // There are two different ways of expanding RMW instructions:
+ // - into a load if it is idempotent
+ // - into a Cmpxchg/LL-SC loop otherwise
+ // we try them in that order.
+
+ if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
+ MadeChange = true;
+ } else {
+ MadeChange |= tryExpandAtomicRMW(RMWI);
+ }
+ } else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI)) {
+ MadeChange |= expandAtomicCmpXchg(CASI);
+ }
}
+ taintMonotonicLoads(MonotonicLoadInsts);
+
return MadeChange;
}
-bool AtomicExpand::expandAtomicLoad(LoadInst *LI) {
- // Load instructions don't actually need a leading fence, even in the
- // SequentiallyConsistent case.
- AtomicOrdering MemOpOrder =
- TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic()
- ? Monotonic
- : LI->getOrdering();
+bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order,
+ bool IsStore, bool IsLoad) {
+ IRBuilder<> Builder(I);
- // The only 64-bit load guaranteed to be single-copy atomic by the ARM is
- // an ldrexd (A3.5.3).
+ auto LeadingFence = TLI->emitLeadingFence(Builder, Order, IsStore, IsLoad);
+
+ auto TrailingFence = TLI->emitTrailingFence(Builder, Order, IsStore, IsLoad);
+ // The trailing fence is emitted before the instruction instead of after
+ // because there is no easy way of setting Builder insertion point after
+ // an instruction. So we must erase it from the BB, and insert it back
+ // in the right place.
+ // We have a guard here because not every atomic operation generates a
+ // trailing fence.
+ if (TrailingFence) {
+ TrailingFence->removeFromParent();
+ TrailingFence->insertAfter(I);
+ }
+
+ return (LeadingFence || TrailingFence);
+}
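+
+// On targets that set getInsertFencesForAtomic(), this mirrors what the old
+// insertLeadingFence()/insertTrailingFence() helpers produced; e.g., a
+// seq_cst store may be bracketed roughly as:
+//   fence release
+//   store atomic i32 %v, i32* %p monotonic, align 4
+//   fence seq_cst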
+
+/// Get the iX type with the same bitwidth as T.
+IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
+ const DataLayout &DL) {
+ EVT VT = TLI->getValueType(DL, T);
+ unsigned BitWidth = VT.getStoreSizeInBits();
+ assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
+ return IntegerType::get(T->getContext(), BitWidth);
+}
+
+/// Convert an atomic load of a non-integral type to an integer load of the
+/// equivalent bitwidth. See the function comment on
+/// convertAtomicStoreToIntegerType for background.
+LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
+ auto *M = LI->getModule();
+ Type *NewTy = getCorrespondingIntegerType(LI->getType(),
+ M->getDataLayout());
+
+ IRBuilder<> Builder(LI);
+
+ Value *Addr = LI->getPointerOperand();
+ Type *PT = PointerType::get(NewTy,
+ Addr->getType()->getPointerAddressSpace());
+ Value *NewAddr = Builder.CreateBitCast(Addr, PT);
+
+ auto *NewLI = Builder.CreateLoad(NewAddr);
+ NewLI->setAlignment(LI->getAlignment());
+ NewLI->setVolatile(LI->isVolatile());
+ NewLI->setAtomic(LI->getOrdering(), LI->getSynchScope());
+ DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");
+
+ Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
+ LI->replaceAllUsesWith(NewVal);
+ LI->eraseFromParent();
+ return NewLI;
+}
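+
+// For example, roughly:
+//   %v = load atomic float, float* %p seq_cst, align 4
+// becomes:
+//   %1 = bitcast float* %p to i32*
+//   %2 = load atomic i32, i32* %1 seq_cst, align 4
+//   %v = bitcast i32 %2 to float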
+
+bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
+ switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
+ case TargetLoweringBase::AtomicExpansionKind::None:
+ return false;
+ case TargetLoweringBase::AtomicExpansionKind::LLSC:
+ return expandAtomicOpToLLSC(
+ LI, LI->getPointerOperand(), LI->getOrdering(),
+ [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
+ case TargetLoweringBase::AtomicExpansionKind::LLOnly:
+ return expandAtomicLoadToLL(LI);
+ case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
+ return expandAtomicLoadToCmpXchg(LI);
+ }
+ llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
+}
+
+bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
IRBuilder<> Builder(LI);
- Value *Val = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
- Builder, LI->getPointerOperand(), MemOpOrder);
- insertTrailingFence(Builder, LI->getOrdering());
+ // On some architectures, load-linked instructions are atomic for larger
+ // sizes than normal loads. For example, the only 64-bit load guaranteed
+ // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
+ Value *Val =
+ TLI->emitLoadLinked(Builder, LI->getPointerOperand(), LI->getOrdering());
+ TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
LI->replaceAllUsesWith(Val);
LI->eraseFromParent();
return true;
}
+bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
+ IRBuilder<> Builder(LI);
+ AtomicOrdering Order = LI->getOrdering();
+ Value *Addr = LI->getPointerOperand();
+ Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
+ Constant *DummyVal = Constant::getNullValue(Ty);
+
+ Value *Pair = Builder.CreateAtomicCmpXchg(
+ Addr, DummyVal, DummyVal, Order,
+ AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
+ Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");
+
+ LI->replaceAllUsesWith(Loaded);
+ LI->eraseFromParent();
+
+ return true;
+}
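+
+// Roughly, "%v = load atomic i32, i32* %p acquire, align 4" becomes:
+//   %pair = cmpxchg i32* %p, i32 0, i32 0 acquire acquire
+//   %v = extractvalue { i32, i1 } %pair, 0
+// Exchanging 0 for 0 leaves memory unchanged while still reading the
+// current value atomically.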
+
+/// Convert an atomic store of a non-integral type to an integer store of the
+/// equivalent bitwidth. We used to not support floating point or vector
+/// atomics in the IR at all. The backends learned to deal with the bitcast
+/// idiom because that was the only way of expressing the notion of an atomic
+/// float or vector store. The long term plan is to teach each backend to
+/// instruction select from the original atomic store, but as a migration
+/// mechanism, we convert back to the old format which the backends understand.
+/// Each backend will need individual work to recognize the new format.
+StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
+ IRBuilder<> Builder(SI);
+ auto *M = SI->getModule();
+ Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
+ M->getDataLayout());
+ Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);
+
+ Value *Addr = SI->getPointerOperand();
+ Type *PT = PointerType::get(NewTy,
+ Addr->getType()->getPointerAddressSpace());
+ Value *NewAddr = Builder.CreateBitCast(Addr, PT);
+
+ StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
+ NewSI->setAlignment(SI->getAlignment());
+ NewSI->setVolatile(SI->isVolatile());
+ NewSI->setAtomic(SI->getOrdering(), SI->getSynchScope());
+ DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
+ SI->eraseFromParent();
+ return NewSI;
+}
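+
+// For example, roughly:
+//   store atomic float %f, float* %p release, align 4
+// becomes:
+//   %1 = bitcast float %f to i32
+//   %2 = bitcast float* %p to i32*
+//   store atomic i32 %1, i32* %2 release, align 4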
+
bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
- // The only atomic 64-bit store on ARM is an strexd that succeeds, which means
- // we need a loop and the entire instruction is essentially an "atomicrmw
- // xchg" that ignores the value loaded.
+ // This function is only called on atomic stores that are too large to be
+ // atomic if implemented as a native store. So we replace them by an
+ // atomic swap, that can be implemented for example as a ldrex/strex on ARM
+ // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
+ // It is the responsibility of the target to only signal expansion via
+ // shouldExpandAtomicRMW in cases where this is required and possible.
IRBuilder<> Builder(SI);
AtomicRMWInst *AI =
Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
SI->eraseFromParent();
// Now we have an appropriate swap instruction, lower it as usual.
- return expandAtomicRMW(AI);
+ return tryExpandAtomicRMW(AI);
}
-bool AtomicExpand::expandAtomicRMW(AtomicRMWInst *AI) {
- AtomicOrdering Order = AI->getOrdering();
- Value *Addr = AI->getPointerOperand();
- BasicBlock *BB = AI->getParent();
+static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
+ Value *Loaded, Value *NewVal,
+ AtomicOrdering MemOpOrder,
+ Value *&Success, Value *&NewLoaded) {
+ Value* Pair = Builder.CreateAtomicCmpXchg(
+ Addr, Loaded, NewVal, MemOpOrder,
+ AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
+ Success = Builder.CreateExtractValue(Pair, 1, "success");
+ NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
+}
+
+/// Emit IR to implement the given atomicrmw operation on values in registers,
+/// returning the new value.
+static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
+ Value *Loaded, Value *Inc) {
+ Value *NewVal;
+ switch (Op) {
+ case AtomicRMWInst::Xchg:
+ return Inc;
+ case AtomicRMWInst::Add:
+ return Builder.CreateAdd(Loaded, Inc, "new");
+ case AtomicRMWInst::Sub:
+ return Builder.CreateSub(Loaded, Inc, "new");
+ case AtomicRMWInst::And:
+ return Builder.CreateAnd(Loaded, Inc, "new");
+ case AtomicRMWInst::Nand:
+ return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
+ case AtomicRMWInst::Or:
+ return Builder.CreateOr(Loaded, Inc, "new");
+ case AtomicRMWInst::Xor:
+ return Builder.CreateXor(Loaded, Inc, "new");
+ case AtomicRMWInst::Max:
+ NewVal = Builder.CreateICmpSGT(Loaded, Inc);
+ return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+ case AtomicRMWInst::Min:
+ NewVal = Builder.CreateICmpSLE(Loaded, Inc);
+ return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+ case AtomicRMWInst::UMax:
+ NewVal = Builder.CreateICmpUGT(Loaded, Inc);
+ return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+ case AtomicRMWInst::UMin:
+ NewVal = Builder.CreateICmpULE(Loaded, Inc);
+ return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+ default:
+ llvm_unreachable("Unknown atomic op");
+ }
+}
+
+bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
+ switch (TLI->shouldExpandAtomicRMWInIR(AI)) {
+ case TargetLoweringBase::AtomicExpansionKind::None:
+ return false;
+ case TargetLoweringBase::AtomicExpansionKind::LLSC:
+ return expandAtomicOpToLLSC(AI, AI->getPointerOperand(), AI->getOrdering(),
+ [&](IRBuilder<> &Builder, Value *Loaded) {
+ return performAtomicOp(AI->getOperation(),
+ Builder, Loaded,
+ AI->getValOperand());
+ });
+ case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
+ return expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
+ default:
+ llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
+ }
+}
+
+bool AtomicExpand::expandAtomicOpToLLSC(
+ Instruction *I, Value *Addr, AtomicOrdering MemOpOrder,
+ std::function<Value *(IRBuilder<> &, Value *)> PerformOp) {
+ BasicBlock *BB = I->getParent();
Function *F = BB->getParent();
LLVMContext &Ctx = F->getContext();
// atomicrmw.end:
// fence?
// [...]
- BasicBlock *ExitBB = BB->splitBasicBlock(AI, "atomicrmw.end");
+ BasicBlock *ExitBB = BB->splitBasicBlock(I->getIterator(), "atomicrmw.end");
BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
- // This grabs the DebugLoc from AI.
- IRBuilder<> Builder(AI);
+ // This grabs the DebugLoc from I.
+ IRBuilder<> Builder(I);
// The split call above "helpfully" added a branch at the end of BB (to the
// wrong place), but we might want a fence too. It's easiest to just remove
// the branch entirely.
std::prev(BB->end())->eraseFromParent();
Builder.SetInsertPoint(BB);
- AtomicOrdering MemOpOrder = insertLeadingFence(Builder, Order);
Builder.CreateBr(LoopBB);
// Start the main loop block now that we've taken care of the preliminaries.
Builder.SetInsertPoint(LoopBB);
- Value *Loaded = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
- Builder, Addr, MemOpOrder);
+ Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
- Value *NewVal;
- switch (AI->getOperation()) {
- case AtomicRMWInst::Xchg:
- NewVal = AI->getValOperand();
- break;
- case AtomicRMWInst::Add:
- NewVal = Builder.CreateAdd(Loaded, AI->getValOperand(), "new");
- break;
- case AtomicRMWInst::Sub:
- NewVal = Builder.CreateSub(Loaded, AI->getValOperand(), "new");
- break;
- case AtomicRMWInst::And:
- NewVal = Builder.CreateAnd(Loaded, AI->getValOperand(), "new");
- break;
- case AtomicRMWInst::Nand:
- NewVal = Builder.CreateNot(Builder.CreateAnd(Loaded, AI->getValOperand()),
- "new");
- break;
- case AtomicRMWInst::Or:
- NewVal = Builder.CreateOr(Loaded, AI->getValOperand(), "new");
- break;
- case AtomicRMWInst::Xor:
- NewVal = Builder.CreateXor(Loaded, AI->getValOperand(), "new");
- break;
- case AtomicRMWInst::Max:
- NewVal = Builder.CreateICmpSGT(Loaded, AI->getValOperand());
- NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
- break;
- case AtomicRMWInst::Min:
- NewVal = Builder.CreateICmpSLE(Loaded, AI->getValOperand());
- NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
- break;
- case AtomicRMWInst::UMax:
- NewVal = Builder.CreateICmpUGT(Loaded, AI->getValOperand());
- NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
- break;
- case AtomicRMWInst::UMin:
- NewVal = Builder.CreateICmpULE(Loaded, AI->getValOperand());
- NewVal = Builder.CreateSelect(NewVal, Loaded, AI->getValOperand(), "new");
- break;
- default:
- llvm_unreachable("Unknown atomic op");
- }
+ Value *NewVal = PerformOp(Builder, Loaded);
Value *StoreSuccess =
- TM->getSubtargetImpl()->getTargetLowering()->emitStoreConditional(
- Builder, NewVal, Addr, MemOpOrder);
+ TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
Value *TryAgain = Builder.CreateICmpNE(
StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
Builder.SetInsertPoint(ExitBB, ExitBB->begin());
- insertTrailingFence(Builder, Order);
- AI->replaceAllUsesWith(Loaded);
- AI->eraseFromParent();
+ I->replaceAllUsesWith(Loaded);
+ I->eraseFromParent();
return true;
}
BasicBlock *BB = CI->getParent();
Function *F = BB->getParent();
LLVMContext &Ctx = F->getContext();
+ // If getInsertFencesForAtomic() returns true, then the target does not want
+ // to deal with memory orders, and emitLeading/TrailingFence should take care
+ // of everything. Otherwise, emitLeading/TrailingFence are no-op and we
+ // should preserve the ordering.
+ AtomicOrdering MemOpOrder =
+ TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder;
// Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
//
// %loaded = @load.linked(%addr)
// %should_store = icmp eq %loaded, %desired
// br i1 %should_store, label %cmpxchg.trystore,
- // label %cmpxchg.failure
+ // label %cmpxchg.nostore
// cmpxchg.trystore:
// %stored = @store_conditional(%new, %addr)
// %success = icmp eq i32 %stored, 0
// cmpxchg.success:
// fence?
// br label %cmpxchg.end
+ // cmpxchg.nostore:
+ // @load_linked_fail_balance()?
+ // br label %cmpxchg.failure
// cmpxchg.failure:
// fence?
// br label %cmpxchg.end
// %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
// %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
// [...]
- BasicBlock *ExitBB = BB->splitBasicBlock(CI, "cmpxchg.end");
+ BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
- auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, FailureBB);
+ auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
+ auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
auto TryStoreBB = BasicBlock::Create(Ctx, "cmpxchg.trystore", F, SuccessBB);
auto LoopBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, TryStoreBB);
// the branch entirely.
std::prev(BB->end())->eraseFromParent();
Builder.SetInsertPoint(BB);
- AtomicOrdering MemOpOrder = insertLeadingFence(Builder, SuccessOrder);
+ TLI->emitLeadingFence(Builder, SuccessOrder, /*IsStore=*/true,
+ /*IsLoad=*/true);
Builder.CreateBr(LoopBB);
// Start the main loop block now that we've taken care of the preliminaries.
Builder.SetInsertPoint(LoopBB);
- Value *Loaded = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
- Builder, Addr, MemOpOrder);
+ Value *Loaded = TLI->emitLoadLinked(Builder, Addr, MemOpOrder);
Value *ShouldStore =
Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
- // If the the cmpxchg doesn't actually need any ordering when it fails, we can
+ // If the cmpxchg doesn't actually need any ordering when it fails, we can
// jump straight past that fence instruction (if it exists).
- Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);
+ Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
Builder.SetInsertPoint(TryStoreBB);
- Value *StoreSuccess =
- TM->getSubtargetImpl()->getTargetLowering()->emitStoreConditional(
- Builder, CI->getNewValOperand(), Addr, MemOpOrder);
+ Value *StoreSuccess = TLI->emitStoreConditional(
+ Builder, CI->getNewValOperand(), Addr, MemOpOrder);
StoreSuccess = Builder.CreateICmpEQ(
StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
Builder.CreateCondBr(StoreSuccess, SuccessBB,
// Make sure later instructions don't get reordered with a fence if necessary.
Builder.SetInsertPoint(SuccessBB);
- insertTrailingFence(Builder, SuccessOrder);
+ TLI->emitTrailingFence(Builder, SuccessOrder, /*IsStore=*/true,
+ /*IsLoad=*/true);
Builder.CreateBr(ExitBB);
+ Builder.SetInsertPoint(NoStoreBB);
+ // In the failing case, where we don't execute the store-conditional, the
+ // target might want to balance out the load-linked with a dedicated
+ // instruction (e.g., on ARM, clearing the exclusive monitor).
+ TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
+ Builder.CreateBr(FailureBB);
+
Builder.SetInsertPoint(FailureBB);
- insertTrailingFence(Builder, FailureOrder);
+ TLI->emitTrailingFence(Builder, FailureOrder, /*IsStore=*/true,
+ /*IsLoad=*/true);
Builder.CreateBr(ExitBB);
// Finally, we have control-flow based knowledge of whether the cmpxchg
return true;
}
-AtomicOrdering AtomicExpand::insertLeadingFence(IRBuilder<> &Builder,
- AtomicOrdering Ord) {
- if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
- return Ord;
+bool AtomicExpand::isIdempotentRMW(AtomicRMWInst* RMWI) {
+ auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
+  if (!C)
+ return false;
- if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
- Builder.CreateFence(Release);
+ AtomicRMWInst::BinOp Op = RMWI->getOperation();
+  switch (Op) {
+ case AtomicRMWInst::Add:
+ case AtomicRMWInst::Sub:
+ case AtomicRMWInst::Or:
+ case AtomicRMWInst::Xor:
+ return C->isZero();
+ case AtomicRMWInst::And:
+ return C->isMinusOne();
+ // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
+ default:
+ return false;
+ }
+}
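+
+// E.g., "atomicrmw add i32* %p, i32 0 seq_cst" and
+// "atomicrmw and i32* %p, i32 -1 seq_cst" leave memory unchanged, so they
+// can be lowered as fenced loads below.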
- // The exclusive operations don't need any barrier if we're adding separate
- // fences.
- return Monotonic;
+bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst* RMWI) {
+ if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
+ tryExpandAtomicLoad(ResultingLoad);
+ return true;
+ }
+ return false;
}
-void AtomicExpand::insertTrailingFence(IRBuilder<> &Builder,
- AtomicOrdering Ord) {
- if (!TM->getSubtargetImpl()->getTargetLowering()->getInsertFencesForAtomic())
- return;
+bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
+ CreateCmpXchgInstFun CreateCmpXchg) {
+ assert(AI);
+
+ AtomicOrdering MemOpOrder =
+ AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
+ Value *Addr = AI->getPointerOperand();
+ BasicBlock *BB = AI->getParent();
+ Function *F = BB->getParent();
+ LLVMContext &Ctx = F->getContext();
- if (Ord == Acquire || Ord == AcquireRelease)
- Builder.CreateFence(Acquire);
- else if (Ord == SequentiallyConsistent)
- Builder.CreateFence(SequentiallyConsistent);
+ // Given: atomicrmw some_op iN* %addr, iN %incr ordering
+ //
+ // The standard expansion we produce is:
+ // [...]
+ // %init_loaded = load atomic iN* %addr
+ // br label %loop
+ // loop:
+ // %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
+ // %new = some_op iN %loaded, %incr
+ // %pair = cmpxchg iN* %addr, iN %loaded, iN %new
+ // %new_loaded = extractvalue { iN, i1 } %pair, 0
+ // %success = extractvalue { iN, i1 } %pair, 1
+ // br i1 %success, label %atomicrmw.end, label %loop
+ // atomicrmw.end:
+ // [...]
+ BasicBlock *ExitBB = BB->splitBasicBlock(AI->getIterator(), "atomicrmw.end");
+ BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
+
+ // This grabs the DebugLoc from AI.
+ IRBuilder<> Builder(AI);
+
+ // The split call above "helpfully" added a branch at the end of BB (to the
+ // wrong place), but we want a load. It's easiest to just remove
+ // the branch entirely.
+ std::prev(BB->end())->eraseFromParent();
+ Builder.SetInsertPoint(BB);
+ LoadInst *InitLoaded = Builder.CreateLoad(Addr);
+ // Atomics require at least natural alignment.
+ InitLoaded->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
+ Builder.CreateBr(LoopBB);
+
+ // Start the main loop block now that we've taken care of the preliminaries.
+ Builder.SetInsertPoint(LoopBB);
+ PHINode *Loaded = Builder.CreatePHI(AI->getType(), 2, "loaded");
+ Loaded->addIncoming(InitLoaded, BB);
+
+ Value *NewVal =
+ performAtomicOp(AI->getOperation(), Builder, Loaded, AI->getValOperand());
+
+ Value *NewLoaded = nullptr;
+ Value *Success = nullptr;
+
+ CreateCmpXchg(Builder, Addr, Loaded, NewVal, MemOpOrder,
+ Success, NewLoaded);
+ assert(Success && NewLoaded);
+
+ Loaded->addIncoming(NewLoaded, LoopBB);
+
+ Builder.CreateCondBr(Success, ExitBB, LoopBB);
+
+ Builder.SetInsertPoint(ExitBB, ExitBB->begin());
+
+ AI->replaceAllUsesWith(NewLoaded);
+ AI->eraseFromParent();
+
+ return true;
}