From: Peizhao Ou Date: Fri, 17 Nov 2017 00:04:38 +0000 (-0800) Subject: Adds fake conditional branches right after relaxed loads if necessary X-Git-Url: http://demsky.eecs.uci.edu/git/?p=oota-llvm.git;a=commitdiff_plain;h=81d225b93d55ec44d6e79f2d95440f250f3df064 Adds fake conditional branches right after relaxed loads if necessary --- diff --git a/include/llvm/IR/BasicBlock.h b/include/llvm/IR/BasicBlock.h index c6b54d308ce..94edb547f53 100644 --- a/include/llvm/IR/BasicBlock.h +++ b/include/llvm/IR/BasicBlock.h @@ -59,6 +59,9 @@ private: InstListType InstList; Function *Parent; + // XXX-update: A flag that checks whether we can eliminate this block. + bool canEliminateBlock; + void setParent(Function *parent); friend class SymbolTableListTraits; @@ -74,6 +77,16 @@ private: Function *Parent = nullptr, BasicBlock *InsertBefore = nullptr); public: + // XXX-update: + void disableCanEliminateBlock() { + canEliminateBlock = false; + } + + bool getCanEliminateBlock() { + return canEliminateBlock; + } + + /// \brief Get the context in which this basic block lives. LLVMContext &getContext() const; diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp index faf951985a7..c8308afe9c1 100644 --- a/lib/CodeGen/AtomicExpandPass.cpp +++ b/lib/CodeGen/AtomicExpandPass.cpp @@ -34,6 +34,7 @@ #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetSubtargetInfo.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" using namespace llvm; @@ -82,764 +83,6 @@ FunctionPass *llvm::createAtomicExpandPass(const TargetMachine *TM) { return new AtomicExpand(TM); } - -namespace { - -bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal); -Value* GetUntaintedAddress(Value* CurrentAddress); - -// The depth we trace down a variable to look for its dependence set. -const unsigned kDependenceDepth = 4; - -// Recursively looks for variables that 'Val' depends on at the given depth -// 'Depth', and adds them in 'DepSet'. If 'InsertOnlyLeafNodes' is true, only -// inserts the leaf node values; otherwise, all visited nodes are included in -// 'DepSet'. Note that constants will be ignored. -template -void recursivelyFindDependence(SetType* DepSet, Value* Val, - bool InsertOnlyLeafNodes = false, - unsigned Depth = kDependenceDepth) { - if (Val == nullptr) { - return; - } - if (!InsertOnlyLeafNodes && !isa(Val)) { - DepSet->insert(Val); - } - if (Depth == 0) { - // Cannot go deeper. Insert the leaf nodes. - if (InsertOnlyLeafNodes && !isa(Val)) { - DepSet->insert(Val); - } - return; - } - - // Go one step further to explore the dependence of the operands. - Instruction* I = nullptr; - if ((I = dyn_cast(Val))) { - if (isa(I)) { - // A load is considerd the leaf load of the dependence tree. Done. 
- DepSet->insert(Val); - return; - } else if (I->isBinaryOp()) { - BinaryOperator* I = dyn_cast(Val); - Value *Op0 = I->getOperand(0), *Op1 = I->getOperand(1); - recursivelyFindDependence(DepSet, Op0, Depth - 1); - recursivelyFindDependence(DepSet, Op1, Depth - 1); - } else if (I->isCast()) { - Value* Op0 = I->getOperand(0); - recursivelyFindDependence(DepSet, Op0, Depth - 1); - } else if (I->getOpcode() == Instruction::Select) { - Value* Op0 = I->getOperand(0); - Value* Op1 = I->getOperand(1); - Value* Op2 = I->getOperand(2); - recursivelyFindDependence(DepSet, Op0, Depth - 1); - recursivelyFindDependence(DepSet, Op1, Depth - 1); - recursivelyFindDependence(DepSet, Op2, Depth - 1); - } else if (I->getOpcode() == Instruction::GetElementPtr) { - for (unsigned i = 0; i < I->getNumOperands(); i++) { - recursivelyFindDependence(DepSet, I->getOperand(i), Depth - 1); - } - } else if (I->getOpcode() == Instruction::Store) { - auto* SI = dyn_cast(Val); - recursivelyFindDependence(DepSet, SI->getPointerOperand(), Depth - 1); - recursivelyFindDependence(DepSet, SI->getValueOperand(), Depth - 1); - } else { - Value* Op0 = nullptr; - Value* Op1 = nullptr; - switch (I->getOpcode()) { - case Instruction::ICmp: - case Instruction::FCmp: { - Op0 = I->getOperand(0); - Op1 = I->getOperand(1); - recursivelyFindDependence(DepSet, Op0, Depth - 1); - recursivelyFindDependence(DepSet, Op1, Depth - 1); - break; - } - default: { - // Be conservative. Add it and be done with it. - DepSet->insert(Val); - return; - } - } - } - } else if (isa(Val)) { - // Not interested in constant values. Done. - return; - } else { - // Be conservative. Add it and be done with it. - DepSet->insert(Val); - return; - } -} - -// Helper function to create a Cast instruction. -Value* createCast(IRBuilder& Builder, Value* DepVal, - Type* TargetIntegerType) { - Instruction::CastOps CastOp = Instruction::BitCast; - switch (DepVal->getType()->getTypeID()) { - case Type::IntegerTyID: { - CastOp = Instruction::SExt; - break; - } - case Type::FloatTyID: - case Type::DoubleTyID: { - CastOp = Instruction::FPToSI; - break; - } - case Type::PointerTyID: { - CastOp = Instruction::PtrToInt; - break; - } - default: { break; } - } - - return Builder.CreateCast(CastOp, DepVal, TargetIntegerType); -} - -// Given a value, if it's a tainted address, this function returns the -// instruction that ORs the "dependence value" with the "original address". -// Otherwise, returns nullptr. This instruction is the first OR instruction -// where one of its operand is an AND instruction with an operand being 0. -// -// E.g., it returns '%4 = or i32 %3, %2' given 'CurrentAddress' is '%5'. -// %0 = load i32, i32* @y, align 4, !tbaa !1 -// %cmp = icmp ne i32 %0, 42 // <== this is like the condition -// %1 = sext i1 %cmp to i32 -// %2 = ptrtoint i32* @x to i32 -// %3 = and i32 %1, 0 -// %4 = or i32 %3, %2 -// %5 = inttoptr i32 %4 to i32* -// store i32 1, i32* %5, align 4 -Instruction* getOrAddress(Value* CurrentAddress) { - // Is it a cast from integer to pointer type. - Instruction* OrAddress = nullptr; - Instruction* AndDep = nullptr; - Instruction* CastToInt = nullptr; - Value* ActualAddress = nullptr; - Constant* ZeroConst = nullptr; - - const Instruction* CastToPtr = dyn_cast(CurrentAddress); - if (CastToPtr && CastToPtr->getOpcode() == Instruction::IntToPtr) { - // Is it an OR instruction: %1 = or %and, %actualAddress. 
- if ((OrAddress = dyn_cast(CastToPtr->getOperand(0))) && - OrAddress->getOpcode() == Instruction::Or) { - // The first operand should be and AND instruction. - AndDep = dyn_cast(OrAddress->getOperand(0)); - if (AndDep && AndDep->getOpcode() == Instruction::And) { - // Also make sure its first operand of the "AND" is 0, or the "AND" is - // marked explicitly by "NoInstCombine". - if ((ZeroConst = dyn_cast(AndDep->getOperand(1))) && - ZeroConst->isNullValue()) { - return OrAddress; - } - } - } - } - // Looks like it's not been tainted. - return nullptr; -} - -// Given a value, if it's a tainted address, this function returns the -// instruction that taints the "dependence value". Otherwise, returns nullptr. -// This instruction is the last AND instruction where one of its operand is 0. -// E.g., it returns '%3' given 'CurrentAddress' is '%5'. -// %0 = load i32, i32* @y, align 4, !tbaa !1 -// %cmp = icmp ne i32 %0, 42 // <== this is like the condition -// %1 = sext i1 %cmp to i32 -// %2 = ptrtoint i32* @x to i32 -// %3 = and i32 %1, 0 -// %4 = or i32 %3, %2 -// %5 = inttoptr i32 %4 to i32* -// store i32 1, i32* %5, align 4 -Instruction* getAndDependence(Value* CurrentAddress) { - // If 'CurrentAddress' is tainted, get the OR instruction. - auto* OrAddress = getOrAddress(CurrentAddress); - if (OrAddress == nullptr) { - return nullptr; - } - - // No need to check the operands. - auto* AndDepInst = dyn_cast(OrAddress->getOperand(0)); - assert(AndDepInst); - return AndDepInst; -} - -// Given a value, if it's a tainted address, this function returns -// the "dependence value", which is the first operand in the AND instruction. -// E.g., it returns '%1' given 'CurrentAddress' is '%5'. -// %0 = load i32, i32* @y, align 4, !tbaa !1 -// %cmp = icmp ne i32 %0, 42 // <== this is like the condition -// %1 = sext i1 %cmp to i32 -// %2 = ptrtoint i32* @x to i32 -// %3 = and i32 %1, 0 -// %4 = or i32 %3, %2 -// %5 = inttoptr i32 %4 to i32* -// store i32 1, i32* %5, align 4 -Value* getDependence(Value* CurrentAddress) { - auto* AndInst = getAndDependence(CurrentAddress); - if (AndInst == nullptr) { - return nullptr; - } - return AndInst->getOperand(0); -} - -// Given an address that has been tainted, returns the only condition it depends -// on, if any; otherwise, returns nullptr. -Value* getConditionDependence(Value* Address) { - auto* Dep = getDependence(Address); - if (Dep == nullptr) { - // 'Address' has not been dependence-tainted. - return nullptr; - } - - Value* Operand = Dep; - while (true) { - auto* Inst = dyn_cast(Operand); - if (Inst == nullptr) { - // Non-instruction type does not have condition dependence. - return nullptr; - } - if (Inst->getOpcode() == Instruction::ICmp) { - return Inst; - } else { - if (Inst->getNumOperands() != 1) { - return nullptr; - } else { - Operand = Inst->getOperand(0); - } - } - } -} - -// Conservatively decides whether the dependence set of 'Val1' includes the -// dependence set of 'Val2'. If 'ExpandSecondValue' is false, we do not expand -// 'Val2' and use that single value as its dependence set. -// If it returns true, it means the dependence set of 'Val1' includes that of -// 'Val2'; otherwise, it only means we cannot conclusively decide it. -bool dependenceSetInclusion(Value* Val1, Value* Val2, - int Val1ExpandLevel = 2 * kDependenceDepth, - int Val2ExpandLevel = kDependenceDepth) { - typedef SmallSet IncludingSet; - typedef SmallSet IncludedSet; - - IncludingSet DepSet1; - IncludedSet DepSet2; - // Look for more depths for the including set. 
- recursivelyFindDependence(&DepSet1, Val1, false /*Insert all visited nodes*/, - Val1ExpandLevel); - recursivelyFindDependence(&DepSet2, Val2, true /*Only insert leaf nodes*/, - Val2ExpandLevel); - - auto set_inclusion = [](IncludingSet FullSet, IncludedSet Subset) { - for (auto* Dep : Subset) { - if (0 == FullSet.count(Dep)) { - return false; - } - } - return true; - }; - bool inclusion = set_inclusion(DepSet1, DepSet2); - DEBUG(dbgs() << "[dependenceSetInclusion]: " << inclusion << "\n"); - DEBUG(dbgs() << "Including set for: " << *Val1 << "\n"); - DEBUG(for (const auto* Dep : DepSet1) { dbgs() << "\t\t" << *Dep << "\n"; }); - DEBUG(dbgs() << "Included set for: " << *Val2 << "\n"); - DEBUG(for (const auto* Dep : DepSet2) { dbgs() << "\t\t" << *Dep << "\n"; }); - - return inclusion; -} - -// Recursively iterates through the operands spawned from 'DepVal'. If there -// exists a single value that 'DepVal' only depends on, we call that value the -// root dependence of 'DepVal' and return it. Otherwise, return 'DepVal'. -Value* getRootDependence(Value* DepVal) { - SmallSet DepSet; - for (unsigned depth = kDependenceDepth; depth > 0; --depth) { - recursivelyFindDependence(&DepSet, DepVal, true /*Only insert leaf nodes*/, - depth); - if (DepSet.size() == 1) { - return *DepSet.begin(); - } - DepSet.clear(); - } - return DepVal; -} - -// This function actually taints 'DepVal' to the address to 'SI'. If the -// address -// of 'SI' already depends on whatever 'DepVal' depends on, this function -// doesn't do anything and returns false. Otherwise, returns true. -// -// This effect forces the store and any stores that comes later to depend on -// 'DepVal'. For example, we have a condition "cond", and a store instruction -// "s: STORE addr, val". If we want "s" (and any later store) to depend on -// "cond", we do the following: -// %conv = sext i1 %cond to i32 -// %addrVal = ptrtoint i32* %addr to i32 -// %andCond = and i32 conv, 0; -// %orAddr = or i32 %andCond, %addrVal; -// %NewAddr = inttoptr i32 %orAddr to i32*; -// -// This is a more concrete example: -// ------ -// %0 = load i32, i32* @y, align 4, !tbaa !1 -// %cmp = icmp ne i32 %0, 42 // <== this is like the condition -// %1 = sext i1 %cmp to i32 -// %2 = ptrtoint i32* @x to i32 -// %3 = and i32 %1, 0 -// %4 = or i32 %3, %2 -// %5 = inttoptr i32 %4 to i32* -// store i32 1, i32* %5, align 4 -bool taintStoreAddress(StoreInst* SI, Value* DepVal, - const char* calling_func = __builtin_FUNCTION()) { - DEBUG(dbgs() << "Called from " << calling_func << '\n'); - IRBuilder Builder(SI); - BasicBlock* BB = SI->getParent(); - Value* Address = SI->getPointerOperand(); - Type* TargetIntegerType = - IntegerType::get(Address->getContext(), - BB->getModule()->getDataLayout().getPointerSizeInBits()); - - // Does SI's address already depends on whatever 'DepVal' depends on? - if (StoreAddressDependOnValue(SI, DepVal)) { - return false; - } - - // Figure out if there's a root variable 'DepVal' depends on. For example, we - // can extract "getelementptr inbounds %struct, %struct* %0, i64 0, i32 123" - // to be "%struct* %0" since all other operands are constant. - DepVal = getRootDependence(DepVal); - - // Is this already a dependence-tainted store? - Value* OldDep = getDependence(Address); - if (OldDep) { - // The address of 'SI' has already been tainted. Just need to absorb the - // DepVal to the existing dependence in the address of SI. 
- Instruction* AndDep = getAndDependence(Address); - IRBuilder Builder(AndDep); - Value* NewDep = nullptr; - if (DepVal->getType() == AndDep->getType()) { - NewDep = Builder.CreateAnd(OldDep, DepVal); - } else { - NewDep = Builder.CreateAnd( - OldDep, createCast(Builder, DepVal, TargetIntegerType)); - } - - auto* NewDepInst = dyn_cast(NewDep); - - // Use the new AND instruction as the dependence - AndDep->setOperand(0, NewDep); - return true; - } - - // SI's address has not been tainted. Now taint it with 'DepVal'. - Value* CastDepToInt = createCast(Builder, DepVal, TargetIntegerType); - Value* PtrToIntCast = Builder.CreatePtrToInt(Address, TargetIntegerType); - Value* AndDepVal = - Builder.CreateAnd(CastDepToInt, ConstantInt::get(TargetIntegerType, 0)); - auto AndInst = dyn_cast(AndDepVal); - // XXX-comment: The original IR InstCombiner would change our and instruction - // to a select and then the back end optimize the condition out. We attach a - // flag to instructions and set it here to inform the InstCombiner to not to - // touch this and instruction at all. - Value* OrAddr = Builder.CreateOr(AndDepVal, PtrToIntCast); - Value* NewAddr = Builder.CreateIntToPtr(OrAddr, Address->getType()); - - DEBUG(dbgs() << "[taintStoreAddress]\n" - << "Original store: " << *SI << '\n'); - SI->setOperand(1, NewAddr); - - // Debug output. - DEBUG(dbgs() << "\tTargetIntegerType: " << *TargetIntegerType << '\n' - << "\tCast dependence value to integer: " << *CastDepToInt - << '\n' - << "\tCast address to integer: " << *PtrToIntCast << '\n' - << "\tAnd dependence value: " << *AndDepVal << '\n' - << "\tOr address: " << *OrAddr << '\n' - << "\tCast or instruction to address: " << *NewAddr << "\n\n"); - - return true; -} - -// Looks for the previous store in the if block --- 'BrBB', which makes the -// speculative store 'StoreToHoist' safe. -Value* getSpeculativeStoreInPrevBB(StoreInst* StoreToHoist, BasicBlock* BrBB) { - assert(StoreToHoist && "StoreToHoist must be a real store"); - - Value* StorePtr = StoreToHoist->getPointerOperand(); - - // Look for a store to the same pointer in BrBB. - for (BasicBlock::reverse_iterator RI = BrBB->rbegin(), RE = BrBB->rend(); - RI != RE; ++RI) { - Instruction* CurI = &*RI; - - StoreInst* SI = dyn_cast(CurI); - // Found the previous store make sure it stores to the same location. - // XXX-update: If the previous store's original untainted address are the - // same as 'StorePtr', we are also good to hoist the store. - if (SI && (SI->getPointerOperand() == StorePtr || - GetUntaintedAddress(SI->getPointerOperand()) == StorePtr)) { - // Found the previous store, return its value operand. - return SI; - } - } - - assert(false && - "We should not reach here since this store is safe to speculate"); -} - -// XXX-comment: Returns true if it changes the code, false otherwise (the branch -// condition already depends on 'DepVal'. -bool taintConditionalBranch(BranchInst* BI, Value* DepVal) { - assert(BI->isConditional()); - auto* Cond = BI->getOperand(0); - if (dependenceSetInclusion(Cond, DepVal)) { - // The dependence/ordering is self-evident. - return false; - } - - IRBuilder Builder(BI); - auto* AndDep = - Builder.CreateAnd(DepVal, ConstantInt::get(DepVal->getType(), 0)); - auto* TruncAndDep = - Builder.CreateTrunc(AndDep, IntegerType::get(DepVal->getContext(), 1)); - auto* OrCond = Builder.CreateOr(TruncAndDep, Cond); - BI->setOperand(0, OrCond); - - // Debug output. 
- DEBUG(dbgs() << "\tTainted branch condition:\n" << *BI->getParent()); - - return true; -} - -// XXX-update: For a relaxed load 'LI', find the first immediate atomic store or -// the first conditional branch. Returns nullptr if there's no such immediately -// following store/branch instructions, which we can only enforce the load with -// 'acquire'. -Instruction* findFirstStoreCondBranchInst(LoadInst* LI) { - // In some situations, relaxed loads can be left as is: - // 1. The relaxed load is used to calculate the address of the immediate - // following store; - // 2. The relaxed load is used as a condition in the immediate following - // condition, and there are no stores in between. This is actually quite - // common. E.g., - // int r1 = x.load(relaxed); - // if (r1 != 0) { - // y.store(1, relaxed); - // } - // However, in this function, we don't deal with them directly. Instead, we - // just find the immediate following store/condition branch and return it. - - auto* BB = LI->getParent(); - auto BE = BB->end(); - auto BBI = BasicBlock::iterator(LI); - BBI++; - while (true) { - for (; BBI != BE; BBI++) { - auto* Inst = dyn_cast(&*BBI); - if (Inst == nullptr) { - continue; - } - if (Inst->getOpcode() == Instruction::Store) { - return Inst; - } else if (Inst->getOpcode() == Instruction::Br) { - auto* BrInst = dyn_cast(Inst); - if (BrInst->isConditional()) { - return Inst; - } else { - // Reinitialize iterators with the destination of the unconditional - // branch. - BB = BrInst->getSuccessor(0); - BBI = BB->begin(); - BE = BB->end(); - break; - } - } - } - if (BBI == BE) { - return nullptr; - } - } -} - -// XXX-comment: Returns whether the code has been changed. -bool taintMonotonicLoads(const SmallVector& MonotonicLoadInsts) { - bool Changed = false; - for (auto* LI : MonotonicLoadInsts) { - auto* FirstInst = findFirstStoreCondBranchInst(LI); - if (FirstInst == nullptr) { - // We don't seem to be able to taint a following store/conditional branch - // instruction. Simply make it acquire. - DEBUG(dbgs() << "[RelaxedLoad]: Transformed to acquire load\n" - << *LI << "\n"); - LI->setOrdering(Acquire); - Changed = true; - continue; - } - // Taint 'FirstInst', which could be a store or a condition branch - // instruction. - if (FirstInst->getOpcode() == Instruction::Store) { - Changed |= taintStoreAddress(dyn_cast(FirstInst), LI); - } else if (FirstInst->getOpcode() == Instruction::Br) { - Changed |= taintConditionalBranch(dyn_cast(FirstInst), LI); - } else { - assert(false && "findFirstStoreCondBranchInst() should return a " - "store/condition branch instruction"); - } - } - return Changed; -} - -/**** Implementations of public methods for dependence tainting ****/ -Value* GetUntaintedAddress(Value* CurrentAddress) { - auto* OrAddress = getOrAddress(CurrentAddress); - if (OrAddress == nullptr) { - // Is it tainted by a select instruction? - auto* Inst = dyn_cast(CurrentAddress); - if (nullptr != Inst && Inst->getOpcode() == Instruction::Select) { - // A selection instruction. - if (Inst->getOperand(1) == Inst->getOperand(2)) { - return Inst->getOperand(1); - } - } - - return CurrentAddress; - } - Value* ActualAddress = nullptr; - - auto* CastToInt = dyn_cast(OrAddress->getOperand(1)); - if (CastToInt && CastToInt->getOpcode() == Instruction::PtrToInt) { - return CastToInt->getOperand(0); - } else { - // This should be a IntToPtr constant expression. 
- ConstantExpr* PtrToIntExpr = - dyn_cast(OrAddress->getOperand(1)); - if (PtrToIntExpr && PtrToIntExpr->getOpcode() == Instruction::PtrToInt) { - return PtrToIntExpr->getOperand(0); - } - } - - // Looks like it's not been dependence-tainted. Returns itself. - return CurrentAddress; -} - -MemoryLocation GetUntaintedMemoryLocation(StoreInst* SI) { - AAMDNodes AATags; - SI->getAAMetadata(AATags); - const auto& DL = SI->getModule()->getDataLayout(); - const auto* OriginalAddr = GetUntaintedAddress(SI->getPointerOperand()); - DEBUG(if (OriginalAddr != SI->getPointerOperand()) { - dbgs() << "[GetUntaintedMemoryLocation]\n" - << "Storing address: " << *SI->getPointerOperand() - << "\nUntainted address: " << *OriginalAddr << "\n"; - }); - return MemoryLocation(OriginalAddr, - DL.getTypeStoreSize(SI->getValueOperand()->getType()), - AATags); -} - -bool TaintDependenceToStore(StoreInst* SI, Value* DepVal) { - if (dependenceSetInclusion(SI, DepVal)) { - return false; - } - - bool tainted = taintStoreAddress(SI, DepVal); - assert(tainted); - return tainted; -} - -bool TaintDependenceToStoreAddress(StoreInst* SI, Value* DepVal) { - if (dependenceSetInclusion(SI->getPointerOperand(), DepVal)) { - return false; - } - - bool tainted = taintStoreAddress(SI, DepVal); - assert(tainted); - return tainted; -} - -bool CompressTaintedStore(BasicBlock* BB) { - // This function looks for windows of adajcent stores in 'BB' that satisfy the - // following condition (and then do optimization): - // *Addr(d1) = v1, d1 is a condition and is the only dependence the store's - // address depends on && Dep(v1) includes Dep(d1); - // *Addr(d2) = v2, d2 is a condition and is the only dependnece the store's - // address depends on && Dep(v2) includes Dep(d2) && - // Dep(d2) includes Dep(d1); - // ... - // *Addr(dN) = vN, dN is a condition and is the only dependence the store's - // address depends on && Dep(dN) includes Dep(d"N-1"). - // - // As a result, Dep(dN) includes [Dep(d1) V ... V Dep(d"N-1")], so we can - // safely transform the above to the following. In between these stores, we - // can omit untainted stores to the same address 'Addr' since they internally - // have dependence on the previous stores on the same address. - // => - // *Addr = v1 - // *Addr = v2 - // *Addr(d3) = v3 - for (auto BI = BB->begin(), BE = BB->end(); BI != BE; BI++) { - // Look for the first store in such a window of adajacent stores. - auto* FirstSI = dyn_cast(&*BI); - if (!FirstSI) { - continue; - } - - // The first store in the window must be tainted. - auto* UntaintedAddress = GetUntaintedAddress(FirstSI->getPointerOperand()); - if (UntaintedAddress == FirstSI->getPointerOperand()) { - continue; - } - - // The first store's address must directly depend on and only depend on a - // condition. - auto* FirstSIDepCond = getConditionDependence(FirstSI->getPointerOperand()); - if (nullptr == FirstSIDepCond) { - continue; - } - - // Dep(first store's storing value) includes Dep(tainted dependence). - if (!dependenceSetInclusion(FirstSI->getValueOperand(), FirstSIDepCond)) { - continue; - } - - // Look for subsequent stores to the same address that satisfy the condition - // of "compressing the dependence". - SmallVector AdajacentStores; - AdajacentStores.push_back(FirstSI); - auto BII = BasicBlock::iterator(FirstSI); - for (BII++; BII != BE; BII++) { - auto* CurrSI = dyn_cast(&*BII); - if (!CurrSI) { - if (BII->mayHaveSideEffects()) { - // Be conservative. Instructions with side effects are similar to - // stores. 
- break; - } - continue; - } - - auto* OrigAddress = GetUntaintedAddress(CurrSI->getPointerOperand()); - auto* CurrSIDepCond = getConditionDependence(CurrSI->getPointerOperand()); - // All other stores must satisfy either: - // A. 'CurrSI' is an untainted store to the same address, or - // B. the combination of the following 5 subconditions: - // 1. Tainted; - // 2. Untainted address is the same as the group's address; - // 3. The address is tainted with a sole value which is a condition; - // 4. The storing value depends on the condition in 3. - // 5. The condition in 3 depends on the previous stores dependence - // condition. - - // Condition A. Should ignore this store directly. - if (OrigAddress == CurrSI->getPointerOperand() && - OrigAddress == UntaintedAddress) { - continue; - } - // Check condition B. - Value* Cond = nullptr; - if (OrigAddress == CurrSI->getPointerOperand() || - OrigAddress != UntaintedAddress || CurrSIDepCond == nullptr || - !dependenceSetInclusion(CurrSI->getValueOperand(), CurrSIDepCond)) { - // Check condition 1, 2, 3 & 4. - break; - } - - // Check condition 5. - StoreInst* PrevSI = AdajacentStores[AdajacentStores.size() - 1]; - auto* PrevSIDepCond = getConditionDependence(PrevSI->getPointerOperand()); - assert(PrevSIDepCond && - "Store in the group must already depend on a condtion"); - if (!dependenceSetInclusion(CurrSIDepCond, PrevSIDepCond)) { - break; - } - - AdajacentStores.push_back(CurrSI); - } - - if (AdajacentStores.size() == 1) { - // The outer loop should keep looking from the next store. - continue; - } - - // Now we have such a group of tainted stores to the same address. - DEBUG(dbgs() << "[CompressTaintedStore]\n"); - DEBUG(dbgs() << "Original BB\n"); - DEBUG(dbgs() << *BB << '\n'); - auto* LastSI = AdajacentStores[AdajacentStores.size() - 1]; - for (int i = 0; i < AdajacentStores.size() - 1; ++i) { - auto* SI = AdajacentStores[i]; - - // Use the original address for stores before the last one. - SI->setOperand(1, UntaintedAddress); - - DEBUG(dbgs() << "Store address has been reversed: " << *SI << '\n';); - } - // XXX-comment: Try to make the last store use fewer registers. - // If LastSI's storing value is a select based on the condition with which - // its address is tainted, transform the tainted address to a select - // instruction, as follows: - // r1 = Select Cond ? A : B - // r2 = Cond & 0 - // r3 = Addr | r2 - // *r3 = r1 - // ==> - // r1 = Select Cond ? A : B - // r2 = Select Cond ? Addr : Addr - // *r2 = r1 - // The idea is that both Select instructions depend on the same condition, - // so hopefully the backend can generate two cmov instructions for them (and - // this saves the number of registers needed). - auto* LastSIDep = getConditionDependence(LastSI->getPointerOperand()); - auto* LastSIValue = dyn_cast(LastSI->getValueOperand()); - if (LastSIValue && LastSIValue->getOpcode() == Instruction::Select && - LastSIValue->getOperand(0) == LastSIDep) { - // XXX-comment: Maybe it's better for us to just leave it as an and/or - // dependence pattern. - // /* - IRBuilder Builder(LastSI); - auto* Address = - Builder.CreateSelect(LastSIDep, UntaintedAddress, UntaintedAddress); - LastSI->setOperand(1, Address); - DEBUG(dbgs() << "The last store becomes :" << *LastSI << "\n\n";); - // */ - } - } - - return true; -} - -bool PassDependenceToStore(Value* OldAddress, StoreInst* NewStore) { - Value* OldDep = getDependence(OldAddress); - // Return false when there's no dependence to pass from the OldAddress. 
- if (!OldDep) { - return false; - } - - // No need to pass the dependence to NewStore's address if it already depends - // on whatever 'OldAddress' depends on. - if (StoreAddressDependOnValue(NewStore, OldDep)) { - return false; - } - return taintStoreAddress(NewStore, OldAddress); -} - -SmallSet FindDependence(Value* Val) { - SmallSet DepSet; - recursivelyFindDependence(&DepSet, Val, true /*Only insert leaf nodes*/); - return DepSet; -} - -bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal) { - return dependenceSetInclusion(SI->getPointerOperand(), DepVal); -} - -bool StoreDependOnValue(StoreInst* SI, Value* Dep) { - return dependenceSetInclusion(SI, Dep); -} - -} // namespace - - bool AtomicExpand::runOnFunction(Function &F) { if (!TM || !TM->getSubtargetImpl(F)->enableAtomicExpand()) return false; @@ -916,7 +159,7 @@ bool AtomicExpand::runOnFunction(Function &F) { if (TLI->getInsertFencesForAtomic()) { if (LI && isAtLeastAcquire(LI->getOrdering())) { FenceOrdering = LI->getOrdering(); - LI->setOrdering(Monotonic); +// AddFakeConditionalBranch( IsStore = false; IsLoad = true; } else if (SI && isAtLeastRelease(SI->getOrdering())) { @@ -985,8 +228,6 @@ bool AtomicExpand::runOnFunction(Function &F) { } } - MadeChange |= taintMonotonicLoads(MonotonicLoadInsts); - return MadeChange; } diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp index df5cac5a9f7..e0b7d73339d 100644 --- a/lib/CodeGen/BranchFolding.cpp +++ b/lib/CodeGen/BranchFolding.cpp @@ -1254,6 +1254,7 @@ ReoptimizeBlock: goto ReoptimizeBlock; } + /* // If the previous block unconditionally falls through to this block and // this block has no other predecessors, move the contents of this block // into the prior block. This doesn't usually happen when SimplifyCFG @@ -1289,10 +1290,20 @@ ReoptimizeBlock: MadeChange = true; return MadeChange; } + */ // If the previous branch *only* branches to *this* block (conditional or // not) remove the branch. if (PriorTBB == MBB && !PriorFBB) { + // XXX-disabled: Don't fold conditional branches that we added + // intentionally. 
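+      // (Editorial note, not in the original patch: the check below is
+      // deliberately conservative -- it bails out for *any* conditional
+      // branch terminating PrevBB, because a MachineBasicBlock carries no
+      // flag that would distinguish a fake branch from a real one.)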
+ MachineBasicBlock::iterator I = PrevBB.getLastNonDebugInstr(); + if (I != PrevBB.end()) { + if (I->isConditionalBranch()) { + return MadeChange ; + } + } + TII->RemoveBranch(PrevBB); MadeChange = true; ++NumBranchOpts; diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp index c8007a524e7..9ada111eaa9 100644 --- a/lib/CodeGen/CodeGenPrepare.cpp +++ b/lib/CodeGen/CodeGenPrepare.cpp @@ -15,9 +15,13 @@ #include "llvm/CodeGen/Passes.h" #include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/InstructionSimplify.h" +#include "llvm/Analysis/MemoryLocation.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" @@ -30,9 +34,12 @@ #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/InlineAsm.h" +#include "llvm/IR/InstIterator.h" +#include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/MDBuilder.h" +#include "llvm/IR/NoFolder.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Statepoint.h" #include "llvm/IR/ValueHandle.h" @@ -202,13 +209,858 @@ FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) { return new CodeGenPrepare(TM); } +namespace { + +bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal); +Value* GetUntaintedAddress(Value* CurrentAddress); + +// The depth we trace down a variable to look for its dependence set. +const unsigned kDependenceDepth = 4; + +// Recursively looks for variables that 'Val' depends on at the given depth +// 'Depth', and adds them in 'DepSet'. If 'InsertOnlyLeafNodes' is true, only +// inserts the leaf node values; otherwise, all visited nodes are included in +// 'DepSet'. Note that constants will be ignored. +template +void recursivelyFindDependence(SetType* DepSet, Value* Val, + bool InsertOnlyLeafNodes = false, + unsigned Depth = kDependenceDepth) { + if (Val == nullptr) { + return; + } + if (!InsertOnlyLeafNodes && !isa(Val)) { + DepSet->insert(Val); + } + if (Depth == 0) { + // Cannot go deeper. Insert the leaf nodes. + if (InsertOnlyLeafNodes && !isa(Val)) { + DepSet->insert(Val); + } + return; + } + + // Go one step further to explore the dependence of the operands. + Instruction* I = nullptr; + if ((I = dyn_cast(Val))) { + if (isa(I)) { + // A load is considerd the leaf load of the dependence tree. Done. 
+ DepSet->insert(Val); + return; + } else if (I->isBinaryOp()) { + BinaryOperator* I = dyn_cast(Val); + Value *Op0 = I->getOperand(0), *Op1 = I->getOperand(1); + recursivelyFindDependence(DepSet, Op0, Depth - 1); + recursivelyFindDependence(DepSet, Op1, Depth - 1); + } else if (I->isCast()) { + Value* Op0 = I->getOperand(0); + recursivelyFindDependence(DepSet, Op0, Depth - 1); + } else if (I->getOpcode() == Instruction::Select) { + Value* Op0 = I->getOperand(0); + Value* Op1 = I->getOperand(1); + Value* Op2 = I->getOperand(2); + recursivelyFindDependence(DepSet, Op0, Depth - 1); + recursivelyFindDependence(DepSet, Op1, Depth - 1); + recursivelyFindDependence(DepSet, Op2, Depth - 1); + } else if (I->getOpcode() == Instruction::GetElementPtr) { + for (unsigned i = 0; i < I->getNumOperands(); i++) { + recursivelyFindDependence(DepSet, I->getOperand(i), Depth - 1); + } + } else if (I->getOpcode() == Instruction::Store) { + auto* SI = dyn_cast(Val); + recursivelyFindDependence(DepSet, SI->getPointerOperand(), Depth - 1); + recursivelyFindDependence(DepSet, SI->getValueOperand(), Depth - 1); + } else { + Value* Op0 = nullptr; + Value* Op1 = nullptr; + switch (I->getOpcode()) { + case Instruction::ICmp: + case Instruction::FCmp: { + Op0 = I->getOperand(0); + Op1 = I->getOperand(1); + recursivelyFindDependence(DepSet, Op0, Depth - 1); + recursivelyFindDependence(DepSet, Op1, Depth - 1); + break; + } + default: { + // Be conservative. Add it and be done with it. + DepSet->insert(Val); + return; + } + } + } + } else if (isa(Val)) { + // Not interested in constant values. Done. + return; + } else { + // Be conservative. Add it and be done with it. + DepSet->insert(Val); + return; + } +} + +// Helper function to create a Cast instruction. +Value* createCast(IRBuilder& Builder, Value* DepVal, + Type* TargetIntegerType) { + Instruction::CastOps CastOp = Instruction::BitCast; + switch (DepVal->getType()->getTypeID()) { + case Type::IntegerTyID: { + CastOp = Instruction::SExt; + break; + } + case Type::FloatTyID: + case Type::DoubleTyID: { + CastOp = Instruction::FPToSI; + break; + } + case Type::PointerTyID: { + CastOp = Instruction::PtrToInt; + break; + } + default: { break; } + } + + return Builder.CreateCast(CastOp, DepVal, TargetIntegerType); +} + +// Given a value, if it's a tainted address, this function returns the +// instruction that ORs the "dependence value" with the "original address". +// Otherwise, returns nullptr. This instruction is the first OR instruction +// where one of its operand is an AND instruction with an operand being 0. +// +// E.g., it returns '%4 = or i32 %3, %2' given 'CurrentAddress' is '%5'. +// %0 = load i32, i32* @y, align 4, !tbaa !1 +// %cmp = icmp ne i32 %0, 42 // <== this is like the condition +// %1 = sext i1 %cmp to i32 +// %2 = ptrtoint i32* @x to i32 +// %3 = and i32 %1, 0 +// %4 = or i32 %3, %2 +// %5 = inttoptr i32 %4 to i32* +// store i32 1, i32* %5, align 4 +Instruction* getOrAddress(Value* CurrentAddress) { + // Is it a cast from integer to pointer type. + Instruction* OrAddress = nullptr; + Instruction* AndDep = nullptr; + Instruction* CastToInt = nullptr; + Value* ActualAddress = nullptr; + Constant* ZeroConst = nullptr; + + const Instruction* CastToPtr = dyn_cast(CurrentAddress); + if (CastToPtr && CastToPtr->getOpcode() == Instruction::IntToPtr) { + // Is it an OR instruction: %1 = or %and, %actualAddress. 
+ if ((OrAddress = dyn_cast(CastToPtr->getOperand(0))) && + OrAddress->getOpcode() == Instruction::Or) { + // The first operand should be and AND instruction. + AndDep = dyn_cast(OrAddress->getOperand(0)); + if (AndDep && AndDep->getOpcode() == Instruction::And) { + // Also make sure its first operand of the "AND" is 0, or the "AND" is + // marked explicitly by "NoInstCombine". + if ((ZeroConst = dyn_cast(AndDep->getOperand(1))) && + ZeroConst->isNullValue()) { + return OrAddress; + } + } + } + } + // Looks like it's not been tainted. + return nullptr; +} + +// Given a value, if it's a tainted address, this function returns the +// instruction that taints the "dependence value". Otherwise, returns nullptr. +// This instruction is the last AND instruction where one of its operand is 0. +// E.g., it returns '%3' given 'CurrentAddress' is '%5'. +// %0 = load i32, i32* @y, align 4, !tbaa !1 +// %cmp = icmp ne i32 %0, 42 // <== this is like the condition +// %1 = sext i1 %cmp to i32 +// %2 = ptrtoint i32* @x to i32 +// %3 = and i32 %1, 0 +// %4 = or i32 %3, %2 +// %5 = inttoptr i32 %4 to i32* +// store i32 1, i32* %5, align 4 +Instruction* getAndDependence(Value* CurrentAddress) { + // If 'CurrentAddress' is tainted, get the OR instruction. + auto* OrAddress = getOrAddress(CurrentAddress); + if (OrAddress == nullptr) { + return nullptr; + } + + // No need to check the operands. + auto* AndDepInst = dyn_cast(OrAddress->getOperand(0)); + assert(AndDepInst); + return AndDepInst; +} + +// Given a value, if it's a tainted address, this function returns +// the "dependence value", which is the first operand in the AND instruction. +// E.g., it returns '%1' given 'CurrentAddress' is '%5'. +// %0 = load i32, i32* @y, align 4, !tbaa !1 +// %cmp = icmp ne i32 %0, 42 // <== this is like the condition +// %1 = sext i1 %cmp to i32 +// %2 = ptrtoint i32* @x to i32 +// %3 = and i32 %1, 0 +// %4 = or i32 %3, %2 +// %5 = inttoptr i32 %4 to i32* +// store i32 1, i32* %5, align 4 +Value* getDependence(Value* CurrentAddress) { + auto* AndInst = getAndDependence(CurrentAddress); + if (AndInst == nullptr) { + return nullptr; + } + return AndInst->getOperand(0); +} + +// Given an address that has been tainted, returns the only condition it depends +// on, if any; otherwise, returns nullptr. +Value* getConditionDependence(Value* Address) { + auto* Dep = getDependence(Address); + if (Dep == nullptr) { + // 'Address' has not been dependence-tainted. + return nullptr; + } + + Value* Operand = Dep; + while (true) { + auto* Inst = dyn_cast(Operand); + if (Inst == nullptr) { + // Non-instruction type does not have condition dependence. + return nullptr; + } + if (Inst->getOpcode() == Instruction::ICmp) { + return Inst; + } else { + if (Inst->getNumOperands() != 1) { + return nullptr; + } else { + Operand = Inst->getOperand(0); + } + } + } +} + +// Conservatively decides whether the dependence set of 'Val1' includes the +// dependence set of 'Val2'. If 'ExpandSecondValue' is false, we do not expand +// 'Val2' and use that single value as its dependence set. +// If it returns true, it means the dependence set of 'Val1' includes that of +// 'Val2'; otherwise, it only means we cannot conclusively decide it. +bool dependenceSetInclusion(Value* Val1, Value* Val2, + int Val1ExpandLevel = 2 * kDependenceDepth, + int Val2ExpandLevel = kDependenceDepth) { + typedef SmallSet IncludingSet; + typedef SmallSet IncludedSet; + + IncludingSet DepSet1; + IncludedSet DepSet2; + // Look for more depths for the including set. 
+ recursivelyFindDependence(&DepSet1, Val1, false /*Insert all visited nodes*/, + Val1ExpandLevel); + recursivelyFindDependence(&DepSet2, Val2, true /*Only insert leaf nodes*/, + Val2ExpandLevel); + + auto set_inclusion = [](IncludingSet FullSet, IncludedSet Subset) { + for (auto* Dep : Subset) { + if (0 == FullSet.count(Dep)) { + return false; + } + } + return true; + }; + bool inclusion = set_inclusion(DepSet1, DepSet2); + DEBUG(dbgs() << "[dependenceSetInclusion]: " << inclusion << "\n"); + DEBUG(dbgs() << "Including set for: " << *Val1 << "\n"); + DEBUG(for (const auto* Dep : DepSet1) { dbgs() << "\t\t" << *Dep << "\n"; }); + DEBUG(dbgs() << "Included set for: " << *Val2 << "\n"); + DEBUG(for (const auto* Dep : DepSet2) { dbgs() << "\t\t" << *Dep << "\n"; }); + + return inclusion; +} + +// Recursively iterates through the operands spawned from 'DepVal'. If there +// exists a single value that 'DepVal' only depends on, we call that value the +// root dependence of 'DepVal' and return it. Otherwise, return 'DepVal'. +Value* getRootDependence(Value* DepVal) { + SmallSet DepSet; + for (unsigned depth = kDependenceDepth; depth > 0; --depth) { + recursivelyFindDependence(&DepSet, DepVal, true /*Only insert leaf nodes*/, + depth); + if (DepSet.size() == 1) { + return *DepSet.begin(); + } + DepSet.clear(); + } + return DepVal; +} + +// This function actually taints 'DepVal' to the address to 'SI'. If the +// address +// of 'SI' already depends on whatever 'DepVal' depends on, this function +// doesn't do anything and returns false. Otherwise, returns true. +// +// This effect forces the store and any stores that comes later to depend on +// 'DepVal'. For example, we have a condition "cond", and a store instruction +// "s: STORE addr, val". If we want "s" (and any later store) to depend on +// "cond", we do the following: +// %conv = sext i1 %cond to i32 +// %addrVal = ptrtoint i32* %addr to i32 +// %andCond = and i32 conv, 0; +// %orAddr = or i32 %andCond, %addrVal; +// %NewAddr = inttoptr i32 %orAddr to i32*; +// +// This is a more concrete example: +// ------ +// %0 = load i32, i32* @y, align 4, !tbaa !1 +// %cmp = icmp ne i32 %0, 42 // <== this is like the condition +// %1 = sext i1 %cmp to i32 +// %2 = ptrtoint i32* @x to i32 +// %3 = and i32 %1, 0 +// %4 = or i32 %3, %2 +// %5 = inttoptr i32 %4 to i32* +// store i32 1, i32* %5, align 4 +bool taintStoreAddress(StoreInst* SI, Value* DepVal, + const char* calling_func = __builtin_FUNCTION()) { + DEBUG(dbgs() << "Called from " << calling_func << '\n'); + IRBuilder Builder(SI); + BasicBlock* BB = SI->getParent(); + Value* Address = SI->getPointerOperand(); + Type* TargetIntegerType = + IntegerType::get(Address->getContext(), + BB->getModule()->getDataLayout().getPointerSizeInBits()); + + // Does SI's address already depends on whatever 'DepVal' depends on? + if (StoreAddressDependOnValue(SI, DepVal)) { + return false; + } + + // Figure out if there's a root variable 'DepVal' depends on. For example, we + // can extract "getelementptr inbounds %struct, %struct* %0, i64 0, i32 123" + // to be "%struct* %0" since all other operands are constant. + DepVal = getRootDependence(DepVal); + + // Is this already a dependence-tainted store? + Value* OldDep = getDependence(Address); + if (OldDep) { + // The address of 'SI' has already been tainted. Just need to absorb the + // DepVal to the existing dependence in the address of SI. 
+ Instruction* AndDep = getAndDependence(Address); + IRBuilder Builder(AndDep); + Value* NewDep = nullptr; + if (DepVal->getType() == AndDep->getType()) { + NewDep = Builder.CreateAnd(OldDep, DepVal); + } else { + NewDep = Builder.CreateAnd( + OldDep, createCast(Builder, DepVal, TargetIntegerType)); + } + + auto* NewDepInst = dyn_cast(NewDep); + + // Use the new AND instruction as the dependence + AndDep->setOperand(0, NewDep); + return true; + } + + // SI's address has not been tainted. Now taint it with 'DepVal'. + Value* CastDepToInt = createCast(Builder, DepVal, TargetIntegerType); + Value* PtrToIntCast = Builder.CreatePtrToInt(Address, TargetIntegerType); + Value* AndDepVal = + Builder.CreateAnd(CastDepToInt, ConstantInt::get(TargetIntegerType, 0)); + auto AndInst = dyn_cast(AndDepVal); + // XXX-comment: The original IR InstCombiner would change our and instruction + // to a select and then the back end optimize the condition out. We attach a + // flag to instructions and set it here to inform the InstCombiner to not to + // touch this and instruction at all. + Value* OrAddr = Builder.CreateOr(AndDepVal, PtrToIntCast); + Value* NewAddr = Builder.CreateIntToPtr(OrAddr, Address->getType()); + + DEBUG(dbgs() << "[taintStoreAddress]\n" + << "Original store: " << *SI << '\n'); + SI->setOperand(1, NewAddr); + + // Debug output. + DEBUG(dbgs() << "\tTargetIntegerType: " << *TargetIntegerType << '\n' + << "\tCast dependence value to integer: " << *CastDepToInt + << '\n' + << "\tCast address to integer: " << *PtrToIntCast << '\n' + << "\tAnd dependence value: " << *AndDepVal << '\n' + << "\tOr address: " << *OrAddr << '\n' + << "\tCast or instruction to address: " << *NewAddr << "\n\n"); + + return true; +} + +// Looks for the previous store in the if block --- 'BrBB', which makes the +// speculative store 'StoreToHoist' safe. +Value* getSpeculativeStoreInPrevBB(StoreInst* StoreToHoist, BasicBlock* BrBB) { + assert(StoreToHoist && "StoreToHoist must be a real store"); + + Value* StorePtr = StoreToHoist->getPointerOperand(); + + // Look for a store to the same pointer in BrBB. + for (BasicBlock::reverse_iterator RI = BrBB->rbegin(), RE = BrBB->rend(); + RI != RE; ++RI) { + Instruction* CurI = &*RI; + + StoreInst* SI = dyn_cast(CurI); + // Found the previous store make sure it stores to the same location. + // XXX-update: If the previous store's original untainted address are the + // same as 'StorePtr', we are also good to hoist the store. + if (SI && (SI->getPointerOperand() == StorePtr || + GetUntaintedAddress(SI->getPointerOperand()) == StorePtr)) { + // Found the previous store, return its value operand. + return SI; + } + } + + assert(false && + "We should not reach here since this store is safe to speculate"); +} + +// XXX-comment: Returns true if it changes the code, false otherwise (the branch +// condition already depends on 'DepVal'. +bool taintConditionalBranch(BranchInst* BI, Value* DepVal) { + assert(BI->isConditional()); + auto* Cond = BI->getOperand(0); + if (dependenceSetInclusion(Cond, DepVal)) { + // The dependence/ordering is self-evident. + return false; + } + + IRBuilder Builder(BI); + auto* AndDep = + Builder.CreateAnd(DepVal, ConstantInt::get(DepVal->getType(), 0)); + auto* TruncAndDep = + Builder.CreateTrunc(AndDep, IntegerType::get(DepVal->getContext(), 1)); + auto* OrCond = Builder.CreateOr(TruncAndDep, Cond); + BI->setOperand(0, OrCond); + + // Debug output. 
+  DEBUG(dbgs() << "\tTainted branch condition:\n" << *BI->getParent());
+
+  return true;
+}
+
+bool ConditionalBranchDependsOnValue(BranchInst* BI, Value* DepVal) {
+  assert(BI->isConditional());
+  auto* Cond = BI->getOperand(0);
+  return dependenceSetInclusion(Cond, DepVal);
+}
+
+// XXX-update: For a relaxed load 'LI', finds the first immediately following
+// atomic store or conditional branch. Returns nullptr if there is no such
+// store/branch, in which case the only way to enforce the load's ordering is
+// to make it 'acquire'.
+Instruction* findFirstStoreCondBranchInst(LoadInst* LI) {
+  // In some situations, relaxed loads can be left as is:
+  // 1. The relaxed load is used to calculate the address of the immediately
+  // following store;
+  // 2. The relaxed load is used as the condition of the immediately following
+  // conditional branch, and there are no stores in between. This is actually
+  // quite common. E.g.,
+  // int r1 = x.load(relaxed);
+  // if (r1 != 0) {
+  //   y.store(1, relaxed);
+  // }
+  // However, in this function, we don't deal with them directly. Instead, we
+  // just find the immediately following store/conditional branch and return
+  // it.
+
+  auto* BB = LI->getParent();
+  auto BE = BB->end();
+  auto BBI = BasicBlock::iterator(LI);
+  BBI++;
+  while (true) {
+    for (; BBI != BE; BBI++) {
+      auto* Inst = dyn_cast<Instruction>(&*BBI);
+      if (Inst == nullptr) {
+        continue;
+      }
+      if (Inst->getOpcode() == Instruction::Store) {
+        return Inst;
+      } else if (Inst->getOpcode() == Instruction::Br) {
+        auto* BrInst = dyn_cast<BranchInst>(Inst);
+        if (BrInst->isConditional()) {
+          return Inst;
+        } else {
+          // Reinitialize iterators with the destination of the unconditional
+          // branch.
+          BB = BrInst->getSuccessor(0);
+          BBI = BB->begin();
+          BE = BB->end();
+          break;
+        }
+      }
+    }
+    if (BBI == BE) {
+      return nullptr;
+    }
+  }
+}
+
+// XXX-comment: Returns whether the code has been changed.
+bool taintMonotonicLoads(
+    const SmallVector<LoadInst*, 16>& MonotonicLoadInsts) {
+  bool Changed = false;
+  for (auto* LI : MonotonicLoadInsts) {
+    auto* FirstInst = findFirstStoreCondBranchInst(LI);
+    if (FirstInst == nullptr) {
+      // We don't seem to be able to taint a following store/conditional branch
+      // instruction. Simply make it acquire.
+      DEBUG(dbgs() << "[RelaxedLoad]: Transformed to acquire load\n"
+                   << *LI << "\n");
+      LI->setOrdering(Acquire);
+      Changed = true;
+      continue;
+    }
+    // Taint 'FirstInst', which could be a store or a conditional branch
+    // instruction.
+    if (FirstInst->getOpcode() == Instruction::Store) {
+      Changed |= taintStoreAddress(dyn_cast<StoreInst>(FirstInst), LI);
+    } else if (FirstInst->getOpcode() == Instruction::Br) {
+      Changed |= taintConditionalBranch(dyn_cast<BranchInst>(FirstInst), LI);
+    } else {
+      assert(false && "findFirstStoreCondBranchInst() should return a "
+                      "store/condition branch instruction");
+    }
+  }
+  return Changed;
+}
+
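+// Illustration (editorial sketch, not part of the original commit): for a
+// relaxed load whose value feeds neither the next store's address nor the
+// next branch's condition, TaintRelaxedLoads() below turns
+//
+//   %r = load atomic i32, i32* @x monotonic, align 4
+//   store i32 1, i32* @y, align 4
+//
+// into roughly the following (block names are approximate):
+//
+//   %r = load atomic i32, i32* @x monotonic, align 4
+//   %fake = icmp eq i32 %r, 0
+//   br i1 %fake, label %entryThen.Fake, label %entryElse.Fake
+//
+// entryThen.Fake:                               ; marked non-eliminable
+//   br label %entry.split
+//
+// entryElse.Fake:                               ; marked non-eliminable
+//   br label %entry.split
+//
+// entry.split:
+//   store i32 1, i32* @y, align 4
+//
+// Both arms fall through to the same continuation; the branch exists only to
+// create a control dependence on the loaded value.
+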
+// Inserts a fake conditional branch right after the instruction 'SplitInst',
+// and the branch condition is 'Condition'. 'SplitInst' will be placed in the
+// newly created block.
+void AddFakeConditionalBranch(Instruction* SplitInst, Value* Condition) {
+  auto* BB = SplitInst->getParent();
+  TerminatorInst* ThenTerm = nullptr;
+  TerminatorInst* ElseTerm = nullptr;
+  SplitBlockAndInsertIfThenElse(Condition, SplitInst, &ThenTerm, &ElseTerm);
+  auto* ThenBB = ThenTerm->getParent();
+  auto* ElseBB = ElseTerm->getParent();
+  ThenBB->disableCanEliminateBlock();
+  ElseBB->disableCanEliminateBlock();
+  ThenBB->setName(BB->getName() + "Then.Fake");
+  ElseBB->setName(BB->getName() + "Else.Fake");
+  DEBUG(dbgs() << "Add fake conditional branch:\n"
+               << "Then Block:\n"
+               << *ThenBB << "Else Block:\n"
+               << *ElseBB << "\n");
+}
+
+// Taints the relaxed load 'LI' by inserting a fake conditional branch right
+// after it; the branch condition is computed from the loaded value, so the
+// branch carries a control dependence on 'LI'.
+void TaintRelaxedLoads(LoadInst* LI) {
+  IRBuilder<true, NoFolder> Builder(LI->getNextNode());
+  auto* FakeCondition = dyn_cast<Instruction>(Builder.CreateICmp(
+      CmpInst::ICMP_EQ, LI, Constant::getNullValue(LI->getType())));
+  AddFakeConditionalBranch(FakeCondition->getNextNode(), FakeCondition);
+}
+
+// XXX-comment: Returns whether the code has been changed.
+bool AddsFakeConditionalBranchAfterMonotonicLoads(
+    const SmallVector<LoadInst*, 16>& MonotonicLoadInsts) {
+  bool Changed = false;
+  for (auto* LI : MonotonicLoadInsts) {
+    auto* FirstInst = findFirstStoreCondBranchInst(LI);
+    if (FirstInst != nullptr) {
+      if (FirstInst->getOpcode() == Instruction::Store) {
+        if (StoreAddressDependOnValue(dyn_cast<StoreInst>(FirstInst), LI)) {
+          continue;
+        }
+      } else if (FirstInst->getOpcode() == Instruction::Br) {
+        if (ConditionalBranchDependsOnValue(dyn_cast<BranchInst>(FirstInst),
+                                            LI)) {
+          continue;
+        }
+      } else {
+        dbgs() << "FirstInst=" << *FirstInst << "\n";
+        assert(false && "findFirstStoreCondBranchInst() should return a "
+                        "store/condition branch instruction");
+      }
+    }
+
+    // We really need to process the relaxed load now.
+    TaintRelaxedLoads(LI);
+    Changed = true;
+  }
+  return Changed;
+}
+
+/**** Implementations of public methods for dependence tainting ****/
+Value* GetUntaintedAddress(Value* CurrentAddress) {
+  auto* OrAddress = getOrAddress(CurrentAddress);
+  if (OrAddress == nullptr) {
+    // Is it tainted by a select instruction?
+    auto* Inst = dyn_cast<Instruction>(CurrentAddress);
+    if (nullptr != Inst && Inst->getOpcode() == Instruction::Select) {
+      // A select instruction.
+      if (Inst->getOperand(1) == Inst->getOperand(2)) {
+        return Inst->getOperand(1);
+      }
+    }
+
+    return CurrentAddress;
+  }
+  Value* ActualAddress = nullptr;
+
+  auto* CastToInt = dyn_cast<Instruction>(OrAddress->getOperand(1));
+  if (CastToInt && CastToInt->getOpcode() == Instruction::PtrToInt) {
+    return CastToInt->getOperand(0);
+  } else {
+    // This should be a PtrToInt constant expression.
+    ConstantExpr* PtrToIntExpr =
+        dyn_cast<ConstantExpr>(OrAddress->getOperand(1));
+    if (PtrToIntExpr && PtrToIntExpr->getOpcode() == Instruction::PtrToInt) {
+      return PtrToIntExpr->getOperand(0);
+    }
+  }
+
+  // Looks like it has not been dependence-tainted. Return the address itself.
+ return CurrentAddress; +} + +MemoryLocation GetUntaintedMemoryLocation(StoreInst* SI) { + AAMDNodes AATags; + SI->getAAMetadata(AATags); + const auto& DL = SI->getModule()->getDataLayout(); + const auto* OriginalAddr = GetUntaintedAddress(SI->getPointerOperand()); + DEBUG(if (OriginalAddr != SI->getPointerOperand()) { + dbgs() << "[GetUntaintedMemoryLocation]\n" + << "Storing address: " << *SI->getPointerOperand() + << "\nUntainted address: " << *OriginalAddr << "\n"; + }); + return MemoryLocation(OriginalAddr, + DL.getTypeStoreSize(SI->getValueOperand()->getType()), + AATags); +} + +bool TaintDependenceToStore(StoreInst* SI, Value* DepVal) { + if (dependenceSetInclusion(SI, DepVal)) { + return false; + } + + bool tainted = taintStoreAddress(SI, DepVal); + assert(tainted); + return tainted; +} + +bool TaintDependenceToStoreAddress(StoreInst* SI, Value* DepVal) { + if (dependenceSetInclusion(SI->getPointerOperand(), DepVal)) { + return false; + } + + bool tainted = taintStoreAddress(SI, DepVal); + assert(tainted); + return tainted; +} + +bool CompressTaintedStore(BasicBlock* BB) { + // This function looks for windows of adajcent stores in 'BB' that satisfy the + // following condition (and then do optimization): + // *Addr(d1) = v1, d1 is a condition and is the only dependence the store's + // address depends on && Dep(v1) includes Dep(d1); + // *Addr(d2) = v2, d2 is a condition and is the only dependnece the store's + // address depends on && Dep(v2) includes Dep(d2) && + // Dep(d2) includes Dep(d1); + // ... + // *Addr(dN) = vN, dN is a condition and is the only dependence the store's + // address depends on && Dep(dN) includes Dep(d"N-1"). + // + // As a result, Dep(dN) includes [Dep(d1) V ... V Dep(d"N-1")], so we can + // safely transform the above to the following. In between these stores, we + // can omit untainted stores to the same address 'Addr' since they internally + // have dependence on the previous stores on the same address. + // => + // *Addr = v1 + // *Addr = v2 + // *Addr(d3) = v3 + for (auto BI = BB->begin(), BE = BB->end(); BI != BE; BI++) { + // Look for the first store in such a window of adajacent stores. + auto* FirstSI = dyn_cast(&*BI); + if (!FirstSI) { + continue; + } + + // The first store in the window must be tainted. + auto* UntaintedAddress = GetUntaintedAddress(FirstSI->getPointerOperand()); + if (UntaintedAddress == FirstSI->getPointerOperand()) { + continue; + } + + // The first store's address must directly depend on and only depend on a + // condition. + auto* FirstSIDepCond = getConditionDependence(FirstSI->getPointerOperand()); + if (nullptr == FirstSIDepCond) { + continue; + } + + // Dep(first store's storing value) includes Dep(tainted dependence). + if (!dependenceSetInclusion(FirstSI->getValueOperand(), FirstSIDepCond)) { + continue; + } + + // Look for subsequent stores to the same address that satisfy the condition + // of "compressing the dependence". + SmallVector AdajacentStores; + AdajacentStores.push_back(FirstSI); + auto BII = BasicBlock::iterator(FirstSI); + for (BII++; BII != BE; BII++) { + auto* CurrSI = dyn_cast(&*BII); + if (!CurrSI) { + if (BII->mayHaveSideEffects()) { + // Be conservative. Instructions with side effects are similar to + // stores. + break; + } + continue; + } + + auto* OrigAddress = GetUntaintedAddress(CurrSI->getPointerOperand()); + auto* CurrSIDepCond = getConditionDependence(CurrSI->getPointerOperand()); + // All other stores must satisfy either: + // A. 
'CurrSI' is an untainted store to the same address, or + // B. the combination of the following 5 subconditions: + // 1. Tainted; + // 2. Untainted address is the same as the group's address; + // 3. The address is tainted with a sole value which is a condition; + // 4. The storing value depends on the condition in 3. + // 5. The condition in 3 depends on the previous stores dependence + // condition. + + // Condition A. Should ignore this store directly. + if (OrigAddress == CurrSI->getPointerOperand() && + OrigAddress == UntaintedAddress) { + continue; + } + // Check condition B. + Value* Cond = nullptr; + if (OrigAddress == CurrSI->getPointerOperand() || + OrigAddress != UntaintedAddress || CurrSIDepCond == nullptr || + !dependenceSetInclusion(CurrSI->getValueOperand(), CurrSIDepCond)) { + // Check condition 1, 2, 3 & 4. + break; + } + + // Check condition 5. + StoreInst* PrevSI = AdajacentStores[AdajacentStores.size() - 1]; + auto* PrevSIDepCond = getConditionDependence(PrevSI->getPointerOperand()); + assert(PrevSIDepCond && + "Store in the group must already depend on a condtion"); + if (!dependenceSetInclusion(CurrSIDepCond, PrevSIDepCond)) { + break; + } + + AdajacentStores.push_back(CurrSI); + } + + if (AdajacentStores.size() == 1) { + // The outer loop should keep looking from the next store. + continue; + } + + // Now we have such a group of tainted stores to the same address. + DEBUG(dbgs() << "[CompressTaintedStore]\n"); + DEBUG(dbgs() << "Original BB\n"); + DEBUG(dbgs() << *BB << '\n'); + auto* LastSI = AdajacentStores[AdajacentStores.size() - 1]; + for (unsigned i = 0; i < AdajacentStores.size() - 1; ++i) { + auto* SI = AdajacentStores[i]; + + // Use the original address for stores before the last one. + SI->setOperand(1, UntaintedAddress); + + DEBUG(dbgs() << "Store address has been reversed: " << *SI << '\n';); + } + // XXX-comment: Try to make the last store use fewer registers. + // If LastSI's storing value is a select based on the condition with which + // its address is tainted, transform the tainted address to a select + // instruction, as follows: + // r1 = Select Cond ? A : B + // r2 = Cond & 0 + // r3 = Addr | r2 + // *r3 = r1 + // ==> + // r1 = Select Cond ? A : B + // r2 = Select Cond ? Addr : Addr + // *r2 = r1 + // The idea is that both Select instructions depend on the same condition, + // so hopefully the backend can generate two cmov instructions for them (and + // this saves the number of registers needed). + auto* LastSIDep = getConditionDependence(LastSI->getPointerOperand()); + auto* LastSIValue = dyn_cast(LastSI->getValueOperand()); + if (LastSIValue && LastSIValue->getOpcode() == Instruction::Select && + LastSIValue->getOperand(0) == LastSIDep) { + // XXX-comment: Maybe it's better for us to just leave it as an and/or + // dependence pattern. + // /* + IRBuilder Builder(LastSI); + auto* Address = + Builder.CreateSelect(LastSIDep, UntaintedAddress, UntaintedAddress); + LastSI->setOperand(1, Address); + DEBUG(dbgs() << "The last store becomes :" << *LastSI << "\n\n";); + // */ + } + } + + return true; +} + +bool PassDependenceToStore(Value* OldAddress, StoreInst* NewStore) { + Value* OldDep = getDependence(OldAddress); + // Return false when there's no dependence to pass from the OldAddress. + if (!OldDep) { + return false; + } + + // No need to pass the dependence to NewStore's address if it already depends + // on whatever 'OldAddress' depends on. 
+  if (StoreAddressDependOnValue(NewStore, OldDep)) {
+    return false;
+  }
+  return taintStoreAddress(NewStore, OldAddress);
+}
+
+SmallSet<Value*, 8> FindDependence(Value* Val) {
+  SmallSet<Value*, 8> DepSet;
+  recursivelyFindDependence(&DepSet, Val, true /*Only insert leaf nodes*/);
+  return DepSet;
+}
+
+bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal) {
+  return dependenceSetInclusion(SI->getPointerOperand(), DepVal);
+}
+
+bool StoreDependOnValue(StoreInst* SI, Value* Dep) {
+  return dependenceSetInclusion(SI, Dep);
+}
+
+} // namespace
+
+
 bool CodeGenPrepare::runOnFunction(Function &F) {
+  // XXX-comment: Delay dealing with relaxed loads in this function to avoid
+  // further changes done by other passes (e.g., SimplifyCFG).
+
+  // Collect all the relaxed loads.
+  SmallVector<LoadInst*, 16> MonotonicLoadInsts;
+  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
+    if (I->isAtomic()) {
+      switch (I->getOpcode()) {
+        case Instruction::Load: {
+          auto* LI = dyn_cast<LoadInst>(&*I);
+          if (LI->getOrdering() == Monotonic) {
+            MonotonicLoadInsts.push_back(LI);
+          }
+          break;
+        }
+        default: {
+          break;
+        }
+      }
+    }
+  }
+  bool EverMadeChange =
+      AddsFakeConditionalBranchAfterMonotonicLoads(MonotonicLoadInsts);
+
   if (skipOptnoneFunction(F))
     return false;
 
   DL = &F.getParent()->getDataLayout();
 
-  bool EverMadeChange = false;
   // Clear per function information.
   InsertedInsts.clear();
   PromotedInsts.clear();
@@ -363,6 +1215,10 @@ bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
   // Note that this intentionally skips the entry block.
   for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
     BasicBlock *BB = &*I++;
+    // XXX-disabled: Do not eliminate the added fake basic block.
+    if (!BB->getCanEliminateBlock()) {
+      continue;
+    }
 
     // If this block doesn't end with an uncond branch, ignore it.
     BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index f5e30564501..4beaa7f3964 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -1271,7 +1271,10 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
     //
     bool needUpdateBr = true;
     if (!Cond.empty() && (!FBB || FBB == ChainBB)) {
+      // XXX-disabled: Don't bother with added fake conditional branches.
+      /*
       PrevBB->updateTerminator();
+      */
       needUpdateBr = false;
       Cond.clear();
       TBB = FBB = nullptr;
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index b2090ab4ea0..8cc3ff22725 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -218,7 +218,7 @@ void AArch64PassConfig::addIRPasses() {
   addPass(createAtomicExpandPass(TM));
   // XXX-update: Immediately add the -licm pass after the atomic expand pass
   // to deal with loop invariants introduced manually.
-  addPass(createLICMPass());
+//  addPass(createLICMPass());
 
   // Cmpxchg instructions are often used with a subsequent comparison to
   // determine whether it succeeded. We can exploit existing control-flow in
   // ldrex/strex loops to simplify this, but it needs tidying up.
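
Editor's addendum (illustrative, not part of the commit): on a weakly ordered
target such as AArch64, the fake conditional branch is what gives a relaxed
load its load->store ordering without a fence, since the hardware cannot make
a later store visible before a branch whose condition depends on the load has
been resolved. A plausible lowering of the IR sketch above (registers and
labels are made up):

    ldr   w8, [x9]        // relaxed load
    cmp   w8, #0          // fake condition computed from the loaded value
    b.ne  .LBB0_1         // fake branch, not removed by BranchFolding (see above)
.LBB0_1:                  // both arms reach the same continuation
    str   w10, [x11]      // later store, now control-dependent on the load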