diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index f6b2999c294..57432c7d71d 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -17,20 +17,24 @@
 #define DEBUG_TYPE "dse"
 #include "llvm/Transforms/Scalar.h"
-#include "llvm/Constants.h"
-#include "llvm/Function.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Pass.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CaptureTracking.h"
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Utils/Local.h"
 using namespace llvm;
@@ -41,37 +45,37 @@ namespace {
   struct DSE : public FunctionPass {
     AliasAnalysis *AA;
     MemoryDependenceAnalysis *MD;
+    DominatorTree *DT;
+    const TargetLibraryInfo *TLI;
 
     static char ID; // Pass identification, replacement for typeid
-    DSE() : FunctionPass(ID), AA(0), MD(0) {
+    DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
       initializeDSEPass(*PassRegistry::getPassRegistry());
     }
 
     virtual bool runOnFunction(Function &F) {
       AA = &getAnalysis<AliasAnalysis>();
       MD = &getAnalysis<MemoryDependenceAnalysis>();
-      DominatorTree &DT = getAnalysis<DominatorTree>();
-      
+      DT = &getAnalysis<DominatorTree>();
+      TLI = AA->getTargetLibraryInfo();
+
       bool Changed = false;
       for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
         // Only check non-dead blocks.  Dead blocks may have strange pointer
         // cycles that will confuse alias analysis.
-        if (DT.isReachableFromEntry(I))
+        if (DT->isReachableFromEntry(I))
           Changed |= runOnBasicBlock(*I);
-      
-      AA = 0; MD = 0;
+
+      AA = 0; MD = 0; DT = 0;
       return Changed;
     }
-    
+
     bool runOnBasicBlock(BasicBlock &BB);
     bool HandleFree(CallInst *F);
     bool handleEndBlock(BasicBlock &BB);
     void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
-                               SmallPtrSet<Value*, 16> &DeadStackObjects);
-    
+                               SmallSetVector<Value*, 16> &DeadStackObjects);
 
-    // getAnalysisUsage - We require post dominance frontiers (aka Control
-    // Dependence Graph)
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.setPreservesCFG();
       AU.addRequired<DominatorTree>();
@@ -105,44 +109,45 @@ FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
 ///
 static void DeleteDeadInstruction(Instruction *I,
                                   MemoryDependenceAnalysis &MD,
-                                  SmallPtrSet<Value*, 16> *ValueSet = 0) {
+                                  const TargetLibraryInfo *TLI,
+                                  SmallSetVector<Value*, 16> *ValueSet = 0) {
   SmallVector<Instruction*, 32> NowDeadInsts;
-  
+
   NowDeadInsts.push_back(I);
   --NumFastOther;
-  
+
   // Before we touch this instruction, remove it from memdep!
   do {
     Instruction *DeadInst = NowDeadInsts.pop_back_val();
     ++NumFastOther;
-    
+
     // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);
-    
+
    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, 0);
-      
+
      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;
-      
+
      if (Instruction *OpI = dyn_cast<Instruction>(Op))
-        if (isInstructionTriviallyDead(OpI))
+        if (isInstructionTriviallyDead(OpI, TLI))
          NowDeadInsts.push_back(OpI);
    }
-    
+
    DeadInst->eraseFromParent();
-    
-    if (ValueSet) ValueSet->erase(DeadInst);
+
+    if (ValueSet) ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}
 
 /// hasMemoryWrite - Does this instruction write some memory?  This only returns
 /// true for things that we can analyze with other helpers below.
-static bool hasMemoryWrite(Instruction *I) {
+static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
   if (isa<StoreInst>(I))
     return true;
   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
@@ -157,37 +162,59 @@ static bool hasMemoryWrite(Instruction *I) {
       return true;
     }
   }
+  if (CallSite CS = I) {
+    if (Function *F = CS.getCalledFunction()) {
+      if (TLI && TLI->has(LibFunc::strcpy) &&
+          F->getName() == TLI->getName(LibFunc::strcpy)) {
+        return true;
+      }
+      if (TLI && TLI->has(LibFunc::strncpy) &&
+          F->getName() == TLI->getName(LibFunc::strncpy)) {
+        return true;
+      }
+      if (TLI && TLI->has(LibFunc::strcat) &&
+          F->getName() == TLI->getName(LibFunc::strcat)) {
+        return true;
+      }
+      if (TLI && TLI->has(LibFunc::strncat) &&
+          F->getName() == TLI->getName(LibFunc::strncat)) {
+        return true;
+      }
+    }
+  }
   return false;
 }
 
 /// getLocForWrite - Return a Location stored to by the specified instruction.
+/// If isRemovable returns true, this function and getLocForRead completely
+/// describe the memory operations for this instruction.
 static AliasAnalysis::Location
 getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
     return AA.getLocation(SI);
-  
+
   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
     // memcpy/memmove/memset.
     AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
     // If we don't have target data around, an unknown size in Location means
     // that we should use the size of the pointee type.  This isn't valid for
     // memset/memcpy, which writes more than an i8.
-    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
+    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getDataLayout() == 0)
       return AliasAnalysis::Location();
     return Loc;
   }
-  
+
   IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
   if (II == 0) return AliasAnalysis::Location();
-  
+
   switch (II->getIntrinsicID()) {
   default: return AliasAnalysis::Location(); // Unhandled intrinsic.
   case Intrinsic::init_trampoline:
     // If we don't have target data around, an unknown size in Location means
     // that we should use the size of the pointee type.  This isn't valid for
     // init.trampoline, which writes more than an i8.
-    if (AA.getTargetData() == 0) return AliasAnalysis::Location();
-    
+    if (AA.getDataLayout() == 0) return AliasAnalysis::Location();
+
     // FIXME: We don't know the size of the trampoline, so we can't really
     // handle it here.
     return AliasAnalysis::Location(II->getArgOperand(0));
@@ -198,30 +225,74 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
   }
 }
 
+/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
+/// instruction if any.
+static AliasAnalysis::Location
+getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
+  assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
+         "Unknown instruction case");
+
+  // The only instructions that both read and write are the mem transfer
+  // instructions (memcpy/memmove).
+  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
+    return AA.getLocationForSource(MTI);
+  return AliasAnalysis::Location();
+}
+
+
 /// isRemovable - If the value of this instruction and the memory it writes to
 /// is unused, may we delete this instruction?
 static bool isRemovable(Instruction *I) {
-  // Don't remove volatile stores.
+  // Don't remove volatile/atomic stores.
   if (StoreInst *SI = dyn_cast<StoreInst>(I))
-    return !SI->isVolatile();
-  
-  IntrinsicInst *II = cast<IntrinsicInst>(I);
-  switch (II->getIntrinsicID()) {
-  default: assert(0 && "doesn't pass 'hasMemoryWrite' predicate");
-  case Intrinsic::lifetime_end:
-    // Never remove dead lifetime_end's, e.g. because it is followed by a
-    // free.
+    return SI->isUnordered();
+
+  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+    switch (II->getIntrinsicID()) {
+    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
+    case Intrinsic::lifetime_end:
+      // Never remove dead lifetime_end's, e.g. because it is followed by a
+      // free.
+      return false;
+    case Intrinsic::init_trampoline:
+      // Always safe to remove init_trampoline.
+      return true;
+
+    case Intrinsic::memset:
+    case Intrinsic::memmove:
+    case Intrinsic::memcpy:
+      // Don't remove volatile memory intrinsics.
+      return !cast<MemIntrinsic>(II)->isVolatile();
+    }
+  }
+
+  if (CallSite CS = I)
+    return CS.getInstruction()->use_empty();
+
+  return false;
+}
+
+
+/// isShortenable - Returns true if this instruction can be safely shortened in
+/// length.
+static bool isShortenable(Instruction *I) {
+  // Don't shorten stores for now
+  if (isa<StoreInst>(I))
    return false;
-  case Intrinsic::init_trampoline:
-    // Always safe to remove init_trampoline.
-    return true;
-    
-  case Intrinsic::memset:
-  case Intrinsic::memmove:
-  case Intrinsic::memcpy:
-    // Don't remove volatile memory intrinsics.
-    return !cast<MemIntrinsic>(II)->isVolatile();
+
+  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+    switch (II->getIntrinsicID()) {
+      default: return false;
+      case Intrinsic::memset:
+      case Intrinsic::memcpy:
+        // Do shorten memory intrinsics.
+        return true;
+    }
  }
+
+  // Don't shorten libcalls for now.
+
+  return false;
 }
 
 /// getStoredPointerOperand - Return the pointer that is being written to.
@@ -231,51 +302,48 @@ static Value *getStoredPointerOperand(Instruction *I) {
   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
     return MI->getDest();
 
-  IntrinsicInst *II = cast<IntrinsicInst>(I);
-  switch (II->getIntrinsicID()) {
-  default: assert(false && "Unexpected intrinsic!");
-  case Intrinsic::init_trampoline:
-    return II->getArgOperand(0);
+  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+    switch (II->getIntrinsicID()) {
+    default: llvm_unreachable("Unexpected intrinsic!");
+    case Intrinsic::init_trampoline:
+      return II->getArgOperand(0);
+    }
   }
+
+  CallSite CS = I;
+  // All the supported functions so far happen to have dest as their first
+  // argument.
+  return CS.getArgument(0);
 }
 
-static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
-  const TargetData *TD = AA.getTargetData();
-  if (TD == 0)
-    return AliasAnalysis::UnknownSize;
-  
-  if (AllocaInst *A = dyn_cast<AllocaInst>(V)) {
-    // Get size information for the alloca
-    if (ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
-      return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
-    return AliasAnalysis::UnknownSize;
-  }
-  
-  assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
-  const PointerType *PT = cast<PointerType>(V->getType());
-  return TD->getTypeAllocSize(PT->getElementType());
+static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
+  uint64_t Size;
+  if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
+    return Size;
+  return AliasAnalysis::UnknownSize;
 }
 
-/// isObjectPointerWithTrustworthySize - Return true if the specified Value* is
-/// pointing to an object with a pointer size we can trust.
-static bool isObjectPointerWithTrustworthySize(const Value *V) {
-  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
-    return !AI->isArrayAllocation();
-  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
-    return !GV->isWeakForLinker();
-  if (const Argument *A = dyn_cast<Argument>(V))
-    return A->hasByValAttr();
-  return false;
+namespace {
+  enum OverwriteResult
+  {
+    OverwriteComplete,
+    OverwriteEnd,
+    OverwriteUnknown
+  };
 }
 
-/// isCompleteOverwrite - Return true if a store to the 'Later' location
+/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
 /// completely overwrites a store to the 'Earlier' location.
-static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
-                                const AliasAnalysis::Location &Earlier,
-                                AliasAnalysis &AA) {
+/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
+/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
+static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
+                                   const AliasAnalysis::Location &Earlier,
+                                   AliasAnalysis &AA,
+                                   int64_t &EarlierOff,
+                                   int64_t &LaterOff) {
   const Value *P1 = Earlier.Ptr->stripPointerCasts();
   const Value *P2 = Later.Ptr->stripPointerCasts();
-  
+
   // If the start pointers are the same, we just have to compare sizes to see if
   // the later store was larger than the earlier store.
   if (P1 == P2) {
@@ -283,67 +351,135 @@ static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
     // comparison.
     if (Later.Size == AliasAnalysis::UnknownSize ||
         Earlier.Size == AliasAnalysis::UnknownSize) {
-      // If we have no TargetData information around, then the size of the store
+      // If we have no DataLayout information around, then the size of the store
       // is inferrable from the pointee type.  If they are the same type, then
       // we know that the store is safe.
-      if (AA.getTargetData() == 0)
-        return Later.Ptr->getType() == Earlier.Ptr->getType();
-      return false;
+      if (AA.getDataLayout() == 0 &&
+          Later.Ptr->getType() == Earlier.Ptr->getType())
+        return OverwriteComplete;
+
+      return OverwriteUnknown;
     }
-    
+
     // Make sure that the Later size is >= the Earlier size.
-    if (Later.Size < Earlier.Size)
-      return false;
-    return true;
+    if (Later.Size >= Earlier.Size)
+      return OverwriteComplete;
   }
-  
+
   // Otherwise, we have to have size information, and the later store has to be
   // larger than the earlier one.
   if (Later.Size == AliasAnalysis::UnknownSize ||
       Earlier.Size == AliasAnalysis::UnknownSize ||
-      Later.Size <= Earlier.Size || AA.getTargetData() == 0)
-    return false;
-  
+      AA.getDataLayout() == 0)
+    return OverwriteUnknown;
+
   // Check to see if the later store is to the entire object (either a global,
   // an alloca, or a byval argument).  If so, then it clearly overwrites any
   // other store to the same object.
-  const TargetData &TD = *AA.getTargetData();
-  
-  const Value *UO1 = P1->getUnderlyingObject(), *UO2 = P2->getUnderlyingObject();
-  
+  const DataLayout *TD = AA.getDataLayout();
+
+  const Value *UO1 = GetUnderlyingObject(P1, TD),
+              *UO2 = GetUnderlyingObject(P2, TD);
+
   // If we can't resolve the same pointers to the same object, then we can't
   // analyze them at all.
   if (UO1 != UO2)
-    return false;
-  
+    return OverwriteUnknown;
+
   // If the "Later" store is to a recognizable object, get its size.
-  if (isObjectPointerWithTrustworthySize(UO2)) {
-    uint64_t ObjectSize =
-      TD.getTypeAllocSize(cast<PointerType>(UO2->getType())->getElementType());
-    if (ObjectSize == Later.Size)
-      return true;
-  }
-  
+  uint64_t ObjectSize = getPointerSize(UO2, AA);
+  if (ObjectSize != AliasAnalysis::UnknownSize)
+    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
+      return OverwriteComplete;
+
   // Okay, we have stores to two completely different pointers.  Try to
   // decompose the pointer into a "base + constant_offset" form.  If the base
   // pointers are equal, then we can reason about the two stores.
-  int64_t Off1 = 0, Off2 = 0;
-  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, Off1, TD);
-  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, Off2, TD);
-  
+  EarlierOff = 0;
+  LaterOff = 0;
+  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
+  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);
+
   // If the base pointers still differ, we have two completely different stores.
   if (BP1 != BP2)
+    return OverwriteUnknown;
+
+  // The later store completely overlaps the earlier store if:
+  //
+  // 1. Both start at the same offset and the later one's size is greater than
+  //    or equal to the earlier one's, or
+  //
+  //      |--earlier--|
+  //      |--   later   --|
+  //
+  // 2. The earlier store has an offset greater than the later offset, but which
+  //    still lies completely within the later store.
+  //
+  //        |--earlier--|
+  //    |-----  later  ------|
+  //
+  // We have to be careful here as *Off is signed while *.Size is unsigned.
+  if (EarlierOff >= LaterOff &&
+      Later.Size >= Earlier.Size &&
+      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
+    return OverwriteComplete;
+
+  // The other interesting case is if the later store overwrites the end of
+  // the earlier store
+  //
+  //      |--earlier--|
+  //                |--   later   --|
+  //
+  // In this case we may want to trim the size of earlier to avoid generating
+  // writes to addresses which will definitely be overwritten later
+  if (LaterOff > EarlierOff &&
+      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
+      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
+    return OverwriteEnd;
+
+  // Otherwise, they don't completely overlap.
+  return OverwriteUnknown;
+}
+
+/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
+/// memory region into an identical pointer) then it doesn't actually make its
+/// input dead in the traditional sense.  Consider this case:
+///
+///   memcpy(A <- B)
+///   memcpy(A <- A)
+///
+/// In this case, the second store to A does not make the first store to A dead.
+/// The usual situation isn't an explicit A<-A store like this (which can be
+/// trivially removed) but a case where two pointers may alias.
+///
+/// This function detects when it is unsafe to remove a dependent instruction
+/// because the DSE inducing instruction may be a self-read.
+static bool isPossibleSelfRead(Instruction *Inst,
+                               const AliasAnalysis::Location &InstStoreLoc,
+                               Instruction *DepWrite, AliasAnalysis &AA) {
+  // Self reads can only happen for instructions that read memory.  Get the
+  // location read.
+  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
+  if (InstReadLoc.Ptr == 0) return false;  // Not a reading instruction.
+
+  // If the read and written loc obviously don't alias, it isn't a read.
+  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;
+
+  // Okay, 'Inst' may copy over itself.  However, we can still remove the
+  // DepWrite instruction if we can prove that it reads from the same location
+  // as Inst.  This handles useful cases like:
+  //   memcpy(A <- B)
+  //   memcpy(A <- B)
+  // Here we don't know if A/B may alias, but we do know that B/B are must
+  // aliases, so removing the first memcpy is safe (assuming it writes <= #
+  // bytes as the second one).
+  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
+
+  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
     return false;
-  
-  // Otherwise, we might have a situation like:
-  //  store i16 -> P + 1 Byte
-  //  store i32 -> P
-  // In this case, we see if the later store completely overlaps all bytes
-  // stored by the previous store.
-  if (Off1 < Off2 ||                       // Earlier starts before Later.
-      Off1+Earlier.Size > Off2+Later.Size) // Earlier goes beyond Later.
-    return false;
-  // Otherwise, we have complete overlap.
+
+  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
+  // then it can't be considered dead.
   return true;
 }
 
@@ -354,43 +490,43 @@ static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
 bool DSE::runOnBasicBlock(BasicBlock &BB) {
   bool MadeChange = false;
-  
+
   // Do a top-down walk on the BB.
   for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
     Instruction *Inst = BBI++;
-    
+
     // Handle 'free' calls specially.
-    if (CallInst *F = isFreeCall(Inst)) {
+    if (CallInst *F = isFreeCall(Inst, TLI)) {
       MadeChange |= HandleFree(F);
       continue;
     }
-    
+
     // If we find something that writes memory, get its memory dependence.
-    if (!hasMemoryWrite(Inst))
+    if (!hasMemoryWrite(Inst, TLI))
       continue;
     MemDepResult InstDep = MD->getDependency(Inst);
-    
-    // Ignore non-local store liveness.
+
+    // Ignore any store where we can't find a local dependence.
     // FIXME: cross-block DSE would be fun. :)
-    if (InstDep.isNonLocal() ||
-        // Ignore self dependence, which happens in the entry block of the
-        // function.
-        InstDep.getInst() == Inst)
+    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;
-    
+
     // If we're storing the same value back to a pointer that we just
     // loaded from, then the store can be removed.
     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
       if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
         if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
-            SI->getOperand(0) == DepLoad && !SI->isVolatile()) {
+            SI->getOperand(0) == DepLoad && isRemovable(SI)) {
+          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
+                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');
+
           // DeleteDeadInstruction can delete the current instruction.  Save BBI
           // in case we need it.
          WeakVH NextInst(BBI);
-          
-          DeleteDeadInstruction(SI, *MD);
-          
+
+          DeleteDeadInstruction(SI, *MD, TLI);
+
          if (NextInst == 0)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
@@ -401,44 +537,79 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
        }
      }
    }
-    
+
    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
 
    // If we didn't get a useful location, fail.
    if (Loc.Ptr == 0)
      continue;
-    
-    while (!InstDep.isNonLocal()) {
+
+    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
-      // Find out what memory location the dependant instruction stores.
+      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (DepLoc.Ptr == 0)
        break;
 
-      // If we find a removable write that is completely obliterated by the
-      // store to 'Loc' then we can remove it.
-      if (isRemovable(DepWrite) && isCompleteOverwrite(Loc, DepLoc, *AA)) {
-        // Delete the store and now-dead instructions that feed it.
-        DeleteDeadInstruction(DepWrite, *MD);
-        ++NumFastStores;
-        MadeChange = true;
-        
-        // DeleteDeadInstruction can delete the current instruction in loop
-        // cases, reset BBI.
-        BBI = Inst;
-        if (BBI != BB.begin())
-          --BBI;
-        break;
+      // If we find a write that is a) removable (i.e., non-volatile), b) is
+      // completely obliterated by the store to 'Loc', and c) which we know that
+      // 'Inst' doesn't load from, then we can remove it.
+      if (isRemovable(DepWrite) &&
+          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
+        int64_t InstWriteOffset, DepWriteOffset;
+        OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
+                                         DepWriteOffset, InstWriteOffset);
+        if (OR == OverwriteComplete) {
+          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
+                << *DepWrite << "\n  KILLER: " << *Inst << '\n');
+
+          // Delete the store and now-dead instructions that feed it.
+          DeleteDeadInstruction(DepWrite, *MD, TLI);
+          ++NumFastStores;
+          MadeChange = true;
+
+          // DeleteDeadInstruction can delete the current instruction in loop
+          // cases, reset BBI.
+          BBI = Inst;
+          if (BBI != BB.begin())
+            --BBI;
+          break;
+        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
+          // TODO: base this on the target vector size so that if the earlier
+          // store was too small to get vector writes anyway then it's likely
+          // a good idea to shorten it
+          // Power of 2 vector writes are probably always a bad idea to optimize
+          // as any store/memset/memcpy is likely using vector instructions so
+          // shortening it to not vector size is likely to be slower
+          MemIntrinsic* DepIntrinsic = cast<MemIntrinsic>(DepWrite);
+          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
+          if (llvm::isPowerOf2_64(InstWriteOffset) ||
+              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {
+
+            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
+                  << *DepWrite << "\n  KILLER (offset "
+                  << InstWriteOffset << ", "
+                  << DepLoc.Size << ")"
+                  << *Inst << '\n');
+
+            Value* DepWriteLength = DepIntrinsic->getLength();
+            Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
+                                                    InstWriteOffset -
+                                                    DepWriteOffset);
+            DepIntrinsic->setLength(TrimmedLength);
+            MadeChange = true;
+          }
+        }
      }
-      
+
      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
@@ -448,55 +619,102 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;
-      
+
      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;
-      
+
      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }
-  
+
  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);
-  
+
  return MadeChange;
}
 
+/// Find all blocks that will unconditionally lead to the block BB and append
+/// them to Blocks.
+static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
+                                   BasicBlock *BB, DominatorTree *DT) {
+  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
+    BasicBlock *Pred = *I;
+    if (Pred == BB) continue;
+    TerminatorInst *PredTI = Pred->getTerminator();
+    if (PredTI->getNumSuccessors() != 1)
+      continue;
+
+    if (DT->isReachableFromEntry(Pred))
+      Blocks.push_back(Pred);
+  }
+}
+
 /// HandleFree - Handle frees of entire structures whose dependency is a store
 /// to a field of that structure.
 bool DSE::HandleFree(CallInst *F) {
-  MemDepResult Dep = MD->getDependency(F);
-  do {
-    if (Dep.isNonLocal()) return false;
-    
-    Instruction *Dependency = Dep.getInst();
-    if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
-      return false;
-  
-    Value *DepPointer =
-      getStoredPointerOperand(Dependency)->getUnderlyingObject();
+  bool MadeChange = false;
 
-    // Check for aliasing.
-    if (AA->alias(F->getArgOperand(0), 1, DepPointer, 1) !=
-           AliasAnalysis::MustAlias)
-      return false;
-  
-    // DCE instructions only used to calculate that store
-    DeleteDeadInstruction(Dependency, *MD);
-    ++NumFastStores;
-
-    // Inst's old Dependency is now deleted. Compute the next dependency,
-    // which may also be dead, as in
-    //    s[0] = 0;
-    //    s[1] = 0; // This has just been deleted.
-    //    free(s);
-    Dep = MD->getDependency(F);
-  } while (!Dep.isNonLocal());
-  
-  return true;
+  AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
+  SmallVector<BasicBlock *, 16> Blocks;
+  Blocks.push_back(F->getParent());
+
+  while (!Blocks.empty()) {
+    BasicBlock *BB = Blocks.pop_back_val();
+    Instruction *InstPt = BB->getTerminator();
+    if (BB == F->getParent()) InstPt = F;
+
+    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
+    while (Dep.isDef() || Dep.isClobber()) {
+      Instruction *Dependency = Dep.getInst();
+      if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency))
+        break;
+
+      Value *DepPointer =
+        GetUnderlyingObject(getStoredPointerOperand(Dependency));
+
+      // Check for aliasing.
+      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
+        break;
+
+      Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
+
+      // DCE instructions only used to calculate that store
+      DeleteDeadInstruction(Dependency, *MD, TLI);
+      ++NumFastStores;
+      MadeChange = true;
+
+      // Inst's old Dependency is now deleted. Compute the next dependency,
+      // which may also be dead, as in
+      //    s[0] = 0;
+      //    s[1] = 0; // This has just been deleted.
+      //    free(s);
+      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
+    }
+
+    if (Dep.isNonLocal())
+      FindUnconditionalPreds(Blocks, BB, DT);
+  }
+
+  return MadeChange;
+}
+
+namespace {
+  struct CouldRef {
+    typedef Value *argument_type;
+    const CallSite CS;
+    AliasAnalysis *AA;
+
+    bool operator()(Value *I) {
+      // See if the call site touches the value.
+      AliasAnalysis::ModRefResult A =
+        AA->getModRefInfo(CS, I, getPointerSize(I, *AA));
+
+      return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
+    }
+  };
 }
 
 /// handleEndBlock - Remove dead stores to stack-allocated locations in the
@@ -507,114 +725,128 @@ bool DSE::HandleFree(CallInst *F) {
 ///   ret void
 bool DSE::handleEndBlock(BasicBlock &BB) {
   bool MadeChange = false;
-  
+
   // Keep track of all of the stack objects that are dead at the end of the
   // function.
-  SmallPtrSet<Value*, 16> DeadStackObjects;
-  
+  SmallSetVector<Value*, 16> DeadStackObjects;
+
   // Find all of the alloca'd pointers in the entry block.
   BasicBlock *Entry = BB.getParent()->begin();
-  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
-    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
-      DeadStackObjects.insert(AI);
-  
+  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
+    if (isa<AllocaInst>(I))
+      DeadStackObjects.insert(I);
+
+    // Okay, so these are dead heap objects, but if the pointer never escapes
+    // then it's leaked by this function anyways.
+    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
+      DeadStackObjects.insert(I);
+  }
+
   // Treat byval arguments the same, stores to them are dead at the end of the
   // function.
   for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);
-  
+
  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;
-    
+
    // If we find a store, check to see if it points into a dead stack value.
-    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
+    if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts
-      Value *Pointer = getStoredPointerOperand(BBI)->getUnderlyingObject();
+      SmallVector<Value *, 4> Pointers;
+      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);
 
      // Stores to stack values are valid candidates for removal.
-      if (DeadStackObjects.count(Pointer)) {
-        // DCE instructions only used to calculate that store.
+      bool AllDead = true;
+      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
+           E = Pointers.end(); I != E; ++I)
+        if (!DeadStackObjects.count(*I)) {
+          AllDead = false;
+          break;
+        }
+
+      if (AllDead) {
        Instruction *Dead = BBI++;
-        DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
+
+        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
+                     << *Dead << "\n  Objects: ";
+              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
+                   E = Pointers.end(); I != E; ++I) {
+                dbgs() << **I;
+                if (llvm::next(I) != E)
+                  dbgs() << ", ";
+              }
+              dbgs() << '\n');
+
+        // DCE instructions only used to calculate that store.
+        DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }
-    
+
    // Remove any dead non-memory-mutating instructions.
-    if (isInstructionTriviallyDead(BBI)) {
+    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
-      DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
+      DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }
-    
-    if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
-      DeadStackObjects.erase(A);
+
+    if (isa<AllocaInst>(BBI)) {
+      // Remove allocas from the list of dead stack objects; there can't be
+      // any references before the definition.
+      DeadStackObjects.remove(BBI);
      continue;
    }
-    
+
    if (CallSite CS = cast<Instruction>(BBI)) {
+      // Remove allocation function calls from the list of dead stack objects;
+      // there can't be any references before the definition.
+      if (isAllocLikeFn(BBI, TLI))
+        DeadStackObjects.remove(BBI);
+
      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;
-      
-      unsigned NumModRef = 0, NumOther = 0;
-      
+
      // If the call might load from any of our allocas, then any store above
      // the call is live.
-      SmallVector<Value*, 8> LiveAllocas;
-      for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
-           E = DeadStackObjects.end(); I != E; ++I) {
-        // If we detect that our AA is imprecise, it's not worth it to scan the
-        // rest of the DeadPointers set.  Just assume that the AA will return
-        // ModRef for everything, and go ahead and bail out.
-        if (NumModRef >= 16 && NumOther == 0)
-          return MadeChange;
-        
-        // See if the call site touches it.
-        AliasAnalysis::ModRefResult A =
-          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));
-        
-        if (A == AliasAnalysis::ModRef)
-          ++NumModRef;
-        else
-          ++NumOther;
-        
-        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
-          LiveAllocas.push_back(*I);
-      }
-      
-      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
-           E = LiveAllocas.end(); I != E; ++I)
-        DeadStackObjects.erase(*I);
-      
+      CouldRef Pred = { CS, AA };
+      DeadStackObjects.remove_if(Pred);
+
      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
-        return MadeChange;
-      
+        break;
+
      continue;
    }
-    
+
    AliasAnalysis::Location LoadedLoc;
-    
+
    // If we encounter a use of the pointer, it is no longer considered dead
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
+      if (!L->isUnordered()) // Be conservative with atomic/volatile load
+        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
-    } else {
-      // Not a loading instruction.
+    } else if (!BBI->mayReadFromMemory()) {
+      // Instruction doesn't read memory.  Note that stores that weren't removed
+      // above will hit this case.
      continue;
+    } else {
+      // Unknown inst; assume it clobbers everything.
+      break;
    }
 
    // Remove any allocas from the DeadPointer set that are loaded, as this
@@ -626,39 +858,43 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
    if (DeadStackObjects.empty())
      break;
  }
-  
+
  return MadeChange;
}
 
+namespace {
+  struct CouldAlias {
+    typedef Value *argument_type;
+    const AliasAnalysis::Location &LoadedLoc;
+    AliasAnalysis *AA;
+
+    bool operator()(Value *I) {
+      // See if the loaded location could alias the stack location.
+      AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
+      return !AA->isNoAlias(StackLoc, LoadedLoc);
+    }
+  };
+}
+
 /// RemoveAccessedObjects - Check to see if the specified location may alias any
 /// of the stack objects in the DeadStackObjects set.  If so, they become live
 /// because the location is being loaded.
 void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
-                                SmallPtrSet<Value*, 16> &DeadStackObjects) {
-  const Value *UnderlyingPointer = LoadedLoc.Ptr->getUnderlyingObject();
+                                SmallSetVector<Value*, 16> &DeadStackObjects) {
+  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);
 
   // A constant can't be in the dead pointer set.
   if (isa<Constant>(UnderlyingPointer))
     return;
-  
+
   // If the kill pointer can be easily reduced to an alloca, don't bother doing
   // extraneous AA queries.
   if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
-    DeadStackObjects.erase(const_cast<Value*>(UnderlyingPointer));
+    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
     return;
   }
-  
-  SmallVector<Value*, 16> NowLive;
-  for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
-       E = DeadStackObjects.end(); I != E; ++I) {
-    // See if the loaded location could alias the stack location.
-    AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
-    if (!AA->isNoAlias(StackLoc, LoadedLoc))
-      NowLive.push_back(*I);
-  }
 
-  for (SmallVector<Value*, 16>::iterator I = NowLive.begin(), E = NowLive.end();
-       I != E; ++I)
-    DeadStackObjects.erase(*I);
+  // Remove objects that could alias LoadedLoc.
+  CouldAlias Pred = { LoadedLoc, AA };
+  DeadStackObjects.remove_if(Pred);
 }
-
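
The heart of this patch is the change from the boolean isCompleteOverwrite() to the three-way isOverwrite(): besides complete overwrites, it now recognizes when a later store covers only the tail of an earlier one, so the earlier memset/memcpy can be shortened instead of deleted. The following is a minimal standalone sketch (not part of the patch; the function name is illustrative) of the interval check once both writes have been reduced to an (offset, size) pair against a common base pointer:

  #include <cstdint>

  enum OverwriteResult { OverwriteComplete, OverwriteEnd, OverwriteUnknown };

  // Mirrors the arithmetic in isOverwrite(); offsets are signed, sizes unsigned.
  static OverwriteResult classifyOverwrite(int64_t EarlierOff, uint64_t EarlierSize,
                                           int64_t LaterOff, uint64_t LaterSize) {
    // Complete overwrite: every byte of the earlier store is rewritten.
    //      |--earlier--|
    //    |------later------|
    if (EarlierOff >= LaterOff && LaterSize >= EarlierSize &&
        uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
      return OverwriteComplete;

    // End overwrite: the later store rewrites the tail of the earlier one,
    // so the earlier intrinsic can be trimmed to LaterOff - EarlierOff bytes.
    //      |--earlier--|
    //              |--later--|
    if (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
        int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize))
      return OverwriteEnd;

    return OverwriteUnknown;
  }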
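At the source level, the new OverwriteEnd path fires on patterns like the hypothetical function below: the memcpy overwrites the last 16 bytes of the memset, so instead of deleting the memset the pass rewrites its length operand to InstWriteOffset - DepWriteOffset (16 here), subject to the alignment/power-of-two guard in the patch:

  #include <string.h>

  void overwrite_end(char *dst, const char *src) {
    char buf[32];
    memset(buf, 0, sizeof(buf));  // earlier write, covers [0, 32)
    memcpy(buf + 16, src, 16);    // later write, covers [16, 32)
    // After DSE the memset is shortened to 16 bytes; it cannot be deleted
    // outright because its first half is still read below.
    memcpy(dst, buf, sizeof(buf));
  }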
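The other removals this file performs can be seen in a small hypothetical example (illustrative only; each case assumes alias analysis can prove the intervening accesses don't interfere). The comments mark which check in runOnBasicBlock()/handleEndBlock() fires:

  int global;

  int examples(int *p) {
    int tmp = global;
    global = tmp;    // dead: storing a just-loaded value back to the same
                     //       pointer (the store-of-load check)

    *p = 1;          // dead: completely overwritten by the store below
    *p = 2;          //       (isOverwrite() returns OverwriteComplete)

    int local[4];
    local[0] = 42;   // dead: store to a non-escaping stack object right
                     //       before return (handleEndBlock)
    return *p;
  }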
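Finally, the reworked HandleFree() removes stores whose only effect is on memory that is immediately freed, and the new FindUnconditionalPreds() lets it keep searching across predecessor blocks that branch unconditionally into the block containing the free. A minimal hypothetical input for the single-block case:

  #include <stdlib.h>

  void free_example(int *s) {
    s[0] = 0;   // dead: s is freed below with no intervening read
    s[1] = 0;   // dead for the same reason
    free(s);
  }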