//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
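//
// A minimal illustration (hypothetical IR): the first store below is dead
// because the second store overwrites the same location before any read:
//
//   store i32 1, i32* %p
//   store i32 2, i32* %p
//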
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "dse"
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther , "Number of other instrs removed");
namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetLibraryInfo *TLI;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override {
      if (skipOptnoneFunction(F))
        return false;

      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      TLI = AA->getTargetLibraryInfo();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT->isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = nullptr; MD = nullptr; DT = nullptr;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallSetVector<Value*, 16> &DeadStackObjects);

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  const TargetLibraryInfo *TLI,
                                  SmallSetVector<Value*, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;
  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, TLI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->remove(DeadInst);
  } while (!NowDeadInsts.empty());
}
/// hasMemoryWrite - Does this instruction write some memory?  This only
/// returns true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (CallSite CS = I) {
    if (Function *F = CS.getCalledFunction()) {
      if (TLI && TLI->has(LibFunc::strcpy) &&
          F->getName() == TLI->getName(LibFunc::strcpy)) {
        return true;
      }
      if (TLI && TLI->has(LibFunc::strncpy) &&
          F->getName() == TLI->getName(LibFunc::strncpy)) {
        return true;
      }
      if (TLI && TLI->has(LibFunc::strcat) &&
          F->getName() == TLI->getName(LibFunc::strcat)) {
        return true;
      }
      if (TLI && TLI->has(LibFunc::strncat) &&
          F->getName() == TLI->getName(LibFunc::strncat)) {
        return true;
      }
    }
  }
  return false;
}
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
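/// For example (an illustrative sketch, not taken from the code below): with
/// DataLayout available, "store i32 %v, i32* %p" yields the location (%p, 4),
/// and a memset of 16 bytes through %p yields (%p, 16).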
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  const DataLayout *DL = AA.getDataLayout();
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // memset/memcpy, which writes more than an i8.
    if (Loc.Size == AliasAnalysis::UnknownSize && DL == nullptr)
      return AliasAnalysis::Location();
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // init.trampoline, which writes more than an i8.
    if (!DL) return AliasAnalysis::Location();

    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}
/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
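/// For example (illustrative): for a memcpy the read location is the source
/// pointer together with the copied length; a plain store reads nothing.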
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
  assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
         "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return AA.getLocationForSource(MTI);
  return AliasAnalysis::Location();
}
/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;

    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  // Library calls recognized by hasMemoryWrite are removable if their result
  // is unused.
  if (CallSite CS = I)
    return CS.getInstruction()->use_empty();

  return false;
}
/// isShortenable - Returns true if this instruction can be safely shortened in
/// length.
static bool isShortenable(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
        // Do shorten memory intrinsics.
        return true;
    }
  }

  // Don't shorten libcalls calls for now.
  return false;
}
/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS = I;
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}
static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
  uint64_t Size;
  if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo()))
    return Size;
  return AliasAnalysis::UnknownSize;
}
namespace {
  enum OverwriteResult {
    OverwriteComplete,
    OverwriteEnd,
    OverwriteUnknown
  };
}

/// isOverwrite - Return 'OverwriteComplete' if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location,
/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined.
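/// For example (illustrative): if the earlier store writes bytes [4,8) of an
/// object and the later store writes bytes [0,16), the earlier store is
/// completely overwritten; if the later store writes [6,16) instead, only the
/// end of the earlier store is overwritten.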
static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
                                   const AliasAnalysis::Location &Earlier,
                                   AliasAnalysis &AA,
                                   int64_t &EarlierOff,
                                   int64_t &LaterOff) {
  const DataLayout *DL = AA.getDataLayout();
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see
  // if the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize) {
      // If we have no DataLayout information around, then the size of the
      // store is inferrable from the pointee type.  If they are the same type,
      // then we know that the store is safe.
      if (DL == nullptr && Later.Ptr->getType() == Earlier.Ptr->getType())
        return OverwriteComplete;

      return OverwriteUnknown;
    }

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OverwriteComplete;
  }
  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize || DL == nullptr)
    return OverwriteUnknown;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OverwriteUnknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, AA);
  if (ObjectSize != AliasAnalysis::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OverwriteComplete;
  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different
  // stores.
  if (BP1 != BP2)
    return OverwriteUnknown;
  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but
  //    which still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OverwriteComplete;
  // The other interesting case is if the later store overwrites the end of
  // the earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (LaterOff > EarlierOff &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size))
    return OverwriteEnd;

  // Otherwise, they don't completely overlap.
  return OverwriteUnknown;
}
/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const AliasAnalysis::Location &InstStoreLoc,
                               Instruction *DepWrite, AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  //
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}
//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst, TLI)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst, TLI))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
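    // For example (a hypothetical snippet of IR):
    //   %v = load i32* %p
    //   store i32 %v, i32* %p   ; noop store of the value just loaded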
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && isRemovable(SI)) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save
          // BBI in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD, TLI);

          if (!NextInst)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }
    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (!DepLoc.Ptr)
        break;
      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, *AA,
                                         DepWriteOffset, InstWriteOffset);
        if (OR == OverwriteComplete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          DeleteDeadInstruction(DepWrite, *MD, TLI);
          ++NumFastStores;
          MadeChange = true;

          // DeleteDeadInstruction can delete the current instruction in loop
          // cases, reset BBI.
          BBI = Inst;
          if (BBI != BB.begin())
            --BBI;
          break;
        } else if (OR == OverwriteEnd && isShortenable(DepWrite)) {
          // TODO: base this on the target vector size so that if the earlier
          // store was too small to get vector writes anyway then it's likely
          // a good idea to shorten it.
          // Power-of-2 vector writes are probably always a bad idea to
          // optimize, as any store/memset/memcpy is likely using vector
          // instructions, so shortening it to a non-vector size is likely to
          // be slower.
          MemIntrinsic* DepIntrinsic = cast<MemIntrinsic>(DepWrite);
          unsigned DepWriteAlign = DepIntrinsic->getAlignment();
          if (llvm::isPowerOf2_64(InstWriteOffset) ||
              ((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {

            DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW END: "
                  << *DepWrite << "\n  KILLER (offset "
                  << InstWriteOffset << ", "
                  << DepLoc.Size << ")"
                  << *Inst << '\n');

            // Trim the dependent intrinsic so it stops writing just where the
            // later store begins.
            Value* DepWriteLength = DepIntrinsic->getLength();
            Value* TrimmedLength = ConstantInt::get(DepWriteLength->getType(),
                                                    InstWriteOffset -
                                                    DepWriteOffset);
            DepIntrinsic->setLength(TrimmedLength);
            MadeChange = true;
          }
        }
      }
      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }
  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}
/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}
/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
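/// For example (a hypothetical snippet of IR), the store below is dead because
/// the object is freed before the stored value can be read:
///   store i32 0, i32* %field   ; store into the object being freed
///   call void @free(i8* %s)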
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency));

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      Instruction *Next = std::next(BasicBlock::iterator(Dependency));

      // DCE instructions only used to calculate that store.
      DeleteDeadInstruction(Dependency, *MD, TLI);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted.  Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, Next, BB);
    }

    if (Dep.isNonLocal())
      FindUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // block.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I) {
    if (isa<AllocaInst>(I))
      DeadStackObjects.insert(I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
      DeadStackObjects.insert(I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValOrInAllocaAttr())
      DeadStackObjects.insert(AI);
  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
           E = Pointers.end(); I != E; ++I)
        if (!DeadStackObjects.count(*I)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }
    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI, TLI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(BBI);
      continue;
    }
    if (CallSite CS = cast<Value>(BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(BBI, TLI))
        DeadStackObjects.remove(BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        AliasAnalysis::ModRefResult A =
            AA->getModRefInfo(CS, I, getPointerSize(I, *AA));

        return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref;
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }
    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set.  If so, they become live
/// because the location is being loaded.
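/// For example (illustrative): a "load i32* %q" where %q may point into an
/// alloca in the set forces that alloca out of the set, keeping earlier stores
/// to it alive.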
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                                SmallSetVector<Value*, 16> &DeadStackObjects) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}