//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
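//
// For example (illustrative IR, not from a test case), in:
//
//   store i32 0, i32* %p
//   store i32 1, i32* %p
//
// the first store is dead because the second one overwrites the same location
// within the same basic block, so DSE deletes it.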
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "dse"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");

namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(0), MD(0) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnFunction(Function &F) {
      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DominatorTree &DT = getAnalysis<DominatorTree>();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT.isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = 0; MD = 0;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallPtrSet<Value*, 16> &DeadStackObjects);

    // getAnalysisUsage - This pass requires dominator tree, alias analysis,
    // and memory dependence information, and it preserves the CFG.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
}

char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
101 /// DeleteDeadInstruction - Delete this instruction. Before we do, go through
102 /// and zero out all the operands of this instruction. If any of them become
103 /// dead, delete them and the computation tree that feeds them.
105 /// If ValueSet is non-null, remove any deleted instructions from it as well.
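///
/// For example (illustrative): deleting a store whose stored value is an add
/// with no other uses will delete the add as well.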
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  SmallPtrSet<Value*, 16> *ValueSet = 0) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;  // The initial instruction is counted by our caller.

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, 0);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->erase(DeadInst);
  } while (!NowDeadInsts.empty());
}

/// hasMemoryWrite - Does this instruction write some memory?  This only
/// returns true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  return false;
}

/// getLocForWrite - Return a Location stored to by the specified instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // memset/memcpy, which write more than an i8.
    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
      return AliasAnalysis::Location();
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (II == 0) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // init.trampoline, which writes more than an i8.
    if (AA.getTargetData() == 0) return AliasAnalysis::Location();

    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}

/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction, if any.
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
  assert(hasMemoryWrite(Inst) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return AA.getLocationForSource(MTI);
  return AliasAnalysis::Location();
}

/// isRemovable - If the value of this instruction and the memory it writes to
/// are unused, may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return !SI->isVolatile();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: assert(0 && "doesn't pass 'hasMemoryWrite' predicate");
  case Intrinsic::lifetime_end:
    // Never remove dead lifetime_end's, e.g. because it is followed by a
    // free.
    return false;
  case Intrinsic::init_trampoline:
    // Always safe to remove init_trampoline.
    return true;

  case Intrinsic::memset:
  case Intrinsic::memmove:
  case Intrinsic::memcpy:
    // Don't remove volatile memory intrinsics.
    return !cast<MemIntrinsic>(II)->isVolatile();
  }
}

/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: assert(false && "Unexpected intrinsic!");
  case Intrinsic::init_trampoline:
    return II->getArgOperand(0);
  }
}

/// getPointerSize - Return the size in bytes of the object pointed to by V,
/// which must be an alloca or an argument, or UnknownSize if it can't be
/// determined.
static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
  const TargetData *TD = AA.getTargetData();
  if (TD == 0)
    return AliasAnalysis::UnknownSize;

  if (AllocaInst *A = dyn_cast<AllocaInst>(V)) {
    // Get size information for the alloca: element count times element size.
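    // (Illustrative: "alloca i32, i32 4" with a 4-byte i32 is 4 * 4 = 16 bytes.)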
    if (ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
      return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
    return AliasAnalysis::UnknownSize;
  }

  assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
  const PointerType *PT = cast<PointerType>(V->getType());
  return TD->getTypeAllocSize(PT->getElementType());
}

/// isObjectPointerWithTrustworthySize - Return true if the specified Value* is
/// pointing to an object with a pointer size we can trust.
static bool isObjectPointerWithTrustworthySize(const Value *V) {
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
    return !AI->isArrayAllocation();
  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return !GV->mayBeOverridden();
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasByValAttr();
  return false;
}

/// isCompleteOverwrite - Return true if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location.
static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
                                const AliasAnalysis::Location &Earlier,
                                AliasAnalysis &AA) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
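  // For example (illustrative), a later four-byte i32 store completely covers
  // an earlier one-byte i8 store to the same address.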
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize) {
      // If we have no TargetData information around, then the size of the
      // store is inferrable from the pointee type.  If they are the same type,
      // then we know that the store is safe.
      if (AA.getTargetData() == 0)
        return Later.Ptr->getType() == Earlier.Ptr->getType();
      return false;
    }

    // Make sure that the Later size is >= the Earlier size.
    return Later.Size >= Earlier.Size;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize ||
      Later.Size <= Earlier.Size || AA.getTargetData() == 0)
    return false;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval argument).  If so, then it clearly overwrites any
  // other store to the same object.
  const TargetData &TD = *AA.getTargetData();

  const Value *UO1 = GetUnderlyingObject(P1), *UO2 = GetUnderlyingObject(P2);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return false;

  // If the "Later" store is to a recognizable object, get its size.
  if (isObjectPointerWithTrustworthySize(UO2)) {
    uint64_t ObjectSize =
      TD.getTypeAllocSize(cast<PointerType>(UO2->getType())->getElementType());
    if (ObjectSize == Later.Size)
      return true;
  }

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointers into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  int64_t Off1 = 0, Off2 = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, Off1, TD);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, Off2, TD);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return false;

  // Otherwise, we might have a situation like:
  //  store i16 -> P + 1 Byte
  //  store i32 -> P
  // In this case, we see if the later store completely overlaps all bytes
  // stored by the previous store.
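  // (Illustrative numbers: Off1 = 1, Earlier.Size = 2, Off2 = 0, Later.Size = 4;
  // the earlier bytes [1,3) fall inside the later bytes [0,4), so the earlier
  // store is completely overwritten and we return true.)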
  if (Off1 < Off2 ||                       // Earlier starts before Later.
      Off1+Earlier.Size > Off2+Later.Size) // Earlier goes beyond Later.
    return false;
  // Otherwise, we have complete overlap.
  return true;
}

/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const AliasAnalysis::Location &InstStoreLoc,
                               Instruction *DepWrite, AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
  if (InstReadLoc.Ptr == 0) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //    memcpy(A <- B)
  //    memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore non-local store liveness.
    // FIXME: cross-block DSE would be fun. :)
    if (InstDep.isNonLocal() ||
        // Ignore self dependence, which happens in the entry block of the
        // function.
        InstDep.getInst() == Inst)
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
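    // For example (illustrative IR):
    //   %x = load i32* %p
    //   store i32 %x, i32* %p   ; removable: stores back the value just loaded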
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && !SI->isVolatile()) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save BBI
          // in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD);

          if (NextInst == 0)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }

    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (Loc.Ptr == 0)
      continue;

    while (!InstDep.isNonLocal()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
      if (DepLoc.Ptr == 0)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b)
      // completely obliterated by the store to 'Loc', and c) not read by
      // 'Inst', then we can remove it.
      if (isRemovable(DepWrite) && isCompleteOverwrite(Loc, DepLoc, *AA) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
        DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
              << *DepWrite << "\n  KILLER: " << *Inst << '\n');

        // Delete the store and now-dead instructions that feed it.
        DeleteDeadInstruction(DepWrite, *MD);
        ++NumFastStores;
        MadeChange = true;

        // DeleteDeadInstruction can delete the current instruction in loop
        // cases, so reset BBI.
        BBI = Inst;
        if (BBI != BB.begin())
          --BBI;
        break;
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}

/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
bool DSE::HandleFree(CallInst *F) {
  MemDepResult Dep = MD->getDependency(F);
  do {
    if (Dep.isNonLocal()) return false;

    Instruction *Dependency = Dep.getInst();
    if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
      return false;

    Value *DepPointer =
      GetUnderlyingObject(getStoredPointerOperand(Dependency));

    // Check for aliasing.
    if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
      return false;

    // DCE instructions only used to calculate that store.
    DeleteDeadInstruction(Dependency, *MD);
    ++NumFastStores;

    // Inst's old Dependency is now deleted.  Compute the next dependency,
    // which may also be dead, as in
    //    s[0] = 0;
    //    s[1] = 0;  // This has just been deleted.
    //    free(s);
    Dep = MD->getDependency(F);
  } while (!Dep.isNonLocal());

  return true;
}

/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallPtrSet<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeadStackObjects.insert(AI);

  // Treat byval arguments the same; stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);
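  // (A byval argument is the callee's own stack copy, so stores into it cannot
  // be observed after the function returns.)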

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      Value *Pointer = GetUnderlyingObject(getStoredPointerOperand(BBI));

      // Stores to stack values are valid candidates for removal.
      if (DeadStackObjects.count(Pointer)) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Object: " << *Pointer << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    // When we reach the alloca itself, nothing before it in the block can
    // store to it, so stop tracking it.
    if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
      DeadStackObjects.erase(A);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      unsigned NumModRef = 0, NumOther = 0;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      SmallVector<Value*, 8> LiveAllocas;
      for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
           E = DeadStackObjects.end(); I != E; ++I) {
        // If we detect that our AA is imprecise, it's not worth it to scan the
        // rest of the DeadPointers set.  Just assume that the AA will return
        // ModRef for everything, and go ahead and bail out.
        if (NumModRef >= 16 && NumOther == 0)
          return MadeChange;

        // See if the call site touches it.
        AliasAnalysis::ModRefResult A =
          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));

        if (A == AliasAnalysis::ModRef)
          ++NumModRef;
        else
          ++NumOther;

        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          LiveAllocas.push_back(*I);
      }

      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
           E = LiveAllocas.end(); I != E; ++I)
        DeadStackObjects.erase(*I);

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        return MadeChange;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else {
      // Not a loading instruction.
      continue;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set.  If so, they become live
/// because the location is being loaded.
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                                SmallPtrSet<Value*, 16> &DeadStackObjects) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.erase(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  SmallVector<Value*, 16> NowLive;
  for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
       E = DeadStackObjects.end(); I != E; ++I) {
    // See if the loaded location could alias the stack location.
    AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
    if (!AA->isNoAlias(StackLoc, LoadedLoc))
      NowLive.push_back(*I);
  }

  for (SmallVector<Value*, 16>::iterator I = NowLive.begin(), E = NowLive.end();
       I != E; ++I)
    DeadStackObjects.erase(*I);
}