1 //===- GVN.cpp - Eliminate redundant values and loads ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass performs global value numbering to eliminate fully redundant
11 // instructions. It also performs simple dead load elimination.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "gvn"
16 #include "llvm/Transforms/Scalar.h"
17 #include "llvm/BasicBlock.h"
18 #include "llvm/Constants.h"
19 #include "llvm/DerivedTypes.h"
20 #include "llvm/Function.h"
21 #include "llvm/IntrinsicInst.h"
22 #include "llvm/Instructions.h"
23 #include "llvm/ParameterAttributes.h"
24 #include "llvm/Value.h"
25 #include "llvm/ADT/BitVector.h"
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/ADT/DepthFirstIterator.h"
28 #include "llvm/ADT/SmallPtrSet.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/Statistic.h"
31 #include "llvm/Analysis/Dominators.h"
32 #include "llvm/Analysis/AliasAnalysis.h"
33 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
34 #include "llvm/Support/CFG.h"
35 #include "llvm/Support/Compiler.h"
36 #include "llvm/Target/TargetData.h"
39 STATISTIC(NumGVNInstr, "Number of instructions deleted");
40 STATISTIC(NumGVNLoad, "Number of loads deleted");
41 STATISTIC(NumMemSetInfer, "Number of memsets inferred");
44 //===----------------------------------------------------------------------===//
46 //===----------------------------------------------------------------------===//
48 /// This class holds the mapping between values and value numbers. It is used
49 /// as an efficient mechanism to determine the expression-wise equivalence of
// two values: structurally identical expressions hash/compare equal and so
// receive the same value number in ValueTable below.
//
// NOTE(review): the numbers prefixing each line are original-file line
// numbers from a numbered paste; gaps in them mean lines were lost in
// extraction. Here the TOMBSTONE enumerator, the firstVN/secondVN/thirdVN/
// type/function member declarations (orig. lines ~63-69), and the return
// statements inside the comparison operators are all missing -- confirm
// against the original file before editing.
52 struct VISIBILITY_HIDDEN Expression {
// Every expression kind GVN numbers; EMPTY/TOMBSTONE are reserved for use as
// DenseMap sentinel keys (see DenseMapInfo<Expression> below).
53 enum ExpressionOpcode { ADD, SUB, MUL, UDIV, SDIV, FDIV, UREM, SREM,
54 FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
55 ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
56 ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
57 FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
58 FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
59 FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
60 SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
61 FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
62 PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, EMPTY,
65 ExpressionOpcode opcode;
// Value numbers of trailing operands (call arguments, GEP indices).
70 SmallVector<uint32_t, 4> varargs;
74 Expression(ExpressionOpcode o) : opcode(o) { }
// Structural equality: opcode first, then (for non-sentinel keys) type,
// callee, the three fixed operand value numbers, and finally the varargs
// element-by-element.
76 bool operator==(const Expression &other) const {
77 if (opcode != other.opcode)
// Sentinel keys compare by opcode alone.
79 else if (opcode == EMPTY || opcode == TOMBSTONE)
81 else if (type != other.type)
83 else if (function != other.function)
85 else if (firstVN != other.firstVN)
87 else if (secondVN != other.secondVN)
89 else if (thirdVN != other.thirdVN)
92 if (varargs.size() != other.varargs.size())
95 for (size_t i = 0; i < varargs.size(); ++i)
96 if (varargs[i] != other.varargs[i])
// Hand-written mirror of operator== (the original spells out both rather
// than defining != as !(==)).
103 bool operator!=(const Expression &other) const {
104 if (opcode != other.opcode)
106 else if (opcode == EMPTY || opcode == TOMBSTONE)
108 else if (type != other.type)
110 else if (function != other.function)
112 else if (firstVN != other.firstVN)
114 else if (secondVN != other.secondVN)
116 else if (thirdVN != other.thirdVN)
119 if (varargs.size() != other.varargs.size())
122 for (size_t i = 0; i < varargs.size(); ++i)
123 if (varargs[i] != other.varargs[i])
// ValueTable assigns each Value a small dense integer (its "value number")
// such that structurally equivalent expressions get equal numbers.
// NOTE(review): gaps in the embedded line numbers indicate dropped lines --
// the AliasAnalysis* AA member (used by setAliasAnalysis/hash_operand) and
// the public/private access specifiers are missing from this paste.
131 class VISIBILITY_HIDDEN ValueTable {
// Per-Value and per-Expression numbering tables; both map into the same
// value-number space.
133 DenseMap<Value*, uint32_t> valueNumbering;
134 DenseMap<Expression, uint32_t> expressionNumbering;
// Next unassigned number; starts at 1 so 0 can mean "no number".
137 uint32_t nextValueNumber;
// Translate an instruction's opcode/predicate to an ExpressionOpcode.
139 Expression::ExpressionOpcode getOpcode(BinaryOperator* BO);
140 Expression::ExpressionOpcode getOpcode(CmpInst* C);
141 Expression::ExpressionOpcode getOpcode(CastInst* C);
// Flatten each instruction kind into its hashable Expression form.
142 Expression create_expression(BinaryOperator* BO);
143 Expression create_expression(CmpInst* C);
144 Expression create_expression(ShuffleVectorInst* V);
145 Expression create_expression(ExtractElementInst* C);
146 Expression create_expression(InsertElementInst* V);
147 Expression create_expression(SelectInst* V);
148 Expression create_expression(CastInst* C);
149 Expression create_expression(GetElementPtrInst* G);
150 Expression create_expression(CallInst* C);
152 ValueTable() : nextValueNumber(1) { }
153 uint32_t lookup_or_add(Value* V);
154 uint32_t lookup(Value* V) const;
155 void add(Value* V, uint32_t num);
157 void erase(Value* v);
159 void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
160 uint32_t hash_operand(Value* v);
// DenseMap traits so Expression can be used as a DenseMap key: sentinel
// empty/tombstone keys, a hash, and equality.
// NOTE(review): several lines (the hash's final return, isEqual's body)
// were dropped from this paste -- see the gaps in the embedded numbering.
165 template <> struct DenseMapInfo<Expression> {
166 static inline Expression getEmptyKey() {
167 return Expression(Expression::EMPTY);
170 static inline Expression getTombstoneKey() {
171 return Expression(Expression::TOMBSTONE);
// Simple multiplicative (x37) hash over the opcode, the fixed operand value
// numbers, the type and callee pointers, and the varargs.
174 static unsigned getHashValue(const Expression e) {
175 unsigned hash = e.opcode;
177 hash = e.firstVN + hash * 37;
178 hash = e.secondVN + hash * 37;
179 hash = e.thirdVN + hash * 37;
// Pointers are mixed by xoring two right-shifts, discarding the low bits
// that are always zero due to allocation alignment.
181 hash = ((unsigned)((uintptr_t)e.type >> 4) ^
182 (unsigned)((uintptr_t)e.type >> 9)) +
185 for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
186 E = e.varargs.end(); I != E; ++I)
187 hash = *I + hash * 37;
189 hash = ((unsigned)((uintptr_t)e.function >> 4) ^
190 (unsigned)((uintptr_t)e.function >> 9)) +
// Delegates to Expression::operator== (body line missing in this paste).
195 static bool isEqual(const Expression &LHS, const Expression &RHS) {
198 static bool isPod() { return true; }
202 //===----------------------------------------------------------------------===//
203 // ValueTable Internal Functions
204 //===----------------------------------------------------------------------===//
// Map an LLVM binary-operator opcode onto the corresponding ExpressionOpcode.
// The default case asserts: every BinaryOperator opcode must be covered.
// NOTE(review): the closing braces (orig. lines 224-225) are missing from
// this paste.
205 Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
206 switch(BO->getOpcode()) {
207 default: // THIS SHOULD NEVER HAPPEN
208 assert(0 && "Binary operator with unknown opcode?");
209 case Instruction::Add: return Expression::ADD;
210 case Instruction::Sub: return Expression::SUB;
211 case Instruction::Mul: return Expression::MUL;
212 case Instruction::UDiv: return Expression::UDIV;
213 case Instruction::SDiv: return Expression::SDIV;
214 case Instruction::FDiv: return Expression::FDIV;
215 case Instruction::URem: return Expression::UREM;
216 case Instruction::SRem: return Expression::SREM;
217 case Instruction::FRem: return Expression::FREM;
218 case Instruction::Shl: return Expression::SHL;
219 case Instruction::LShr: return Expression::LSHR;
220 case Instruction::AShr: return Expression::ASHR;
221 case Instruction::And: return Expression::AND;
222 case Instruction::Or: return Expression::OR;
223 case Instruction::Xor: return Expression::XOR;
// Map a comparison's predicate onto an ExpressionOpcode, dispatching on
// whether it is an integer (ICmp) or floating-point (FCmp) compare.
// NOTE(review): closing braces after each switch were dropped in this paste.
227 Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
228 if (isa<ICmpInst>(C)) {
229 switch (C->getPredicate()) {
230 default: // THIS SHOULD NEVER HAPPEN
231 assert(0 && "Comparison with unknown predicate?");
232 case ICmpInst::ICMP_EQ: return Expression::ICMPEQ;
233 case ICmpInst::ICMP_NE: return Expression::ICMPNE;
234 case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
235 case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
236 case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
237 case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
238 case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
239 case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
240 case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
241 case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
// Not an ICmp, so it must be an FCmp.
244 assert(isa<FCmpInst>(C) && "Unknown compare");
245 switch (C->getPredicate()) {
246 default: // THIS SHOULD NEVER HAPPEN
247 assert(0 && "Comparison with unknown predicate?");
248 case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
249 case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
250 case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
251 case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
252 case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
253 case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
254 case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
255 case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
256 case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
257 case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
258 case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
259 case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
260 case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
261 case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
// Map a cast instruction's opcode onto the corresponding ExpressionOpcode.
// NOTE(review): closing braces were dropped from this paste.
265 Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
266 switch(C->getOpcode()) {
267 default: // THIS SHOULD NEVER HAPPEN
268 assert(0 && "Cast operator with unknown opcode?");
269 case Instruction::Trunc: return Expression::TRUNC;
270 case Instruction::ZExt: return Expression::ZEXT;
271 case Instruction::SExt: return Expression::SEXT;
272 case Instruction::FPToUI: return Expression::FPTOUI;
273 case Instruction::FPToSI: return Expression::FPTOSI;
274 case Instruction::UIToFP: return Expression::UITOFP;
275 case Instruction::SIToFP: return Expression::SITOFP;
276 case Instruction::FPTrunc: return Expression::FPTRUNC;
277 case Instruction::FPExt: return Expression::FPEXT;
278 case Instruction::PtrToInt: return Expression::PTRTOINT;
279 case Instruction::IntToPtr: return Expression::INTTOPTR;
280 case Instruction::BitCast: return Expression::BITCAST;
// Value-number an operand. Calls that may touch memory get a *fresh* number
// every time (so they never compare equal to anything and are never CSE'd);
// everything else goes through the normal lookup_or_add path.
284 uint32_t ValueTable::hash_operand(Value* v) {
285 if (CallInst* CI = dyn_cast<CallInst>(v))
286 if (!AA->doesNotAccessMemory(CI))
287 return nextValueNumber++;
289 return lookup_or_add(v);
// Flatten a call into an Expression: result type + callee + the value
// numbers of all actual arguments (operands past the callee at index 0).
// NOTE(review): the declaration of local 'e' and the return were dropped
// from this paste (gaps in the embedded numbering).
292 Expression ValueTable::create_expression(CallInst* C) {
295 e.type = C->getType();
299 e.function = C->getCalledFunction();
300 e.opcode = Expression::CALL;
302 for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
304 e.varargs.push_back(hash_operand(*I));
// Flatten a binary operator: value numbers of its two operands, its type,
// and the translated opcode.  (Local 'e' decl and return lost in paste.)
309 Expression ValueTable::create_expression(BinaryOperator* BO) {
312 e.firstVN = hash_operand(BO->getOperand(0));
313 e.secondVN = hash_operand(BO->getOperand(1));
316 e.type = BO->getType();
317 e.opcode = getOpcode(BO);
// Flatten a compare: both operand value numbers, the (boolean) result type,
// and the predicate-derived opcode.  (Local 'e' decl and return lost.)
322 Expression ValueTable::create_expression(CmpInst* C) {
325 e.firstVN = hash_operand(C->getOperand(0));
326 e.secondVN = hash_operand(C->getOperand(1));
329 e.type = C->getType();
330 e.opcode = getOpcode(C);
// Flatten a cast: single source operand number plus destination type and
// cast opcode.  (Local 'e' decl and return lost in this paste.)
335 Expression ValueTable::create_expression(CastInst* C) {
338 e.firstVN = hash_operand(C->getOperand(0));
342 e.type = C->getType();
343 e.opcode = getOpcode(C);
// Flatten a shufflevector: the two input vectors plus the mask operand.
// (Local 'e' decl and return lost in this paste.)
348 Expression ValueTable::create_expression(ShuffleVectorInst* S) {
351 e.firstVN = hash_operand(S->getOperand(0));
352 e.secondVN = hash_operand(S->getOperand(1));
353 e.thirdVN = hash_operand(S->getOperand(2));
355 e.type = S->getType();
356 e.opcode = Expression::SHUFFLE;
// Flatten an extractelement: vector + index operand numbers.
// (Local 'e' decl and return lost in this paste.)
361 Expression ValueTable::create_expression(ExtractElementInst* E) {
364 e.firstVN = hash_operand(E->getOperand(0));
365 e.secondVN = hash_operand(E->getOperand(1));
368 e.type = E->getType();
369 e.opcode = Expression::EXTRACT;
// Flatten an insertelement: vector, inserted scalar, and index.
// (Local 'e' decl and return lost in this paste.)
374 Expression ValueTable::create_expression(InsertElementInst* I) {
377 e.firstVN = hash_operand(I->getOperand(0));
378 e.secondVN = hash_operand(I->getOperand(1));
379 e.thirdVN = hash_operand(I->getOperand(2));
381 e.type = I->getType();
382 e.opcode = Expression::INSERT;
// Flatten a select: condition, true value, false value.
// (Local 'e' decl and return lost in this paste.)
387 Expression ValueTable::create_expression(SelectInst* I) {
390 e.firstVN = hash_operand(I->getCondition());
391 e.secondVN = hash_operand(I->getTrueValue());
392 e.thirdVN = hash_operand(I->getFalseValue());
394 e.type = I->getType();
395 e.opcode = Expression::SELECT;
// Flatten a GEP: base pointer in firstVN, every index in varargs.
// (Local 'e' decl, zeroing of unused VN fields, and return lost in paste.)
400 Expression ValueTable::create_expression(GetElementPtrInst* G) {
403 e.firstVN = hash_operand(G->getPointerOperand());
407 e.type = G->getType();
408 e.opcode = Expression::GEP;
410 for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
412 e.varargs.push_back(hash_operand(*I));
417 //===----------------------------------------------------------------------===//
418 // ValueTable External Functions
419 //===----------------------------------------------------------------------===//
421 /// lookup_or_add - Returns the value number for the specified value, assigning
422 /// it a new number if it did not have one before.
//
// Every branch below follows the same pattern: build the instruction's
// Expression key; if that expression was numbered before, reuse its number
// for V; otherwise record a fresh number in both maps.  Values GVN cannot
// express (and calls that may write memory) always get a fresh number.
// NOTE(review): the 'return VI->second;' after the initial cache hit, several
// 'return EI->second;' lines, and closing braces were dropped from this
// paste (see gaps in the embedded numbering).
423 uint32_t ValueTable::lookup_or_add(Value* V) {
424 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
425 if (VI != valueNumbering.end())
// Calls: only number read-only calls by expression; anything that may write
// memory is unique and gets a fresh number unconditionally.
428 if (CallInst* C = dyn_cast<CallInst>(V)) {
429 if (AA->onlyReadsMemory(C)) { // includes doesNotAccessMemory
430 Expression e = create_expression(C);
432 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
433 if (EI != expressionNumbering.end()) {
434 valueNumbering.insert(std::make_pair(V, EI->second));
437 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
438 valueNumbering.insert(std::make_pair(V, nextValueNumber));
440 return nextValueNumber++;
443 valueNumbering.insert(std::make_pair(V, nextValueNumber));
444 return nextValueNumber++;
446 } else if (BinaryOperator* BO = dyn_cast<BinaryOperator>(V)) {
447 Expression e = create_expression(BO);
449 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
450 if (EI != expressionNumbering.end()) {
451 valueNumbering.insert(std::make_pair(V, EI->second));
454 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
455 valueNumbering.insert(std::make_pair(V, nextValueNumber));
457 return nextValueNumber++;
459 } else if (CmpInst* C = dyn_cast<CmpInst>(V)) {
460 Expression e = create_expression(C);
462 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
463 if (EI != expressionNumbering.end()) {
464 valueNumbering.insert(std::make_pair(V, EI->second));
467 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
468 valueNumbering.insert(std::make_pair(V, nextValueNumber));
470 return nextValueNumber++;
472 } else if (ShuffleVectorInst* U = dyn_cast<ShuffleVectorInst>(V)) {
473 Expression e = create_expression(U);
475 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
476 if (EI != expressionNumbering.end()) {
477 valueNumbering.insert(std::make_pair(V, EI->second));
480 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
481 valueNumbering.insert(std::make_pair(V, nextValueNumber));
483 return nextValueNumber++;
485 } else if (ExtractElementInst* U = dyn_cast<ExtractElementInst>(V)) {
486 Expression e = create_expression(U);
488 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
489 if (EI != expressionNumbering.end()) {
490 valueNumbering.insert(std::make_pair(V, EI->second));
493 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
494 valueNumbering.insert(std::make_pair(V, nextValueNumber));
496 return nextValueNumber++;
498 } else if (InsertElementInst* U = dyn_cast<InsertElementInst>(V)) {
499 Expression e = create_expression(U);
501 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
502 if (EI != expressionNumbering.end()) {
503 valueNumbering.insert(std::make_pair(V, EI->second));
506 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
507 valueNumbering.insert(std::make_pair(V, nextValueNumber));
509 return nextValueNumber++;
511 } else if (SelectInst* U = dyn_cast<SelectInst>(V)) {
512 Expression e = create_expression(U);
514 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
515 if (EI != expressionNumbering.end()) {
516 valueNumbering.insert(std::make_pair(V, EI->second));
519 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
520 valueNumbering.insert(std::make_pair(V, nextValueNumber));
522 return nextValueNumber++;
524 } else if (CastInst* U = dyn_cast<CastInst>(V)) {
525 Expression e = create_expression(U);
527 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
528 if (EI != expressionNumbering.end()) {
529 valueNumbering.insert(std::make_pair(V, EI->second));
532 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
533 valueNumbering.insert(std::make_pair(V, nextValueNumber));
535 return nextValueNumber++;
537 } else if (GetElementPtrInst* U = dyn_cast<GetElementPtrInst>(V)) {
538 Expression e = create_expression(U);
540 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
541 if (EI != expressionNumbering.end()) {
542 valueNumbering.insert(std::make_pair(V, EI->second));
545 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
546 valueNumbering.insert(std::make_pair(V, nextValueNumber));
548 return nextValueNumber++;
// Fallback: anything not expressible above (arguments, loads, etc.) is
// opaque and gets its own fresh number.
551 valueNumbering.insert(std::make_pair(V, nextValueNumber));
552 return nextValueNumber++;
556 /// lookup - Returns the value number of the specified value. Fails if
557 /// the value has not yet been numbered.
// NOTE(review): the 'return VI->second;' line was dropped from this paste.
558 uint32_t ValueTable::lookup(Value* V) const {
559 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
560 assert(VI != valueNumbering.end() && "Value not numbered?");
564 /// clear - Remove all entries from the ValueTable
// Note: nextValueNumber is not reset here (the reset, if any, is outside
// this paste -- gaps in the embedded numbering).
565 void ValueTable::clear() {
566 valueNumbering.clear();
567 expressionNumbering.clear();
571 /// erase - Remove a value from the value numbering
// Only the Value->number map is touched; expressionNumbering keeps any
// expression entry so the number can still be reused.
572 void ValueTable::erase(Value* V) {
573 valueNumbering.erase(V);
576 //===----------------------------------------------------------------------===//
577 // ValueNumberedSet Class
578 //===----------------------------------------------------------------------===//
// A set of Values paired with a bit vector indexed by value number, so GVN
// can ask "is any value with number N available?" in O(1) via test(), then
// find the actual value with find_leader().
// NOTE(review): the 'BitVector numbers;' member and access specifiers were
// dropped from this paste (gaps in the embedded numbering), as were the
// bodies of set()/reset()/test()'s resize-and-flip logic.
580 class VISIBILITY_HIDDEN ValueNumberedSet {
582 SmallPtrSet<Value*, 8> contents;
585 ValueNumberedSet() { numbers.resize(1); }
586 ValueNumberedSet(const ValueNumberedSet& other) {
587 numbers = other.numbers;
588 contents = other.contents;
591 typedef SmallPtrSet<Value*, 8>::iterator iterator;
593 iterator begin() { return contents.begin(); }
594 iterator end() { return contents.end(); }
596 bool insert(Value* v) { return contents.insert(v); }
597 void insert(iterator I, iterator E) { contents.insert(I, E); }
598 void erase(Value* v) { contents.erase(v); }
599 unsigned count(Value* v) { return contents.count(v); }
600 size_t size() { return contents.size(); }
// Mark value number i as present, growing the bit vector on demand.
602 void set(unsigned i) {
603 if (i >= numbers.size())
609 void operator=(const ValueNumberedSet& other) {
610 contents = other.contents;
611 numbers = other.numbers;
// Clear the present-bit for i; out-of-range numbers are already absent.
614 void reset(unsigned i) {
615 if (i < numbers.size())
619 bool test(unsigned i) {
620 if (i >= numbers.size())
623 return numbers.test(i);
633 //===----------------------------------------------------------------------===//
635 //===----------------------------------------------------------------------===//
// The GVN pass itself: value-numbering-based redundancy elimination plus
// simple load/store forwarding and store-to-memset merging.
// NOTE(review): the ValueTable member ('VN'), the phiMap member of type
// PhiMapType, and access specifiers were dropped from this paste (gaps in
// the embedded numbering); both are referenced by the methods below.
639 class VISIBILITY_HIDDEN GVN : public FunctionPass {
640 bool runOnFunction(Function &F);
642 static char ID; // Pass identification, replacement for typeid
643 GVN() : FunctionPass((intptr_t)&ID) { }
// Per-block sets of values available at block exit.
648 DenseMap<BasicBlock*, ValueNumberedSet> availableOut;
// Cache of PHIs built for each load pointer during PHI construction.
650 typedef DenseMap<Value*, SmallPtrSet<Instruction*, 4> > PhiMapType;
654 // This transformation requires dominator postdominator info
655 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
656 AU.setPreservesCFG();
657 AU.addRequired<DominatorTree>();
658 AU.addRequired<MemoryDependenceAnalysis>();
659 AU.addRequired<AliasAnalysis>();
660 AU.addRequired<TargetData>();
661 AU.addPreserved<AliasAnalysis>();
662 AU.addPreserved<MemoryDependenceAnalysis>();
663 AU.addPreserved<TargetData>();
667 // FIXME: eliminate or document these better
668 Value* find_leader(ValueNumberedSet& vals, uint32_t v) ;
669 void val_insert(ValueNumberedSet& s, Value* v);
670 bool processLoad(LoadInst* L,
671 DenseMap<Value*, LoadInst*> &lastLoad,
672 SmallVectorImpl<Instruction*> &toErase);
673 bool processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase);
674 bool processInstruction(Instruction* I,
675 ValueNumberedSet& currAvail,
676 DenseMap<Value*, LoadInst*>& lastSeenLoad,
677 SmallVectorImpl<Instruction*> &toErase);
678 bool processNonLocalLoad(LoadInst* L,
679 SmallVectorImpl<Instruction*> &toErase);
680 bool processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
681 SmallVectorImpl<Instruction*> &toErase);
682 bool performCallSlotOptzn(MemCpyInst* cpy, CallInst* C,
683 SmallVectorImpl<Instruction*> &toErase);
684 Value *GetValueForBlock(BasicBlock *BB, LoadInst* orig,
685 DenseMap<BasicBlock*, Value*> &Phis,
686 bool top_level = false);
687 void dump(DenseMap<BasicBlock*, Value*>& d);
688 bool iterateOnFunction(Function &F);
689 Value* CollapsePhi(PHINode* p);
690 bool isSafeReplacement(PHINode* p, Instruction* inst);
696 // createGVNPass - The public interface to this file...
697 FunctionPass *llvm::createGVNPass() { return new GVN(); }
// Register the pass under the command-line name "gvn".
699 static RegisterPass<GVN> X("gvn",
700 "Global Value Numbering");
702 /// find_leader - Given a set and a value number, return the first
703 /// element of the set with that value number, or 0 if no such element
// Linear scan over the set, comparing each member's value number.  The
// assert fires only when the set's present-bit says the number exists but
// no member matches (internal inconsistency).
// NOTE(review): the loop tail and return lines were dropped from this paste.
705 Value* GVN::find_leader(ValueNumberedSet& vals, uint32_t v) {
709 for (ValueNumberedSet::iterator I = vals.begin(), E = vals.end();
711 if (v == VN.lookup(*I))
714 assert(0 && "No leader found, but present bit is set?");
718 /// val_insert - Insert a value into a set only if there is not a value
719 /// with the same value number already in the set
// NOTE(review): the body past the number lookup (the presence test and the
// actual insert) was dropped from this paste.
720 void GVN::val_insert(ValueNumberedSet& s, Value* v) {
721 uint32_t num = VN.lookup(v);
// Debug helper: print a block->value dependency map (used by the PHI
// construction code).  NOTE(review): only the loop header and the None
// check survive in this paste; the printing statements were dropped.
726 void GVN::dump(DenseMap<BasicBlock*, Value*>& d) {
728 for (DenseMap<BasicBlock*, Value*>::iterator I = d.begin(),
729 E = d.end(); I != E; ++I) {
730 if (I->second == MemoryDependenceAnalysis::None)
// CollapsePhi - If PHI p is trivially redundant (all incoming values are
// the same), return the value it can be replaced with, provided that value
// dominates the PHI and the replacement is safe; otherwise return 0.
// NOTE(review): the non-instruction constVal path and the return lines were
// dropped from this paste.
738 Value* GVN::CollapsePhi(PHINode* p) {
739 DominatorTree &DT = getAnalysis<DominatorTree>();
740 Value* constVal = p->hasConstantValue();
742 if (!constVal) return 0;
744 Instruction* inst = dyn_cast<Instruction>(constVal);
748 if (DT.dominates(inst, p))
749 if (isSafeReplacement(p, inst))
// isSafeReplacement - Decide whether PHI p may be replaced by inst.  The
// visible check guards the PHI-for-PHI case: scan p's users for a PHI in
// inst's block (replacing would create a self-referential situation there).
// NOTE(review): the return statements were dropped from this paste.
754 bool GVN::isSafeReplacement(PHINode* p, Instruction* inst) {
755 if (!isa<PHINode>(inst))
758 for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
760 if (PHINode* use_phi = dyn_cast<PHINode>(UI))
761 if (use_phi->getParent() == inst->getParent())
767 /// GetValueForBlock - Get the value to use within the specified basic block.
768 /// available values are in Phis.
// Classic SSA-construction recursion: reuse a cached value, recurse through
// a single predecessor, or build a PHI whose incoming values come from
// recursing into every predecessor; trivially redundant PHIs are collapsed
// afterwards.  NOTE(review): several lines (the top_level parameter line,
// various returns/braces) were dropped from this paste.
769 Value *GVN::GetValueForBlock(BasicBlock *BB, LoadInst* orig,
770 DenseMap<BasicBlock*, Value*> &Phis,
773 // If we have already computed this value, return the previously computed val.
774 DenseMap<BasicBlock*, Value*>::iterator V = Phis.find(BB);
775 if (V != Phis.end() && !top_level) return V->second;
// With exactly one predecessor, the value is whatever is available there.
777 BasicBlock* singlePred = BB->getSinglePredecessor();
779 Value *ret = GetValueForBlock(singlePred, orig, Phis);
784 // Otherwise, the idom is the loop, so we need to insert a PHI node. Do so
785 // now, then get values to fill in the incoming values for the PHI.
786 PHINode *PN = new PHINode(orig->getType(), orig->getName()+".rle",
788 PN->reserveOperandSpace(std::distance(pred_begin(BB), pred_end(BB)));
// Record the PHI before recursing so cycles terminate.
790 if (Phis.count(BB) == 0)
791 Phis.insert(std::make_pair(BB, PN));
793 // Fill in the incoming values for the block.
794 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
795 Value* val = GetValueForBlock(*PI, orig, Phis);
796 PN->addIncoming(val, *PI);
// Tell alias analysis the new PHI aliases like the original load.
799 AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
800 AA.copyValue(orig, PN);
802 // Attempt to collapse PHI nodes that are trivially redundant
803 Value* v = CollapsePhi(PN);
805 // Cache our phi construction results
806 phiMap[orig->getPointerOperand()].insert(PN);
// Collapse succeeded: replace and erase the PHI, fixing up all caches.
810 MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
812 MD.removeInstruction(PN);
813 PN->replaceAllUsesWith(v);
815 for (DenseMap<BasicBlock*, Value*>::iterator I = Phis.begin(),
816 E = Phis.end(); I != E; ++I)
820 PN->eraseFromParent();
826 /// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
827 /// non-local by performing PHI construction.
// Returns true if the load was eliminated (L is queued on toErase, not
// deleted here).  NOTE(review): loop increments, several 'continue'/bail
// lines and the final return were dropped from this paste.
828 bool GVN::processNonLocalLoad(LoadInst* L,
829 SmallVectorImpl<Instruction*> &toErase) {
830 MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
832 // Find the non-local dependencies of the load
833 DenseMap<BasicBlock*, Value*> deps;
834 MD.getNonLocalDependency(L, deps);
// Map from block to the replacement value available in that block.
836 DenseMap<BasicBlock*, Value*> repl;
838 // Filter out useless results (non-locals, etc)
839 for (DenseMap<BasicBlock*, Value*>::iterator I = deps.begin(), E = deps.end();
841 if (I->second == MemoryDependenceAnalysis::None)
844 if (I->second == MemoryDependenceAnalysis::NonLocal)
// A store to the same pointer supplies its stored operand; a load of the
// same pointer supplies itself.
847 if (StoreInst* S = dyn_cast<StoreInst>(I->second)) {
848 if (S->getPointerOperand() != L->getPointerOperand())
850 repl[I->first] = S->getOperand(0);
851 } else if (LoadInst* LD = dyn_cast<LoadInst>(I->second)) {
852 if (LD->getPointerOperand() != L->getPointerOperand())
860 // Use cached PHI construction information from previous runs
861 SmallPtrSet<Instruction*, 4>& p = phiMap[L->getPointerOperand()];
862 for (SmallPtrSet<Instruction*, 4>::iterator I = p.begin(), E = p.end();
// A previously built PHI in this very block replaces the load outright.
864 if ((*I)->getParent() == L->getParent()) {
865 MD.removeInstruction(L);
866 L->replaceAllUsesWith(*I);
867 toErase.push_back(L);
// Otherwise seed the replacement map with the cached PHI's block.
872 repl.insert(std::make_pair((*I)->getParent(), *I));
875 // Perform PHI construction
876 SmallPtrSet<BasicBlock*, 4> visited;
877 Value* v = GetValueForBlock(L->getParent(), L, repl, true);
879 MD.removeInstruction(L);
880 L->replaceAllUsesWith(v);
881 toErase.push_back(L);
887 /// processLoad - Attempt to eliminate a load, first by eliminating it
888 /// locally, and then attempting non-local elimination if that fails.
// Returns true when the load was replaced; the dead load itself is only
// queued on toErase.  NOTE(review): various braces, 'break's and return
// statements were dropped from this paste (gaps in the embedded numbering).
889 bool GVN::processLoad(LoadInst *L, DenseMap<Value*, LoadInst*> &lastLoad,
890 SmallVectorImpl<Instruction*> &toErase) {
// Volatile loads must not be removed; just remember them as the most
// recent load of this pointer.
891 if (L->isVolatile()) {
892 lastLoad[L->getPointerOperand()] = L;
896 Value* pointer = L->getPointerOperand();
897 LoadInst*& last = lastLoad[pointer];
899 // ... to a pointer that has been loaded from before...
900 MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
901 bool removedNonLocal = false;
902 Instruction* dep = MD.getDependency(L);
// Non-local dependency (and not in the entry block): try PHI-based
// elimination instead of the local walk below.
903 if (dep == MemoryDependenceAnalysis::NonLocal &&
904 L->getParent() != &L->getParent()->getParent()->getEntryBlock()) {
905 removedNonLocal = processNonLocalLoad(L, toErase);
907 if (!removedNonLocal)
910 return removedNonLocal;
914 bool deletedLoad = false;
916 // Walk up the dependency chain until we either find
917 // a dependency we can use, or we can't walk any further
918 while (dep != MemoryDependenceAnalysis::None &&
919 dep != MemoryDependenceAnalysis::NonLocal &&
920 (isa<LoadInst>(dep) || isa<StoreInst>(dep))) {
921 // ... that depends on a store ...
922 if (StoreInst* S = dyn_cast<StoreInst>(dep)) {
// Store-to-load forwarding: same pointer means the stored value is the
// loaded value.
923 if (S->getPointerOperand() == pointer) {
925 MD.removeInstruction(L);
927 L->replaceAllUsesWith(S->getOperand(0));
928 toErase.push_back(L);
933 // Whether we removed it or not, we can't
937 // If we don't depend on a store, and we haven't
938 // been loaded before, bail.
// Redundant-load elimination: depends directly on the previous load of
// this pointer.
940 } else if (dep == last) {
942 MD.removeInstruction(L);
944 L->replaceAllUsesWith(last);
945 toErase.push_back(L);
// Step past this (non-aliasing) memory instruction and keep walking.
951 dep = MD.getDependency(L, dep);
// Load of freshly allocated, never-written memory yields undef.
955 if (dep != MemoryDependenceAnalysis::None &&
956 dep != MemoryDependenceAnalysis::NonLocal &&
957 isa<AllocationInst>(dep)) {
958 // Check that this load is actually from the
959 // allocation we found
960 Value* v = L->getOperand(0);
962 if (BitCastInst *BC = dyn_cast<BitCastInst>(v))
963 v = BC->getOperand(0);
964 else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(v))
965 v = GEP->getOperand(0);
970 // If this load depends directly on an allocation, there isn't
971 // anything stored there; therefore, we can optimize this load
973 MD.removeInstruction(L);
975 L->replaceAllUsesWith(UndefValue::get(L->getType()));
976 toErase.push_back(L);
988 /// isBytewiseValue - If the specified value can be set by repeating the same
989 /// byte in memory, return the i8 value that it is represented with. This is
990 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
991 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
992 /// byte store (e.g. i16 0x1234), return null.
// NOTE(review): the declaration of Val2, the halves-comparison inside the
// loop, and the trailing 'return 0;'/braces were dropped from this paste
// (gaps in the embedded numbering).
993 static Value *isBytewiseValue(Value *V) {
994 // All byte-wide stores are splatable, even of arbitrary variables.
995 if (V->getType() == Type::Int8Ty) return V;
997 // Constant float and double values can be handled as integer values if the
998 // corresponding integer value is "byteable". An important case is 0.0.
999 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
1000 if (CFP->getType() == Type::FloatTy)
1001 V = ConstantExpr::getBitCast(CFP, Type::Int32Ty);
1002 if (CFP->getType() == Type::DoubleTy)
1003 V = ConstantExpr::getBitCast(CFP, Type::Int64Ty);
1004 // Don't handle long double formats, which have strange constraints.
1007 // We can handle constant integers that are power of two in size and a
1008 // multiple of 8 bits.
1009 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1010 unsigned Width = CI->getBitWidth();
1011 if (isPowerOf2_32(Width) && Width > 8) {
1012 // We can handle this value if the recursive binary decomposition is the
1013 // same at all levels.
1014 APInt Val = CI->getValue();
// Repeatedly split the value in half; a true byte-splat has identical
// halves at every level until only 8 bits remain.
1016 while (Val.getBitWidth() != 8) {
1017 unsigned NextWidth = Val.getBitWidth()/2;
1018 Val2 = Val.lshr(NextWidth);
1019 Val2.trunc(Val.getBitWidth()/2);
1020 Val.trunc(Val.getBitWidth()/2);
1022 // If the top/bottom halves aren't the same, reject it.
1026 return ConstantInt::get(Val);
1030 // Conceptually, we could handle things like:
1031 // %a = zext i8 %X to i16
1032 // %b = shl i16 %a, 8
1033 // %c = or i16 %a, %b
1034 // but until there is an example that actually needs this, it doesn't seem
1035 // worth worrying about.
1039 /// IsPointerAtOffset - Return true if Ptr1 is exactly provably equal to Ptr2
1040 /// plus the specified constant offset. For example, Ptr1 might be &A[42], and
1041 /// Ptr2 might be &A[40] and Offset might be 8.
// NOTE(review): the entire body of this function (orig. lines after 1042)
// was dropped from this paste; only the signature survives.
1042 static bool IsPointerAtOffset(Value *Ptr1, Value *Ptr2, uint64_t Offset) {
1047 /// processStore - When GVN is scanning forward over instructions, we look for
1048 /// some other patterns to fold away. In particular, this looks for stores to
1049 /// neighboring locations of memory. If it sees enough consequtive ones
1050 /// (currently 4) it attempts to merge them together into a memcpy/memset.
/// processStore - Try to merge a run of neighboring, byte-splattable stores
/// (starting at SI) into a single llvm.memset intrinsic. On success the
/// merged stores are queued on toErase and true is returned.
///
/// FIX 1: the scan iterator was incremented both at its declaration and in
/// the for-loop header, skipping the instruction immediately after SI (a
/// call or volatile store there would have been silently ignored).
/// FIX 2: AccessSize for a candidate neighboring store must be computed
/// from that store's own value type, not SI's; the stores may have
/// different widths, which corrupted the byte accounting.
1051 bool GVN::processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase) {
1053 if (SI->isVolatile()) return false;
1055 // There are two cases that are interesting for this code to handle: memcpy
1056 // and memset. Right now we only handle memset.
1058 // Ensure that the value being stored is something that can be memset'able a
1059 // byte at a time like "0" or "-1" or any width, as well as things like
1060 // 0xA0A0A0A0 and 0.0.
1061 Value *ByteVal = isBytewiseValue(SI->getOperand(0));
1065 TargetData &TD = getAnalysis<TargetData>();
1066 AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
1068 // Okay, so we now have a single store that can be splatable. Try to 'grow'
1069 // this store by looking for neighboring stores to the immediate left or right
1070 // of the store we have so far. While we could in theory handle stores in
1071 // this order: A[0], A[2], A[1]
1072 // in practice, right now we only worry about cases where stores are
1073 // consecutive in increasing or decreasing address order.
1074 uint64_t BytesSoFar = TD.getTypeStoreSize(SI->getOperand(0)->getType());
1075 uint64_t BytesFromSI = 0;
1076 unsigned StartAlign = SI->getAlignment();
1077 Value *StartPtr = SI->getPointerOperand();
1078 SmallVector<StoreInst*, 16> Stores;
1079 Stores.push_back(SI);
// Scan forward from the instruction immediately after SI to the block
// terminator. The for-loop header performs the single pre-increment;
// incrementing again at the declaration would skip the instruction that
// directly follows SI.
1081 BasicBlock::iterator BI = SI;
1082 for (++BI; !isa<TerminatorInst>(BI); ++BI) {
1083 if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
1084 // If the call is readnone, ignore it, otherwise bail out. We don't even
1085 // allow readonly here because we don't want something like:
1086 // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
1087 if (AA.getModRefBehavior(CallSite::get(BI)) ==
1088 AliasAnalysis::DoesNotAccessMemory)
1091 // TODO: If this is a memset, try to join it in.
1094 } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
1097 // If this is a non-store instruction it is fine, ignore it.
1098 StoreInst *NextStore = dyn_cast<StoreInst>(BI);
1099 if (NextStore == 0) continue;
1101 // If this is a store, see if we can merge it in.
1102 if (NextStore->isVolatile()) break;
1104 // Check to see if this stored value is of the same byte-splattable value.
1105 if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
1108 Value *ThisPointer = NextStore->getPointerOperand();
// The region covered by this candidate is the store size of *its* stored
// value, not SI's; the two stores are not required to have the same type.
1109 unsigned AccessSize = TD.getTypeStoreSize(NextStore->getOperand(0)->getType());
1111 // If so, check to see if the store is before the current range or after it
1112 // in either case, extend the range, otherwise reject it.
1113 if (IsPointerAtOffset(ThisPointer, StartPtr, BytesSoFar)) {
1114 // Okay, this extends the stored area on the end, just add to the bytes
1115 // so far and remember this store.
1116 BytesSoFar += AccessSize;
1117 Stores.push_back(NextStore);
1121 if (IsPointerAtOffset(StartPtr, ThisPointer, AccessSize)) {
1122 // Okay, the store is before the current range. Reset our start pointer
1123 // and get new alignment info etc.
1124 BytesSoFar += AccessSize;
1125 BytesFromSI += AccessSize;
1126 Stores.push_back(NextStore);
1127 StartPtr = ThisPointer;
1128 StartAlign = NextStore->getAlignment();
1132 // Otherwise, this store wasn't contiguous with our current range, bail out.
1136 // If we found less than 4 stores to merge, bail out, it isn't worth losing
1137 // type information in llvm IR to do the transformation.
1138 if (Stores.size() < 4)
1141 // Otherwise, we do want to transform this! Create a new memset. We put the
1142 // memset right after the first store that we found in this block. This
1143 // ensures that the caller will increment the iterator to the memset before
1144 // it deletes all the stores.
1145 BasicBlock::iterator InsertPt = SI; ++InsertPt;
1147 Function *F = Intrinsic::getDeclaration(SI->getParent()->getParent()
1148 ->getParent(), Intrinsic::memset_i64);
1150 // StartPtr may not dominate the starting point. Instead of using it, base
1151 // the destination pointer off the input to the first store in the block.
1152 StartPtr = SI->getPointerOperand();
1154 // Cast the start ptr to be i8* as memset requires.
1155 const Type *i8Ptr = PointerType::getUnqual(Type::Int8Ty);
1156 if (StartPtr->getType() != i8Ptr)
1157 StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getNameStart(),
1160 // Offset the pointer if needed.
1162 StartPtr = new GetElementPtrInst(StartPtr, ConstantInt::get(Type::Int64Ty,
1164 "ptroffset", InsertPt);
1167 StartPtr, ByteVal, // Start, value
1168 ConstantInt::get(Type::Int64Ty, BytesSoFar), // size
1169 ConstantInt::get(Type::Int32Ty, StartAlign) // align
1171 new CallInst(F, Ops, Ops+4, "", InsertPt);
1173 toErase.append(Stores.begin(), Stores.end());
1180 /// performCallSlotOptzn - takes a memcpy and a call that it depends on,
1181 /// and checks for the possibility of a call slot optimization by having
1182 /// the call write its result directly into the destination of the memcpy.
/// Returns true if the memcpy was eliminated (queued on toErase); false if
/// any of the legality checks below fails.
1183 bool GVN::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C,
1184 SmallVectorImpl<Instruction*> &toErase) {
1185 // The general transformation to keep in mind is
1187 // call @func(..., src, ...)
1188 // memcpy(dest, src, ...)
1192 // memcpy(dest, src, ...)
1193 // call @func(..., dest, ...)
1195 // Since moving the memcpy is technically awkward, we additionally check that
1196 // src only holds uninitialized values at the moment of the call, meaning that
1197 // the memcpy can be discarded rather than moved.
1199 // Deliberately get the source and destination with bitcasts stripped away,
1200 // because we'll need to do type comparisons based on the underlying type.
1201 Value* cpyDest = cpy->getDest();
1202 Value* cpySrc = cpy->getSource();
// CallSite gives uniform access to the call's arguments for the rewrite
// at the bottom of this function.
1203 CallSite CS = CallSite::get(C);
1205 // We need to be able to reason about the size of the memcpy, so we require
1206 // that it be a constant.
1207 ConstantInt* cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
1211 // Require that src be an alloca. This simplifies the reasoning considerably.
1212 AllocaInst* srcAlloca = dyn_cast<AllocaInst>(cpySrc);
1216 // Check that all of src is copied to dest.
1217 TargetData& TD = getAnalysis<TargetData>();
1219 ConstantInt* srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
// Total size of src in bytes: ABI element size times the constant array size.
1223 uint64_t srcSize = TD.getABITypeSize(srcAlloca->getAllocatedType()) *
1224 srcArraySize->getZExtValue();
1226 if (cpyLength->getZExtValue() < srcSize)
1229 // Check that accessing the first srcSize bytes of dest will not cause a
1230 // trap. Otherwise the transform is invalid since it might cause a trap
1231 // to occur earlier than it otherwise would.
1232 if (AllocaInst* A = dyn_cast<AllocaInst>(cpyDest)) {
1233 // The destination is an alloca. Check it is larger than srcSize.
1234 ConstantInt* destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
1238 uint64_t destSize = TD.getABITypeSize(A->getAllocatedType()) *
1239 destArraySize->getZExtValue();
1241 if (destSize < srcSize)
1243 } else if (Argument* A = dyn_cast<Argument>(cpyDest)) {
1244 // If the destination is an sret parameter then only accesses that are
1245 // outside of the returned struct type can trap.
1246 if (!A->hasStructRetAttr())
1249 const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
1250 uint64_t destSize = TD.getABITypeSize(StructTy);
1252 if (destSize < srcSize)
1258 // Check that src is not accessed except via the call and the memcpy. This
1259 // guarantees that it holds only undefined values when passed in (so the final
1260 // memcpy can be dropped), that it is not read or written between the call and
1261 // the memcpy, and that writing beyond the end of it is undefined.
1262 SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
1263 srcAlloca->use_end());
// Worklist walk over the alloca's transitive users: GEPs and bitcasts are
// transparent (their users are pushed and inspected in turn); only the call
// and the memcpy themselves are allowed to touch src directly.
1264 while (!srcUseList.empty()) {
1265 User* UI = srcUseList.back();
1266 srcUseList.pop_back();
1268 if (isa<GetElementPtrInst>(UI) || isa<BitCastInst>(UI)) {
1269 for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
1271 srcUseList.push_back(*I);
1272 } else if (UI != C && UI != cpy) {
1277 // Since we're changing the parameter to the callsite, we need to make sure
1278 // that what would be the new parameter dominates the callsite.
1279 DominatorTree& DT = getAnalysis<DominatorTree>();
1280 if (Instruction* cpyDestInst = dyn_cast<Instruction>(cpyDest))
1281 if (!DT.dominates(cpyDestInst, C))
1284 // In addition to knowing that the call does not access src in some
1285 // unexpected manner, for example via a global, which we deduce from
1286 // the use analysis, we also need to know that it does not sneakily
1287 // access dest. We rely on AA to figure this out for us.
1288 AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
1289 if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
1290 AliasAnalysis::NoModRef)
1293 // All the checks have passed, so do the transformation.
1294 for (unsigned i = 0; i < CS.arg_size(); ++i)
1295 if (CS.getArgument(i) == cpySrc) {
// dest may have a different pointer type than src; insert a pointer cast
// just before the call so the rewritten argument still type-checks.
1296 if (cpySrc->getType() != cpyDest->getType())
1297 cpyDest = CastInst::createPointerCast(cpyDest, cpySrc->getType(),
1298 cpyDest->getName(), C);
1299 CS.setArgument(i, cpyDest);
1302 // Drop any cached information about the call, because we may have changed
1303 // its dependence information by changing its parameter.
1304 MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
1305 MD.dropInstruction(C);
1307 // Remove the memcpy
1308 MD.removeInstruction(cpy);
1309 toErase.push_back(cpy);
1314 /// processMemCpy - perform simplification of memcpy's. If we have memcpy A which
1315 /// copies X to Y, and memcpy B which copies Y to Z, then we can rewrite B to be
1316 /// a memcpy from X to Z (or potentially a memmove, depending on circumstances).
1317 /// This allows later passes to remove the first memcpy altogether.
/// M is the second (dependent) memcpy; MDep is the memcpy it depends on.
/// Returns true if M was replaced and queued on toErase.
1318 bool GVN::processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
1319 SmallVectorImpl<Instruction*> &toErase) {
1320 // We can only transforms memcpy's where the dest of one is the source of the
1322 if (M->getSource() != MDep->getDest())
1325 // Second, the length of the memcpy's must be the same, or the preceding one
1326 // must be larger than the following one.
1327 ConstantInt* C1 = dyn_cast<ConstantInt>(MDep->getLength());
1328 ConstantInt* C2 = dyn_cast<ConstantInt>(M->getLength());
1332 uint64_t DepSize = C1->getValue().getZExtValue();
1333 uint64_t CpySize = C2->getValue().getZExtValue();
1335 if (DepSize < CpySize)
1338 // Finally, we have to make sure that the dest of the second does not
1339 // alias the source of the first
// Three pairwise NoAlias checks: M.dest vs MDep.source, M.dest vs M.source,
// and MDep.dest vs MDep.source — any overlap makes the rewrite unsound
// since memcpy requires non-overlapping operands.
1340 AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
1341 if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
1342 AliasAnalysis::NoAlias)
1344 else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
1345 AliasAnalysis::NoAlias)
1347 else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
1348 != AliasAnalysis::NoAlias)
1351 // If all checks passed, then we can transform these memcpy's
1352 Function* MemCpyFun = Intrinsic::getDeclaration(
1353 M->getParent()->getParent()->getParent(),
1354 M->getIntrinsicID());
// Build the replacement call: copy straight from MDep's original source
// into M's destination, inserted immediately before M.
1356 std::vector<Value*> args;
1357 args.push_back(M->getRawDest());
1358 args.push_back(MDep->getRawSource());
1359 args.push_back(M->getLength());
1360 args.push_back(M->getAlignment());
1362 CallInst* C = new CallInst(MemCpyFun, args.begin(), args.end(), "", M);
// If the new call still depends on MDep, the shortcut is valid and M is now
// dead; otherwise the rewrite didn't help, so discard the call we created.
1364 MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
1365 if (MD.getDependency(C) == MDep) {
1366 MD.dropInstruction(M);
1367 toErase.push_back(M);
1371 MD.removeInstruction(C);
1372 toErase.push_back(C);
1376 /// processInstruction - When calculating availability, handle an instruction
1377 /// by inserting it into the appropriate sets
/// Loads, stores and memcpys are dispatched to their dedicated handlers;
/// everything else goes through value numbering. The boolean result is OR'd
/// into the caller's "function changed" flag.
1378 bool GVN::processInstruction(Instruction *I, ValueNumberedSet &currAvail,
1379 DenseMap<Value*, LoadInst*> &lastSeenLoad,
1380 SmallVectorImpl<Instruction*> &toErase) {
1381 if (LoadInst* L = dyn_cast<LoadInst>(I))
1382 return processLoad(L, lastSeenLoad, toErase);
1384 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1385 return processStore(SI, toErase);
1387 if (MemCpyInst* M = dyn_cast<MemCpyInst>(I)) {
1388 MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
1390 // There are two possible optimizations we can do for memcpy:
1391 // a) memcpy-memcpy xform which exposes redundance for DSE
1392 // b) call-memcpy xform for return slot optimization
1393 Instruction* dep = MD.getDependency(M);
1394 if (dep == MemoryDependenceAnalysis::None ||
1395 dep == MemoryDependenceAnalysis::NonLocal)
1397 if (MemCpyInst *MemCpy = dyn_cast<MemCpyInst>(dep))
1398 return processMemCpy(M, MemCpy, toErase);
1399 if (CallInst* C = dyn_cast<CallInst>(dep))
1400 return performCallSlotOptzn(M, C, toErase);
// Not a memory operation we special-case: fall through to value numbering.
1404 unsigned num = VN.lookup_or_add(I);
1406 // Collapse PHI nodes
1407 if (PHINode* p = dyn_cast<PHINode>(I)) {
1408 Value* constVal = CollapsePhi(p);
// Purge the collapsed PHI from the phi map before replacing its uses.
1411 for (PhiMapType::iterator PI = phiMap.begin(), PE = phiMap.end();
1413 if (PI->second.count(p))
1414 PI->second.erase(p);
1416 p->replaceAllUsesWith(constVal);
1417 toErase.push_back(p);
1419 // Perform value-number based elimination
1420 } else if (currAvail.test(num)) {
1421 Value* repl = find_leader(currAvail, num);
// Calls that touch memory need extra care: equal value numbers alone do not
// prove the two calls observe the same memory state, so consult memdep.
1423 if (CallInst* CI = dyn_cast<CallInst>(I)) {
1424 AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
1425 if (!AA.doesNotAccessMemory(CI)) {
1426 MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
1427 if (cast<Instruction>(repl)->getParent() != CI->getParent() ||
1428 MD.getDependency(CI) != MD.getDependency(cast<CallInst>(repl))) {
1429 // There must be an intervening may-alias store, so nothing from
1430 // this point on will be able to be replaced with the preceding call
1431 currAvail.erase(repl);
1432 currAvail.insert(I);
// Safe to eliminate: drop I's memdep entry, then replace all uses with the
// available leader and queue I for deletion.
1440 MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
1441 MD.removeInstruction(I);
1444 I->replaceAllUsesWith(repl);
1445 toErase.push_back(I);
1447 } else if (!I->isTerminator()) {
// First occurrence of this value number: record it as available.
1449 currAvail.insert(I);
1455 // GVN::runOnFunction - This is the main transformation entry point for a
// function. Drives iterateOnFunction to a fixed point and reports whether
// the function was modified at all.
1458 bool GVN::runOnFunction(Function& F) {
1459 VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
1461 bool changed = false;
1462 bool shouldContinue = true;
// Re-run GVN iterations until one makes no change (fixed point); any
// iteration that changed something may expose further opportunities.
1464 while (shouldContinue) {
1465 shouldContinue = iterateOnFunction(F);
1466 changed |= shouldContinue;
1473 // GVN::iterateOnFunction - Executes one iteration of GVN
// Walks the dominator tree top-down, processing each instruction against
// the availability set inherited from its immediate dominator. Returns true
// if this iteration changed the function.
1474 bool GVN::iterateOnFunction(Function &F) {
1475 // Clean out global sets from any previous functions
1477 availableOut.clear();
1480 bool changed_function = false;
1482 DominatorTree &DT = getAnalysis<DominatorTree>();
// Instructions to delete are collected here and erased only after the walk,
// so the basic-block iterators below are never invalidated mid-traversal.
1484 SmallVector<Instruction*, 4> toErase;
1485 DenseMap<Value*, LoadInst*> lastSeenLoad;
1487 // Top-down walk of the dominator tree
1488 for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
1489 E = df_end(DT.getRootNode()); DI != E; ++DI) {
1491 // Get the set to update for this block
1492 ValueNumberedSet& currAvail = availableOut[DI->getBlock()];
// lastSeenLoad is per-block state for redundant-load detection.
1493 lastSeenLoad.clear();
1495 BasicBlock* BB = DI->getBlock();
1497 // A block inherits AVAIL_OUT from its dominator
1498 if (DI->getIDom() != 0)
1499 currAvail = availableOut[DI->getIDom()->getBlock()];
1501 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
1503 changed_function |= processInstruction(BI, currAvail,
1504 lastSeenLoad, toErase);
1506 NumGVNInstr += toErase.size();
1508 // Avoid iterator invalidation
1511 for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
1512 E = toErase.end(); I != E; ++I)
1513 (*I)->eraseFromParent();
1519 return changed_function;